{"target": 0, "idx": 3, "func": "int ff_get_wav_header(AVFormatContext *s, AVIOContext *pb, AVCodecContext *codec, int size, int big_endian) { int id; uint64_t bitrate; if (size < 14) { avpriv_request_sample(codec, \"wav header size < 14\"); return AVERROR_INVALIDDATA; } codec->codec_type = AVMEDIA_TYPE_AUDIO; if (!big_endian) { id = avio_rl16(pb); if (id != 0x0165) { codec->channels = avio_rl16(pb); codec->sample_rate = avio_rl32(pb); bitrate = avio_rl32(pb) * 8LL; codec->block_align = avio_rl16(pb); } } else { id = avio_rb16(pb); codec->channels = avio_rb16(pb); codec->sample_rate = avio_rb32(pb); bitrate = avio_rb32(pb) * 8LL; codec->block_align = avio_rb16(pb); } if (size == 14) { /* We're dealing with plain vanilla WAVEFORMAT */ codec->bits_per_coded_sample = 8; } else { if (!big_endian) { codec->bits_per_coded_sample = avio_rl16(pb); } else { codec->bits_per_coded_sample = avio_rb16(pb); } } if (id == 0xFFFE) { codec->codec_tag = 0; } else { codec->codec_tag = id; codec->codec_id = ff_wav_codec_get_id(id, codec->bits_per_coded_sample); } if (size >= 18 && id != 0x0165) { /* We're obviously dealing with WAVEFORMATEX */ int cbSize = avio_rl16(pb); /* cbSize */ if (big_endian) { avpriv_report_missing_feature(codec, \"WAVEFORMATEX support for RIFX files\\n\"); return AVERROR_PATCHWELCOME; } size -= 18; cbSize = FFMIN(size, cbSize); if (cbSize >= 22 && id == 0xfffe) { /* WAVEFORMATEXTENSIBLE */ parse_waveformatex(pb, codec); cbSize -= 22; size -= 22; } if (cbSize > 0) { av_freep(&codec->extradata); if (ff_get_extradata(codec, pb, cbSize) < 0) return AVERROR(ENOMEM); size -= cbSize; } /* It is possible for the chunk to contain garbage at the end */ if (size > 0) avio_skip(pb, size); } else if (id == 0x0165 && size >= 32) { int nb_streams, i; size -= 4; av_freep(&codec->extradata); if (ff_get_extradata(codec, pb, size) < 0) return AVERROR(ENOMEM); nb_streams = AV_RL16(codec->extradata + 4); codec->sample_rate = AV_RL32(codec->extradata + 12); codec->channels = 0; bitrate = 0; if (size < 8 + nb_streams * 20) return AVERROR_INVALIDDATA; for (i = 0; i < nb_streams; i++) codec->channels += codec->extradata[8 + i * 20 + 17]; } if (bitrate > INT_MAX) { if (s->error_recognition & AV_EF_EXPLODE) { av_log(s, AV_LOG_ERROR, \"The bitrate %\"PRIu64\" is too large.\\n\", bitrate); return AVERROR_INVALIDDATA; } else { av_log(s, AV_LOG_WARNING, \"The bitrate %\"PRIu64\" is too large, resetting to 0.\", bitrate); codec->bit_rate = 0; } } else { codec->bit_rate = bitrate; } if (codec->sample_rate <= 0) { av_log(s, AV_LOG_ERROR, \"Invalid sample rate: %d\\n\", codec->sample_rate); return AVERROR_INVALIDDATA; } if (codec->codec_id == AV_CODEC_ID_AAC_LATM) { /* Channels and sample_rate values are those prior to applying SBR * and/or PS. 
*/ codec->channels = 0; codec->sample_rate = 0; } /* override bits_per_coded_sample for G.726 */ if (codec->codec_id == AV_CODEC_ID_ADPCM_G726 && codec->sample_rate) codec->bits_per_coded_sample = codec->bit_rate / codec->sample_rate; return 0; }"} {"target": 1, "idx": 12, "func": "static int xen_9pfs_connect(struct XenDevice *xendev) { int i; Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); V9fsState *s = &xen_9pdev->state; QemuOpts *fsdev; if (xenstore_read_fe_int(&xen_9pdev->xendev, \"num-rings\", &xen_9pdev->num_rings) == -1 || xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) { return -1; } xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(Xen9pfsRing)); for (i = 0; i < xen_9pdev->num_rings; i++) { char *str; int ring_order; xen_9pdev->rings[i].priv = xen_9pdev; xen_9pdev->rings[i].evtchn = -1; xen_9pdev->rings[i].local_port = -1; str = g_strdup_printf(\"ring-ref%u\", i); if (xenstore_read_fe_int(&xen_9pdev->xendev, str, &xen_9pdev->rings[i].ref) == -1) { goto out; } str = g_strdup_printf(\"event-channel-%u\", i); if (xenstore_read_fe_int(&xen_9pdev->xendev, str, &xen_9pdev->rings[i].evtchn) == -1) { goto out; } xen_9pdev->rings[i].intf = xengnttab_map_grant_ref( xen_9pdev->xendev.gnttabdev, xen_9pdev->xendev.dom, xen_9pdev->rings[i].ref, PROT_READ | PROT_WRITE); if (!xen_9pdev->rings[i].intf) { goto out; } ring_order = xen_9pdev->rings[i].intf->ring_order; if (ring_order > MAX_RING_ORDER) { goto out; } xen_9pdev->rings[i].ring_order = ring_order; xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs( xen_9pdev->xendev.gnttabdev, (1 << ring_order), xen_9pdev->xendev.dom, xen_9pdev->rings[i].intf->ref, PROT_READ | PROT_WRITE); if (!xen_9pdev->rings[i].data) { goto out; } xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data; xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data + XEN_FLEX_RING_SIZE(ring_order); xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]); xen_9pdev->rings[i].out_cons = 0; xen_9pdev->rings[i].out_size = 0; xen_9pdev->rings[i].inprogress = false; xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0); if (xen_9pdev->rings[i].evtchndev == NULL) { goto out; } fcntl(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), F_SETFD, FD_CLOEXEC); xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain (xen_9pdev->rings[i].evtchndev, xendev->dom, xen_9pdev->rings[i].evtchn); if (xen_9pdev->rings[i].local_port == -1) { xen_pv_printf(xendev, 0, \"xenevtchn_bind_interdomain failed port=%d\\n\", xen_9pdev->rings[i].evtchn); goto out; } xen_pv_printf(xendev, 2, \"bind evtchn port %d\\n\", xendev->local_port); qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]); } xen_9pdev->security_model = xenstore_read_be_str(xendev, \"security_model\"); xen_9pdev->path = xenstore_read_be_str(xendev, \"path\"); xen_9pdev->id = s->fsconf.fsdev_id = g_strdup_printf(\"xen9p%d\", xendev->dev); xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, \"tag\"); v9fs_register_transport(s, &xen_9p_transport); fsdev = qemu_opts_create(qemu_find_opts(\"fsdev\"), s->fsconf.tag, 1, NULL); qemu_opt_set(fsdev, \"fsdriver\", \"local\", NULL); qemu_opt_set(fsdev, \"path\", xen_9pdev->path, NULL); qemu_opt_set(fsdev, \"security_model\", xen_9pdev->security_model, NULL); qemu_opts_set_id(fsdev, s->fsconf.fsdev_id); qemu_fsdev_add(fsdev); v9fs_device_realize_common(s, NULL); return 0; out: xen_9pfs_free(xendev); return -1; }"} {"target": 1, "idx": 35, "func": "static int 
subframe_count_exact(FlacEncodeContext *s, FlacSubframe *sub, int pred_order) { int p, porder, psize; int i, part_end; int count = 0; /* subframe header */ count += 8; /* subframe */ if (sub->type == FLAC_SUBFRAME_CONSTANT) { count += sub->obits; } else if (sub->type == FLAC_SUBFRAME_VERBATIM) { count += s->frame.blocksize * sub->obits; } else { /* warm-up samples */ count += pred_order * sub->obits; /* LPC coefficients */ if (sub->type == FLAC_SUBFRAME_LPC) count += 4 + 5 + pred_order * s->options.lpc_coeff_precision; /* rice-encoded block */ count += 2; /* partition order */ porder = sub->rc.porder; psize = s->frame.blocksize >> porder; count += 4; /* residual */ i = pred_order; part_end = psize; for (p = 0; p < 1 << porder; p++) { int k = sub->rc.params[p]; count += 4; count += rice_count_exact(&sub->residual[i], part_end - i, k); i = part_end; part_end = FFMIN(s->frame.blocksize, part_end + psize); } } return count; }"} {"target": 0, "idx": 43, "func": "static void ppc_spapr_init(QEMUMachineInitArgs *args) { ram_addr_t ram_size = args->ram_size; const char *cpu_model = args->cpu_model; const char *kernel_filename = args->kernel_filename; const char *kernel_cmdline = args->kernel_cmdline; const char *initrd_filename = args->initrd_filename; const char *boot_device = args->boot_order; PowerPCCPU *cpu; CPUPPCState *env; PCIHostState *phb; int i; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); hwaddr rma_alloc_size; uint32_t initrd_base = 0; long kernel_size = 0, initrd_size = 0; long load_limit, rtas_limit, fw_size; bool kernel_le = false; char *filename; msi_supported = true; spapr = g_malloc0(sizeof(*spapr)); QLIST_INIT(&spapr->phbs); cpu_ppc_hypercall = emulate_spapr_hypercall; /* Allocate RMA if necessary */ rma_alloc_size = kvmppc_alloc_rma(\"ppc_spapr.rma\", sysmem); if (rma_alloc_size == -1) { hw_error(\"qemu: Unable to create RMA\\n\"); exit(1); } if (rma_alloc_size && (rma_alloc_size < ram_size)) { spapr->rma_size = rma_alloc_size; } else { spapr->rma_size = ram_size; /* With KVM, we don't actually know whether KVM supports an * unbounded RMA (PR KVM) or is limited by the hash table size * (HV KVM using VRMA), so we always assume the latter * * In that case, we also limit the initial allocations for RTAS * etc... to 256M since we have no way to know what the VRMA size * is going to be as it depends on the size of the hash table * isn't determined yet. */ if (kvm_enabled()) { spapr->vrma_adjust = 1; spapr->rma_size = MIN(spapr->rma_size, 0x10000000); } } /* We place the device tree and RTAS just below either the top of the RMA, * or just below 2GB, whichever is lowere, so that it can be * processed with 32-bit real mode code if necessary */ rtas_limit = MIN(spapr->rma_size, 0x80000000); spapr->rtas_addr = rtas_limit - RTAS_MAX_SIZE; spapr->fdt_addr = spapr->rtas_addr - FDT_MAX_SIZE; load_limit = spapr->fdt_addr - FW_OVERHEAD; /* We aim for a hash table of size 1/128 the size of RAM. The * normal rule of thumb is 1/64 the size of RAM, but that's much * more than needed for the Linux guests we support. */ spapr->htab_shift = 18; /* Minimum architected size */ while (spapr->htab_shift <= 46) { if ((1ULL << (spapr->htab_shift + 7)) >= ram_size) { break; } spapr->htab_shift++; } /* Set up Interrupt Controller before we create the VCPUs */ spapr->icp = xics_system_init(smp_cpus * kvmppc_smt_threads() / smp_threads, XICS_IRQS); spapr->next_irq = XICS_IRQ_BASE; /* init CPUs */ if (cpu_model == NULL) { cpu_model = kvm_enabled() ? 
\"host\" : \"POWER7\"; } for (i = 0; i < smp_cpus; i++) { cpu = cpu_ppc_init(cpu_model); if (cpu == NULL) { fprintf(stderr, \"Unable to find PowerPC CPU definition\\n\"); exit(1); } env = &cpu->env; xics_cpu_setup(spapr->icp, cpu); /* Set time-base frequency to 512 MHz */ cpu_ppc_tb_init(env, TIMEBASE_FREQ); /* PAPR always has exception vectors in RAM not ROM. To ensure this, * MSR[IP] should never be set. */ env->msr_mask &= ~(1 << 6); /* Tell KVM that we're in PAPR mode */ if (kvm_enabled()) { kvmppc_set_papr(cpu); } qemu_register_reset(spapr_cpu_reset, cpu); } /* allocate RAM */ spapr->ram_limit = ram_size; if (spapr->ram_limit > rma_alloc_size) { ram_addr_t nonrma_base = rma_alloc_size; ram_addr_t nonrma_size = spapr->ram_limit - rma_alloc_size; memory_region_init_ram(ram, NULL, \"ppc_spapr.ram\", nonrma_size); vmstate_register_ram_global(ram); memory_region_add_subregion(sysmem, nonrma_base, ram); } filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, \"spapr-rtas.bin\"); spapr->rtas_size = load_image_targphys(filename, spapr->rtas_addr, rtas_limit - spapr->rtas_addr); if (spapr->rtas_size < 0) { hw_error(\"qemu: could not load LPAR rtas '%s'\\n\", filename); exit(1); } if (spapr->rtas_size > RTAS_MAX_SIZE) { hw_error(\"RTAS too big ! 0x%lx bytes (max is 0x%x)\\n\", spapr->rtas_size, RTAS_MAX_SIZE); exit(1); } g_free(filename); /* Set up EPOW events infrastructure */ spapr_events_init(spapr); /* Set up VIO bus */ spapr->vio_bus = spapr_vio_bus_init(); for (i = 0; i < MAX_SERIAL_PORTS; i++) { if (serial_hds[i]) { spapr_vty_create(spapr->vio_bus, serial_hds[i]); } } /* We always have at least the nvram device on VIO */ spapr_create_nvram(spapr); /* Set up PCI */ spapr_pci_msi_init(spapr, SPAPR_PCI_MSI_WINDOW); spapr_pci_rtas_init(); phb = spapr_create_phb(spapr, 0); for (i = 0; i < nb_nics; i++) { NICInfo *nd = &nd_table[i]; if (!nd->model) { nd->model = g_strdup(\"ibmveth\"); } if (strcmp(nd->model, \"ibmveth\") == 0) { spapr_vlan_create(spapr->vio_bus, nd); } else { pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL); } } for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) { spapr_vscsi_create(spapr->vio_bus); } /* Graphics */ if (spapr_vga_init(phb->bus)) { spapr->has_graphics = true; } if (usb_enabled(spapr->has_graphics)) { pci_create_simple(phb->bus, -1, \"pci-ohci\"); if (spapr->has_graphics) { usbdevice_create(\"keyboard\"); usbdevice_create(\"mouse\"); } } if (spapr->rma_size < (MIN_RMA_SLOF << 20)) { fprintf(stderr, \"qemu: pSeries SLOF firmware requires >= \" \"%ldM guest RMA (Real Mode Area memory)\\n\", MIN_RMA_SLOF); exit(1); } if (kernel_filename) { uint64_t lowaddr = 0; kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, NULL, &lowaddr, NULL, 1, ELF_MACHINE, 0); if (kernel_size < 0) { kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, NULL, &lowaddr, NULL, 0, ELF_MACHINE, 0); kernel_le = kernel_size > 0; } if (kernel_size < 0) { kernel_size = load_image_targphys(kernel_filename, KERNEL_LOAD_ADDR, load_limit - KERNEL_LOAD_ADDR); } if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } /* load initrd */ if (initrd_filename) { /* Try to locate the initrd in the gap between the kernel * and the firmware. 
Add a bit of space just in case */ initrd_base = (KERNEL_LOAD_ADDR + kernel_size + 0x1ffff) & ~0xffff; initrd_size = load_image_targphys(initrd_filename, initrd_base, load_limit - initrd_base); if (initrd_size < 0) { fprintf(stderr, \"qemu: could not load initial ram disk '%s'\\n\", initrd_filename); exit(1); } } else { initrd_base = 0; initrd_size = 0; } } if (bios_name == NULL) { bios_name = FW_FILE_NAME; } filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE); if (fw_size < 0) { hw_error(\"qemu: could not load LPAR rtas '%s'\\n\", filename); exit(1); } g_free(filename); spapr->entry_point = 0x100; vmstate_register(NULL, 0, &vmstate_spapr, spapr); register_savevm_live(NULL, \"spapr/htab\", -1, 1, &savevm_htab_handlers, spapr); /* Prepare the device tree */ spapr->fdt_skel = spapr_create_fdt_skel(cpu_model, initrd_base, initrd_size, kernel_size, kernel_le, boot_device, kernel_cmdline, spapr->epow_irq); assert(spapr->fdt_skel != NULL); }"} {"target": 0, "idx": 60, "func": "static int mpeg1_decode_sequence(AVCodecContext *avctx, UINT8 *buf, int buf_size) { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; int width, height, i, v, j; float aspect; init_get_bits(&s->gb, buf, buf_size); width = get_bits(&s->gb, 12); height = get_bits(&s->gb, 12); s->aspect_ratio_info= get_bits(&s->gb, 4); if(!s->mpeg2){ aspect= mpeg1_aspect[s->aspect_ratio_info]; if(aspect!=0.0) avctx->aspect_ratio= width/(aspect*height); } s->frame_rate_index = get_bits(&s->gb, 4); if (s->frame_rate_index == 0) return -1; s->bit_rate = get_bits(&s->gb, 18) * 400; if (get_bits1(&s->gb) == 0) /* marker */ return -1; if (width <= 0 || height <= 0 || (width % 2) != 0 || (height % 2) != 0) return -1; if (width != s->width || height != s->height) { /* start new mpeg1 context decoding */ s->out_format = FMT_MPEG1; if (s1->mpeg_enc_ctx_allocated) { MPV_common_end(s); } s->width = width; s->height = height; avctx->has_b_frames= 1; s->avctx = avctx; avctx->width = width; avctx->height = height; if (s->frame_rate_index >= 9) { /* at least give a valid frame rate (some old mpeg1 have this) */ avctx->frame_rate = 25 * FRAME_RATE_BASE; } else { avctx->frame_rate = frame_rate_tab[s->frame_rate_index]; } s->frame_rate = avctx->frame_rate; avctx->bit_rate = s->bit_rate; if (MPV_common_init(s) < 0) return -1; s1->mpeg_enc_ctx_allocated = 1; } skip_bits(&s->gb, 10); /* vbv_buffer_size */ skip_bits(&s->gb, 1); /* get matrix */ if (get_bits1(&s->gb)) { for(i=0;i<64;i++) { v = get_bits(&s->gb, 8); j = s->intra_scantable.permutated[i]; s->intra_matrix[j] = v; s->chroma_intra_matrix[j] = v; } #ifdef DEBUG dprintf(\"intra matrix present\\n\"); for(i=0;i<64;i++) dprintf(\" %d\", s->intra_matrix[s->intra_scantable.permutated[i]]); printf(\"\\n\"); #endif } else { for(i=0;i<64;i++) { int j= s->idct_permutation[i]; v = ff_mpeg1_default_intra_matrix[i]; s->intra_matrix[j] = v; s->chroma_intra_matrix[j] = v; } } if (get_bits1(&s->gb)) { for(i=0;i<64;i++) { v = get_bits(&s->gb, 8); j = s->intra_scantable.permutated[i]; s->inter_matrix[j] = v; s->chroma_inter_matrix[j] = v; } #ifdef DEBUG dprintf(\"non intra matrix present\\n\"); for(i=0;i<64;i++) dprintf(\" %d\", s->inter_matrix[s->intra_scantable.permutated[i]]); printf(\"\\n\"); #endif } else { for(i=0;i<64;i++) { int j= s->idct_permutation[i]; v = ff_mpeg1_default_non_intra_matrix[i]; s->inter_matrix[j] = v; s->chroma_inter_matrix[j] = v; } } /* we set mpeg2 parameters so that it emulates mpeg1 */ 
s->progressive_sequence = 1; s->progressive_frame = 1; s->picture_structure = PICT_FRAME; s->frame_pred_frame_dct = 1; s->mpeg2 = 0; avctx->sub_id = 1; /* indicates mpeg1 */ return 0; }"} {"target": 0, "idx": 70, "func": "static uint32_t drc_set_unusable(sPAPRDRConnector *drc) { drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_UNUSABLE; if (drc->awaiting_release) { uint32_t drc_index = spapr_drc_index(drc); trace_spapr_drc_set_allocation_state_finalizing(drc_index); spapr_drc_detach(drc); } return RTAS_OUT_SUCCESS; }"} {"target": 1, "idx": 83, "func": "static void scsi_read_request(SCSIDiskReq *r) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; if (r->sector_count == (uint32_t)-1) { DPRINTF(\"Read buf_len=%zd\\n\", r->iov.iov_len); r->sector_count = 0; scsi_req_data(&r->req, r->iov.iov_len); return; } DPRINTF(\"Read sector_count=%d\\n\", r->sector_count); if (r->sector_count == 0) { scsi_command_complete(r, GOOD, NO_SENSE); return; } /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); n = r->sector_count; if (n > SCSI_DMA_BUF_SIZE / 512) n = SCSI_DMA_BUF_SIZE / 512; r->iov.iov_len = n * 512; qemu_iovec_init_external(&r->qiov, &r->iov, 1); r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n, scsi_read_complete, r); if (r->req.aiocb == NULL) { scsi_read_complete(r, -EIO); } }"} {"target": 1, "idx": 84, "func": "static void lm32_evr_init(MachineState *machine) { const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; LM32CPU *cpu; CPULM32State *env; DriveInfo *dinfo; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *phys_ram = g_new(MemoryRegion, 1); qemu_irq irq[32]; ResetInfo *reset_info; int i; /* memory map */ hwaddr flash_base = 0x04000000; size_t flash_sector_size = 256 * 1024; size_t flash_size = 32 * 1024 * 1024; hwaddr ram_base = 0x08000000; size_t ram_size = 64 * 1024 * 1024; hwaddr timer0_base = 0x80002000; hwaddr uart0_base = 0x80006000; hwaddr timer1_base = 0x8000a000; int uart0_irq = 0; int timer0_irq = 1; int timer1_irq = 3; reset_info = g_malloc0(sizeof(ResetInfo)); if (cpu_model == NULL) { cpu_model = \"lm32-full\"; } cpu = LM32_CPU(cpu_generic_init(TYPE_LM32_CPU, cpu_model)); if (cpu == NULL) { fprintf(stderr, \"qemu: unable to find CPU '%s'\\n\", cpu_model); exit(1); } env = &cpu->env; reset_info->cpu = cpu; reset_info->flash_base = flash_base; memory_region_allocate_system_memory(phys_ram, NULL, \"lm32_evr.sdram\", ram_size); memory_region_add_subregion(address_space_mem, ram_base, phys_ram); dinfo = drive_get(IF_PFLASH, 0, 0); /* Spansion S29NS128P */ pflash_cfi02_register(flash_base, NULL, \"lm32_evr.flash\", flash_size, dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, flash_sector_size, flash_size / flash_sector_size, 1, 2, 0x01, 0x7e, 0x43, 0x00, 0x555, 0x2aa, 1); /* create irq lines */ env->pic_state = lm32_pic_init(qemu_allocate_irq(cpu_irq_handler, cpu, 0)); for (i = 0; i < 32; i++) { irq[i] = qdev_get_gpio_in(env->pic_state, i); } lm32_uart_create(uart0_base, irq[uart0_irq], serial_hds[0]); sysbus_create_simple(\"lm32-timer\", timer0_base, irq[timer0_irq]); sysbus_create_simple(\"lm32-timer\", timer1_base, irq[timer1_irq]); /* make sure juart isn't the first chardev */ env->juart_state = lm32_juart_init(serial_hds[1]); reset_info->bootstrap_pc = flash_base; if (kernel_filename) { uint64_t entry; int kernel_size; kernel_size = load_elf(kernel_filename, NULL, NULL, &entry, NULL, NULL, 1, EM_LATTICEMICO32, 0, 0); reset_info->bootstrap_pc = entry; if (kernel_size < 0) { kernel_size = load_image_targphys(kernel_filename, ram_base, ram_size); reset_info->bootstrap_pc = ram_base; } if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } } qemu_register_reset(main_cpu_reset, reset_info); }"} {"target": 0, "idx": 108, "func": "aio_write_f(int argc, char **argv) { char *p; int count = 0; int nr_iov, i, c; int pattern = 0xcd; struct aio_ctx *ctx = calloc(1, sizeof(struct aio_ctx)); BlockDriverAIOCB *acb; while ((c = getopt(argc, argv, \"CqP:\")) != EOF) { switch (c) { case 'C': ctx->Cflag = 1; break; case 'q': ctx->qflag = 1; break; case 'P': pattern = atoi(optarg); break; default: return command_usage(&aio_write_cmd); } } if (optind > argc - 2) return command_usage(&aio_write_cmd); ctx->offset = cvtnum(argv[optind]); if (ctx->offset < 0) { printf(\"non-numeric length argument -- %s\\n\", argv[optind]); return 0; } optind++; if (ctx->offset & 0x1ff) { printf(\"offset %lld is not sector aligned\\n\", (long long)ctx->offset); return 0; } if (count & 0x1ff) { printf(\"count %d is not sector aligned\\n\", count); return 0; } for (i = optind; i < argc; i++) { size_t len; len = cvtnum(argv[optind]); if (len < 0) { printf(\"non-numeric length argument -- %s\\n\", argv[i]); return 0; } count += len; } nr_iov = argc - optind; qemu_iovec_init(&ctx->qiov, nr_iov); ctx->buf = p = qemu_io_alloc(count, pattern); for (i = 0; i < nr_iov; i++) { size_t len; len = cvtnum(argv[optind]); if (len < 0) { printf(\"non-numeric length argument -- %s\\n\", argv[optind]); return 0; } qemu_iovec_add(&ctx->qiov, p, len); p += len; optind++; } gettimeofday(&ctx->t1, NULL); acb = bdrv_aio_writev(bs, ctx->offset >> 9, &ctx->qiov, ctx->qiov.size >> 9, aio_write_done, ctx); if (!acb) return -EIO; return 0; }"} {"target": 1, "idx": 121, "func": "static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, DCTELEM *block) { int i; int dc = block[0]; const uint8_t *cm; dc = (3 * dc + 1) >> 1; dc = (3 * dc + 16) >> 5; cm = ff_cropTbl + MAX_NEG_CROP + dc; for(i = 0; i < 8; i++){ dest[0] = cm[dest[0]]; dest[1] = cm[dest[1]]; dest[2] = cm[dest[2]]; dest[3] = cm[dest[3]]; dest[4] = cm[dest[4]]; dest[5] = cm[dest[5]]; dest[6] = cm[dest[6]]; dest[7] = cm[dest[7]]; dest += linesize; } }"} {"target": 1, "idx": 122, "func": "static int vmdk_parse_extents(const char *desc, BlockDriverState *bs, const char *desc_file_path) { int ret; char access[11]; char type[11]; char fname[512]; const char *p = desc; int64_t sectors = 0; int64_t flat_offset; char extent_path[PATH_MAX]; BlockDriverState *extent_file; Error *local_err = NULL; while (*p) { /* parse extent line: * RW [size in sectors] FLAT \"file-name.vmdk\" OFFSET * 
or * RW [size in sectors] SPARSE \"file-name.vmdk\" */ flat_offset = -1; ret = sscanf(p, \"%10s %\" SCNd64 \" %10s \\\"%511[^\\n\\r\\\"]\\\" %\" SCNd64, access, &sectors, type, fname, &flat_offset); if (ret < 4 || strcmp(access, \"RW\")) { goto next_line; } else if (!strcmp(type, \"FLAT\")) { if (ret != 5 || flat_offset < 0) { return -EINVAL; } } else if (ret != 4) { return -EINVAL; } if (sectors <= 0 || (strcmp(type, \"FLAT\") && strcmp(type, \"SPARSE\") && strcmp(type, \"VMFS\") && strcmp(type, \"VMFSSPARSE\")) || (strcmp(access, \"RW\"))) { goto next_line; } path_combine(extent_path, sizeof(extent_path), desc_file_path, fname); ret = bdrv_file_open(&extent_file, extent_path, NULL, bs->open_flags, &local_err); if (ret) { qerror_report_err(local_err); error_free(local_err); return ret; } /* save to extents array */ if (!strcmp(type, \"FLAT\") || !strcmp(type, \"VMFS\")) { /* FLAT extent */ VmdkExtent *extent; ret = vmdk_add_extent(bs, extent_file, true, sectors, 0, 0, 0, 0, sectors, &extent); if (ret < 0) { return ret; } extent->flat_start_offset = flat_offset << 9; } else if (!strcmp(type, \"SPARSE\") || !strcmp(type, \"VMFSSPARSE\")) { /* SPARSE extent and VMFSSPARSE extent are both \"COWD\" sparse file*/ ret = vmdk_open_sparse(bs, extent_file, bs->open_flags); if (ret) { bdrv_unref(extent_file); return ret; } } else { fprintf(stderr, \"VMDK: Not supported extent type \\\"%s\\\"\"\".\\n\", type); return -ENOTSUP; } next_line: /* move to next line */ while (*p && *p != '\\n') { p++; } p++; } return 0; }"} {"target": 1, "idx": 149, "func": "static void compute_rematrixing_strategy(AC3EncodeContext *s) { int nb_coefs; int blk, bnd, i; AC3Block *block, *block0; s->num_rematrixing_bands = 4; if (s->rematrixing & AC3_REMATRIXING_IS_STATIC) return; nb_coefs = FFMIN(s->nb_coefs[0], s->nb_coefs[1]); for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { block = &s->blocks[blk]; block->new_rematrixing_strategy = !blk; for (bnd = 0; bnd < s->num_rematrixing_bands; bnd++) { /* calculate calculate sum of squared coeffs for one band in one block */ int start = ff_ac3_rematrix_band_tab[bnd]; int end = FFMIN(nb_coefs, ff_ac3_rematrix_band_tab[bnd+1]); CoefSumType sum[4] = {0,}; for (i = start; i < end; i++) { CoefType lt = block->mdct_coef[0][i]; CoefType rt = block->mdct_coef[1][i]; CoefType md = lt + rt; CoefType sd = lt - rt; sum[0] += lt * lt; sum[1] += rt * rt; sum[2] += md * md; sum[3] += sd * sd; } /* compare sums to determine if rematrixing will be used for this band */ if (FFMIN(sum[2], sum[3]) < FFMIN(sum[0], sum[1])) block->rematrixing_flags[bnd] = 1; else block->rematrixing_flags[bnd] = 0; /* determine if new rematrixing flags will be sent */ if (blk && block->rematrixing_flags[bnd] != block0->rematrixing_flags[bnd]) { block->new_rematrixing_strategy = 1; } } block0 = block; } }"} {"target": 1, "idx": 164, "func": "void OPPROTO op_udiv_T1_T0(void) { uint64_t x0; uint32_t x1; x0 = T0 | ((uint64_t) (env->y) << 32); x1 = T1; x0 = x0 / x1; if (x0 > 0xffffffff) { T0 = 0xffffffff; T1 = 1; } else { T0 = x0; T1 = 0; FORCE_RET();"} {"target": 0, "idx": 176, "func": "void cpu_x86_init_mmu(CPUX86State *env) { a20_enabled = 1; a20_mask = 0xffffffff; last_pg_state = -1; cpu_x86_update_cr0(env); }"} {"target": 0, "idx": 178, "func": "int qemu_cpu_self(void *env) { return 1; }"} {"target": 0, "idx": 179, "func": "static void net_vhost_link_down(VhostUserState *s, bool link_down) { s->nc.link_down = link_down; if (s->nc.peer) { s->nc.peer->link_down = link_down; } if (s->nc.info->link_status_changed) {
s->nc.info->link_status_changed(&s->nc); } if (s->nc.peer && s->nc.peer->info->link_status_changed) { s->nc.peer->info->link_status_changed(s->nc.peer); } }"} {"target": 0, "idx": 185, "func": "static int rv34_decode_mv(RV34DecContext *r, int block_type) { MpegEncContext *s = &r->s; GetBitContext *gb = &s->gb; int i, j, k, l; int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride; int next_bt; memset(r->dmv, 0, sizeof(r->dmv)); for(i = 0; i < num_mvs[block_type]; i++){ r->dmv[i][0] = svq3_get_se_golomb(gb); r->dmv[i][1] = svq3_get_se_golomb(gb); } switch(block_type){ case RV34_MB_TYPE_INTRA: case RV34_MB_TYPE_INTRA16x16: ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); return 0; case RV34_MB_SKIP: if(s->pict_type == AV_PICTURE_TYPE_P){ ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0); break; } case RV34_MB_B_DIRECT: //surprisingly, it uses motion scheme from next reference frame /* wait for the current mb row to be finished */ if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) ff_thread_await_progress(&s->next_picture_ptr->f, s->mb_y - 1, 0); next_bt = s->next_picture_ptr->f.mb_type[s->mb_x + s->mb_y * s->mb_stride]; if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){ ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); ZERO8x2(s->current_picture_ptr->f.motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); }else for(j = 0; j < 2; j++) for(i = 0; i < 2; i++) for(k = 0; k < 2; k++) for(l = 0; l < 2; l++) s->current_picture_ptr->f.motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][k]); if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC rv34_mc_2mv(r, block_type); else rv34_mc_2mv_skip(r); ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); break; case RV34_MB_P_16x16: case RV34_MB_P_MIX16x16: rv34_pred_mv(r, block_type, 0, 0); rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0); break; case RV34_MB_B_FORWARD: case RV34_MB_B_BACKWARD: r->dmv[1][0] = r->dmv[0][0]; r->dmv[1][1] = r->dmv[0][1]; if(r->rv30) rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD); else rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD); rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD); break; case RV34_MB_P_16x8: case RV34_MB_P_8x16: rv34_pred_mv(r, block_type, 0, 0); rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1); if(block_type == RV34_MB_P_16x8){ rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0); rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0); } if(block_type == RV34_MB_P_8x16){ rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0); rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0); } break; case RV34_MB_B_BIDIR: rv34_pred_mv_b (r, block_type, 0); rv34_pred_mv_b (r, block_type, 1); rv34_mc_2mv (r, block_type); break; case RV34_MB_P_8x8: for(i=0;i< 4;i++){ rv34_pred_mv(r, block_type, i, i); rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0); } break; } return 0; }"} {"target": 1, "idx": 200, "func": "static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target, const char *replaces, int64_t speed, uint32_t granularity, int64_t buf_size, BlockdevOnError on_source_error, BlockdevOnError 
on_target_error, bool unmap, BlockCompletionFunc *cb, void *opaque, Error **errp, const BlockJobDriver *driver, bool is_none_mode, BlockDriverState *base) { MirrorBlockJob *s; if (granularity == 0) { granularity = bdrv_get_default_bitmap_granularity(target); } assert ((granularity & (granularity - 1)) == 0); if ((on_source_error == BLOCKDEV_ON_ERROR_STOP || on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) && (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) { error_setg(errp, QERR_INVALID_PARAMETER, \"on-source-error\"); return; } if (buf_size < 0) { error_setg(errp, \"Invalid parameter 'buf-size'\"); return; } if (buf_size == 0) { buf_size = DEFAULT_MIRROR_BUF_SIZE; } /* We can't support this case as long as the block layer can't handle * multiple BlockBackends per BlockDriverState. */ if (replaces) { replaced_bs = bdrv_lookup_bs(replaces, replaces, errp); if (replaced_bs == NULL) { return; } } else { replaced_bs = bs; } if (replaced_bs->blk && target->blk) { error_setg(errp, \"Can't create node with two BlockBackends\"); return; } s = block_job_create(driver, bs, speed, cb, opaque, errp); if (!s) { return; } s->replaces = g_strdup(replaces); s->on_source_error = on_source_error; s->on_target_error = on_target_error; s->target = target; s->is_none_mode = is_none_mode; s->base = base; s->granularity = granularity; s->buf_size = ROUND_UP(buf_size, granularity); s->unmap = unmap; s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp); if (!s->dirty_bitmap) { g_free(s->replaces); block_job_unref(&s->common); return; } bdrv_op_block_all(s->target, s->common.blocker); bdrv_set_enable_write_cache(s->target, true); if (s->target->blk) { blk_set_on_error(s->target->blk, on_target_error, on_target_error); blk_iostatus_enable(s->target->blk); } s->common.co = qemu_coroutine_create(mirror_run); trace_mirror_start(bs, s, s->common.co, opaque); qemu_coroutine_enter(s->common.co, s); }"} {"target": 0, "idx": 218, "func": "START_TEST(qint_get_int_test) { QInt *qi; const int value = 123456; qi = qint_from_int(value); fail_unless(qint_get_int(qi) == value); QDECREF(qi); }"} {"target": 1, "idx": 244, "func": "static int read_code_table(CLLCContext *ctx, GetBitContext *gb, VLC *vlc) { uint8_t symbols[256]; uint8_t bits[256]; uint16_t codes[256]; int num_lens, num_codes, num_codes_sum, prefix; int i, j, count; prefix = 0; count = 0; num_codes_sum = 0; num_lens = get_bits(gb, 5); for (i = 0; i < num_lens; i++) { num_codes = get_bits(gb, 9); num_codes_sum += num_codes; if (num_codes_sum > 256) { av_log(ctx->avctx, AV_LOG_ERROR, \"Too many VLCs (%d) to be read.\\n\", num_codes_sum); for (j = 0; j < num_codes; j++) { symbols[count] = get_bits(gb, 8); bits[count] = i + 1; codes[count] = prefix++; count++; if (prefix > (65535 - 256)/2) { prefix <<= 1; return ff_init_vlc_sparse(vlc, VLC_BITS, count, bits, 1, 1, codes, 2, 2, symbols, 1, 1, 0);"} {"target": 1, "idx": 245, "func": "static int cbs_h265_read_nal_unit(CodedBitstreamContext *ctx, CodedBitstreamUnit *unit) { BitstreamContext bc; int err; err = bitstream_init(&bc, unit->data, 8 * unit->data_size); if (err < 0) return err; switch (unit->type) { case HEVC_NAL_VPS: { H265RawVPS *vps; vps = av_mallocz(sizeof(*vps)); if (!vps) return AVERROR(ENOMEM); err = cbs_h265_read_vps(ctx, &bc, vps); if (err >= 0) err = cbs_h265_replace_vps(ctx, vps); if (err < 0) { av_free(vps); return err; } unit->content = vps; } break; case HEVC_NAL_SPS: { H265RawSPS *sps; sps = av_mallocz(sizeof(*sps)); if (!sps) return AVERROR(ENOMEM); err = cbs_h265_read_sps(ctx, &bc, 
sps); if (err >= 0) err = cbs_h265_replace_sps(ctx, sps); if (err < 0) { av_free(sps); return err; } unit->content = sps; } break; case HEVC_NAL_PPS: { H265RawPPS *pps; pps = av_mallocz(sizeof(*pps)); if (!pps) return AVERROR(ENOMEM); err = cbs_h265_read_pps(ctx, &bc, pps); if (err >= 0) err = cbs_h265_replace_pps(ctx, pps); if (err < 0) { av_free(pps); return err; } unit->content = pps; } break; case HEVC_NAL_TRAIL_N: case HEVC_NAL_TRAIL_R: case HEVC_NAL_TSA_N: case HEVC_NAL_TSA_R: case HEVC_NAL_STSA_N: case HEVC_NAL_STSA_R: case HEVC_NAL_RADL_N: case HEVC_NAL_RADL_R: case HEVC_NAL_RASL_N: case HEVC_NAL_RASL_R: case HEVC_NAL_BLA_W_LP: case HEVC_NAL_BLA_W_RADL: case HEVC_NAL_BLA_N_LP: case HEVC_NAL_IDR_W_RADL: case HEVC_NAL_IDR_N_LP: case HEVC_NAL_CRA_NUT: { H265RawSlice *slice; int pos, len; slice = av_mallocz(sizeof(*slice)); if (!slice) return AVERROR(ENOMEM); err = cbs_h265_read_slice_segment_header(ctx, &bc, &slice->header); if (err < 0) { av_free(slice); return err; } pos = bitstream_tell(&bc); len = unit->data_size; if (!unit->data[len - 1]) { int z; for (z = 0; z < len && !unit->data[len - z - 1]; z++); av_log(ctx->log_ctx, AV_LOG_DEBUG, \"Deleted %d trailing zeroes \" \"from slice data.\\n\", z); len -= z; } slice->data_size = len - pos / 8; slice->data = av_malloc(slice->data_size); if (!slice->data) { av_free(slice); return AVERROR(ENOMEM); } memcpy(slice->data, unit->data + pos / 8, slice->data_size); slice->data_bit_start = pos % 8; unit->content = slice; } break; case HEVC_NAL_AUD: { H265RawAUD *aud; aud = av_mallocz(sizeof(*aud)); if (!aud) return AVERROR(ENOMEM); err = cbs_h265_read_aud(ctx, &bc, aud); if (err < 0) { av_free(aud); return err; } unit->content = aud; } break; default: return AVERROR(ENOSYS); } return 0; }"} {"target": 1, "idx": 246, "func": "static void qpa_audio_fini (void *opaque) { (void) opaque; }"} {"target": 1, "idx": 248, "func": "static void usbredir_bulk_packet(void *priv, uint32_t id, struct usb_redir_bulk_packet_header *bulk_packet, uint8_t *data, int data_len) { USBRedirDevice *dev = priv; uint8_t ep = bulk_packet->endpoint; int len = bulk_packet->length; AsyncURB *aurb; DPRINTF(\"bulk-in status %d ep %02X len %d id %u\\n\", bulk_packet->status, ep, len, id); aurb = async_find(dev, id); if (!aurb) { free(data); return; } if (aurb->bulk_packet.endpoint != bulk_packet->endpoint || aurb->bulk_packet.stream_id != bulk_packet->stream_id) { ERROR(\"return bulk packet mismatch, please report this!\\n\"); len = USB_RET_NAK; } if (aurb->packet) { len = usbredir_handle_status(dev, bulk_packet->status, len); if (len > 0) { usbredir_log_data(dev, \"bulk data in:\", data, data_len); if (data_len <= aurb->packet->len) { memcpy(aurb->packet->data, data, data_len); } else { ERROR(\"bulk buffer too small (%d > %d)\\n\", data_len, aurb->packet->len); len = USB_RET_STALL; } } aurb->packet->len = len; usb_packet_complete(&dev->dev, aurb->packet); } async_free(dev, aurb); free(data); }"} {"target": 1, "idx": 262, "func": "static av_cold int hevc_init_context(AVCodecContext *avctx) { HEVCContext *s = avctx->priv_data; int i; s->avctx = avctx; s->HEVClc = av_mallocz(sizeof(HEVCLocalContext)); if (!s->HEVClc) goto fail; s->HEVClcList[0] = s->HEVClc; s->sList[0] = s; s->cabac_state = av_malloc(HEVC_CONTEXTS); if (!s->cabac_state) goto fail; s->output_frame = av_frame_alloc(); if (!s->output_frame) goto fail; for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) { s->DPB[i].frame = av_frame_alloc(); if (!s->DPB[i].frame) goto fail; s->DPB[i].tf.f = s->DPB[i].frame; } s->max_ra = 
INT_MAX; s->md5_ctx = av_md5_alloc(); if (!s->md5_ctx) goto fail; ff_bswapdsp_init(&s->bdsp); s->context_initialized = 1; s->eos = 0; return 0; fail: hevc_decode_free(avctx); return AVERROR(ENOMEM); }"} {"target": 1, "idx": 265, "func": "static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length) { int n, i, r, g, b; if ((length % 3) != 0 || length > 256 * 3) return AVERROR_INVALIDDATA; /* read the palette */ n = length / 3; for (i = 0; i < n; i++) { r = bytestream2_get_byte(&s->gb); g = bytestream2_get_byte(&s->gb); b = bytestream2_get_byte(&s->gb); s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b; } for (; i < 256; i++) s->palette[i] = (0xFFU << 24); s->state |= PNG_PLTE; bytestream2_skip(&s->gb, 4); /* crc */ return 0; }"} {"target": 1, "idx": 272, "func": "static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr, uint64_t value, unsigned size, MemTxAttrs attrs) { int ret = 0; MSIMessage from = {0}, to = {0}; from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST; from.data = (uint32_t) value; ret = vtd_interrupt_remap_msi(opaque, &from, &to); if (ret) { /* TODO: report error */ VTD_DPRINTF(GENERAL, \"int remap fail for addr 0x%\"PRIx64 \" data 0x%\"PRIx32, from.address, from.data); /* Drop this interrupt */ return MEMTX_ERROR; } VTD_DPRINTF(IR, \"delivering MSI 0x%\"PRIx64\":0x%\"PRIx32 \" for device sid 0x%04x\", to.address, to.data, sid); if (dma_memory_write(&address_space_memory, to.address, &to.data, size)) { VTD_DPRINTF(GENERAL, \"error: fail to write 0x%\"PRIx64 \" value 0x%\"PRIx32, to.address, to.data); } return MEMTX_OK; }"} {"target": 1, "idx": 280, "func": "void st_flush_trace_buffer(void) { if (trace_file_enabled) { flush_trace_file(); } /* Discard written trace records */ trace_idx = 0; }"} {"target": 0, "idx": 289, "func": "VirtIODevice *virtio_blk_init(DeviceState *dev, BlockConf *conf, char **serial) { VirtIOBlock *s; int cylinders, heads, secs; static int virtio_blk_id; DriveInfo *dinfo; if (!conf->bs) { error_report(\"virtio-blk-pci: drive property not set\"); return NULL; } if (!bdrv_is_inserted(conf->bs)) { error_report(\"Device needs media, but drive is empty\"); return NULL; } if (!*serial) { /* try to fall back to value set with legacy -drive serial=... 
*/ dinfo = drive_get_by_blockdev(conf->bs); if (*dinfo->serial) { *serial = strdup(dinfo->serial); } } s = (VirtIOBlock *)virtio_common_init(\"virtio-blk\", VIRTIO_ID_BLOCK, sizeof(struct virtio_blk_config), sizeof(VirtIOBlock)); s->vdev.get_config = virtio_blk_update_config; s->vdev.get_features = virtio_blk_get_features; s->vdev.reset = virtio_blk_reset; s->bs = conf->bs; s->conf = conf; s->serial = *serial; s->rq = NULL; s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1; bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs); s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output); qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s); s->qdev = dev; register_savevm(dev, \"virtio-blk\", virtio_blk_id++, 2, virtio_blk_save, virtio_blk_load, s); bdrv_set_dev_ops(s->bs, &virtio_block_ops, s); bdrv_set_buffer_alignment(s->bs, conf->logical_block_size); bdrv_iostatus_enable(s->bs); add_boot_device_path(conf->bootindex, dev, \"/disk@0,0\"); return &s->vdev; }"} {"target": 1, "idx": 304, "func": "static size_t header_ext_add(char *buf, uint32_t magic, const void *s, size_t len, size_t buflen) { QCowExtension *ext_backing_fmt = (QCowExtension*) buf; size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); if (buflen < ext_len) { return -ENOSPC; } *ext_backing_fmt = (QCowExtension) { .magic = cpu_to_be32(magic), .len = cpu_to_be32(len), }; memcpy(buf + sizeof(QCowExtension), s, len); return ext_len; }"} {"target": 1, "idx": 305, "func": "static int mov_read_strf(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; if (c->fc->nb_streams < 1) return 0; if (atom.size <= 40) return 0; st = c->fc->streams[c->fc->nb_streams-1]; if ((uint64_t)atom.size > (1<<30)) return AVERROR_INVALIDDATA; av_free(st->codec->extradata); st->codec->extradata = av_mallocz(atom.size - 40 + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); st->codec->extradata_size = atom.size - 40; avio_skip(pb, 40); avio_read(pb, st->codec->extradata, atom.size - 40); return 0; }"} {"target": 0, "idx": 321, "func": "static void mem_begin(MemoryListener *listener) { AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener); d->phys_map.ptr = PHYS_MAP_NODE_NIL; }"} {"target": 0, "idx": 325, "func": "static uint32_t tight_palette_buf2rgb(int bpp, const uint8_t *buf) { uint32_t rgb = 0; if (bpp == 32) { rgb |= ((buf[0] & ~1) | !((buf[4] >> 3) & 1)) << 24; rgb |= ((buf[1] & ~1) | !((buf[4] >> 2) & 1)) << 16; rgb |= ((buf[2] & ~1) | !((buf[4] >> 1) & 1)) << 8; rgb |= ((buf[3] & ~1) | !((buf[4] >> 0) & 1)) << 0; } if (bpp == 16) { rgb |= ((buf[0] & ~1) | !((buf[2] >> 1) & 1)) << 8; rgb |= ((buf[1] & ~1) | !((buf[2] >> 0) & 1)) << 0; } return rgb; }"} {"target": 1, "idx": 338, "func": "int net_init_tap(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { const NetdevTapOptions *tap; int fd, vnet_hdr = 0, i = 0, queues; /* for the no-fd, no-helper case */ const char *script = NULL; /* suppress wrong \"uninit'd use\" gcc warning */ const char *downscript = NULL; Error *err = NULL; const char *vhostfdname; char ifname[128]; assert(netdev->type == NET_CLIENT_DRIVER_TAP); tap = &netdev->u.tap; queues = tap->has_queues ? tap->queues : 1; vhostfdname = tap->has_vhostfd ? tap->vhostfd : NULL; /* QEMU vlans does not support multiqueue tap, in this case peer is set. * For -netdev, peer is always NULL. 
*/ if (peer && (tap->has_queues || tap->has_fds || tap->has_vhostfds)) { error_setg(errp, \"Multiqueue tap cannot be used with QEMU vlans\"); return -1; } if (tap->has_fd) { if (tap->has_ifname || tap->has_script || tap->has_downscript || tap->has_vnet_hdr || tap->has_helper || tap->has_queues || tap->has_fds || tap->has_vhostfds) { error_setg(errp, \"ifname=, script=, downscript=, vnet_hdr=, \" \"helper=, queues=, fds=, and vhostfds= \" \"are invalid with fd=\"); return -1; } fd = monitor_fd_param(cur_mon, tap->fd, &err); if (fd == -1) { error_propagate(errp, err); return -1; } fcntl(fd, F_SETFL, O_NONBLOCK); vnet_hdr = tap_probe_vnet_hdr(fd); net_init_tap_one(tap, peer, \"tap\", name, NULL, script, downscript, vhostfdname, vnet_hdr, fd, &err); if (err) { error_propagate(errp, err); return -1; } } else if (tap->has_fds) { char **fds = g_new0(char *, MAX_TAP_QUEUES); char **vhost_fds = g_new0(char *, MAX_TAP_QUEUES); int nfds, nvhosts; if (tap->has_ifname || tap->has_script || tap->has_downscript || tap->has_vnet_hdr || tap->has_helper || tap->has_queues || tap->has_vhostfd) { error_setg(errp, \"ifname=, script=, downscript=, vnet_hdr=, \" \"helper=, queues=, and vhostfd= \" \"are invalid with fds=\"); return -1; } nfds = get_fds(tap->fds, fds, MAX_TAP_QUEUES); if (tap->has_vhostfds) { nvhosts = get_fds(tap->vhostfds, vhost_fds, MAX_TAP_QUEUES); if (nfds != nvhosts) { error_setg(errp, \"The number of fds passed does not match \" \"the number of vhostfds passed\"); goto free_fail; } } for (i = 0; i < nfds; i++) { fd = monitor_fd_param(cur_mon, fds[i], &err); if (fd == -1) { error_propagate(errp, err); goto free_fail; } fcntl(fd, F_SETFL, O_NONBLOCK); if (i == 0) { vnet_hdr = tap_probe_vnet_hdr(fd); } else if (vnet_hdr != tap_probe_vnet_hdr(fd)) { error_setg(errp, \"vnet_hdr not consistent across given tap fds\"); goto free_fail; } net_init_tap_one(tap, peer, \"tap\", name, ifname, script, downscript, tap->has_vhostfds ? vhost_fds[i] : NULL, vnet_hdr, fd, &err); if (err) { error_propagate(errp, err); goto free_fail; } } g_free(fds); g_free(vhost_fds); return 0; free_fail: for (i = 0; i < nfds; i++) { g_free(fds[i]); g_free(vhost_fds[i]); } g_free(fds); g_free(vhost_fds); return -1; } else if (tap->has_helper) { if (tap->has_ifname || tap->has_script || tap->has_downscript || tap->has_vnet_hdr || tap->has_queues || tap->has_vhostfds) { error_setg(errp, \"ifname=, script=, downscript=, vnet_hdr=, \" \"queues=, and vhostfds= are invalid with helper=\"); return -1; } fd = net_bridge_run_helper(tap->helper, tap->has_br ? tap->br : DEFAULT_BRIDGE_INTERFACE, errp); if (fd == -1) { return -1; } fcntl(fd, F_SETFL, O_NONBLOCK); vnet_hdr = tap_probe_vnet_hdr(fd); net_init_tap_one(tap, peer, \"bridge\", name, ifname, script, downscript, vhostfdname, vnet_hdr, fd, &err); if (err) { error_propagate(errp, err); close(fd); return -1; } } else { if (tap->has_vhostfds) { error_setg(errp, \"vhostfds= is invalid if fds= wasn't specified\"); return -1; } script = tap->has_script ? tap->script : DEFAULT_NETWORK_SCRIPT; downscript = tap->has_downscript ? tap->downscript : DEFAULT_NETWORK_DOWN_SCRIPT; if (tap->has_ifname) { pstrcpy(ifname, sizeof ifname, tap->ifname); } else { ifname[0] = '\\0'; } for (i = 0; i < queues; i++) { fd = net_tap_init(tap, &vnet_hdr, i >= 1 ? 
\"no\" : script, ifname, sizeof ifname, queues > 1, errp); if (fd == -1) { return -1; } if (queues > 1 && i == 0 && !tap->has_ifname) { if (tap_fd_get_ifname(fd, ifname)) { error_setg(errp, \"Fail to get ifname\"); close(fd); return -1; } } net_init_tap_one(tap, peer, \"tap\", name, ifname, i >= 1 ? \"no\" : script, i >= 1 ? \"no\" : downscript, vhostfdname, vnet_hdr, fd, &err); if (err) { error_propagate(errp, err); close(fd); return -1; } } } return 0; }"} {"target": 1, "idx": 369, "func": "void stream_start(const char *job_id, BlockDriverState *bs, BlockDriverState *base, const char *backing_file_str, int64_t speed, BlockdevOnError on_error, BlockCompletionFunc *cb, void *opaque, Error **errp) { StreamBlockJob *s; s = block_job_create(job_id, &stream_job_driver, bs, speed, cb, opaque, errp); if (!s) { return; } s->base = base; s->backing_file_str = g_strdup(backing_file_str); s->on_error = on_error; s->common.co = qemu_coroutine_create(stream_run); trace_stream_start(bs, base, s, s->common.co, opaque); qemu_coroutine_enter(s->common.co, s); }"} {"target": 1, "idx": 372, "func": "int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt) { int ret; *got_picture_ptr = 0; if ((avctx->coded_width || avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx)) return -1; avctx->pkt = avpkt; apply_param_change(avctx, avpkt); if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) { if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr, avpkt); else { ret = avctx->codec->decode(avctx, picture, got_picture_ptr, avpkt); picture->pkt_dts = avpkt->dts; picture->sample_aspect_ratio = avctx->sample_aspect_ratio; picture->width = avctx->width; picture->height = avctx->height; picture->format = avctx->pix_fmt; } emms_c(); //needed to avoid an emms_c() call before every return; if (*got_picture_ptr) avctx->frame_number++; } else ret = 0; /* many decoders assign whole AVFrames, thus overwriting extended_data; * make sure it's set correctly */ picture->extended_data = picture->data; return ret; }"} {"target": 1, "idx": 373, "func": "static int omap_gpio_init(SysBusDevice *sbd) { DeviceState *dev = DEVICE(sbd); struct omap_gpif_s *s = OMAP1_GPIO(dev); if (!s->clk) { hw_error(\"omap-gpio: clk not connected\\n\"); } qdev_init_gpio_in(dev, omap_gpio_set, 16); qdev_init_gpio_out(dev, s->omap1.handler, 16); sysbus_init_irq(sbd, &s->omap1.irq); memory_region_init_io(&s->iomem, OBJECT(s), &omap_gpio_ops, &s->omap1, \"omap.gpio\", 0x1000); sysbus_init_mmio(sbd, &s->iomem); return 0; }"} {"target": 1, "idx": 391, "func": "void omap_mcbsp_i2s_attach(struct omap_mcbsp_s *s, I2SCodec *slave) { s->codec = slave; slave->rx_swallow = qemu_allocate_irqs(omap_mcbsp_i2s_swallow, s, 1)[0]; slave->tx_start = qemu_allocate_irqs(omap_mcbsp_i2s_start, s, 1)[0]; }"} {"target": 0, "idx": 399, "func": "int floatx80_eq(floatx80 a, floatx80 b, float_status *status) { if ( ( ( extractFloatx80Exp( a ) == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( a )<<1 ) ) || ( ( extractFloatx80Exp( b ) == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( b )<<1 ) ) ) { float_raise(float_flag_invalid, status); return 0; } return ( a.low == b.low ) && ( ( a.high == b.high ) || ( ( a.low == 0 ) && ( (uint16_t) ( ( a.high | b.high )<<1 ) == 0 ) ) ); }"} {"target": 0, "idx": 402, "func": "void json_lexer_init(JSONLexer 
*lexer, JSONLexerEmitter func) { lexer->emit = func; lexer->state = IN_START; lexer->token = qstring_new(); lexer->x = lexer->y = 0; }"} {"target": 0, "idx": 404, "func": "BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque, int type) { struct qemu_paiocb *acb; acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque); if (!acb) return NULL; acb->aio_type = type; acb->aio_fildes = fd; acb->ev_signo = SIGUSR2; acb->async_context_id = get_async_context_id(); if (qiov) { acb->aio_iov = qiov->iov; acb->aio_niov = qiov->niov; } acb->aio_nbytes = nb_sectors * 512; acb->aio_offset = sector_num * 512; acb->next = posix_aio_state->first_aio; posix_aio_state->first_aio = acb; trace_paio_submit(acb, opaque, sector_num, nb_sectors, type); qemu_paio_submit(acb); return &acb->common; }"} {"target": 1, "idx": 436, "func": "static int crypto_open(URLContext *h, const char *uri, int flags) { const char *nested_url; int ret; CryptoContext *c = h->priv_data; if (!av_strstart(uri, \"crypto+\", &nested_url) && !av_strstart(uri, \"crypto:\", &nested_url)) { av_log(h, AV_LOG_ERROR, \"Unsupported url %s\\n\", uri); ret = AVERROR(EINVAL); goto err; } if (c->keylen < BLOCKSIZE || c->ivlen < BLOCKSIZE) { av_log(h, AV_LOG_ERROR, \"Key or IV not set\\n\"); ret = AVERROR(EINVAL); goto err; } if (flags & AVIO_FLAG_WRITE) { av_log(h, AV_LOG_ERROR, \"Only decryption is supported currently\\n\"); ret = AVERROR(ENOSYS); goto err; } if ((ret = ffurl_open(&c->hd, nested_url, AVIO_FLAG_READ)) < 0) { av_log(h, AV_LOG_ERROR, \"Unable to open input\\n\"); goto err; } c->aes = av_mallocz(av_aes_size); if (!c->aes) { ret = AVERROR(ENOMEM); goto err; } av_aes_init(c->aes, c->key, 128, 1); h->is_streamed = 1; return 0; err: av_free(c->key); av_free(c->iv); return ret; }"} {"target": 1, "idx": 439, "func": "int ff_alloc_entries(AVCodecContext *avctx, int count) { int i; if (avctx->active_thread_type & FF_THREAD_SLICE) { SliceThreadContext *p = avctx->internal->thread_ctx; p->thread_count = avctx->thread_count; p->entries = av_mallocz_array(count, sizeof(int)); if (!p->entries) { return AVERROR(ENOMEM); } p->entries_count = count; p->progress_mutex = av_malloc_array(p->thread_count, sizeof(pthread_mutex_t)); p->progress_cond = av_malloc_array(p->thread_count, sizeof(pthread_cond_t)); for (i = 0; i < p->thread_count; i++) { pthread_mutex_init(&p->progress_mutex[i], NULL); pthread_cond_init(&p->progress_cond[i], NULL); } } return 0; }"} {"target": 1, "idx": 444, "func": "static int floppy_probe_device(const char *filename) { int fd, ret; int prio = 0; struct floppy_struct fdparam; struct stat st; if (strstart(filename, \"/dev/fd\", NULL) && !strstart(filename, \"/dev/fdset/\", NULL)) { prio = 50; } fd = qemu_open(filename, O_RDONLY | O_NONBLOCK); if (fd < 0) { goto out; } ret = fstat(fd, &st); if (ret == -1 || !S_ISBLK(st.st_mode)) { goto outc; } /* Attempt to detect via a floppy specific ioctl */ ret = ioctl(fd, FDGETPRM, &fdparam); if (ret >= 0) prio = 100; outc: qemu_close(fd); out: return prio; }"} {"target": 0, "idx": 461, "func": "static void sun4uv_init(ram_addr_t RAM_size, const char *boot_devices, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, const struct hwdef *hwdef) { CPUState *env; char *filename; m48t59_t *nvram; int ret, linux_boot; unsigned int i; ram_addr_t ram_offset, prom_offset; long initrd_size, kernel_size; PCIBus *pci_bus, *pci_bus2, *pci_bus3; QEMUBH 
*bh; qemu_irq *irq; int drive_index; BlockDriverState *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; BlockDriverState *fd[MAX_FD]; void *fw_cfg; ResetData *reset_info; linux_boot = (kernel_filename != NULL); /* init CPUs */ if (!cpu_model) cpu_model = hwdef->default_cpu_model; env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"Unable to find Sparc CPU definition\\n\"); exit(1); } bh = qemu_bh_new(tick_irq, env); env->tick = ptimer_init(bh); ptimer_set_period(env->tick, 1ULL); bh = qemu_bh_new(stick_irq, env); env->stick = ptimer_init(bh); ptimer_set_period(env->stick, 1ULL); bh = qemu_bh_new(hstick_irq, env); env->hstick = ptimer_init(bh); ptimer_set_period(env->hstick, 1ULL); reset_info = qemu_mallocz(sizeof(ResetData)); reset_info->env = env; reset_info->reset_addr = hwdef->prom_addr + 0x40ULL; qemu_register_reset(main_cpu_reset, reset_info); main_cpu_reset(reset_info); // Override warm reset address with cold start address env->pc = hwdef->prom_addr + 0x20ULL; env->npc = env->pc + 4; /* allocate RAM */ ram_offset = qemu_ram_alloc(RAM_size); cpu_register_physical_memory(0, RAM_size, ram_offset); prom_offset = qemu_ram_alloc(PROM_SIZE_MAX); cpu_register_physical_memory(hwdef->prom_addr, (PROM_SIZE_MAX + TARGET_PAGE_SIZE) & TARGET_PAGE_MASK, prom_offset | IO_MEM_ROM); if (bios_name == NULL) bios_name = PROM_FILENAME; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (filename) { ret = load_elf(filename, hwdef->prom_addr - PROM_VADDR, NULL, NULL, NULL); if (ret < 0) { ret = load_image_targphys(filename, hwdef->prom_addr, (PROM_SIZE_MAX + TARGET_PAGE_SIZE) & TARGET_PAGE_MASK); } qemu_free(filename); } else { ret = -1; } if (ret < 0) { fprintf(stderr, \"qemu: could not load prom '%s'\\n\", bios_name); exit(1); } kernel_size = 0; initrd_size = 0; if (linux_boot) { /* XXX: put correct offset */ kernel_size = load_elf(kernel_filename, 0, NULL, NULL, NULL); if (kernel_size < 0) kernel_size = load_aout(kernel_filename, KERNEL_LOAD_ADDR, ram_size - KERNEL_LOAD_ADDR); if (kernel_size < 0) kernel_size = load_image_targphys(kernel_filename, KERNEL_LOAD_ADDR, ram_size - KERNEL_LOAD_ADDR); if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } /* load initrd */ if (initrd_filename) { initrd_size = load_image_targphys(initrd_filename, INITRD_LOAD_ADDR, ram_size - INITRD_LOAD_ADDR); if (initrd_size < 0) { fprintf(stderr, \"qemu: could not load initial ram disk '%s'\\n\", initrd_filename); exit(1); } } if (initrd_size > 0) { for (i = 0; i < 64 * TARGET_PAGE_SIZE; i += TARGET_PAGE_SIZE) { if (ldl_phys(KERNEL_LOAD_ADDR + i) == 0x48647253) { // HdrS stl_phys(KERNEL_LOAD_ADDR + i + 16, INITRD_LOAD_ADDR); stl_phys(KERNEL_LOAD_ADDR + i + 20, initrd_size); break; } } } } pci_bus = pci_apb_init(APB_SPECIAL_BASE, APB_MEM_BASE, NULL, &pci_bus2, &pci_bus3); isa_mem_base = VGA_BASE; pci_vga_init(pci_bus, 0, 0); // XXX Should be pci_bus3 pci_ebus_init(pci_bus, -1); i = 0; if (hwdef->console_serial_base) { serial_mm_init(hwdef->console_serial_base, 0, NULL, 115200, serial_hds[i], 1); i++; } for(; i < MAX_SERIAL_PORTS; i++) { if (serial_hds[i]) { serial_init(serial_io[i], NULL/*serial_irq[i]*/, 115200, serial_hds[i]); } } for(i = 0; i < MAX_PARALLEL_PORTS; i++) { if (parallel_hds[i]) { parallel_init(parallel_io[i], NULL/*parallel_irq[i]*/, parallel_hds[i]); } } for(i = 0; i < nb_nics; i++) pci_nic_init(&nd_table[i], \"ne2k_pci\", NULL); irq = qemu_allocate_irqs(cpu_set_irq, env, MAX_PILS); if (drive_get_max_bus(IF_IDE) >= MAX_IDE_BUS) { fprintf(stderr, \"qemu: too 
many IDE bus\\n\"); exit(1); } for(i = 0; i < MAX_IDE_BUS * MAX_IDE_DEVS; i++) { drive_index = drive_get_index(IF_IDE, i / MAX_IDE_DEVS, i % MAX_IDE_DEVS); if (drive_index != -1) hd[i] = drives_table[drive_index].bdrv; else hd[i] = NULL; } pci_cmd646_ide_init(pci_bus, hd, 1); /* FIXME: wire up interrupts. */ i8042_init(NULL/*1*/, NULL/*12*/, 0x60); for(i = 0; i < MAX_FD; i++) { drive_index = drive_get_index(IF_FLOPPY, 0, i); if (drive_index != -1) fd[i] = drives_table[drive_index].bdrv; else fd[i] = NULL; } floppy_controller = fdctrl_init(NULL/*6*/, 2, 0, 0x3f0, fd); nvram = m48t59_init(NULL/*8*/, 0, 0x0074, NVRAM_SIZE, 59); sun4u_NVRAM_set_params(nvram, NVRAM_SIZE, \"Sun4u\", RAM_size, boot_devices, KERNEL_LOAD_ADDR, kernel_size, kernel_cmdline, INITRD_LOAD_ADDR, initrd_size, /* XXX: need an option to load a NVRAM image */ 0, graphic_width, graphic_height, graphic_depth, (uint8_t *)&nd_table[0].macaddr); fw_cfg = fw_cfg_init(BIOS_CFG_IOPORT, BIOS_CFG_IOPORT + 1, 0, 0); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, KERNEL_LOAD_ADDR); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); if (kernel_cmdline) { fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, CMDLINE_ADDR); pstrcpy_targphys(CMDLINE_ADDR, TARGET_PAGE_SIZE, kernel_cmdline); } else { fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0); } fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, INITRD_LOAD_ADDR); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, boot_devices[0]); qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); }"} {"target": 0, "idx": 474, "func": "static void fpu_init (CPUMIPSState *env, const mips_def_t *def) { int i; for (i = 0; i < MIPS_FPU_MAX; i++) env->fpus[i].fcr0 = def->CP1_fcr0; memcpy(&env->active_fpu, &env->fpus[0], sizeof(env->active_fpu)); if (env->user_mode_only) { if (env->CP0_Config1 & (1 << CP0C1_FP)) env->hflags |= MIPS_HFLAG_FPU; #ifdef TARGET_MIPS64 if (env->active_fpu.fcr0 & (1 << FCR0_F64)) env->hflags |= MIPS_HFLAG_F64; #endif } }"} {"target": 1, "idx": 490, "func": "static int bfi_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data, *buf_end = avpkt->data + avpkt->size; int buf_size = avpkt->size; BFIContext *bfi = avctx->priv_data; uint8_t *dst = bfi->dst; uint8_t *src, *dst_offset, colour1, colour2; uint8_t *frame_end = bfi->dst + avctx->width * avctx->height; uint32_t *pal; int i, j, height = avctx->height; if (bfi->frame.data[0]) avctx->release_buffer(avctx, &bfi->frame); bfi->frame.reference = 1; if (avctx->get_buffer(avctx, &bfi->frame) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return -1; } /* Set frame parameters and palette, if necessary */ if (!avctx->frame_number) { bfi->frame.pict_type = AV_PICTURE_TYPE_I; bfi->frame.key_frame = 1; /* Setting the palette */ if (avctx->extradata_size > 768) { av_log(NULL, AV_LOG_ERROR, \"Palette is too large.\\n\"); return -1; } pal = (uint32_t *)bfi->frame.data[1]; for (i = 0; i < avctx->extradata_size / 3; i++) { int shift = 16; *pal = 0; for (j = 0; j < 3; j++, shift -= 8) *pal += ((avctx->extradata[i * 3 + j] << 2) | (avctx->extradata[i * 3 + j] >> 4)) << shift; pal++; } bfi->frame.palette_has_changed = 1; } else { bfi->frame.pict_type = AV_PICTURE_TYPE_P; bfi->frame.key_frame = 0; } buf += 4; // Unpacked size, not required. 
while (dst != frame_end) { static const uint8_t lentab[4] = { 0, 2, 0, 1 }; unsigned int byte = *buf++, av_uninit(offset); unsigned int code = byte >> 6; unsigned int length = byte & ~0xC0; if (buf >= buf_end) { av_log(avctx, AV_LOG_ERROR, \"Input resolution larger than actual frame.\\n\"); return -1; } /* Get length and offset(if required) */ if (length == 0) { if (code == 1) { length = bytestream_get_byte(&buf); offset = bytestream_get_le16(&buf); } else { length = bytestream_get_le16(&buf); if (code == 2 && length == 0) break; } } else { if (code == 1) offset = bytestream_get_byte(&buf); } /* Do boundary check */ if (dst + (length << lentab[code]) > frame_end) break; switch (code) { case 0: //Normal Chain if (length >= buf_end - buf) { av_log(avctx, AV_LOG_ERROR, \"Frame larger than buffer.\\n\"); return -1; } bytestream_get_buffer(&buf, dst, length); dst += length; break; case 1: //Back Chain dst_offset = dst - offset; length *= 4; //Convert dwords to bytes. if (dst_offset < bfi->dst) break; while (length--) *dst++ = *dst_offset++; break; case 2: //Skip Chain dst += length; break; case 3: //Fill Chain colour1 = bytestream_get_byte(&buf); colour2 = bytestream_get_byte(&buf); while (length--) { *dst++ = colour1; *dst++ = colour2; } break; } } src = bfi->dst; dst = bfi->frame.data[0]; while (height--) { memcpy(dst, src, avctx->width); src += avctx->width; dst += bfi->frame.linesize[0]; } *data_size = sizeof(AVFrame); *(AVFrame *)data = bfi->frame; return buf_size; }"} {"target": 1, "idx": 491, "func": "DeviceState *qdev_device_add(QemuOpts *opts) { ObjectClass *oc; DeviceClass *dc; const char *driver, *path, *id; DeviceState *dev; BusState *bus = NULL; Error *err = NULL; driver = qemu_opt_get(opts, \"driver\"); if (!driver) { qerror_report(QERR_MISSING_PARAMETER, \"driver\"); return NULL; } /* find driver */ oc = object_class_by_name(driver); if (!oc) { const char *typename = find_typename_by_alias(driver); if (typename) { driver = typename; oc = object_class_by_name(driver); } } if (!object_class_dynamic_cast(oc, TYPE_DEVICE)) { qerror_report(ERROR_CLASS_GENERIC_ERROR, \"'%s' is not a valid device model name\", driver); return NULL; } if (object_class_is_abstract(oc)) { qerror_report(QERR_INVALID_PARAMETER_VALUE, \"driver\", \"non-abstract device type\"); return NULL; } dc = DEVICE_CLASS(oc); if (dc->cannot_instantiate_with_device_add_yet) { qerror_report(QERR_INVALID_PARAMETER_VALUE, \"driver\", \"pluggable device type\"); return NULL; } /* find bus */ path = qemu_opt_get(opts, \"bus\"); if (path != NULL) { bus = qbus_find(path); if (!bus) { return NULL; } if (!object_dynamic_cast(OBJECT(bus), dc->bus_type)) { qerror_report(QERR_BAD_BUS_FOR_DEVICE, driver, object_get_typename(OBJECT(bus))); return NULL; } } else if (dc->bus_type != NULL) { bus = qbus_find_recursive(sysbus_get_default(), NULL, dc->bus_type); if (!bus) { qerror_report(QERR_NO_BUS_FOR_DEVICE, dc->bus_type, driver); return NULL; } } if (qdev_hotplug && bus && !bus->allow_hotplug) { qerror_report(QERR_BUS_NO_HOTPLUG, bus->name); return NULL; } /* create device, set properties */ dev = DEVICE(object_new(driver)); if (bus) { qdev_set_parent_bus(dev, bus); } id = qemu_opts_id(opts); if (id) { dev->id = id; } if (qemu_opt_foreach(opts, set_property, dev, 1) != 0) { object_unparent(OBJECT(dev)); object_unref(OBJECT(dev)); return NULL; } if (dev->id) { object_property_add_child(qdev_get_peripheral(), dev->id, OBJECT(dev), NULL); } else { static int anon_count; gchar *name = g_strdup_printf(\"device[%d]\", anon_count++); 
object_property_add_child(qdev_get_peripheral_anon(), name, OBJECT(dev), NULL); g_free(name); } dev->opts = opts; object_property_set_bool(OBJECT(dev), true, \"realized\", &err); if (err != NULL) { qerror_report_err(err); error_free(err); dev->opts = NULL; object_unparent(OBJECT(dev)); object_unref(OBJECT(dev)); qerror_report(QERR_DEVICE_INIT_FAILED, driver); return NULL; } return dev; }"} {"target": 0, "idx": 501, "func": "void thread_pool_submit(ThreadPoolFunc *func, void *arg) { thread_pool_submit_aio(func, arg, NULL, NULL); }"} {"target": 0, "idx": 510, "func": "static void pl061_register_devices(void) { sysbus_register_dev(\"pl061\", sizeof(pl061_state), pl061_init_arm); sysbus_register_dev(\"pl061_luminary\", sizeof(pl061_state), pl061_init_luminary); }"} {"target": 1, "idx": 539, "func": "int cpu_exec(CPUState *cpu) { CPUClass *cc = CPU_GET_CLASS(cpu); int ret; SyncClocks sc; /* replay_interrupt may need current_cpu */ current_cpu = cpu; if (cpu_handle_halt(cpu)) { return EXCP_HALTED; } rcu_read_lock(); cc->cpu_exec_enter(cpu); /* Calculate difference between guest clock and host clock. * This delay includes the delay of the last cycle, so * what we have to do is sleep until it is 0. As for the * advance/delay we gain here, we try to fix it next time. */ init_delay_params(&sc, cpu); /* prepare setjmp context for exception handling */ if (sigsetjmp(cpu->jmp_env, 0) != 0) { #if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6) /* Some compilers wrongly smash all local variables after * siglongjmp. There were bug reports for gcc 4.5.0 and clang. * Reload essential local variables here for those compilers. * Newer versions of gcc would complain about this code (-Wclobbered). */ cpu = current_cpu; cc = CPU_GET_CLASS(cpu); #else /* buggy compiler */ /* Assert that the compiler does not smash local variables. */ g_assert(cpu == current_cpu); g_assert(cc == CPU_GET_CLASS(cpu)); #endif /* buggy compiler */ cpu->can_do_io = 1; tb_lock_reset(); if (qemu_mutex_iothread_locked()) { qemu_mutex_unlock_iothread(); } } /* if an exception is pending, we execute it here */ while (!cpu_handle_exception(cpu, &ret)) { TranslationBlock *last_tb = NULL; int tb_exit = 0; while (!cpu_handle_interrupt(cpu, &last_tb)) { TranslationBlock *tb = tb_find(cpu, last_tb, tb_exit); cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc); /* Try to align the host and virtual clocks if the guest is in advance */ align_clocks(&sc, cpu); } } cc->cpu_exec_exit(cpu); rcu_read_unlock(); /* fail safe : never use current_cpu outside cpu_exec() */ current_cpu = NULL; return ret; }"} {"target": 0, "idx": 547, "func": "static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw) { int i; uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL; uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL; copy_scsw_to_guest(&dest->scsw, &src->scsw); for (i = 0; i < ARRAY_SIZE(dest->esw); i++) { dest->esw[i] = cpu_to_be32(src->esw[i]); } for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) { dest->ecw[i] = cpu_to_be32(src->ecw[i]); } /* extended measurements enabled? */ if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) || !(pmcw->flags & PMCW_FLAGS_MASK_TF) || !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) { return; } /* extended measurements pending? 
*/ if (!(stctl & SCSW_STCTL_STATUS_PEND)) { return; } if ((stctl & SCSW_STCTL_PRIMARY) || (stctl == SCSW_STCTL_SECONDARY) || ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) { for (i = 0; i < ARRAY_SIZE(dest->emw); i++) { dest->emw[i] = cpu_to_be32(src->emw[i]); } } }"} {"target": 0, "idx": 566, "func": "static int process_input_packet(InputStream *ist, const AVPacket *pkt) { int i; int got_output; AVPacket avpkt; if (ist->next_dts == AV_NOPTS_VALUE) ist->next_dts = ist->last_dts; if (pkt == NULL) { /* EOF handling */ av_init_packet(&avpkt); avpkt.data = NULL; avpkt.size = 0; goto handle_eof; } else { avpkt = *pkt; } if (pkt->dts != AV_NOPTS_VALUE) ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); // while we have more to decode or while the decoder did output something on EOF while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) { int ret = 0; handle_eof: ist->last_dts = ist->next_dts; if (avpkt.size && avpkt.size != pkt->size && !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) { av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING, \"Multiple frames in a packet from stream %d\\n\", pkt->stream_index); ist->showed_multi_packet_warning = 1; } switch (ist->dec_ctx->codec_type) { case AVMEDIA_TYPE_AUDIO: ret = decode_audio (ist, &avpkt, &got_output); break; case AVMEDIA_TYPE_VIDEO: ret = decode_video (ist, &avpkt, &got_output); if (avpkt.duration) ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q); else if (ist->st->avg_frame_rate.num) ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), AV_TIME_BASE_Q); else if (ist->dec_ctx->time_base.num != 0) { int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame; ist->next_dts += av_rescale_q(ticks, ist->dec_ctx->time_base, AV_TIME_BASE_Q); } break; case AVMEDIA_TYPE_SUBTITLE: ret = transcode_subtitles(ist, &avpkt, &got_output); break; default: return -1; } if (ret < 0) return ret; // touch data and size only if not EOF if (pkt) { avpkt.data += ret; avpkt.size -= ret; } if (!got_output) { continue; } } /* handle stream copy */ if (!ist->decoding_needed) { ist->last_dts = ist->next_dts; switch (ist->dec_ctx->codec_type) { case AVMEDIA_TYPE_AUDIO: ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) / ist->dec_ctx->sample_rate; break; case AVMEDIA_TYPE_VIDEO: if (ist->dec_ctx->time_base.num != 0) { int ticks = ist->st->parser ? 
ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame; ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->time_base.num * ticks) / ist->dec_ctx->time_base.den; } break; } } for (i = 0; pkt && i < nb_output_streams; i++) { OutputStream *ost = output_streams[i]; if (!check_output_constraints(ist, ost) || ost->encoding_needed) continue; do_streamcopy(ist, ost, pkt); } return 0; }"} {"target": 1, "idx": 569, "func": "static target_ulong disas_insn(DisasContext *s, CPUState *cpu) { CPUX86State *env = cpu->env_ptr; int b, prefixes; int shift; TCGMemOp ot, aflag, dflag; int modrm, reg, rm, mod, op, opreg, val; target_ulong next_eip, tval; int rex_w, rex_r; target_ulong pc_start = s->base.pc_next; s->pc_start = s->pc = pc_start; prefixes = 0; s->override = -1; rex_w = -1; rex_r = 0; #ifdef TARGET_X86_64 s->rex_x = 0; s->rex_b = 0; x86_64_hregs = 0; #endif s->rip_offset = 0; /* for relative ip address */ s->vex_l = 0; s->vex_v = 0; if (sigsetjmp(s->jmpbuf, 0) != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); return s->pc; } next_byte: b = x86_ldub_code(env, s); /* Collect prefixes. */ switch (b) { case 0xf3: prefixes |= PREFIX_REPZ; goto next_byte; case 0xf2: prefixes |= PREFIX_REPNZ; goto next_byte; case 0xf0: prefixes |= PREFIX_LOCK; goto next_byte; case 0x2e: s->override = R_CS; goto next_byte; case 0x36: s->override = R_SS; goto next_byte; case 0x3e: s->override = R_DS; goto next_byte; case 0x26: s->override = R_ES; goto next_byte; case 0x64: s->override = R_FS; goto next_byte; case 0x65: s->override = R_GS; goto next_byte; case 0x66: prefixes |= PREFIX_DATA; goto next_byte; case 0x67: prefixes |= PREFIX_ADR; goto next_byte; #ifdef TARGET_X86_64 case 0x40 ... 0x4f: if (CODE64(s)) { /* REX prefix */ rex_w = (b >> 3) & 1; rex_r = (b & 0x4) << 1; s->rex_x = (b & 0x2) << 2; REX_B(s) = (b & 0x1) << 3; x86_64_hregs = 1; /* select uniform byte register addressing */ goto next_byte; } break; #endif case 0xc5: /* 2-byte VEX */ case 0xc4: /* 3-byte VEX */ /* VEX prefixes cannot be used except in 32-bit mode. Otherwise the instruction is LES or LDS. */ if (s->code32 && !s->vm86) { static const int pp_prefix[4] = { 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ }; int vex3, vex2 = x86_ldub_code(env, s); if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) { /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b, otherwise the instruction is LES or LDS. */ break; } s->pc++; /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_LOCK | PREFIX_DATA)) { goto illegal_op; } #ifdef TARGET_X86_64 if (x86_64_hregs) { goto illegal_op; } #endif rex_r = (~vex2 >> 4) & 8; if (b == 0xc5) { vex3 = vex2; b = x86_ldub_code(env, s); } else { #ifdef TARGET_X86_64 s->rex_x = (~vex2 >> 3) & 8; s->rex_b = (~vex2 >> 2) & 8; #endif vex3 = x86_ldub_code(env, s); rex_w = (vex3 >> 7) & 1; switch (vex2 & 0x1f) { case 0x01: /* Implied 0f leading opcode bytes. */ b = x86_ldub_code(env, s) | 0x100; break; case 0x02: /* Implied 0f 38 leading opcode bytes. */ b = 0x138; break; case 0x03: /* Implied 0f 3a leading opcode bytes. */ b = 0x13a; break; default: /* Reserved for future use. */ goto unknown_op; } } s->vex_v = (~vex3 >> 3) & 0xf; s->vex_l = (vex3 >> 2) & 1; prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX; } break; } /* Post-process prefixes. */ if (CODE64(s)) { /* In 64-bit mode, the default data size is 32-bit. Select 64-bit data with rex_w, and 16-bit data with 0x66; rex_w takes precedence over 0x66 if both are present. */ dflag = (rex_w > 0 ? 
MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32); /* In 64-bit mode, 0x67 selects 32-bit addressing. */ aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64); } else { /* In 16/32-bit mode, 0x66 selects the opposite data size. */ if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) { dflag = MO_32; } else { dflag = MO_16; } /* In 16/32-bit mode, 0x67 selects the opposite addressing. */ if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) { aflag = MO_32; } else { aflag = MO_16; } } s->prefix = prefixes; s->aflag = aflag; s->dflag = dflag; /* now check op code */ reswitch: switch(b) { case 0x0f: /**************************/ /* extended op code */ b = x86_ldub_code(env, s) | 0x100; goto reswitch; /**************************/ /* arith & logic */ case 0x00 ... 0x05: case 0x08 ... 0x0d: case 0x10 ... 0x15: case 0x18 ... 0x1d: case 0x20 ... 0x25: case 0x28 ... 0x2d: case 0x30 ... 0x35: case 0x38 ... 0x3d: { int op, f, val; op = (b >> 3) & 7; f = (b >> 1) & 3; ot = mo_b_d(b, dflag); switch(f) { case 0: /* OP Ev, Gv */ modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else if (op == OP_XORL && rm == reg) { xor_zero: /* xor reg, reg optimisation */ set_cc_op(s, CC_OP_CLR); tcg_gen_movi_tl(cpu_T0, 0); gen_op_mov_reg_v(ot, reg, cpu_T0); break; } else { opreg = rm; } gen_op_mov_v_reg(ot, cpu_T1, reg); gen_op(s, op, ot, opreg); break; case 1: /* OP Gv, Ev */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, cpu_T1, cpu_A0); } else if (op == OP_XORL && rm == reg) { goto xor_zero; } else { gen_op_mov_v_reg(ot, cpu_T1, rm); } gen_op(s, op, ot, reg); break; case 2: /* OP A, Iv */ val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T1, val); gen_op(s, op, ot, OR_EAX); break; } } break; case 0x82: if (CODE64(s)) goto illegal_op; case 0x80: /* GRP1 */ case 0x81: case 0x83: { int val; ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (mod != 3) { if (b == 0x83) s->rip_offset = 1; else s->rip_offset = insn_const_size(ot); gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else { opreg = rm; } switch(b) { default: case 0x80: case 0x81: case 0x82: val = insn_get(env, s, ot); break; case 0x83: val = (int8_t)insn_get(env, s, MO_8); break; } tcg_gen_movi_tl(cpu_T1, val); gen_op(s, op, ot, opreg); } break; /**************************/ /* inc, dec, and other misc arith */ case 0x40 ... 0x47: /* inc Gv */ ot = dflag; gen_inc(s, ot, OR_EAX + (b & 7), 1); break; case 0x48 ... 0x4f: /* dec Gv */ ot = dflag; gen_inc(s, ot, OR_EAX + (b & 7), -1); break; case 0xf6: /* GRP3 */ case 0xf7: ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (mod != 3) { if (op == 0) { s->rip_offset = insn_const_size(ot); } gen_lea_modrm(env, s, modrm); /* For those below that handle locked memory, don't load here. 
*/ if (!(s->prefix & PREFIX_LOCK) || op != 2) { gen_op_ld_v(s, ot, cpu_T0, cpu_A0); } } else { gen_op_mov_v_reg(ot, cpu_T0, rm); } switch(op) { case 0: /* test */ val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T1, val); gen_op_testl_T0_T1_cc(); set_cc_op(s, CC_OP_LOGICB + ot); break; case 2: /* not */ if (s->prefix & PREFIX_LOCK) { if (mod == 3) { goto illegal_op; } tcg_gen_movi_tl(cpu_T0, ~0); tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0, s->mem_index, ot | MO_LE); } else { tcg_gen_not_tl(cpu_T0, cpu_T0); if (mod != 3) { gen_op_st_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_reg_v(ot, rm, cpu_T0); } } break; case 3: /* neg */ if (s->prefix & PREFIX_LOCK) { TCGLabel *label1; TCGv a0, t0, t1, t2; if (mod == 3) { goto illegal_op; } a0 = tcg_temp_local_new(); t0 = tcg_temp_local_new(); label1 = gen_new_label(); tcg_gen_mov_tl(a0, cpu_A0); tcg_gen_mov_tl(t0, cpu_T0); gen_set_label(label1); t1 = tcg_temp_new(); t2 = tcg_temp_new(); tcg_gen_mov_tl(t2, t0); tcg_gen_neg_tl(t1, t0); tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1, s->mem_index, ot | MO_LE); tcg_temp_free(t1); tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1); tcg_temp_free(t2); tcg_temp_free(a0); tcg_gen_mov_tl(cpu_T0, t0); tcg_temp_free(t0); } else { tcg_gen_neg_tl(cpu_T0, cpu_T0); if (mod != 3) { gen_op_st_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_reg_v(ot, rm, cpu_T0); } } gen_op_update_neg_cc(); set_cc_op(s, CC_OP_SUBB + ot); break; case 4: /* mul */ switch(ot) { case MO_8: gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX); tcg_gen_ext8u_tl(cpu_T0, cpu_T0); tcg_gen_ext8u_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00); set_cc_op(s, CC_OP_MULB); break; case MO_16: gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX); tcg_gen_ext16u_tl(cpu_T0, cpu_T0); tcg_gen_ext16u_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_shri_tl(cpu_T0, cpu_T0, 16); gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0); tcg_gen_mov_tl(cpu_cc_src, cpu_T0); set_cc_op(s, CC_OP_MULW); break; default: case MO_32: tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]); tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULL); break; #ifdef TARGET_X86_64 case MO_64: tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_T0, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULQ); break; #endif } break; case 5: /* imul */ switch(ot) { case MO_8: gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX); tcg_gen_ext8s_tl(cpu_T0, cpu_T0); tcg_gen_ext8s_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0); tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0); set_cc_op(s, CC_OP_MULB); break; case MO_16: gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX); tcg_gen_ext16s_tl(cpu_T0, cpu_T0); tcg_gen_ext16s_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); 
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0); tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0); tcg_gen_shri_tl(cpu_T0, cpu_T0, 16); gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0); set_cc_op(s, CC_OP_MULW); break; default: case MO_32: tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]); tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32); tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32); set_cc_op(s, CC_OP_MULL); break; #ifdef TARGET_X86_64 case MO_64: tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_T0, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63); tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULQ); break; #endif } break; case 6: /* div */ switch(ot) { case MO_8: gen_helper_divb_AL(cpu_env, cpu_T0); break; case MO_16: gen_helper_divw_AX(cpu_env, cpu_T0); break; default: case MO_32: gen_helper_divl_EAX(cpu_env, cpu_T0); break; #ifdef TARGET_X86_64 case MO_64: gen_helper_divq_EAX(cpu_env, cpu_T0); break; #endif } break; case 7: /* idiv */ switch(ot) { case MO_8: gen_helper_idivb_AL(cpu_env, cpu_T0); break; case MO_16: gen_helper_idivw_AX(cpu_env, cpu_T0); break; default: case MO_32: gen_helper_idivl_EAX(cpu_env, cpu_T0); break; #ifdef TARGET_X86_64 case MO_64: gen_helper_idivq_EAX(cpu_env, cpu_T0); break; #endif } break; default: goto unknown_op; } break; case 0xfe: /* GRP4 */ case 0xff: /* GRP5 */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (op >= 2 && b == 0xfe) { goto unknown_op; } if (CODE64(s)) { if (op == 2 || op == 4) { /* operand size for jumps is 64 bit */ ot = MO_64; } else if (op == 3 || op == 5) { ot = dflag != MO_16 ? 
MO_32 + (rex_w == 1) : MO_16; } else if (op == 6) { /* default push size is 64 bit */ ot = mo_pushpop(s, dflag); } } if (mod != 3) { gen_lea_modrm(env, s, modrm); if (op >= 2 && op != 3 && op != 5) gen_op_ld_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_v_reg(ot, cpu_T0, rm); } switch(op) { case 0: /* inc Ev */ if (mod != 3) opreg = OR_TMP0; else opreg = rm; gen_inc(s, ot, opreg, 1); break; case 1: /* dec Ev */ if (mod != 3) opreg = OR_TMP0; else opreg = rm; gen_inc(s, ot, opreg, -1); break; case 2: /* call Ev */ /* XXX: optimize if memory (no 'and' is necessary) */ if (dflag == MO_16) { tcg_gen_ext16u_tl(cpu_T0, cpu_T0); } next_eip = s->pc - s->cs_base; tcg_gen_movi_tl(cpu_T1, next_eip); gen_push_v(s, cpu_T1); gen_op_jmp_v(cpu_T0); gen_bnd_jmp(s); gen_jr(s, cpu_T0); break; case 3: /* lcall Ev */ gen_op_ld_v(s, ot, cpu_T1, cpu_A0); gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0); do_lcall: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1, tcg_const_i32(dflag - 1), tcg_const_tl(s->pc - s->cs_base)); } else { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1, tcg_const_i32(dflag - 1), tcg_const_i32(s->pc - s->cs_base)); } tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip)); gen_jr(s, cpu_tmp4); break; case 4: /* jmp Ev */ if (dflag == MO_16) { tcg_gen_ext16u_tl(cpu_T0, cpu_T0); } gen_op_jmp_v(cpu_T0); gen_bnd_jmp(s); gen_jr(s, cpu_T0); break; case 5: /* ljmp Ev */ gen_op_ld_v(s, ot, cpu_T1, cpu_A0); gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0); do_ljmp: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1, tcg_const_tl(s->pc - s->cs_base)); } else { gen_op_movl_seg_T0_vm(R_CS); gen_op_jmp_v(cpu_T1); } tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip)); gen_jr(s, cpu_tmp4); break; case 6: /* push Ev */ gen_push_v(s, cpu_T0); break; default: goto unknown_op; } break; case 0x84: /* test Ev, Gv */ case 0x85: ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_op_mov_v_reg(ot, cpu_T1, reg); gen_op_testl_T0_T1_cc(); set_cc_op(s, CC_OP_LOGICB + ot); break; case 0xa8: /* test eAX, Iv */ case 0xa9: ot = mo_b_d(b, dflag); val = insn_get(env, s, ot); gen_op_mov_v_reg(ot, cpu_T0, OR_EAX); tcg_gen_movi_tl(cpu_T1, val); gen_op_testl_T0_T1_cc(); set_cc_op(s, CC_OP_LOGICB + ot); break; case 0x98: /* CWDE/CBW */ switch (dflag) { #ifdef TARGET_X86_64 case MO_64: gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX); tcg_gen_ext32s_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0); break; #endif case MO_32: gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX); tcg_gen_ext16s_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0); break; case MO_16: gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX); tcg_gen_ext8s_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); break; default: tcg_abort(); } break; case 0x99: /* CDQ/CWD */ switch (dflag) { #ifdef TARGET_X86_64 case MO_64: gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX); tcg_gen_sari_tl(cpu_T0, cpu_T0, 63); gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0); break; #endif case MO_32: gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX); tcg_gen_ext32s_tl(cpu_T0, cpu_T0); tcg_gen_sari_tl(cpu_T0, cpu_T0, 31); gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0); break; case MO_16: gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX); tcg_gen_ext16s_tl(cpu_T0, cpu_T0); tcg_gen_sari_tl(cpu_T0, cpu_T0, 15); 
gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0); break; default: tcg_abort(); } break; case 0x1af: /* imul Gv, Ev */ case 0x69: /* imul Gv, Ev, I */ case 0x6b: ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; if (b == 0x69) s->rip_offset = insn_const_size(ot); else if (b == 0x6b) s->rip_offset = 1; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); if (b == 0x69) { val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T1, val); } else if (b == 0x6b) { val = (int8_t)insn_get(env, s, MO_8); tcg_gen_movi_tl(cpu_T1, val); } else { gen_op_mov_v_reg(ot, cpu_T1, reg); } switch (ot) { #ifdef TARGET_X86_64 case MO_64: tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63); tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1); break; #endif case MO_32: tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32); break; default: tcg_gen_ext16s_tl(cpu_T0, cpu_T0); tcg_gen_ext16s_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0); tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0); gen_op_mov_reg_v(ot, reg, cpu_T0); break; } set_cc_op(s, CC_OP_MULB + ot); break; case 0x1c0: case 0x1c1: /* xadd Ev, Gv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; gen_op_mov_v_reg(ot, cpu_T0, reg); if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(ot, cpu_T1, rm); tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(ot, reg, cpu_T1); gen_op_mov_reg_v(ot, rm, cpu_T0); } else { gen_lea_modrm(env, s, modrm); if (s->prefix & PREFIX_LOCK) { tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0, s->mem_index, ot | MO_LE); tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); } else { gen_op_ld_v(s, ot, cpu_T1, cpu_A0); tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_st_v(s, ot, cpu_T0, cpu_A0); } gen_op_mov_reg_v(ot, reg, cpu_T1); } gen_op_update2_cc(); set_cc_op(s, CC_OP_ADDB + ot); break; case 0x1b0: case 0x1b1: /* cmpxchg Ev, Gv */ { TCGv oldv, newv, cmpv; ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; oldv = tcg_temp_new(); newv = tcg_temp_new(); cmpv = tcg_temp_new(); gen_op_mov_v_reg(ot, newv, reg); tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]); if (s->prefix & PREFIX_LOCK) { if (mod == 3) { goto illegal_op; } gen_lea_modrm(env, s, modrm); tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv, s->mem_index, ot | MO_LE); gen_op_mov_reg_v(ot, R_EAX, oldv); } else { if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(ot, oldv, rm); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, oldv, cpu_A0); rm = 0; /* avoid warning */ } gen_extu(ot, oldv); gen_extu(ot, cmpv); /* store value = (old == cmp ? 
new : old); */ tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); if (mod == 3) { gen_op_mov_reg_v(ot, R_EAX, oldv); gen_op_mov_reg_v(ot, rm, newv); } else { /* Perform an unconditional store cycle like physical cpu; must be before changing accumulator to ensure idempotency if the store faults and the instruction is restarted */ gen_op_st_v(s, ot, newv, cpu_A0); gen_op_mov_reg_v(ot, R_EAX, oldv); } } tcg_gen_mov_tl(cpu_cc_src, oldv); tcg_gen_mov_tl(cpu_cc_srcT, cmpv); tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv); set_cc_op(s, CC_OP_SUBB + ot); tcg_temp_free(oldv); tcg_temp_free(newv); tcg_temp_free(cmpv); } break; case 0x1c7: /* cmpxchg8b */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if ((mod == 3) || ((modrm & 0x38) != 0x8)) goto illegal_op; #ifdef TARGET_X86_64 if (dflag == MO_64) { if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) goto illegal_op; gen_lea_modrm(env, s, modrm); if ((s->prefix & PREFIX_LOCK) && parallel_cpus) { gen_helper_cmpxchg16b(cpu_env, cpu_A0); } else { gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0); } } else #endif { if (!(s->cpuid_features & CPUID_CX8)) goto illegal_op; gen_lea_modrm(env, s, modrm); if ((s->prefix & PREFIX_LOCK) && parallel_cpus) { gen_helper_cmpxchg8b(cpu_env, cpu_A0); } else { gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0); } } set_cc_op(s, CC_OP_EFLAGS); break; /**************************/ /* push/pop */ case 0x50 ... 0x57: /* push */ gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s)); gen_push_v(s, cpu_T0); break; case 0x58 ... 0x5f: /* pop */ ot = gen_pop_T0(s); /* NOTE: order is important for pop %sp */ gen_pop_update(s, ot); gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0); break; case 0x60: /* pusha */ if (CODE64(s)) goto illegal_op; gen_pusha(s); break; case 0x61: /* popa */ if (CODE64(s)) goto illegal_op; gen_popa(s); break; case 0x68: /* push Iv */ case 0x6a: ot = mo_pushpop(s, dflag); if (b == 0x68) val = insn_get(env, s, ot); else val = (int8_t)insn_get(env, s, MO_8); tcg_gen_movi_tl(cpu_T0, val); gen_push_v(s, cpu_T0); break; case 0x8f: /* pop Ev */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; ot = gen_pop_T0(s); if (mod == 3) { /* NOTE: order is important for pop %sp */ gen_pop_update(s, ot); rm = (modrm & 7) | REX_B(s); gen_op_mov_reg_v(ot, rm, cpu_T0); } else { /* NOTE: order is important too for MMU exceptions */ s->popl_esp_hack = 1 << ot; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); s->popl_esp_hack = 0; gen_pop_update(s, ot); } break; case 0xc8: /* enter */ { int level; val = x86_lduw_code(env, s); level = x86_ldub_code(env, s); gen_enter(s, val, level); } break; case 0xc9: /* leave */ gen_leave(s); break; case 0x06: /* push es */ case 0x0e: /* push cs */ case 0x16: /* push ss */ case 0x1e: /* push ds */ if (CODE64(s)) goto illegal_op; gen_op_movl_T0_seg(b >> 3); gen_push_v(s, cpu_T0); break; case 0x1a0: /* push fs */ case 0x1a8: /* push gs */ gen_op_movl_T0_seg((b >> 3) & 7); gen_push_v(s, cpu_T0); break; case 0x07: /* pop es */ case 0x17: /* pop ss */ case 0x1f: /* pop ds */ if (CODE64(s)) goto illegal_op; reg = b >> 3; ot = gen_pop_T0(s); gen_movl_seg_T0(s, reg); gen_pop_update(s, ot); /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. 
*/ if (s->base.is_jmp) { gen_jmp_im(s->pc - s->cs_base); if (reg == R_SS) { s->tf = 0; gen_eob_inhibit_irq(s, true); } else { gen_eob(s); } } break; case 0x1a1: /* pop fs */ case 0x1a9: /* pop gs */ ot = gen_pop_T0(s); gen_movl_seg_T0(s, (b >> 3) & 7); gen_pop_update(s, ot); if (s->base.is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; /**************************/ /* mov */ case 0x88: case 0x89: /* mov Gv, Ev */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; /* generate a generic store */ gen_ldst_modrm(env, s, modrm, ot, reg, 1); break; case 0xc6: case 0xc7: /* mov Ev, Iv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if (mod != 3) { s->rip_offset = insn_const_size(ot); gen_lea_modrm(env, s, modrm); } val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T0, val); if (mod != 3) { gen_op_st_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0); } break; case 0x8a: case 0x8b: /* mov Ev, Gv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_op_mov_reg_v(ot, reg, cpu_T0); break; case 0x8e: /* mov seg, Gv */ modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; if (reg >= 6 || reg == R_CS) goto illegal_op; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_movl_seg_T0(s, reg); /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */ if (s->base.is_jmp) { gen_jmp_im(s->pc - s->cs_base); if (reg == R_SS) { s->tf = 0; gen_eob_inhibit_irq(s, true); } else { gen_eob(s); } } break; case 0x8c: /* mov Gv, seg */ modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (reg >= 6) goto illegal_op; gen_op_movl_T0_seg(reg); ot = mod == 3 ? dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 0x1b6: /* movzbS Gv, Eb */ case 0x1b7: /* movzwS Gv, Eb */ case 0x1be: /* movsbS Gv, Eb */ case 0x1bf: /* movswS Gv, Eb */ { TCGMemOp d_ot; TCGMemOp s_ot; /* d_ot is the size of destination */ d_ot = dflag; /* ot is the size of source */ ot = (b & 1) + MO_8; /* s_ot is the sign+size of source */ s_ot = b & 8 ? 
MO_SIGN | ot : ot; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { if (s_ot == MO_SB && byte_reg_is_xH(rm)) { tcg_gen_sextract_tl(cpu_T0, cpu_regs[rm - 4], 8, 8); } else { gen_op_mov_v_reg(ot, cpu_T0, rm); switch (s_ot) { case MO_UB: tcg_gen_ext8u_tl(cpu_T0, cpu_T0); break; case MO_SB: tcg_gen_ext8s_tl(cpu_T0, cpu_T0); break; case MO_UW: tcg_gen_ext16u_tl(cpu_T0, cpu_T0); break; default: case MO_SW: tcg_gen_ext16s_tl(cpu_T0, cpu_T0); break; } } gen_op_mov_reg_v(d_ot, reg, cpu_T0); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0); gen_op_mov_reg_v(d_ot, reg, cpu_T0); } } break; case 0x8d: /* lea */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; reg = ((modrm >> 3) & 7) | rex_r; { AddressParts a = gen_lea_modrm_0(env, s, modrm); TCGv ea = gen_lea_modrm_1(a); gen_lea_v_seg(s, s->aflag, ea, -1, -1); gen_op_mov_reg_v(dflag, reg, cpu_A0); } break; case 0xa0: /* mov EAX, Ov */ case 0xa1: case 0xa2: /* mov Ov, EAX */ case 0xa3: { target_ulong offset_addr; ot = mo_b_d(b, dflag); switch (s->aflag) { #ifdef TARGET_X86_64 case MO_64: offset_addr = x86_ldq_code(env, s); break; #endif default: offset_addr = insn_get(env, s, s->aflag); break; } tcg_gen_movi_tl(cpu_A0, offset_addr); gen_add_A0_ds_seg(s); if ((b & 2) == 0) { gen_op_ld_v(s, ot, cpu_T0, cpu_A0); gen_op_mov_reg_v(ot, R_EAX, cpu_T0); } else { gen_op_mov_v_reg(ot, cpu_T0, R_EAX); gen_op_st_v(s, ot, cpu_T0, cpu_A0); } } break; case 0xd7: /* xlat */ tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]); tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]); tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0); gen_extu(s->aflag, cpu_A0); gen_add_A0_ds_seg(s); gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0); gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0); break; case 0xb0 ... 0xb7: /* mov R, Ib */ val = insn_get(env, s, MO_8); tcg_gen_movi_tl(cpu_T0, val); gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0); break; case 0xb8 ... 0xbf: /* mov R, Iv */ #ifdef TARGET_X86_64 if (dflag == MO_64) { uint64_t tmp; /* 64 bit case */ tmp = x86_ldq_code(env, s); reg = (b & 7) | REX_B(s); tcg_gen_movi_tl(cpu_T0, tmp); gen_op_mov_reg_v(MO_64, reg, cpu_T0); } else #endif { ot = dflag; val = insn_get(env, s, ot); reg = (b & 7) | REX_B(s); tcg_gen_movi_tl(cpu_T0, val); gen_op_mov_reg_v(ot, reg, cpu_T0); } break; case 0x91 ... 0x97: /* xchg R, EAX */ do_xchg_reg_eax: ot = dflag; reg = (b & 7) | REX_B(s); rm = R_EAX; goto do_xchg_reg; case 0x86: case 0x87: /* xchg Ev, Gv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) { rm = (modrm & 7) | REX_B(s); do_xchg_reg: gen_op_mov_v_reg(ot, cpu_T0, reg); gen_op_mov_v_reg(ot, cpu_T1, rm); gen_op_mov_reg_v(ot, rm, cpu_T0); gen_op_mov_reg_v(ot, reg, cpu_T1); } else { gen_lea_modrm(env, s, modrm); gen_op_mov_v_reg(ot, cpu_T0, reg); /* for xchg, lock is implicit */ tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0, s->mem_index, ot | MO_LE); gen_op_mov_reg_v(ot, reg, cpu_T1); } break; case 0xc4: /* les Gv */ /* In CODE64 this is VEX3; see above. */ op = R_ES; goto do_lxx; case 0xc5: /* lds Gv */ /* In CODE64 this is VEX2; see above. */ op = R_DS; goto do_lxx; case 0x1b2: /* lss Gv */ op = R_SS; goto do_lxx; case 0x1b4: /* lfs Gv */ op = R_FS; goto do_lxx; case 0x1b5: /* lgs Gv */ op = R_GS; do_lxx: ot = dflag != MO_16 ? 
MO_32 : MO_16; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, cpu_T1, cpu_A0); gen_add_A0_im(s, 1 << ot); /* load the segment first to handle exceptions properly */ gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0); gen_movl_seg_T0(s, op); /* then put the data */ gen_op_mov_reg_v(ot, reg, cpu_T1); if (s->base.is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; /************************/ /* shifts */ case 0xc0: case 0xc1: /* shift Ev,Ib */ shift = 2; grp2: { ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; if (mod != 3) { if (shift == 2) { s->rip_offset = 1; } gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else { opreg = (modrm & 7) | REX_B(s); } /* simpler op */ if (shift == 0) { gen_shift(s, op, ot, opreg, OR_ECX); } else { if (shift == 2) { shift = x86_ldub_code(env, s); } gen_shifti(s, op, ot, opreg, shift); } } break; case 0xd0: case 0xd1: /* shift Ev,1 */ shift = 1; goto grp2; case 0xd2: case 0xd3: /* shift Ev,cl */ shift = 0; goto grp2; case 0x1a4: /* shld imm */ op = 0; shift = 1; goto do_shiftd; case 0x1a5: /* shld cl */ op = 0; shift = 0; goto do_shiftd; case 0x1ac: /* shrd imm */ op = 1; shift = 1; goto do_shiftd; case 0x1ad: /* shrd cl */ op = 1; shift = 0; do_shiftd: ot = dflag; modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (mod != 3) { gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else { opreg = rm; } gen_op_mov_v_reg(ot, cpu_T1, reg); if (shift) { TCGv imm = tcg_const_tl(x86_ldub_code(env, s)); gen_shiftd_rm_T1(s, ot, opreg, op, imm); tcg_temp_free(imm); } else { gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]); } break; /************************/ /* floats */ case 0xd8 ... 0xdf: if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { /* if CR0.EM or CR0.TS are set, generate an FPU exception */ /* XXX: what to do if illegal op ? */ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = modrm & 7; op = ((b & 7) << 3) | ((modrm >> 3) & 7); if (mod != 3) { /* memory op */ gen_lea_modrm(env, s, modrm); switch(op) { case 0x00 ... 0x07: /* fxxxs */ case 0x10 ... 0x17: /* fixxxl */ case 0x20 ... 0x27: /* fxxxl */ case 0x30 ... 0x37: /* fixxx */ { int op1; op1 = op & 7; switch(op >> 4) { case 0: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32); break; case 1: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); break; case 2: tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64); break; case 3: default: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LESW); gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); break; } gen_helper_fp_arith_ST0_FT0(op1); if (op1 == 3) { /* fcomp needs pop */ gen_helper_fpop(cpu_env); } } break; case 0x08: /* flds */ case 0x0a: /* fsts */ case 0x0b: /* fstps */ case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */ case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */ case 0x38 ... 
0x3b: /* filds, fisttps, fists, fistps */ switch(op & 7) { case 0: switch(op >> 4) { case 0: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32); break; case 1: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); break; case 2: tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64); break; case 3: default: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LESW); gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); break; } break; case 1: /* XXX: the corresponding CPUID bit must be tested ! */ switch(op >> 4) { case 1: gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); break; case 2: gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env); tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); break; case 3: default: gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); break; } gen_helper_fpop(cpu_env); break; default: switch(op >> 4) { case 0: gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); break; case 1: gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); break; case 2: gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env); tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); break; case 3: default: gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); break; } if ((op & 7) == 3) gen_helper_fpop(cpu_env); break; } break; case 0x0c: /* fldenv mem */ gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); break; case 0x0d: /* fldcw mem */ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); gen_helper_fldcw(cpu_env, cpu_tmp2_i32); break; case 0x0e: /* fnstenv mem */ gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); break; case 0x0f: /* fnstcw mem */ gen_helper_fnstcw(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); break; case 0x1d: /* fldt mem */ gen_helper_fldt_ST0(cpu_env, cpu_A0); break; case 0x1f: /* fstpt mem */ gen_helper_fstt_ST0(cpu_env, cpu_A0); gen_helper_fpop(cpu_env); break; case 0x2c: /* frstor mem */ gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); break; case 0x2e: /* fnsave mem */ gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); break; case 0x2f: /* fnstsw mem */ gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); break; case 0x3c: /* fbld */ gen_helper_fbld_ST0(cpu_env, cpu_A0); break; case 0x3e: /* fbstp */ gen_helper_fbst_ST0(cpu_env, cpu_A0); gen_helper_fpop(cpu_env); break; case 0x3d: /* fildll */ tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64); break; case 0x3f: /* fistpll */ gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env); tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); gen_helper_fpop(cpu_env); break; default: goto unknown_op; } } else { /* register float ops */ opreg = rm; switch(op) { case 0x08: /* fld sti */ gen_helper_fpush(cpu_env); gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32((opreg + 1) & 7)); break; case 0x09: /* fxchg sti */ case 0x29: /* fxchg4 sti, undocumented op */ case 0x39: /* fxchg7 sti, undocumented op */ gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg)); 
break; case 0x0a: /* grp d9/2 */ switch(rm) { case 0: /* fnop */ /* check exceptions (FreeBSD FPU probe) */ gen_helper_fwait(cpu_env); break; default: goto unknown_op; } break; case 0x0c: /* grp d9/4 */ switch(rm) { case 0: /* fchs */ gen_helper_fchs_ST0(cpu_env); break; case 1: /* fabs */ gen_helper_fabs_ST0(cpu_env); break; case 4: /* ftst */ gen_helper_fldz_FT0(cpu_env); gen_helper_fcom_ST0_FT0(cpu_env); break; case 5: /* fxam */ gen_helper_fxam_ST0(cpu_env); break; default: goto unknown_op; } break; case 0x0d: /* grp d9/5 */ { switch(rm) { case 0: gen_helper_fpush(cpu_env); gen_helper_fld1_ST0(cpu_env); break; case 1: gen_helper_fpush(cpu_env); gen_helper_fldl2t_ST0(cpu_env); break; case 2: gen_helper_fpush(cpu_env); gen_helper_fldl2e_ST0(cpu_env); break; case 3: gen_helper_fpush(cpu_env); gen_helper_fldpi_ST0(cpu_env); break; case 4: gen_helper_fpush(cpu_env); gen_helper_fldlg2_ST0(cpu_env); break; case 5: gen_helper_fpush(cpu_env); gen_helper_fldln2_ST0(cpu_env); break; case 6: gen_helper_fpush(cpu_env); gen_helper_fldz_ST0(cpu_env); break; default: goto unknown_op; } } break; case 0x0e: /* grp d9/6 */ switch(rm) { case 0: /* f2xm1 */ gen_helper_f2xm1(cpu_env); break; case 1: /* fyl2x */ gen_helper_fyl2x(cpu_env); break; case 2: /* fptan */ gen_helper_fptan(cpu_env); break; case 3: /* fpatan */ gen_helper_fpatan(cpu_env); break; case 4: /* fxtract */ gen_helper_fxtract(cpu_env); break; case 5: /* fprem1 */ gen_helper_fprem1(cpu_env); break; case 6: /* fdecstp */ gen_helper_fdecstp(cpu_env); break; default: case 7: /* fincstp */ gen_helper_fincstp(cpu_env); break; } break; case 0x0f: /* grp d9/7 */ switch(rm) { case 0: /* fprem */ gen_helper_fprem(cpu_env); break; case 1: /* fyl2xp1 */ gen_helper_fyl2xp1(cpu_env); break; case 2: /* fsqrt */ gen_helper_fsqrt(cpu_env); break; case 3: /* fsincos */ gen_helper_fsincos(cpu_env); break; case 5: /* fscale */ gen_helper_fscale(cpu_env); break; case 4: /* frndint */ gen_helper_frndint(cpu_env); break; case 6: /* fsin */ gen_helper_fsin(cpu_env); break; default: case 7: /* fcos */ gen_helper_fcos(cpu_env); break; } break; case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */ case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */ case 0x30: case 0x31: case 0x34 ... 
0x37: /* fxxxp sti, st */ { int op1; op1 = op & 7; if (op >= 0x20) { gen_helper_fp_arith_STN_ST0(op1, opreg); if (op >= 0x30) gen_helper_fpop(cpu_env); } else { gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fp_arith_ST0_FT0(op1); } } break; case 0x02: /* fcom */ case 0x22: /* fcom2, undocumented op */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcom_ST0_FT0(cpu_env); break; case 0x03: /* fcomp */ case 0x23: /* fcomp3, undocumented op */ case 0x32: /* fcomp5, undocumented op */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); break; case 0x15: /* da/5 */ switch(rm) { case 1: /* fucompp */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); gen_helper_fucom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); gen_helper_fpop(cpu_env); break; default: goto unknown_op; } break; case 0x1c: switch(rm) { case 0: /* feni (287 only, just do nop here) */ break; case 1: /* fdisi (287 only, just do nop here) */ break; case 2: /* fclex */ gen_helper_fclex(cpu_env); break; case 3: /* fninit */ gen_helper_fninit(cpu_env); break; case 4: /* fsetpm (287 only, just do nop here) */ break; default: goto unknown_op; } break; case 0x1d: /* fucomi */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } gen_update_cc_op(s); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucomi_ST0_FT0(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x1e: /* fcomi */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } gen_update_cc_op(s); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcomi_ST0_FT0(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x28: /* ffree sti */ gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); break; case 0x2a: /* fst sti */ gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); break; case 0x2b: /* fstp sti */ case 0x0b: /* fstp1 sti, undocumented op */ case 0x3a: /* fstp8 sti, undocumented op */ case 0x3b: /* fstp9 sti, undocumented op */ gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); gen_helper_fpop(cpu_env); break; case 0x2c: /* fucom st(i) */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucom_ST0_FT0(cpu_env); break; case 0x2d: /* fucomp st(i) */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); break; case 0x33: /* de/3 */ switch(rm) { case 1: /* fcompp */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); gen_helper_fcom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); gen_helper_fpop(cpu_env); break; default: goto unknown_op; } break; case 0x38: /* ffreep sti, undocumented op */ gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fpop(cpu_env); break; case 0x3c: /* df/4 */ switch(rm) { case 0: gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); break; default: goto unknown_op; } break; case 0x3d: /* fucomip */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } gen_update_cc_op(s); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucomi_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x3e: /* fcomip */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } gen_update_cc_op(s); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcomi_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x10 ... 0x13: /* fcmovxx */ case 0x18 ... 
0x1b: { int op1; TCGLabel *l1; static const uint8_t fcmov_cc[8] = { (JCC_B << 1), (JCC_Z << 1), (JCC_BE << 1), (JCC_P << 1), }; if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); l1 = gen_new_label(); gen_jcc1_noeob(s, op1, l1); gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg)); gen_set_label(l1); } break; default: goto unknown_op; } } break; /************************/ /* string ops */ case 0xa4: /* movsS */ case 0xa5: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_movs(s, ot); } break; case 0xaa: /* stosS */ case 0xab: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_stos(s, ot); } break; case 0xac: /* lodsS */ case 0xad: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_lods(s, ot); } break; case 0xae: /* scasS */ case 0xaf: ot = mo_b_d(b, dflag); if (prefixes & PREFIX_REPNZ) { gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); } else if (prefixes & PREFIX_REPZ) { gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); } else { gen_scas(s, ot); } break; case 0xa6: /* cmpsS */ case 0xa7: ot = mo_b_d(b, dflag); if (prefixes & PREFIX_REPNZ) { gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); } else if (prefixes & PREFIX_REPZ) { gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); } else { gen_cmps(s, ot); } break; case 0x6c: /* insS */ case 0x6d: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_ins(s, ot); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } } break; case 0x6e: /* outsS */ case 0x6f: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes) | 4); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_outs(s, ot); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } } break; /************************/ /* port I/O */ case 0xe4: case 0xe5: ot = mo_b_d32(b, dflag); val = x86_ldub_code(env, s); tcg_gen_movi_tl(cpu_T0, val); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_start(); } tcg_gen_movi_i32(cpu_tmp2_i32, val); gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32); gen_op_mov_reg_v(ot, R_EAX, cpu_T1); gen_bpt_io(s, cpu_tmp2_i32, ot); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0xe6: case 0xe7: ot = mo_b_d32(b, dflag); val = x86_ldub_code(env, s); tcg_gen_movi_tl(cpu_T0, val); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); gen_op_mov_v_reg(ot, cpu_T1, R_EAX); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_start(); } tcg_gen_movi_i32(cpu_tmp2_i32, val); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); gen_bpt_io(s, cpu_tmp2_i32, ot); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0xec: 
case 0xed: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_start(); } tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32); gen_op_mov_reg_v(ot, R_EAX, cpu_T1); gen_bpt_io(s, cpu_tmp2_i32, ot); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0xee: case 0xef: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); gen_op_mov_v_reg(ot, cpu_T1, R_EAX); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_start(); } tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); gen_bpt_io(s, cpu_tmp2_i32, ot); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; /************************/ /* control */ case 0xc2: /* ret im */ val = x86_ldsw_code(env, s); ot = gen_pop_T0(s); gen_stack_update(s, val + (1 << ot)); /* Note that gen_pop_T0 uses a zero-extending load. */ gen_op_jmp_v(cpu_T0); gen_bnd_jmp(s); gen_jr(s, cpu_T0); break; case 0xc3: /* ret */ ot = gen_pop_T0(s); gen_pop_update(s, ot); /* Note that gen_pop_T0 uses a zero-extending load. */ gen_op_jmp_v(cpu_T0); gen_bnd_jmp(s); gen_jr(s, cpu_T0); break; case 0xca: /* lret im */ val = x86_ldsw_code(env, s); do_lret: if (s->pe && !s->vm86) { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1), tcg_const_i32(val)); } else { gen_stack_A0(s); /* pop offset */ gen_op_ld_v(s, dflag, cpu_T0, cpu_A0); /* NOTE: keeping EIP updated is not a problem in case of exception */ gen_op_jmp_v(cpu_T0); /* pop selector */ gen_add_A0_im(s, 1 << dflag); gen_op_ld_v(s, dflag, cpu_T0, cpu_A0); gen_op_movl_seg_T0_vm(R_CS); /* add stack offset */ gen_stack_update(s, val + (2 << dflag)); } gen_eob(s); break; case 0xcb: /* lret */ val = 0; goto do_lret; case 0xcf: /* iret */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET); if (!s->pe) { /* real mode */ gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); set_cc_op(s, CC_OP_EFLAGS); } else if (s->vm86) { if (s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); set_cc_op(s, CC_OP_EFLAGS); } } else { gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1), tcg_const_i32(s->pc - s->cs_base)); set_cc_op(s, CC_OP_EFLAGS); } gen_eob(s); break; case 0xe8: /* call im */ { if (dflag != MO_16) { tval = (int32_t)insn_get(env, s, MO_32); } else { tval = (int16_t)insn_get(env, s, MO_16); } next_eip = s->pc - s->cs_base; tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; } else if (!CODE64(s)) { tval &= 0xffffffff; } tcg_gen_movi_tl(cpu_T0, next_eip); gen_push_v(s, cpu_T0); gen_bnd_jmp(s); gen_jmp(s, tval); } break; case 0x9a: /* lcall im */ { unsigned int selector, offset; if (CODE64(s)) goto illegal_op; ot = dflag; offset = insn_get(env, s, ot); selector = insn_get(env, s, MO_16); tcg_gen_movi_tl(cpu_T0, selector); tcg_gen_movi_tl(cpu_T1, offset); } goto do_lcall; case 0xe9: /* jmp im */ if (dflag != MO_16) { tval = (int32_t)insn_get(env, s, MO_32); } else { tval = (int16_t)insn_get(env, s, MO_16); } tval += s->pc - s->cs_base; if (dflag == MO_16) { tval &= 0xffff; } else if (!CODE64(s)) { tval &= 0xffffffff; } gen_bnd_jmp(s); 
gen_jmp(s, tval); break; case 0xea: /* ljmp im */ { unsigned int selector, offset; if (CODE64(s)) goto illegal_op; ot = dflag; offset = insn_get(env, s, ot); selector = insn_get(env, s, MO_16); tcg_gen_movi_tl(cpu_T0, selector); tcg_gen_movi_tl(cpu_T1, offset); } goto do_ljmp; case 0xeb: /* jmp Jb */ tval = (int8_t)insn_get(env, s, MO_8); tval += s->pc - s->cs_base; if (dflag == MO_16) { tval &= 0xffff; } gen_jmp(s, tval); break; case 0x70 ... 0x7f: /* jcc Jb */ tval = (int8_t)insn_get(env, s, MO_8); goto do_jcc; case 0x180 ... 0x18f: /* jcc Jv */ if (dflag != MO_16) { tval = (int32_t)insn_get(env, s, MO_32); } else { tval = (int16_t)insn_get(env, s, MO_16); } do_jcc: next_eip = s->pc - s->cs_base; tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; } gen_bnd_jmp(s); gen_jcc(s, b, tval, next_eip); break; case 0x190 ... 0x19f: /* setcc Gv */ modrm = x86_ldub_code(env, s); gen_setcc1(s, b, cpu_T0); gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1); break; case 0x140 ... 0x14f: /* cmov Gv, Ev */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_cmovcc1(env, s, ot, b, modrm, reg); break; /************************/ /* flags */ case 0x9c: /* pushf */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_helper_read_eflags(cpu_T0, cpu_env); gen_push_v(s, cpu_T0); } break; case 0x9d: /* popf */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { ot = gen_pop_T0(s); if (s->cpl == 0) { if (dflag != MO_16) { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK))); } else { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff)); } } else { if (s->cpl <= s->iopl) { if (dflag != MO_16) { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK))); } else { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff)); } } else { if (dflag != MO_16) { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK))); } else { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff)); } } } gen_pop_update(s, ot); set_cc_op(s, CC_OP_EFLAGS); /* abort translation because TF/AC flag may change */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; case 0x9e: /* sahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; gen_op_mov_v_reg(MO_8, cpu_T0, R_AH); gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O); tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0); break; case 0x9f: /* lahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; gen_compute_eflags(s); /* Note: gen_compute_eflags() only gives the condition codes */ tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02); gen_op_mov_reg_v(MO_8, R_AH, cpu_T0); break; case 0xf5: /* cmc */ gen_compute_eflags(s); tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C); break; case 0xf8: /* clc */ gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C); break; case 0xf9: /* stc */ gen_compute_eflags(s); 
tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C); break; case 0xfc: /* cld */ tcg_gen_movi_i32(cpu_tmp2_i32, 1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); break; case 0xfd: /* std */ tcg_gen_movi_i32(cpu_tmp2_i32, -1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); break; /************************/ /* bit operations */ case 0x1ba: /* bt/bts/btr/btc Gv, im */ ot = dflag; modrm = x86_ldub_code(env, s); op = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { s->rip_offset = 1; gen_lea_modrm(env, s, modrm); if (!(s->prefix & PREFIX_LOCK)) { gen_op_ld_v(s, ot, cpu_T0, cpu_A0); } } else { gen_op_mov_v_reg(ot, cpu_T0, rm); } /* load shift */ val = x86_ldub_code(env, s); tcg_gen_movi_tl(cpu_T1, val); if (op < 4) goto unknown_op; op -= 4; goto bt_op; case 0x1a3: /* bt Gv, Ev */ op = 0; goto do_btx; case 0x1ab: /* bts */ op = 1; goto do_btx; case 0x1b3: /* btr */ op = 2; goto do_btx; case 0x1bb: /* btc */ op = 3; do_btx: ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(MO_32, cpu_T1, reg); if (mod != 3) { AddressParts a = gen_lea_modrm_0(env, s, modrm); /* specific case: we need to add a displacement */ gen_exts(ot, cpu_T1); tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot); tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot); tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0); gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override); if (!(s->prefix & PREFIX_LOCK)) { gen_op_ld_v(s, ot, cpu_T0, cpu_A0); } } else { gen_op_mov_v_reg(ot, cpu_T0, rm); } bt_op: tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1); tcg_gen_movi_tl(cpu_tmp0, 1); tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1); if (s->prefix & PREFIX_LOCK) { switch (op) { case 0: /* bt */ /* Needs no atomic ops; we surpressed the normal memory load for LOCK above so do it now. */ gen_op_ld_v(s, ot, cpu_T0, cpu_A0); break; case 1: /* bts */ tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0, s->mem_index, ot | MO_LE); break; case 2: /* btr */ tcg_gen_not_tl(cpu_tmp0, cpu_tmp0); tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0, s->mem_index, ot | MO_LE); break; default: case 3: /* btc */ tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0, s->mem_index, ot | MO_LE); break; } tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1); } else { tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1); switch (op) { case 0: /* bt */ /* Data already loaded; nothing to do. */ break; case 1: /* bts */ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0); break; case 2: /* btr */ tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0); break; default: case 3: /* btc */ tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0); break; } if (op != 0) { if (mod != 3) { gen_op_st_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_reg_v(ot, rm, cpu_T0); } } } /* Delay all CC updates until after the store above. Note that C is the result of the test, Z is unchanged, and the others are all undefined. */ switch (s->cc_op) { case CC_OP_MULB ... CC_OP_MULQ: case CC_OP_ADDB ... CC_OP_ADDQ: case CC_OP_ADCB ... CC_OP_ADCQ: case CC_OP_SUBB ... CC_OP_SUBQ: case CC_OP_SBBB ... CC_OP_SBBQ: case CC_OP_LOGICB ... CC_OP_LOGICQ: case CC_OP_INCB ... CC_OP_INCQ: case CC_OP_DECB ... CC_OP_DECQ: case CC_OP_SHLB ... CC_OP_SHLQ: case CC_OP_SARB ... CC_OP_SARQ: case CC_OP_BMILGB ... CC_OP_BMILGQ: /* Z was going to be computed from the non-zero status of CC_DST. We can get that same Z value (and the new C value) by leaving CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the same width. 
*/ tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4); set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB); break; default: /* Otherwise, generate EFLAGS and replace the C bit. */ gen_compute_eflags(s); tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4, ctz32(CC_C), 1); break; } break; case 0x1bc: /* bsf / tzcnt */ case 0x1bd: /* bsr / lzcnt */ ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_extu(ot, cpu_T0); /* Note that lzcnt and tzcnt are in different extensions. */ if ((prefixes & PREFIX_REPZ) && (b & 1 ? s->cpuid_ext3_features & CPUID_EXT3_ABM : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) { int size = 8 << ot; /* For lzcnt/tzcnt, C bit is defined related to the input. */ tcg_gen_mov_tl(cpu_cc_src, cpu_T0); if (b & 1) { /* For lzcnt, reduce the target_ulong result by the number of zeros that we expect to find at the top. */ tcg_gen_clzi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS); tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size); } else { /* For tzcnt, a zero input must return the operand size. */ tcg_gen_ctzi_tl(cpu_T0, cpu_T0, size); } /* For lzcnt/tzcnt, Z bit is defined related to the result. */ gen_op_update1_cc(); set_cc_op(s, CC_OP_BMILGB + ot); } else { /* For bsr/bsf, only the Z bit is defined and it is related to the input and not the result. */ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); set_cc_op(s, CC_OP_LOGICB + ot); /* ??? The manual says that the output is undefined when the input is zero, but real hardware leaves it unchanged, and real programs appear to depend on that. Accomplish this by passing the output as the value to return upon zero. */ if (b & 1) { /* For bsr, return the bit index of the first 1 bit, not the count of leading zeros. */ tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1); tcg_gen_clz_tl(cpu_T0, cpu_T0, cpu_T1); tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1); } else { tcg_gen_ctz_tl(cpu_T0, cpu_T0, cpu_regs[reg]); } } gen_op_mov_reg_v(ot, reg, cpu_T0); break; /************************/ /* bcd */ case 0x27: /* daa */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_daa(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x2f: /* das */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_das(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x37: /* aaa */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_aaa(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x3f: /* aas */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_aas(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0xd4: /* aam */ if (CODE64(s)) goto illegal_op; val = x86_ldub_code(env, s); if (val == 0) { gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base); } else { gen_helper_aam(cpu_env, tcg_const_i32(val)); set_cc_op(s, CC_OP_LOGICB); } break; case 0xd5: /* aad */ if (CODE64(s)) goto illegal_op; val = x86_ldub_code(env, s); gen_helper_aad(cpu_env, tcg_const_i32(val)); set_cc_op(s, CC_OP_LOGICB); break; /************************/ /* misc */ case 0x90: /* nop */ /* XXX: correct lock test for all insn */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } /* If REX_B is set, then this is xchg eax, r8d, not a nop. 
*/ if (REX_B(s)) { goto do_xchg_reg_eax; } if (prefixes & PREFIX_REPZ) { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start)); s->base.is_jmp = DISAS_NORETURN; } break; case 0x9b: /* fwait */ if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == (HF_MP_MASK | HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); } else { gen_helper_fwait(cpu_env); } break; case 0xcc: /* int3 */ gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base); break; case 0xcd: /* int N */ val = x86_ldub_code(env, s); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); } break; case 0xce: /* into */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start)); break; #ifdef WANT_ICEBP case 0xf1: /* icebp (undocumented, exits to external debugger) */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP); #if 1 gen_debug(s, pc_start - s->cs_base); #else /* start debug */ tb_flush(CPU(x86_env_get_cpu(env))); qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM); #endif break; #endif case 0xfa: /* cli */ if (!s->vm86) { if (s->cpl <= s->iopl) { gen_helper_cli(cpu_env); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } else { if (s->iopl == 3) { gen_helper_cli(cpu_env); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } break; case 0xfb: /* sti */ if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) { gen_helper_sti(cpu_env); /* interruptions are enabled only the first insn after sti */ gen_jmp_im(s->pc - s->cs_base); gen_eob_inhibit_irq(s, true); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } break; case 0x62: /* bound */ if (CODE64(s)) goto illegal_op; ot = dflag; modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_op_mov_v_reg(ot, cpu_T0, reg); gen_lea_modrm(env, s, modrm); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); if (ot == MO_16) { gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32); } else { gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32); } break; case 0x1c8 ... 
0x1cf: /* bswap reg */ reg = (b & 7) | REX_B(s); #ifdef TARGET_X86_64 if (dflag == MO_64) { gen_op_mov_v_reg(MO_64, cpu_T0, reg); tcg_gen_bswap64_i64(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_64, reg, cpu_T0); } else #endif { gen_op_mov_v_reg(MO_32, cpu_T0, reg); tcg_gen_ext32u_tl(cpu_T0, cpu_T0); tcg_gen_bswap32_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_32, reg, cpu_T0); } break; case 0xd6: /* salc */ if (CODE64(s)) goto illegal_op; gen_compute_eflags_c(s, cpu_T0); tcg_gen_neg_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0); break; case 0xe0: /* loopnz */ case 0xe1: /* loopz */ case 0xe2: /* loop */ case 0xe3: /* jecxz */ { TCGLabel *l1, *l2, *l3; tval = (int8_t)insn_get(env, s, MO_8); next_eip = s->pc - s->cs_base; tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; } l1 = gen_new_label(); l2 = gen_new_label(); l3 = gen_new_label(); b &= 3; switch(b) { case 0: /* loopnz */ case 1: /* loopz */ gen_op_add_reg_im(s->aflag, R_ECX, -1); gen_op_jz_ecx(s->aflag, l3); gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1); break; case 2: /* loop */ gen_op_add_reg_im(s->aflag, R_ECX, -1); gen_op_jnz_ecx(s->aflag, l1); break; default: case 3: /* jcxz */ gen_op_jz_ecx(s->aflag, l1); break; } gen_set_label(l3); gen_jmp_im(next_eip); tcg_gen_br(l2); gen_set_label(l1); gen_jmp_im(tval); gen_set_label(l2); gen_eob(s); } break; case 0x130: /* wrmsr */ case 0x132: /* rdmsr */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); if (b & 2) { gen_helper_rdmsr(cpu_env); } else { gen_helper_wrmsr(cpu_env); } } break; case 0x131: /* rdtsc */ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_start(); } gen_helper_rdtsc(cpu_env); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0x133: /* rdpmc */ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_rdpmc(cpu_env); break; case 0x134: /* sysenter */ /* For Intel SYSENTER is valid on 64-bit */ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_sysenter(cpu_env); gen_eob(s); } break; case 0x135: /* sysexit */ /* For Intel SYSEXIT is valid on 64-bit */ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1)); gen_eob(s); } break; #ifdef TARGET_X86_64 case 0x105: /* syscall */ /* XXX: is it usable in real mode ? */ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start)); /* TF handling for the syscall insn is different. The TF bit is checked after the syscall insn completes. This allows #DB to not be generated after one has entered CPL0 if TF is set in FMASK. */ gen_eob_worker(s, false, true); break; case 0x107: /* sysret */ if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1)); /* condition codes are modified only in long mode */ if (s->lma) { set_cc_op(s, CC_OP_EFLAGS); } /* TF handling for the sysret insn is different. The TF bit is checked after the sysret insn completes. This allows #DB to be generated \"as if\" the syscall insn in userspace has just completed. 
*/ gen_eob_worker(s, false, true); } break; #endif case 0x1a2: /* cpuid */ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_cpuid(cpu_env); break; case 0xf4: /* hlt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start)); s->base.is_jmp = DISAS_NORETURN; } break; case 0x100: modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* sldt */ if (!s->pe || s->vm86) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, ldt.selector)); ot = mod == 3 ? dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 2: /* lldt */ if (!s->pe || s->vm86) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_lldt(cpu_env, cpu_tmp2_i32); } break; case 1: /* str */ if (!s->pe || s->vm86) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, tr.selector)); ot = mod == 3 ? dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 3: /* ltr */ if (!s->pe || s->vm86) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_ltr(cpu_env, cpu_tmp2_i32); } break; case 4: /* verr */ case 5: /* verw */ if (!s->pe || s->vm86) goto illegal_op; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_update_cc_op(s); if (op == 4) { gen_helper_verr(cpu_env, cpu_T0); } else { gen_helper_verw(cpu_env, cpu_T0); } set_cc_op(s, CC_OP_EFLAGS); break; default: goto unknown_op; } break; case 0x101: modrm = x86_ldub_code(env, s); switch (modrm) { CASE_MODRM_MEM_OP(0): /* sgdt */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.limit)); gen_op_st_v(s, MO_16, cpu_T0, cpu_A0); gen_add_A0_im(s, 2); tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base)); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); } gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); break; case 0xc8: /* monitor */ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]); gen_extu(s->aflag, cpu_A0); gen_add_A0_ds_seg(s); gen_helper_monitor(cpu_env, cpu_A0); break; case 0xc9: /* mwait */ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start)); gen_eob(s); break; case 0xca: /* clac */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || s->cpl != 0) { goto illegal_op; } gen_helper_clac(cpu_env); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; case 0xcb: /* stac */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || s->cpl != 0) { goto illegal_op; } gen_helper_stac(cpu_env); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(1): /* sidt */ gen_svm_check_intercept(s, 
pc_start, SVM_EXIT_IDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit)); gen_op_st_v(s, MO_16, cpu_T0, cpu_A0); gen_add_A0_im(s, 2); tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base)); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); } gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); break; case 0xd0: /* xgetbv */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32); tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64); break; case 0xd1: /* xsetbv */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64); /* End TB because translation flags may change. */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; case 0xd8: /* VMRUN */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1), tcg_const_i32(s->pc - pc_start)); tcg_gen_exit_tb(0); s->base.is_jmp = DISAS_NORETURN; break; case 0xd9: /* VMMCALL */ if (!(s->flags & HF_SVME_MASK)) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_vmmcall(cpu_env); break; case 0xda: /* VMLOAD */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1)); break; case 0xdb: /* VMSAVE */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1)); break; case 0xdc: /* STGI */ if ((!(s->flags & HF_SVME_MASK) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_stgi(cpu_env); break; case 0xdd: /* CLGI */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_clgi(cpu_env); break; case 0xde: /* SKINIT */ if ((!(s->flags & HF_SVME_MASK) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || !s->pe) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_skinit(cpu_env); break; case 0xdf: /* INVLPGA */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1)); break; CASE_MODRM_MEM_OP(2): /* lgdt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, 
pc_start - s->cs_base); break; } gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE); gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); } tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base)); tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit)); break; CASE_MODRM_MEM_OP(3): /* lidt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE); gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); } tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base)); tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit)); break; CASE_MODRM_OP(4): /* smsw */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0); tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0])); if (CODE64(s)) { mod = (modrm >> 6) & 3; ot = (mod != 3 ? MO_16 : s->dflag); } else { ot = MO_16; } gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 0xee: /* rdpkru */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32); tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64); break; case 0xef: /* wrpkru */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64); break; CASE_MODRM_OP(6): /* lmsw */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_helper_lmsw(cpu_env, cpu_T0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(7): /* invlpg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_lea_modrm(env, s, modrm); gen_helper_invlpg(cpu_env, cpu_A0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; case 0xf8: /* swapgs */ #ifdef TARGET_X86_64 if (CODE64(s)) { if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]); tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env, offsetof(CPUX86State, kernelgsbase)); tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, kernelgsbase)); } break; } #endif goto illegal_op; case 0xf9: /* rdtscp */ if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_start(); } gen_helper_rdtscp(cpu_env); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; default: goto unknown_op; } break; case 0x108: /* invd */ case 0x109: /* wbinvd */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, (b & 2) ? 
SVM_EXIT_INVD : SVM_EXIT_WBINVD); /* nothing to do */ } break; case 0x63: /* arpl or movslS (x86_64) */ #ifdef TARGET_X86_64 if (CODE64(s)) { int d_ot; /* d_ot is the size of destination */ d_ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { gen_op_mov_v_reg(MO_32, cpu_T0, rm); /* sign extend */ if (d_ot == MO_64) { tcg_gen_ext32s_tl(cpu_T0, cpu_T0); } gen_op_mov_reg_v(d_ot, reg, cpu_T0); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0); gen_op_mov_reg_v(d_ot, reg, cpu_T0); } } else #endif { TCGLabel *label1; TCGv t0, t1, t2, a0; if (!s->pe || s->vm86) goto illegal_op; t0 = tcg_temp_local_new(); t1 = tcg_temp_local_new(); t2 = tcg_temp_local_new(); ot = MO_16; modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = modrm & 7; if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, t0, cpu_A0); a0 = tcg_temp_local_new(); tcg_gen_mov_tl(a0, cpu_A0); } else { gen_op_mov_v_reg(ot, t0, rm); TCGV_UNUSED(a0); } gen_op_mov_v_reg(ot, t1, reg); tcg_gen_andi_tl(cpu_tmp0, t0, 3); tcg_gen_andi_tl(t1, t1, 3); tcg_gen_movi_tl(t2, 0); label1 = gen_new_label(); tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1); tcg_gen_andi_tl(t0, t0, ~3); tcg_gen_or_tl(t0, t0, t1); tcg_gen_movi_tl(t2, CC_Z); gen_set_label(label1); if (mod != 3) { gen_op_st_v(s, ot, t0, a0); tcg_temp_free(a0); } else { gen_op_mov_reg_v(ot, rm, t0); } gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2); tcg_temp_free(t0); tcg_temp_free(t1); tcg_temp_free(t2); } break; case 0x102: /* lar */ case 0x103: /* lsl */ { TCGLabel *label1; TCGv t0; if (!s->pe || s->vm86) goto illegal_op; ot = dflag != MO_16 ? 
MO_32 : MO_16; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); t0 = tcg_temp_local_new(); gen_update_cc_op(s); if (b == 0x102) { gen_helper_lar(t0, cpu_env, cpu_T0); } else { gen_helper_lsl(t0, cpu_env, cpu_T0); } tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z); label1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1); gen_op_mov_reg_v(ot, reg, t0); gen_set_label(label1); set_cc_op(s, CC_OP_EFLAGS); tcg_temp_free(t0); } break; case 0x118: modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* prefetchnta */ case 1: /* prefetchnt0 */ case 2: /* prefetchnt0 */ case 3: /* prefetchnt0 */ if (mod == 3) goto illegal_op; gen_nop_modrm(env, s, modrm); /* nothing more to do */ break; default: /* nop (multi byte) */ gen_nop_modrm(env, s, modrm); break; } break; case 0x11a: modrm = x86_ldub_code(env, s); if (s->flags & HF_MPX_EN_MASK) { mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; if (prefixes & PREFIX_REPZ) { /* bndcl */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]); } else if (prefixes & PREFIX_REPNZ) { /* bndcu */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } TCGv_i64 notu = tcg_temp_new_i64(); tcg_gen_not_i64(notu, cpu_bndu[reg]); gen_bndck(env, s, modrm, TCG_COND_GTU, notu); tcg_temp_free_i64(notu); } else if (prefixes & PREFIX_DATA) { /* bndmov -- from reg/mem */ if (reg >= 4 || s->aflag == MO_16) { goto illegal_op; } if (mod == 3) { int reg2 = (modrm & 7) | REX_B(s); if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { goto illegal_op; } if (s->flags & HF_MPX_IU_MASK) { tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]); tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]); } } else { gen_lea_modrm(env, s, modrm); if (CODE64(s)) { tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0, s->mem_index, MO_LEQ); tcg_gen_addi_tl(cpu_A0, cpu_A0, 8); tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0, s->mem_index, MO_LEQ); } else { tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0, s->mem_index, MO_LEUL); tcg_gen_addi_tl(cpu_A0, cpu_A0, 4); tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0, s->mem_index, MO_LEUL); } /* bnd registers are now in-use */ gen_set_hflag(s, HF_MPX_IU_MASK); } } else if (mod != 3) { /* bndldx */ AddressParts a = gen_lea_modrm_0(env, s, modrm); if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16 || a.base < -1) { goto illegal_op; } if (a.base >= 0) { tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp); } else { tcg_gen_movi_tl(cpu_A0, 0); } gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override); if (a.index >= 0) { tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]); } else { tcg_gen_movi_tl(cpu_T0, 0); } if (CODE64(s)) { gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0); tcg_gen_ld_i64(cpu_bndu[reg], cpu_env, offsetof(CPUX86State, mmx_t0.MMX_Q(0))); } else { gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0); tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]); tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32); } gen_set_hflag(s, HF_MPX_IU_MASK); } } gen_nop_modrm(env, s, modrm); break; case 0x11b: modrm = x86_ldub_code(env, s); if (s->flags & HF_MPX_EN_MASK) { mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; if (mod != 3 && (prefixes & PREFIX_REPZ)) { /* bndmk */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } AddressParts a = gen_lea_modrm_0(env, s, modrm); if (a.base >= 0) { 
tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]); if (!CODE64(s)) { tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]); } } else if (a.base == -1) { /* no base register has lower bound of 0 */ tcg_gen_movi_i64(cpu_bndl[reg], 0); } else { /* rip-relative generates #ud */ goto illegal_op; } tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a)); if (!CODE64(s)) { tcg_gen_ext32u_tl(cpu_A0, cpu_A0); } tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0); /* bnd registers are now in-use */ gen_set_hflag(s, HF_MPX_IU_MASK); break; } else if (prefixes & PREFIX_REPNZ) { /* bndcn */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]); } else if (prefixes & PREFIX_DATA) { /* bndmov -- to reg/mem */ if (reg >= 4 || s->aflag == MO_16) { goto illegal_op; } if (mod == 3) { int reg2 = (modrm & 7) | REX_B(s); if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { goto illegal_op; } if (s->flags & HF_MPX_IU_MASK) { tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]); tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]); } } else { gen_lea_modrm(env, s, modrm); if (CODE64(s)) { tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0, s->mem_index, MO_LEQ); tcg_gen_addi_tl(cpu_A0, cpu_A0, 8); tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0, s->mem_index, MO_LEQ); } else { tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0, s->mem_index, MO_LEUL); tcg_gen_addi_tl(cpu_A0, cpu_A0, 4); tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0, s->mem_index, MO_LEUL); } } } else if (mod != 3) { /* bndstx */ AddressParts a = gen_lea_modrm_0(env, s, modrm); if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16 || a.base < -1) { goto illegal_op; } if (a.base >= 0) { tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp); } else { tcg_gen_movi_tl(cpu_A0, 0); } gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override); if (a.index >= 0) { tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]); } else { tcg_gen_movi_tl(cpu_T0, 0); } if (CODE64(s)) { gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0, cpu_bndl[reg], cpu_bndu[reg]); } else { gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0, cpu_bndl[reg], cpu_bndu[reg]); } } } gen_nop_modrm(env, s, modrm); break; case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */ modrm = x86_ldub_code(env, s); gen_nop_modrm(env, s, modrm); break; case 0x120: /* mov reg, crN */ case 0x122: /* mov crN, reg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { modrm = x86_ldub_code(env, s); /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). * AMD documentation (24594.pdf) and testing of * intel 386 and 486 processors all show that the mod bits * are assumed to be 1's, regardless of actual values. 
*/ rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (CODE64(s)) ot = MO_64; else ot = MO_32; if ((prefixes & PREFIX_LOCK) && (reg == 0) && (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) { reg = 8; } switch(reg) { case 0: case 2: case 3: case 4: case 8: gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); if (b & 2) { if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_start(); } gen_op_mov_v_reg(ot, cpu_T0, rm); gen_helper_write_crN(cpu_env, tcg_const_i32(reg), cpu_T0); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_end(); } gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } else { if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_start(); } gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg)); gen_op_mov_reg_v(ot, rm, cpu_T0); if (s->base.tb->cflags & CF_USE_ICOUNT) { gen_io_end(); } } break; default: goto unknown_op; } } break; case 0x121: /* mov reg, drN */ case 0x123: /* mov drN, reg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { modrm = x86_ldub_code(env, s); /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). * AMD documentation (24594.pdf) and testing of * intel 386 and 486 processors all show that the mod bits * are assumed to be 1's, regardless of actual values. */ rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (CODE64(s)) ot = MO_64; else ot = MO_32; if (reg >= 8) { goto illegal_op; } if (b & 2) { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); gen_op_mov_v_reg(ot, cpu_T0, rm); tcg_gen_movi_i32(cpu_tmp2_i32, reg); gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); tcg_gen_movi_i32(cpu_tmp2_i32, reg); gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32); gen_op_mov_reg_v(ot, rm, cpu_T0); } } break; case 0x106: /* clts */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_helper_clts(cpu_env); /* abort block because static cpu state changed */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */ case 0x1c3: /* MOVNTI reg, mem */ if (!(s->cpuid_features & CPUID_SSE2)) goto illegal_op; ot = mo_64_32(dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; reg = ((modrm >> 3) & 7) | rex_r; /* generate a generic store */ gen_ldst_modrm(env, s, modrm, ot, reg, 1); break; case 0x1ae: modrm = x86_ldub_code(env, s); switch (modrm) { CASE_MODRM_MEM_OP(0): /* fxsave */ if (!(s->cpuid_features & CPUID_FXSR) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm); gen_helper_fxsave(cpu_env, cpu_A0); break; CASE_MODRM_MEM_OP(1): /* fxrstor */ if (!(s->cpuid_features & CPUID_FXSR) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm); gen_helper_fxrstor(cpu_env, cpu_A0); break; CASE_MODRM_MEM_OP(2): /* ldmxcsr */ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) { goto illegal_op; } if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm); tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32); break; 
CASE_MODRM_MEM_OP(3): /* stmxcsr */ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) { goto illegal_op; } if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr)); gen_op_st_v(s, MO_32, cpu_T0, cpu_A0); break; CASE_MODRM_MEM_OP(4): /* xsave */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (prefixes & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } gen_lea_modrm(env, s, modrm); tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64); break; CASE_MODRM_MEM_OP(5): /* xrstor */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (prefixes & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } gen_lea_modrm(env, s, modrm); tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64); /* XRSTOR is how MPX is enabled, which changes how we translate. Thus we need to end the TB. */ gen_update_cc_op(s); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } if (prefixes & PREFIX_DATA) { /* clwb */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) { goto illegal_op; } gen_nop_modrm(env, s, modrm); } else { /* xsaveopt */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0 || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } gen_lea_modrm(env, s, modrm); tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64); } break; CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } if (prefixes & PREFIX_DATA) { /* clflushopt */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) { goto illegal_op; } } else { /* clflush */ if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) || !(s->cpuid_features & CPUID_CLFLUSH)) { goto illegal_op; } } gen_nop_modrm(env, s, modrm); break; case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */ case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */ case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */ case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */ if (CODE64(s) && (prefixes & PREFIX_REPZ) && !(prefixes & PREFIX_LOCK) && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) { TCGv base, treg, src, dst; /* Preserve hflags bits by testing CR4 at runtime. */ tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK); gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32); base = cpu_seg_base[modrm & 8 ? R_GS : R_FS]; treg = cpu_regs[(modrm & 7) | REX_B(s)]; if (modrm & 0x10) { /* wr*base */ dst = base, src = treg; } else { /* rd*base */ dst = treg, src = base; } if (s->dflag == MO_32) { tcg_gen_ext32u_tl(dst, src); } else { tcg_gen_mov_tl(dst, src); } break; } goto unknown_op; case 0xf8: /* sfence / pcommit */ if (prefixes & PREFIX_DATA) { /* pcommit */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } break; } /* fallthru */ case 0xf9 ... 0xff: /* sfence */ if (!(s->cpuid_features & CPUID_SSE) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); break; case 0xe8 ... 
0xef: /* lfence */ if (!(s->cpuid_features & CPUID_SSE) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC); break; case 0xf0 ... 0xf7: /* mfence */ if (!(s->cpuid_features & CPUID_SSE2) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); break; default: goto unknown_op; } break; case 0x10d: /* 3DNow! prefetch(w) */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_nop_modrm(env, s, modrm); break; case 0x1aa: /* rsm */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM); if (!(s->flags & HF_SMM_MASK)) goto illegal_op; gen_update_cc_op(s); gen_jmp_im(s->pc - s->cs_base); gen_helper_rsm(cpu_env); gen_eob(s); break; case 0x1b8: /* SSE4.2 popcnt */ if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) != PREFIX_REPZ) goto illegal_op; if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) goto illegal_op; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; if (s->prefix & PREFIX_DATA) { ot = MO_16; } else { ot = mo_64_32(dflag); } gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_extu(ot, cpu_T0); tcg_gen_mov_tl(cpu_cc_src, cpu_T0); tcg_gen_ctpop_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(ot, reg, cpu_T0); set_cc_op(s, CC_OP_POPCNT); break; case 0x10e ... 0x10f: /* 3DNow! instructions, ignore prefixes */ s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); case 0x110 ... 0x117: case 0x128 ... 0x12f: case 0x138 ... 0x13a: case 0x150 ... 0x179: case 0x17c ... 0x17f: case 0x1c2: case 0x1c4 ... 0x1c6: case 0x1d0 ... 0x1fe: gen_sse(env, s, b, pc_start, rex_r); break; default: goto unknown_op; } return s->pc; illegal_op: gen_illegal_opcode(s); return s->pc; unknown_op: gen_unknown_opcode(env, s); return s->pc; }"} {"target": 1, "idx": 572, "func": "static int iscsi_open(BlockDriverState *bs, const char *filename, int flags) { IscsiLun *iscsilun = bs->opaque; struct iscsi_context *iscsi = NULL; struct iscsi_url *iscsi_url = NULL; struct IscsiTask task; char *initiator_name = NULL; int ret; if ((BDRV_SECTOR_SIZE % 512) != 0) { error_report(\"iSCSI: Invalid BDRV_SECTOR_SIZE. 
\" \"BDRV_SECTOR_SIZE(%lld) is not a multiple \" \"of 512\", BDRV_SECTOR_SIZE); return -EINVAL; } iscsi_url = iscsi_parse_full_url(iscsi, filename); if (iscsi_url == NULL) { error_report(\"Failed to parse URL : %s %s\", filename, iscsi_get_error(iscsi)); ret = -EINVAL; goto failed; } memset(iscsilun, 0, sizeof(IscsiLun)); initiator_name = parse_initiator_name(iscsi_url->target); iscsi = iscsi_create_context(initiator_name); if (iscsi == NULL) { error_report(\"iSCSI: Failed to create iSCSI context.\"); ret = -ENOMEM; goto failed; } if (iscsi_set_targetname(iscsi, iscsi_url->target)) { error_report(\"iSCSI: Failed to set target name.\"); ret = -EINVAL; goto failed; } if (iscsi_url->user != NULL) { ret = iscsi_set_initiator_username_pwd(iscsi, iscsi_url->user, iscsi_url->passwd); if (ret != 0) { error_report(\"Failed to set initiator username and password\"); ret = -EINVAL; goto failed; } } /* check if we got CHAP username/password via the options */ if (parse_chap(iscsi, iscsi_url->target) != 0) { error_report(\"iSCSI: Failed to set CHAP user/password\"); ret = -EINVAL; goto failed; } if (iscsi_set_session_type(iscsi, ISCSI_SESSION_NORMAL) != 0) { error_report(\"iSCSI: Failed to set session type to normal.\"); ret = -EINVAL; goto failed; } iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE_CRC32C); /* check if we got HEADER_DIGEST via the options */ parse_header_digest(iscsi, iscsi_url->target); task.iscsilun = iscsilun; task.status = 0; task.complete = 0; task.bs = bs; iscsilun->iscsi = iscsi; iscsilun->lun = iscsi_url->lun; if (iscsi_full_connect_async(iscsi, iscsi_url->portal, iscsi_url->lun, iscsi_connect_cb, &task) != 0) { error_report(\"iSCSI: Failed to start async connect.\"); ret = -EINVAL; goto failed; } while (!task.complete) { iscsi_set_events(iscsilun); qemu_aio_wait(); } if (task.status != 0) { error_report(\"iSCSI: Failed to connect to LUN : %s\", iscsi_get_error(iscsi)); ret = -EINVAL; goto failed; } if (iscsi_url != NULL) { iscsi_destroy_url(iscsi_url); } /* Medium changer or tape. We dont have any emulation for this so this must * be sg ioctl compatible. We force it to be sg, otherwise qemu will try * to read from the device to guess the image format. 
*/ if (iscsilun->type == TYPE_MEDIUM_CHANGER || iscsilun->type == TYPE_TAPE) { bs->sg = 1; } return 0; failed: if (initiator_name != NULL) { g_free(initiator_name); } if (iscsi_url != NULL) { iscsi_destroy_url(iscsi_url); } if (iscsi != NULL) { iscsi_destroy_context(iscsi); } memset(iscsilun, 0, sizeof(IscsiLun)); return ret; }"} {"target": 1, "idx": 573, "func": "static void vmxnet3_rx_need_csum_calculate(struct VmxnetRxPkt *pkt, const void *pkt_data, size_t pkt_len) { struct virtio_net_hdr *vhdr; bool isip4, isip6, istcp, isudp; uint8_t *data; int len; if (!vmxnet_rx_pkt_has_virt_hdr(pkt)) { return; } vhdr = vmxnet_rx_pkt_get_vhdr(pkt); if (!VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM)) { return; } vmxnet_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); if (!(isip4 || isip6) || !(istcp || isudp)) { return; } vmxnet3_dump_virt_hdr(vhdr); /* Validate packet len: csum_start + scum_offset + length of csum field */ if (pkt_len < (vhdr->csum_start + vhdr->csum_offset + 2)) { VMW_PKPRN(\"packet len:%d < csum_start(%d) + csum_offset(%d) + 2, \" \"cannot calculate checksum\", len, vhdr->csum_start, vhdr->csum_offset); return; } data = (uint8_t *)pkt_data + vhdr->csum_start; len = pkt_len - vhdr->csum_start; /* Put the checksum obtained into the packet */ stw_be_p(data + vhdr->csum_offset, net_raw_checksum(data, len)); vhdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM; vhdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID; }"} {"target": 1, "idx": 574, "func": "CommandInfoList *qmp_query_commands(Error **errp) { CommandInfoList *list = NULL; qmp_for_each_command(&qmp_commands, query_commands_cb, &list); return list; }"} {"target": 1, "idx": 585, "func": "static int decode_blocks(ALSDecContext *ctx, unsigned int ra_frame, unsigned int c, const unsigned int *div_blocks, unsigned int *js_blocks) { ALSSpecificConfig *sconf = &ctx->sconf; unsigned int offset = 0; unsigned int b; ALSBlockData bd[2] = { { 0 } }; bd[0].ra_block = ra_frame; bd[0].const_block = ctx->const_block; bd[0].shift_lsbs = ctx->shift_lsbs; bd[0].opt_order = ctx->opt_order; bd[0].store_prev_samples = ctx->store_prev_samples; bd[0].use_ltp = ctx->use_ltp; bd[0].ltp_lag = ctx->ltp_lag; bd[0].ltp_gain = ctx->ltp_gain[0]; bd[0].quant_cof = ctx->quant_cof[0]; bd[0].lpc_cof = ctx->lpc_cof[0]; bd[0].prev_raw_samples = ctx->prev_raw_samples; bd[0].js_blocks = *js_blocks; bd[1].ra_block = ra_frame; bd[1].const_block = ctx->const_block; bd[1].shift_lsbs = ctx->shift_lsbs; bd[1].opt_order = ctx->opt_order; bd[1].store_prev_samples = ctx->store_prev_samples; bd[1].use_ltp = ctx->use_ltp; bd[1].ltp_lag = ctx->ltp_lag; bd[1].ltp_gain = ctx->ltp_gain[0]; bd[1].quant_cof = ctx->quant_cof[0]; bd[1].lpc_cof = ctx->lpc_cof[0]; bd[1].prev_raw_samples = ctx->prev_raw_samples; bd[1].js_blocks = *(js_blocks + 1); // decode all blocks for (b = 0; b < ctx->num_blocks; b++) { unsigned int s; bd[0].block_length = div_blocks[b]; bd[1].block_length = div_blocks[b]; bd[0].raw_samples = ctx->raw_samples[c ] + offset; bd[1].raw_samples = ctx->raw_samples[c + 1] + offset; bd[0].raw_other = bd[1].raw_samples; bd[1].raw_other = bd[0].raw_samples; if(read_decode_block(ctx, &bd[0]) || read_decode_block(ctx, &bd[1])) { // damaged block, write zero for the rest of the frame zero_remaining(b, ctx->num_blocks, div_blocks, bd[0].raw_samples); zero_remaining(b, ctx->num_blocks, div_blocks, bd[1].raw_samples); return -1; } // reconstruct joint-stereo blocks if (bd[0].js_blocks) { if (bd[1].js_blocks) av_log(ctx->avctx, AV_LOG_WARNING, \"Invalid channel pair!\\n\"); 
for (s = 0; s < div_blocks[b]; s++) bd[0].raw_samples[s] = bd[1].raw_samples[s] - bd[0].raw_samples[s]; } else if (bd[1].js_blocks) { for (s = 0; s < div_blocks[b]; s++) bd[1].raw_samples[s] = bd[1].raw_samples[s] + bd[0].raw_samples[s]; } offset += div_blocks[b]; bd[0].ra_block = 0; bd[1].ra_block = 0; } // store carryover raw samples, // the others channel raw samples are stored by the calling function. memmove(ctx->raw_samples[c] - sconf->max_order, ctx->raw_samples[c] - sconf->max_order + sconf->frame_length, sizeof(*ctx->raw_samples[c]) * sconf->max_order); return 0; }"} {"target": 0, "idx": 599, "func": "static int write_f(int argc, char **argv) { struct timeval t1, t2; int Cflag = 0, pflag = 0, qflag = 0, bflag = 0; int c, cnt; char *buf; int64_t offset; int count; /* Some compilers get confused and warn if this is not initialized. */ int total = 0; int pattern = 0xcd; while ((c = getopt(argc, argv, \"bCpP:q\")) != EOF) { switch (c) { case 'b': bflag = 1; break; case 'C': Cflag = 1; break; case 'p': pflag = 1; break; case 'P': pattern = parse_pattern(optarg); if (pattern < 0) { return 0; } break; case 'q': qflag = 1; break; default: return command_usage(&write_cmd); } } if (optind != argc - 2) { return command_usage(&write_cmd); } if (bflag && pflag) { printf(\"-b and -p cannot be specified at the same time\\n\"); return 0; } offset = cvtnum(argv[optind]); if (offset < 0) { printf(\"non-numeric length argument -- %s\\n\", argv[optind]); return 0; } optind++; count = cvtnum(argv[optind]); if (count < 0) { printf(\"non-numeric length argument -- %s\\n\", argv[optind]); return 0; } if (!pflag) { if (offset & 0x1ff) { printf(\"offset %\" PRId64 \" is not sector aligned\\n\", offset); return 0; } if (count & 0x1ff) { printf(\"count %d is not sector aligned\\n\", count); return 0; } } buf = qemu_io_alloc(count, pattern); gettimeofday(&t1, NULL); if (pflag) { cnt = do_pwrite(buf, offset, count, &total); } else if (bflag) { cnt = do_save_vmstate(buf, offset, count, &total); } else { cnt = do_write(buf, offset, count, &total); } gettimeofday(&t2, NULL); if (cnt < 0) { printf(\"write failed: %s\\n\", strerror(-cnt)); goto out; } if (qflag) { goto out; } /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, t1); print_report(\"wrote\", &t2, offset, count, total, cnt, Cflag); out: qemu_io_free(buf); return 0; }"} {"target": 1, "idx": 657, "func": "static int libopenjpeg_copy_packed12(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image) { int compno; int x, y; int *image_line; int frame_index; const int numcomps = image->numcomps; uint16_t *frame_ptr = (uint16_t *)frame->data[0]; for (compno = 0; compno < numcomps; ++compno) { if (image->comps[compno].w > frame->linesize[0] / numcomps) { av_log(avctx, AV_LOG_ERROR, \"Error: frame's linesize is too small for the image\\n\"); return 0; } } for (compno = 0; compno < numcomps; ++compno) { for (y = 0; y < avctx->height; ++y) { image_line = image->comps[compno].data + y * image->comps[compno].w; frame_index = y * (frame->linesize[0] / 2) + compno; for (x = 0; x < avctx->width; ++x) { image_line[x] = frame_ptr[frame_index] >> 4; frame_index += numcomps; } for (; x < image->comps[compno].w; ++x) { image_line[x] = image_line[x - 1]; } } for (; y < image->comps[compno].h; ++y) { image_line = image->comps[compno].data + y * image->comps[compno].w; for (x = 0; x < image->comps[compno].w; ++x) { image_line[x] = image_line[x - image->comps[compno].w]; } } } return 1; }"} {"target": 0, "idx": 666, "func": "static void 
build_feed_streams(void) { FFStream *stream, *feed; int i; /* gather all streams */ for(stream = first_stream; stream != NULL; stream = stream->next) { feed = stream->feed; if (feed) { if (!stream->is_feed) { /* we handle a stream coming from a feed */ for(i=0;i<stream->nb_streams;i++) stream->feed_streams[i] = add_av_stream(feed, stream->streams[i]); } } } /* gather all streams */ for(stream = first_stream; stream != NULL; stream = stream->next) { feed = stream->feed; if (feed) { if (stream->is_feed) { for(i=0;i<stream->nb_streams;i++) stream->feed_streams[i] = i; } } } /* create feed files if needed */ for(feed = first_feed; feed != NULL; feed = feed->next_feed) { int fd; if (url_exist(feed->feed_filename)) { /* See if it matches */ AVFormatContext *s; int matches = 0; if (av_open_input_file(&s, feed->feed_filename, NULL, FFM_PACKET_SIZE, NULL) >= 0) { /* Now see if it matches */ if (s->nb_streams == feed->nb_streams) { matches = 1; for(i=0;i<s->nb_streams;i++) { AVStream *sf, *ss; sf = feed->streams[i]; ss = s->streams[i]; if (sf->index != ss->index || sf->id != ss->id) { http_log(\"Index & Id do not match for stream %d (%s)\\n\", i, feed->feed_filename); matches = 0; } else { AVCodecContext *ccf, *ccs; ccf = sf->codec; ccs = ss->codec; #define CHECK_CODEC(x) (ccf->x != ccs->x) if (CHECK_CODEC(codec_id) || CHECK_CODEC(codec_type)) { http_log(\"Codecs do not match for stream %d\\n\", i); matches = 0; } else if (CHECK_CODEC(bit_rate) || CHECK_CODEC(flags)) { http_log(\"Codec bitrates do not match for stream %d\\n\", i); matches = 0; } else if (ccf->codec_type == AVMEDIA_TYPE_VIDEO) { if (CHECK_CODEC(time_base.den) || CHECK_CODEC(time_base.num) || CHECK_CODEC(width) || CHECK_CODEC(height)) { http_log(\"Codec width, height and framerate do not match for stream %d\\n\", i); matches = 0; } } else if (ccf->codec_type == AVMEDIA_TYPE_AUDIO) { if (CHECK_CODEC(sample_rate) || CHECK_CODEC(channels) || CHECK_CODEC(frame_size)) { http_log(\"Codec sample_rate, channels, frame_size do not match for stream %d\\n\", i); matches = 0; } } else { http_log(\"Unknown codec type\\n\"); matches = 0; } } if (!matches) break; } } else http_log(\"Deleting feed file '%s' as stream counts differ (%d != %d)\\n\", feed->feed_filename, s->nb_streams, feed->nb_streams); av_close_input_file(s); } else http_log(\"Deleting feed file '%s' as it appears to be corrupt\\n\", feed->feed_filename); if (!matches) { if (feed->readonly) { http_log(\"Unable to delete feed file '%s' as it is marked readonly\\n\", feed->feed_filename); exit(1); } unlink(feed->feed_filename); } } if (!url_exist(feed->feed_filename)) { AVFormatContext s1 = {0}, *s = &s1; if (feed->readonly) { http_log(\"Unable to create feed file '%s' as it is marked readonly\\n\", feed->feed_filename); exit(1); } /* only write the header of the ffm file */ if (avio_open(&s->pb, feed->feed_filename, AVIO_FLAG_WRITE) < 0) { http_log(\"Could not open output feed file '%s'\\n\", feed->feed_filename); exit(1); } s->oformat = feed->fmt; s->nb_streams = feed->nb_streams; for(i=0;i<s->nb_streams;i++) { AVStream *st; st = feed->streams[i]; s->streams[i] = st; } av_set_parameters(s, NULL); if (av_write_header(s) < 0) { http_log(\"Container doesn't supports the required parameters\\n\"); exit(1); } /* XXX: need better api */ av_freep(&s->priv_data); avio_close(s->pb); } /* get feed size and write index */ fd = open(feed->feed_filename, O_RDONLY); if (fd < 0) { http_log(\"Could not open output feed file '%s'\\n\", feed->feed_filename); exit(1); } feed->feed_write_index = FFMAX(ffm_read_write_index(fd),
FFM_PACKET_SIZE); feed->feed_size = lseek(fd, 0, SEEK_END); /* ensure that we do not wrap before the end of file */ if (feed->feed_max_size && feed->feed_max_size < feed->feed_size) feed->feed_max_size = feed->feed_size; close(fd); } }"} {"target": 1, "idx": 669, "func": "static void do_interrupt64(CPUX86State *env, int intno, int is_int, int error_code, target_ulong next_eip, int is_hw) { SegmentCache *dt; target_ulong ptr; int type, dpl, selector, cpl, ist; int has_error_code, new_stack; uint32_t e1, e2, e3, ss; target_ulong old_eip, esp, offset; has_error_code = 0; if (!is_int && !is_hw) { has_error_code = exception_has_error_code(intno); } if (is_int) { old_eip = next_eip; } else { old_eip = env->eip; } dt = &env->idt; if (intno * 16 + 15 > dt->limit) { raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); } ptr = dt->base + intno * 16; e1 = cpu_ldl_kernel(env, ptr); e2 = cpu_ldl_kernel(env, ptr + 4); e3 = cpu_ldl_kernel(env, ptr + 8); /* check gate type */ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; switch (type) { case 14: /* 386 interrupt gate */ case 15: /* 386 trap gate */ break; default: raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); break; } dpl = (e2 >> DESC_DPL_SHIFT) & 3; cpl = env->hflags & HF_CPL_MASK; /* check privilege if software int */ if (is_int && dpl < cpl) { raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); } /* check valid bit */ if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2); } selector = e1 >> 16; offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); ist = e2 & 7; if ((selector & 0xfffc) == 0) { raise_exception_err(env, EXCP0D_GPF, 0); } if (load_segment(env, &e1, &e2, selector) != 0) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (dpl > cpl) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); } if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) { /* to inner privilege */ new_stack = 1; esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl); ss = 0; } else if ((e2 & DESC_C_MASK) || dpl == cpl) { /* to same privilege */ if (env->eflags & VM_MASK) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } new_stack = 0; esp = env->regs[R_ESP]; dpl = cpl; } else { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); new_stack = 0; /* avoid warning */ esp = 0; /* avoid warning */ } esp &= ~0xfLL; /* align stack */ PUSHQ(esp, env->segs[R_SS].selector); PUSHQ(esp, env->regs[R_ESP]); PUSHQ(esp, cpu_compute_eflags(env)); PUSHQ(esp, env->segs[R_CS].selector); PUSHQ(esp, old_eip); if (has_error_code) { PUSHQ(esp, error_code); } /* interrupt gate clear IF mask */ if ((type & 1) == 0) { env->eflags &= ~IF_MASK; } env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); if (new_stack) { ss = 0 | dpl; cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); } env->regs[R_ESP] = esp; selector = (selector & ~3) | dpl; cpu_x86_load_seg_cache(env, R_CS, selector, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); env->eip = offset; }"} {"target": 1, "idx": 674, "func": "void palette8tobgr24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette) { long i; /* writes 1 byte o much and might cause alignment issues on some architectures? 
for(i=0; icodeblock_mode && !(s->old_delta_quant && blockcnt_one)) { if (is_arith) b->quant += dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA); else b->quant += dirac_get_se_golomb(gb); } b->quant = FFMIN(b->quant, MAX_QUANT); qfactor = qscale_tab[b->quant]; /* TODO: context pointer? */ if (!s->num_refs) qoffset = qoffset_intra_tab[b->quant]; else qoffset = qoffset_inter_tab[b->quant]; buf = b->ibuf + top * b->stride; for (y = top; y < bottom; y++) { for (x = left; x < right; x++) { /* [DIRAC_STD] 13.4.4 Subband coefficients. coeff_unpack() */ if (is_arith) coeff_unpack_arith(c, qfactor, qoffset, b, buf+x, x, y); else buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset); } buf += b->stride; } }"} {"target": 1, "idx": 683, "func": "static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ FFV1Context *f = avctx->priv_data; CABACContext * const c= &f->c; AVFrame *pict = data; const int width= f->width; const int height= f->height; AVFrame * const p= &f->picture; int used_count= 0; if(avctx->strict_std_compliance >= 0){ av_log(avctx, AV_LOG_ERROR, \"this codec is under development, files encoded with it wont be decodeable with future versions!!!\\n\" \"use vstrict=-1 to use it anyway\\n\"); return -1; } ff_init_cabac_encoder(c, buf, buf_size); ff_init_cabac_states(c, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64); c->lps_state[2] = 1; c->lps_state[3] = 0; *p = *pict; p->pict_type= FF_I_TYPE; if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){ put_cabac_bypass(c, 1); p->key_frame= 1; write_header(f); clear_state(f); }else{ put_cabac_bypass(c, 0); p->key_frame= 0; } if(!f->ac){ used_count += put_cabac_terminate(c, 1); //printf(\"pos=%d\\n\", used_count); init_put_bits(&f->pb, buf + used_count, buf_size - used_count); } if(f->colorspace==0){ const int chroma_width = -((-width )>>f->chroma_h_shift); const int chroma_height= -((-height)>>f->chroma_v_shift); encode_plane(f, p->data[0], width, height, p->linesize[0], 0); encode_plane(f, p->data[1], chroma_width, chroma_height, p->linesize[1], 1); encode_plane(f, p->data[2], chroma_width, chroma_height, p->linesize[2], 1); }else{ encode_rgb_frame(f, (uint32_t*)(p->data[0]), width, height, p->linesize[0]/4); } emms_c(); f->picture_number++; if(f->ac){ return put_cabac_terminate(c, 1); }else{ flush_put_bits(&f->pb); //nicer padding FIXME return used_count + (put_bits_count(&f->pb)+7)/8; } }"} {"target": 1, "idx": 686, "func": "void do_POWER_maskg (void) { uint32_t ret; if ((uint32_t)T0 == (uint32_t)(T1 + 1)) { ret = -1; } else { ret = (((uint32_t)(-1)) >> ((uint32_t)T0)) ^ (((uint32_t)(-1) >> ((uint32_t)T1)) >> 1); if ((uint32_t)T0 > (uint32_t)T1) ret = ~ret; } T0 = ret; }"} {"target": 1, "idx": 707, "func": "static void qed_aio_read_data(void *opaque, int ret, uint64_t offset, size_t len) { QEDAIOCB *acb = opaque; BDRVQEDState *s = acb_to_s(acb); BlockDriverState *bs = acb->common.bs; /* Adjust offset into cluster */ offset += qed_offset_into_cluster(s, acb->cur_pos); trace_qed_aio_read_data(s, acb, ret, offset, len); if (ret < 0) { goto err; } qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len); /* Handle zero cluster and backing file reads */ if (ret == QED_CLUSTER_ZERO) { qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size); qed_aio_next_io(acb, 0); return; } else if (ret != QED_CLUSTER_FOUND) { qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov, qed_aio_next_io, acb); return; } BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); bdrv_aio_readv(bs->file, offset / 
BDRV_SECTOR_SIZE, &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE, qed_aio_next_io, acb); return; err: qed_aio_complete(acb, ret); }"} {"target": 1, "idx": 710, "func": "int qcow2_update_header(BlockDriverState *bs) { BDRVQcowState *s = bs->opaque; QCowHeader *header; char *buf; size_t buflen = s->cluster_size; int ret; uint64_t total_size; uint32_t refcount_table_clusters; size_t header_length; Qcow2UnknownHeaderExtension *uext; buf = qemu_blockalign(bs, buflen); /* Header structure */ header = (QCowHeader*) buf; if (buflen < sizeof(*header)) { ret = -ENOSPC; goto fail; } header_length = sizeof(*header) + s->unknown_header_fields_size; total_size = bs->total_sectors * BDRV_SECTOR_SIZE; refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); *header = (QCowHeader) { /* Version 2 fields */ .magic = cpu_to_be32(QCOW_MAGIC), .version = cpu_to_be32(s->qcow_version), .backing_file_offset = 0, .backing_file_size = 0, .cluster_bits = cpu_to_be32(s->cluster_bits), .size = cpu_to_be64(total_size), .crypt_method = cpu_to_be32(s->crypt_method_header), .l1_size = cpu_to_be32(s->l1_size), .l1_table_offset = cpu_to_be64(s->l1_table_offset), .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), .nb_snapshots = cpu_to_be32(s->nb_snapshots), .snapshots_offset = cpu_to_be64(s->snapshots_offset), /* Version 3 fields */ .incompatible_features = cpu_to_be64(s->incompatible_features), .compatible_features = cpu_to_be64(s->compatible_features), .autoclear_features = cpu_to_be64(s->autoclear_features), .refcount_order = cpu_to_be32(3 + REFCOUNT_SHIFT), .header_length = cpu_to_be32(header_length), }; /* For older versions, write a shorter header */ switch (s->qcow_version) { case 2: ret = offsetof(QCowHeader, incompatible_features); break; case 3: ret = sizeof(*header); break; default: return -EINVAL; } buf += ret; buflen -= ret; memset(buf, 0, buflen); /* Preserve any unknown field in the header */ if (s->unknown_header_fields_size) { if (buflen < s->unknown_header_fields_size) { ret = -ENOSPC; goto fail; } memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); buf += s->unknown_header_fields_size; buflen -= s->unknown_header_fields_size; } /* Backing file format header extension */ if (*bs->backing_format) { ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, bs->backing_format, strlen(bs->backing_format), buflen); if (ret < 0) { goto fail; } buf += ret; buflen -= ret; } /* Feature table */ Qcow2Feature features[] = { /* no feature defined yet */ }; ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, features, sizeof(features), buflen); if (ret < 0) { goto fail; } buf += ret; buflen -= ret; /* Keep unknown header extensions */ QLIST_FOREACH(uext, &s->unknown_header_ext, next) { ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); if (ret < 0) { goto fail; } buf += ret; buflen -= ret; } /* End of header extensions */ ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); if (ret < 0) { goto fail; } buf += ret; buflen -= ret; /* Backing file name */ if (*bs->backing_file) { size_t backing_file_len = strlen(bs->backing_file); if (buflen < backing_file_len) { ret = -ENOSPC; goto fail; } strncpy(buf, bs->backing_file, buflen); header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); header->backing_file_size = cpu_to_be32(backing_file_len); } /* Write the new header */ ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); if (ret < 0) { goto fail; } ret 
= 0; fail: qemu_vfree(header); return ret; }"} {"target": 1, "idx": 729, "func": "void tcp_start_incoming_migration(const char *host_port, Error **errp) { int s; s = inet_listen(host_port, NULL, 256, SOCK_STREAM, 0, errp); if (s < 0) { return; } qemu_set_fd_handler2(s, NULL, tcp_accept_incoming_migration, NULL, (void *)(intptr_t)s); }"} {"target": 1, "idx": 740, "func": "static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset, sPAPRPHBState *sphb) { ResourceProps rp; bool is_bridge = false; int pci_status, err; char *buf = NULL; uint32_t drc_index = spapr_phb_get_pci_drc_index(sphb, dev); uint32_t ccode = pci_default_read_config(dev, PCI_CLASS_PROG, 3); uint32_t max_msi, max_msix; if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) == PCI_HEADER_TYPE_BRIDGE) { is_bridge = true; } /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */ _FDT(fdt_setprop_cell(fdt, offset, \"vendor-id\", pci_default_read_config(dev, PCI_VENDOR_ID, 2))); _FDT(fdt_setprop_cell(fdt, offset, \"device-id\", pci_default_read_config(dev, PCI_DEVICE_ID, 2))); _FDT(fdt_setprop_cell(fdt, offset, \"revision-id\", pci_default_read_config(dev, PCI_REVISION_ID, 1))); _FDT(fdt_setprop_cell(fdt, offset, \"class-code\", ccode)); if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) { _FDT(fdt_setprop_cell(fdt, offset, \"interrupts\", pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1))); } if (!is_bridge) { _FDT(fdt_setprop_cell(fdt, offset, \"min-grant\", pci_default_read_config(dev, PCI_MIN_GNT, 1))); _FDT(fdt_setprop_cell(fdt, offset, \"max-latency\", pci_default_read_config(dev, PCI_MAX_LAT, 1))); } if (pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)) { _FDT(fdt_setprop_cell(fdt, offset, \"subsystem-id\", pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2))); } if (pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)) { _FDT(fdt_setprop_cell(fdt, offset, \"subsystem-vendor-id\", pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2))); } _FDT(fdt_setprop_cell(fdt, offset, \"cache-line-size\", pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1))); /* the following fdt cells are masked off the pci status register */ pci_status = pci_default_read_config(dev, PCI_STATUS, 2); _FDT(fdt_setprop_cell(fdt, offset, \"devsel-speed\", PCI_STATUS_DEVSEL_MASK & pci_status)); if (pci_status & PCI_STATUS_FAST_BACK) { _FDT(fdt_setprop(fdt, offset, \"fast-back-to-back\", NULL, 0)); } if (pci_status & PCI_STATUS_66MHZ) { _FDT(fdt_setprop(fdt, offset, \"66mhz-capable\", NULL, 0)); } if (pci_status & PCI_STATUS_UDF) { _FDT(fdt_setprop(fdt, offset, \"udf-supported\", NULL, 0)); } _FDT(fdt_setprop_string(fdt, offset, \"name\", pci_find_device_name((ccode >> 16) & 0xff, (ccode >> 8) & 0xff, ccode & 0xff))); buf = spapr_phb_get_loc_code(sphb, dev); if (!buf) { error_report(\"Failed setting the ibm,loc-code\"); return -1; } err = fdt_setprop_string(fdt, offset, \"ibm,loc-code\", buf); g_free(buf); if (err < 0) { return err; } if (drc_index) { _FDT(fdt_setprop_cell(fdt, offset, \"ibm,my-drc-index\", drc_index)); } _FDT(fdt_setprop_cell(fdt, offset, \"#address-cells\", RESOURCE_CELLS_ADDRESS)); _FDT(fdt_setprop_cell(fdt, offset, \"#size-cells\", RESOURCE_CELLS_SIZE)); max_msi = msi_nr_vectors_allocated(dev); if (max_msi) { _FDT(fdt_setprop_cell(fdt, offset, \"ibm,req#msi\", max_msi)); } max_msix = dev->msix_entries_nr; if (max_msix) { _FDT(fdt_setprop_cell(fdt, offset, \"ibm,req#msi-x\", max_msix)); } populate_resource_props(dev, &rp); _FDT(fdt_setprop(fdt, offset, \"reg\", (uint8_t *)rp.reg, rp.reg_len)); 
_FDT(fdt_setprop(fdt, offset, \"assigned-addresses\", (uint8_t *)rp.assigned, rp.assigned_len)); if (sphb->pcie_ecs && pci_is_express(dev)) { _FDT(fdt_setprop_cell(fdt, offset, \"ibm,pci-config-space-type\", 0x1)); } return 0; }"} {"target": 1, "idx": 748, "func": "static int read_major_sync(MLPDecodeContext *m, GetBitContext *gb) { MLPHeaderInfo mh; int substr, ret; if ((ret = ff_mlp_read_major_sync(m->avctx, &mh, gb)) != 0) return ret; if (mh.group1_bits == 0) { av_log(m->avctx, AV_LOG_ERROR, \"invalid/unknown bits per sample\\n\"); return AVERROR_INVALIDDATA; if (mh.group2_bits > mh.group1_bits) { av_log(m->avctx, AV_LOG_ERROR, \"Channel group 2 cannot have more bits per sample than group 1.\\n\"); return AVERROR_INVALIDDATA; if (mh.group2_samplerate && mh.group2_samplerate != mh.group1_samplerate) { av_log(m->avctx, AV_LOG_ERROR, \"Channel groups with differing sample rates are not currently supported.\\n\"); return AVERROR_INVALIDDATA; if (mh.group1_samplerate == 0) { av_log(m->avctx, AV_LOG_ERROR, \"invalid/unknown sampling rate\\n\"); return AVERROR_INVALIDDATA; if (mh.group1_samplerate > MAX_SAMPLERATE) { av_log(m->avctx, AV_LOG_ERROR, \"Sampling rate %d is greater than the supported maximum (%d).\\n\", mh.group1_samplerate, MAX_SAMPLERATE); return AVERROR_INVALIDDATA; if (mh.access_unit_size > MAX_BLOCKSIZE) { av_log(m->avctx, AV_LOG_ERROR, \"Block size %d is greater than the supported maximum (%d).\\n\", mh.access_unit_size, MAX_BLOCKSIZE); return AVERROR_INVALIDDATA; if (mh.access_unit_size_pow2 > MAX_BLOCKSIZE_POW2) { av_log(m->avctx, AV_LOG_ERROR, \"Block size pow2 %d is greater than the supported maximum (%d).\\n\", mh.access_unit_size_pow2, MAX_BLOCKSIZE_POW2); return AVERROR_INVALIDDATA; if (mh.num_substreams == 0) return AVERROR_INVALIDDATA; if (m->avctx->codec_id == AV_CODEC_ID_MLP && mh.num_substreams > 2) { av_log(m->avctx, AV_LOG_ERROR, \"MLP only supports up to 2 substreams.\\n\"); return AVERROR_INVALIDDATA; if (mh.num_substreams > MAX_SUBSTREAMS) { \"%d substreams (more than the \" \"maximum supported by the decoder)\", mh.num_substreams); m->access_unit_size = mh.access_unit_size; m->access_unit_size_pow2 = mh.access_unit_size_pow2; m->num_substreams = mh.num_substreams; m->max_decoded_substream = m->num_substreams - 1; m->avctx->sample_rate = mh.group1_samplerate; m->avctx->frame_size = mh.access_unit_size; m->avctx->bits_per_raw_sample = mh.group1_bits; if (mh.group1_bits > 16) m->avctx->sample_fmt = AV_SAMPLE_FMT_S32; else m->avctx->sample_fmt = AV_SAMPLE_FMT_S16; m->params_valid = 1; for (substr = 0; substr < MAX_SUBSTREAMS; substr++) m->substream[substr].restart_seen = 0; /* Set the layout for each substream. When there's more than one, the first * substream is Stereo. Subsequent substreams' layouts are indicated in the * major sync. 
*/ if (m->avctx->codec_id == AV_CODEC_ID_MLP) { if ((substr = (mh.num_substreams > 1))) m->substream[0].ch_layout = AV_CH_LAYOUT_STEREO; m->substream[substr].ch_layout = mh.channel_layout_mlp; } else { if ((substr = (mh.num_substreams > 1))) m->substream[0].ch_layout = AV_CH_LAYOUT_STEREO; if (mh.num_substreams > 2) if (mh.channel_layout_thd_stream2) m->substream[2].ch_layout = mh.channel_layout_thd_stream2; else m->substream[2].ch_layout = mh.channel_layout_thd_stream1; m->substream[substr].ch_layout = mh.channel_layout_thd_stream1; if (m->avctx->channels<=2 && m->substream[substr].ch_layout == AV_CH_LAYOUT_MONO && m->max_decoded_substream == 1) { av_log(m->avctx, AV_LOG_DEBUG, \"Mono stream with 2 substreams, ignoring 2nd\\n\"); m->max_decoded_substream = 0; if (m->avctx->channels==2) m->avctx->channel_layout = AV_CH_LAYOUT_STEREO; m->needs_reordering = mh.channel_arrangement >= 18 && mh.channel_arrangement <= 20; return 0;"} {"target": 1, "idx": 767, "func": "static AVStream * init_stream(AVFormatContext *s) { BinDemuxContext *bin = s->priv_data; AVStream *st = avformat_new_stream(s, NULL); if (!st) return NULL; st->codec->codec_tag = 0; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; if (!bin->width) { st->codec->width = (80<<3); st->codec->height = (25<<4); } avpriv_set_pts_info(st, 60, bin->framerate.den, bin->framerate.num); /* simulate tty display speed */ bin->chars_per_frame = FFMAX(av_q2d(st->time_base) * bin->chars_per_frame, 1); return st; }"} {"target": 1, "idx": 781, "func": "static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) { SheepdogAIOCB *acb; int ret; if (bs->growable && sector_num + nb_sectors > bs->total_sectors) { ret = sd_truncate(bs, (sector_num + nb_sectors) * BDRV_SECTOR_SIZE); if (ret < 0) { return ret; } bs->total_sectors = sector_num + nb_sectors; } acb = sd_aio_setup(bs, qiov, sector_num, nb_sectors); acb->aio_done_func = sd_write_done; acb->aiocb_type = AIOCB_WRITE_UDATA; ret = sd_co_rw_vector(acb); if (ret <= 0) { qemu_aio_release(acb); return ret; } qemu_coroutine_yield(); return acb->ret; }"} {"target": 0, "idx": 794, "func": "static void sdl_send_mouse_event(int dx, int dy, int x, int y, int state) { static uint32_t bmap[INPUT_BUTTON__MAX] = { [INPUT_BUTTON_LEFT] = SDL_BUTTON(SDL_BUTTON_LEFT), [INPUT_BUTTON_MIDDLE] = SDL_BUTTON(SDL_BUTTON_MIDDLE), [INPUT_BUTTON_RIGHT] = SDL_BUTTON(SDL_BUTTON_RIGHT), [INPUT_BUTTON_WHEEL_UP] = SDL_BUTTON(SDL_BUTTON_WHEELUP), [INPUT_BUTTON_WHEEL_DOWN] = SDL_BUTTON(SDL_BUTTON_WHEELDOWN), }; static uint32_t prev_state; if (prev_state != state) { qemu_input_update_buttons(dcl->con, bmap, prev_state, state); prev_state = state; } if (qemu_input_is_absolute()) { qemu_input_queue_abs(dcl->con, INPUT_AXIS_X, x, real_screen->w); qemu_input_queue_abs(dcl->con, INPUT_AXIS_Y, y, real_screen->h); } else { if (guest_cursor) { x -= guest_x; y -= guest_y; guest_x += x; guest_y += y; dx = x; dy = y; } qemu_input_queue_rel(dcl->con, INPUT_AXIS_X, dx); qemu_input_queue_rel(dcl->con, INPUT_AXIS_Y, dy); } qemu_input_event_sync(); }"} {"target": 0, "idx": 797, "func": "static void device_initfn(Object *obj) { DeviceState *dev = DEVICE(obj); Property *prop; if (qdev_hotplug) { dev->hotplugged = 1; qdev_hot_added = true; } dev->instance_id_alias = -1; dev->state = DEV_STATE_CREATED; qdev_prop_set_defaults(dev, qdev_get_props(dev)); for (prop = qdev_get_props(dev); prop && prop->name; prop++) { qdev_property_add_legacy(dev, prop, NULL); qdev_property_add_static(dev, prop, NULL); } 
object_property_add_str(OBJECT(dev), \"type\", qdev_get_type, NULL, NULL); }"} {"target": 1, "idx": 837, "func": "static void scsi_write_data(SCSIRequest *req) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); /* The request is used as the AIO opaque value, so add a ref. */ scsi_req_ref(&r->req); if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { DPRINTF(\"Data transfer direction invalid\\n\"); scsi_write_complete(r, -EINVAL); return; } if (!r->req.sg && !r->qiov.size) { /* Called for the first time. Ask the driver to send us more data. */ r->started = true; scsi_write_complete(r, 0); return; } if (s->tray_open) { scsi_write_complete(r, -ENOMEDIUM); return; } if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || r->req.cmd.buf[0] == VERIFY_16) { if (r->req.sg) { scsi_dma_complete(r, 0); } else { scsi_write_complete(r, 0); } return; } if (r->req.sg) { dma_acct_start(s->qdev.conf.bs, &r->acct, r->req.sg, BDRV_ACCT_WRITE); r->req.resid -= r->req.sg->size; r->req.aiocb = dma_bdrv_write(s->qdev.conf.bs, r->req.sg, r->sector, scsi_dma_complete, r); } else { n = r->qiov.size / 512; bdrv_acct_start(s->qdev.conf.bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_WRITE); r->req.aiocb = bdrv_aio_writev(s->qdev.conf.bs, r->sector, &r->qiov, n, scsi_write_complete, r); } }"} {"target": 1, "idx": 848, "func": "static void idiv64(uint64_t *plow, uint64_t *phigh, int64_t b) { int sa, sb; sa = ((int64_t)*phigh < 0); if (sa) neg128(plow, phigh); sb = (b < 0); if (sb) b = -b; div64(plow, phigh, b); if (sa ^ sb) *plow = - *plow; if (sa) *phigh = - *phigh; }"} {"target": 1, "idx": 855, "func": "void bios_linker_loader_alloc(GArray *linker, const char *file, uint32_t alloc_align, bool alloc_fseg) { BiosLinkerLoaderEntry entry; assert(!(alloc_align & (alloc_align - 1))); memset(&entry, 0, sizeof entry); strncpy(entry.alloc.file, file, sizeof entry.alloc.file - 1); entry.command = cpu_to_le32(BIOS_LINKER_LOADER_COMMAND_ALLOCATE); entry.alloc.align = cpu_to_le32(alloc_align); entry.alloc.zone = cpu_to_le32(alloc_fseg ? BIOS_LINKER_LOADER_ALLOC_ZONE_FSEG : BIOS_LINKER_LOADER_ALLOC_ZONE_HIGH); /* Alloc entries must come first, so prepend them */ g_array_prepend_vals(linker, &entry, sizeof entry); }"} {"target": 1, "idx": 860, "func": "static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp) { /* We don't actually refresh here, but just return data queried in * iscsi_open(): iscsi targets don't change their limits. */ IscsiLun *iscsilun = bs->opaque; uint64_t max_xfer_len = iscsilun->use_16_for_rw ? 
0xffffffff : 0xffff; bs->bl.request_alignment = iscsilun->block_size; if (iscsilun->bl.max_xfer_len) { max_xfer_len = MIN(max_xfer_len, iscsilun->bl.max_xfer_len); } if (max_xfer_len * iscsilun->block_size < INT_MAX) { bs->bl.max_transfer = max_xfer_len * iscsilun->block_size; } if (iscsilun->lbp.lbpu) { if (iscsilun->bl.max_unmap < 0xffffffff / iscsilun->block_size) { bs->bl.max_pdiscard = iscsilun->bl.max_unmap * iscsilun->block_size; } bs->bl.pdiscard_alignment = iscsilun->bl.opt_unmap_gran * iscsilun->block_size; } else { bs->bl.pdiscard_alignment = iscsilun->block_size; } if (iscsilun->bl.max_ws_len < 0xffffffff / iscsilun->block_size) { bs->bl.max_pwrite_zeroes = iscsilun->bl.max_ws_len * iscsilun->block_size; } if (iscsilun->lbp.lbpws) { bs->bl.pwrite_zeroes_alignment = iscsilun->bl.opt_unmap_gran * iscsilun->block_size; } else { bs->bl.pwrite_zeroes_alignment = iscsilun->block_size; } if (iscsilun->bl.opt_xfer_len && iscsilun->bl.opt_xfer_len < INT_MAX / iscsilun->block_size) { bs->bl.opt_transfer = pow2floor(iscsilun->bl.opt_xfer_len * iscsilun->block_size); } }"} {"target": 0, "idx": 883, "func": "static void tap_receive(void *opaque, const uint8_t *buf, size_t size) { TAPState *s = opaque; int ret; for(;;) { ret = write(s->fd, buf, size); if (ret < 0 && (errno == EINTR || errno == EAGAIN)) { } else { break; } } }"} {"target": 0, "idx": 886, "func": "static void scsi_disk_realize(SCSIDevice *dev, Error **errp) { DriveInfo *dinfo; Error *local_err = NULL; if (!dev->conf.bs) { scsi_realize(dev, &local_err); assert(local_err); error_propagate(errp, local_err); return; } dinfo = drive_get_by_blockdev(dev->conf.bs); if (dinfo->media_cd) { scsi_cd_realize(dev, errp); } else { scsi_hd_realize(dev, errp); } }"} {"target": 0, "idx": 899, "func": "static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info) { BDRVSheepdogState *s = bs->opaque; int ret, fd; uint32_t new_vid; SheepdogInode *inode; unsigned int datalen; dprintf(\"sn_info: name %s id_str %s s: name %s vm_state_size %\" PRId64 \" \" \"is_snapshot %d\\n\", sn_info->name, sn_info->id_str, s->name, sn_info->vm_state_size, s->is_snapshot); if (s->is_snapshot) { error_report(\"You can't create a snapshot of a snapshot VDI, \" \"%s (%\" PRIu32 \").\", s->name, s->inode.vdi_id); return -EINVAL; } dprintf(\"%s %s\\n\", sn_info->name, sn_info->id_str); s->inode.vm_state_size = sn_info->vm_state_size; s->inode.vm_clock_nsec = sn_info->vm_clock_nsec; /* It appears that inode.tag does not require a NUL terminator, * which means this use of strncpy is ok. */ strncpy(s->inode.tag, sn_info->name, sizeof(s->inode.tag)); /* we don't need to update entire object */ datalen = SD_INODE_SIZE - sizeof(s->inode.data_vdi_id); /* refresh inode. */ fd = connect_to_sdog(s->addr, s->port); if (fd < 0) { ret = fd; goto cleanup; } ret = write_object(fd, (char *)&s->inode, vid_to_vdi_oid(s->inode.vdi_id), s->inode.nr_copies, datalen, 0, false, s->cache_enabled); if (ret < 0) { error_report(\"failed to write snapshot's inode.\"); goto cleanup; } ret = do_sd_create(s->name, s->inode.vdi_size, s->inode.vdi_id, &new_vid, 1, s->addr, s->port); if (ret < 0) { error_report(\"failed to create inode for snapshot. %s\", strerror(errno)); goto cleanup; } inode = (SheepdogInode *)g_malloc(datalen); ret = read_object(fd, (char *)inode, vid_to_vdi_oid(new_vid), s->inode.nr_copies, datalen, 0, s->cache_enabled); if (ret < 0) { error_report(\"failed to read new inode info. 
%s\", strerror(errno)); goto cleanup; } memcpy(&s->inode, inode, datalen); dprintf(\"s->inode: name %s snap_id %x oid %x\\n\", s->inode.name, s->inode.snap_id, s->inode.vdi_id); cleanup: closesocket(fd); return ret; }"} {"target": 0, "idx": 906, "func": "static int vnc_display_listen(VncDisplay *vd, SocketAddress **saddr, size_t nsaddr, SocketAddress **wsaddr, size_t nwsaddr, Error **errp) { size_t i; for (i = 0; i < nsaddr; i++) { if (vnc_display_listen_addr(vd, saddr[i], \"vnc-listen\", &vd->lsock, &vd->lsock_tag, &vd->nlsock, errp) < 0) { return -1; } } for (i = 0; i < nwsaddr; i++) { if (vnc_display_listen_addr(vd, wsaddr[i], \"vnc-ws-listen\", &vd->lwebsock, &vd->lwebsock_tag, &vd->nlwebsock, errp) < 0) { return -1; } } return 0; }"} {"target": 0, "idx": 913, "func": "static char *enumerate_cpus(unsigned long *cpus, int max_cpus) { int cpu; bool first = true; GString *s = g_string_new(NULL); for (cpu = find_first_bit(cpus, max_cpus); cpu < max_cpus; cpu = find_next_bit(cpus, max_cpus, cpu + 1)) { g_string_append_printf(s, \"%s%d\", first ? \"\" : \" \", cpu); first = false; } return g_string_free(s, FALSE); }"} {"target": 1, "idx": 921, "func": "MigrationState *exec_start_outgoing_migration(const char *command, int64_t bandwidth_limit, int async) { FdMigrationState *s; FILE *f; s = qemu_mallocz(sizeof(*s)); if (s == NULL) { dprintf(\"Unable to allocate FdMigrationState\\n\"); goto err; } f = popen(command, \"w\"); if (f == NULL) { dprintf(\"Unable to popen exec target\\n\"); goto err_after_alloc; } s->fd = fileno(f); if (s->fd == -1) { dprintf(\"Unable to retrieve file descriptor for popen'd handle\\n\"); goto err_after_open; } if (fcntl(s->fd, F_SETFD, O_NONBLOCK) == -1) { dprintf(\"Unable to set nonblocking mode on file descriptor\\n\"); goto err_after_open; } s->opaque = qemu_popen(f, \"w\"); s->get_error = file_errno; s->write = file_write; s->mig_state.cancel = migrate_fd_cancel; s->mig_state.get_status = migrate_fd_get_status; s->mig_state.release = migrate_fd_release; s->state = MIG_STATE_ACTIVE; s->detach = !async; s->bandwidth_limit = bandwidth_limit; if (s->detach == 1) { dprintf(\"detaching from monitor\\n\"); monitor_suspend(); s->detach = 2; } migrate_fd_connect(s); return &s->mig_state; err_after_open: pclose(f); err_after_alloc: qemu_free(s); err: return NULL; }"} {"target": 1, "idx": 924, "func": "static void register_core_rtas(void) { spapr_rtas_register(\"display-character\", rtas_display_character); spapr_rtas_register(\"get-time-of-day\", rtas_get_time_of_day); spapr_rtas_register(\"power-off\", rtas_power_off); }"} {"target": 1, "idx": 937, "func": "static target_ulong h_put_tce(CPUPPCState *env, sPAPREnvironment *spapr, target_ulong opcode, target_ulong *args) { target_ulong liobn = args[0]; target_ulong ioba = args[1]; target_ulong tce = args[2]; VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, liobn); VIOsPAPR_RTCE *rtce; if (!dev) { hcall_dprintf(\"LIOBN 0x\" TARGET_FMT_lx \" does not exist\\n\", liobn); return H_PARAMETER; } ioba &= ~(SPAPR_VIO_TCE_PAGE_SIZE - 1); #ifdef DEBUG_TCE fprintf(stderr, \"spapr_vio_put_tce on %s ioba 0x\" TARGET_FMT_lx \" TCE 0x\" TARGET_FMT_lx \"\\n\", dev->qdev.id, ioba, tce); #endif if (ioba >= dev->rtce_window_size) { hcall_dprintf(\"Out-of-bounds IOBA 0x\" TARGET_FMT_lx \"\\n\", ioba); return H_PARAMETER; } rtce = dev->rtce_table + (ioba >> SPAPR_VIO_TCE_PAGE_SHIFT); rtce->tce = tce; return H_SUCCESS; }"} {"target": 1, "idx": 941, "func": "static void nvme_get_bootindex(Object *obj, Visitor *v, void *opaque, const 
char *name, Error **errp) { NvmeCtrl *s = NVME(obj); visit_type_int32(v, &s->conf.bootindex, name, errp); }"} {"target": 1, "idx": 944, "func": "ff_rm_parse_packet (AVFormatContext *s, AVIOContext *pb, AVStream *st, RMStream *ast, int len, AVPacket *pkt, int *seq, int flags, int64_t timestamp) { RMDemuxContext *rm = s->priv_data; int ret; if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { rm->current_stream= st->id; ret = rm_assemble_video_frame(s, pb, rm, ast, pkt, len, seq, ×tamp); if(ret) return ret < 0 ? ret : -1; //got partial frame or error } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if ((ast->deint_id == DEINT_ID_GENR) || (ast->deint_id == DEINT_ID_INT4) || (ast->deint_id == DEINT_ID_SIPR)) { int x; int sps = ast->sub_packet_size; int cfs = ast->coded_framesize; int h = ast->sub_packet_h; int y = ast->sub_packet_cnt; int w = ast->audio_framesize; if (flags & 2) y = ast->sub_packet_cnt = 0; if (!y) ast->audiotimestamp = timestamp; switch (ast->deint_id) { case DEINT_ID_INT4: for (x = 0; x < h/2; x++) avio_read(pb, ast->pkt.data+x*2*w+y*cfs, cfs); break; case DEINT_ID_GENR: for (x = 0; x < w/sps; x++) avio_read(pb, ast->pkt.data+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), sps); break; case DEINT_ID_SIPR: avio_read(pb, ast->pkt.data + y * w, w); break; } if (++(ast->sub_packet_cnt) < h) return -1; if (ast->deint_id == DEINT_ID_SIPR) ff_rm_reorder_sipr_data(ast->pkt.data, h, w); ast->sub_packet_cnt = 0; rm->audio_stream_num = st->index; rm->audio_pkt_cnt = h * w / st->codec->block_align; } else if ((ast->deint_id == DEINT_ID_VBRF) || (ast->deint_id == DEINT_ID_VBRS)) { int x; rm->audio_stream_num = st->index; ast->sub_packet_cnt = (avio_rb16(pb) & 0xf0) >> 4; if (ast->sub_packet_cnt) { for (x = 0; x < ast->sub_packet_cnt; x++) ast->sub_packet_lengths[x] = avio_rb16(pb); rm->audio_pkt_cnt = ast->sub_packet_cnt; ast->audiotimestamp = timestamp; } else return -1; } else { av_get_packet(pb, pkt, len); rm_ac3_swap_bytes(st, pkt); } } else av_get_packet(pb, pkt, len); pkt->stream_index = st->index; #if 0 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { if(st->codec->codec_id == AV_CODEC_ID_RV20){ int seq= 128*(pkt->data[2]&0x7F) + (pkt->data[3]>>1); av_log(s, AV_LOG_DEBUG, \"%d %\"PRId64\" %d\\n\", *timestamp, *timestamp*512LL/25, seq); seq |= (timestamp&~0x3FFF); if(seq - timestamp > 0x2000) seq -= 0x4000; if(seq - timestamp < -0x2000) seq += 0x4000; } } #endif pkt->pts = timestamp; if (flags & 2) pkt->flags |= AV_PKT_FLAG_KEY; return st->codec->codec_type == AVMEDIA_TYPE_AUDIO ? rm->audio_pkt_cnt : 0; }"} {"target": 1, "idx": 946, "func": "Object *object_resolve_path_component(Object *parent, const gchar *part) { ObjectProperty *prop = object_property_find(parent, part, NULL); if (prop == NULL) { return NULL; } if (object_property_is_link(prop)) { return *(Object **)prop->opaque; } else if (object_property_is_child(prop)) { return prop->opaque; } else { return NULL; } }"} {"target": 1, "idx": 961, "func": "static void vscsi_process_login(VSCSIState *s, vscsi_req *req) { union viosrp_iu *iu = &req->iu; struct srp_login_rsp *rsp = &iu->srp.login_rsp; uint64_t tag = iu->srp.rsp.tag; trace_spapr_vscsi__process_login(); /* TODO handle case that requested size is wrong and * buffer format is wrong */ memset(iu, 0, sizeof(struct srp_login_rsp)); rsp->opcode = SRP_LOGIN_RSP; /* Don't advertise quite as many request as we support to * keep room for management stuff etc... 
*/ rsp->req_lim_delta = cpu_to_be32(VSCSI_REQ_LIMIT-2); rsp->tag = tag; rsp->max_it_iu_len = cpu_to_be32(sizeof(union srp_iu)); rsp->max_ti_iu_len = cpu_to_be32(sizeof(union srp_iu)); /* direct and indirect */ rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT); vscsi_send_iu(s, req, sizeof(*rsp), VIOSRP_SRP_FORMAT); }"} {"target": 1, "idx": 963, "func": "void cpu_tick_set_count(CPUTimer *timer, uint64_t count) { uint64_t real_count = count & ~timer->disabled_mask; uint64_t disabled_bit = count & timer->disabled_mask; int64_t vm_clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - cpu_to_timer_ticks(real_count, timer->frequency); TIMER_DPRINTF(\"%s set_count count=0x%016lx (%s) p=%p\\n\", timer->name, real_count, timer->disabled?\"disabled\":\"enabled\", timer); timer->disabled = disabled_bit ? 1 : 0; timer->clock_offset = vm_clock_offset; }"} {"target": 1, "idx": 971, "func": "void commit_start(const char *job_id, BlockDriverState *bs, BlockDriverState *base, BlockDriverState *top, int64_t speed, BlockdevOnError on_error, const char *backing_file_str, const char *filter_node_name, Error **errp) { CommitBlockJob *s; BlockReopenQueue *reopen_queue = NULL; int orig_overlay_flags; int orig_base_flags; BlockDriverState *iter; BlockDriverState *overlay_bs; BlockDriverState *commit_top_bs = NULL; Error *local_err = NULL; int ret; assert(top != bs); if (top == base) { error_setg(errp, \"Invalid files for merge: top and base are the same\"); return; overlay_bs = bdrv_find_overlay(bs, top); if (overlay_bs == NULL) { error_setg(errp, \"Could not find overlay image for %s:\", top->filename); return; s = block_job_create(job_id, &commit_job_driver, bs, 0, BLK_PERM_ALL, speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp); if (!s) { return; orig_base_flags = bdrv_get_flags(base); orig_overlay_flags = bdrv_get_flags(overlay_bs); /* convert base & overlay_bs to r/w, if necessary */ if (!(orig_base_flags & BDRV_O_RDWR)) { reopen_queue = bdrv_reopen_queue(reopen_queue, base, NULL, orig_base_flags | BDRV_O_RDWR); if (!(orig_overlay_flags & BDRV_O_RDWR)) { reopen_queue = bdrv_reopen_queue(reopen_queue, overlay_bs, NULL, orig_overlay_flags | BDRV_O_RDWR); if (reopen_queue) { bdrv_reopen_multiple(bdrv_get_aio_context(bs), reopen_queue, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); goto fail; /* Insert commit_top block node above top, so we can block consistent read * on the backing chain below it */ commit_top_bs = bdrv_new_open_driver(&bdrv_commit_top, filter_node_name, 0, errp); if (commit_top_bs == NULL) { goto fail; commit_top_bs->total_sectors = top->total_sectors; bdrv_set_aio_context(commit_top_bs, bdrv_get_aio_context(top)); bdrv_set_backing_hd(commit_top_bs, top, &local_err); if (local_err) { bdrv_unref(commit_top_bs); commit_top_bs = NULL; error_propagate(errp, local_err); goto fail; bdrv_set_backing_hd(overlay_bs, commit_top_bs, &local_err); if (local_err) { bdrv_unref(commit_top_bs); commit_top_bs = NULL; error_propagate(errp, local_err); goto fail; s->commit_top_bs = commit_top_bs; bdrv_unref(commit_top_bs); /* Block all nodes between top and base, because they will * disappear from the chain after this operation. */ assert(bdrv_chain_contains(top, base)); for (iter = top; iter != base; iter = backing_bs(iter)) { /* XXX BLK_PERM_WRITE needs to be allowed so we don't block ourselves * at s->base (if writes are blocked for a node, they are also blocked * for its backing file). The other options would be a second filter * driver above s->base. 
*/ ret = block_job_add_bdrv(&s->common, \"intermediate node\", iter, 0, BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE, errp); if (ret < 0) { goto fail; ret = block_job_add_bdrv(&s->common, \"base\", base, 0, BLK_PERM_ALL, errp); if (ret < 0) { goto fail; /* overlay_bs must be blocked because it needs to be modified to * update the backing image string. */ ret = block_job_add_bdrv(&s->common, \"overlay of top\", overlay_bs, BLK_PERM_GRAPH_MOD, BLK_PERM_ALL, errp); if (ret < 0) { goto fail; s->base = blk_new(BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_CONSISTENT_READ | BLK_PERM_GRAPH_MOD | BLK_PERM_WRITE_UNCHANGED); ret = blk_insert_bs(s->base, base, errp); if (ret < 0) { goto fail; /* Required permissions are already taken with block_job_add_bdrv() */ s->top = blk_new(0, BLK_PERM_ALL); ret = blk_insert_bs(s->top, top, errp); if (ret < 0) { goto fail; s->active = bs; s->base_flags = orig_base_flags; s->orig_overlay_flags = orig_overlay_flags; s->backing_file_str = g_strdup(backing_file_str); s->on_error = on_error; trace_commit_start(bs, base, top, s); block_job_start(&s->common); return; fail: if (s->base) { blk_unref(s->base); if (s->top) { blk_unref(s->top); if (commit_top_bs) { bdrv_set_backing_hd(overlay_bs, top, &error_abort); block_job_early_fail(&s->common);"} {"target": 0, "idx": 982, "func": "int bdrv_is_removable(BlockDriverState *bs) { return bs->removable; }"} {"target": 1, "idx": 1035, "func": "static int null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref) { return 0; }"} {"target": 1, "idx": 1038, "func": "void qmp_blockdev_add(BlockdevOptions *options, Error **errp) { QmpOutputVisitor *ov = qmp_output_visitor_new(); QObject *obj; QDict *qdict; Error *local_err = NULL; /* Require an ID in the top level */ if (!options->has_id) { error_setg(errp, \"Block device needs an ID\"); goto fail; } /* TODO Sort it out in raw-posix and drive_init: Reject aio=native with * cache.direct=false instead of silently switching to aio=threads, except * if called from drive_init. * * For now, simply forbidding the combination for all drivers will do. */ if (options->has_aio && options->aio == BLOCKDEV_AIO_OPTIONS_NATIVE) { bool direct = options->cache->has_direct && options->cache->direct; if (!options->has_cache && !direct) { error_setg(errp, \"aio=native requires cache.direct=true\"); goto fail; } } visit_type_BlockdevOptions(qmp_output_get_visitor(ov), &options, NULL, &local_err); if (local_err) { error_propagate(errp, local_err); goto fail; } obj = qmp_output_get_qobject(ov); qdict = qobject_to_qdict(obj); qdict_flatten(qdict); blockdev_init(NULL, qdict, &local_err); if (local_err) { error_propagate(errp, local_err); goto fail; } fail: qmp_output_visitor_cleanup(ov); }"} {"target": 1, "idx": 1040, "func": "void do_divwuo (void) { if (likely((uint32_t)T1 != 0)) { xer_ov = 0; T0 = (uint32_t)T0 / (uint32_t)T1; } else { xer_so = 1; xer_ov = 1; T0 = 0; } }"} {"target": 1, "idx": 1046, "func": "static void kvmclock_pre_save(void *opaque) { KVMClockState *s = opaque; struct kvm_clock_data data; int ret; if (s->clock_valid) { return; } ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data); if (ret < 0) { fprintf(stderr, \"KVM_GET_CLOCK failed: %s\\n\", strerror(ret)); data.clock = 0; } s->clock = data.clock; /* * If the VM is stopped, declare the clock state valid to avoid re-reading * it on next vmsave (which would return a different value). Will be reset * when the VM is continued. 
*/ s->clock_valid = !runstate_is_running(); }"} {"target": 1, "idx": 1056, "func": "static void handle_satn(ESPState *s) { uint8_t buf[32]; int len; if (s->dma && !s->dma_enabled) { s->dma_cb = handle_satn; return; } len = get_cmd(s, buf); if (len) do_cmd(s, buf); }"} {"target": 1, "idx": 1059, "func": "static int check_checksum(ByteIOContext *bc){ unsigned long checksum= get_checksum(bc); // return checksum != get_be32(bc); av_log(NULL, AV_LOG_ERROR, \"%08X %08X\\n\", checksum, (int)get_be32(bc)); return 0; }"} {"target": 0, "idx": 1060, "func": "int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind, const char *version, const char *serial, const char *model, uint64_t wwn, uint32_t cylinders, uint32_t heads, uint32_t secs, int chs_trans) { uint64_t nb_sectors; s->blk = blk; s->drive_kind = kind; blk_get_geometry(blk, &nb_sectors); s->cylinders = cylinders; s->heads = heads; s->sectors = secs; s->chs_trans = chs_trans; s->nb_sectors = nb_sectors; s->wwn = wwn; /* The SMART values should be preserved across power cycles but they aren't. */ s->smart_enabled = 1; s->smart_autosave = 1; s->smart_errors = 0; s->smart_selftest_count = 0; if (kind == IDE_CD) { blk_set_dev_ops(blk, &ide_cd_block_ops, s); blk_set_guest_block_size(blk, 2048); } else { if (!blk_is_inserted(s->blk)) { error_report(\"Device needs media, but drive is empty\"); return -1; } if (blk_is_read_only(blk)) { error_report(\"Can't use a read-only drive\"); return -1; } blk_set_dev_ops(blk, &ide_hd_block_ops, s); } if (serial) { pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial); } else { snprintf(s->drive_serial_str, sizeof(s->drive_serial_str), \"QM%05d\", s->drive_serial); } if (model) { pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model); } else { switch (kind) { case IDE_CD: strcpy(s->drive_model_str, \"QEMU DVD-ROM\"); break; case IDE_CFATA: strcpy(s->drive_model_str, \"QEMU MICRODRIVE\"); break; default: strcpy(s->drive_model_str, \"QEMU HARDDISK\"); break; } } if (version) { pstrcpy(s->version, sizeof(s->version), version); } else { pstrcpy(s->version, sizeof(s->version), qemu_get_version()); } ide_reset(s); blk_iostatus_enable(blk); return 0; }"} {"target": 1, "idx": 1089, "func": "int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev, uint64_t iova, uint64_t len) { struct vhost_iotlb_msg imsg; imsg.iova = iova; imsg.size = len; imsg.type = VHOST_IOTLB_INVALIDATE; return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg); }"} {"target": 0, "idx": 1107, "func": "static uint32_t gic_dist_readw(void *opaque, target_phys_addr_t offset) { uint32_t val; val = gic_dist_readb(opaque, offset); val |= gic_dist_readb(opaque, offset + 1) << 8; return val; }"} {"target": 0, "idx": 1108, "func": "static void test_qemu_strtoull_overflow(void) { const char *str = \"99999999999999999999999999999999999999999999\"; char f = 'X'; const char *endptr = &f; uint64_t res = 999; int err; err = qemu_strtoull(str, &endptr, 0, &res); g_assert_cmpint(err, ==, -ERANGE); g_assert_cmpint(res, ==, ULLONG_MAX); g_assert(endptr == str + strlen(str)); }"} {"target": 0, "idx": 1119, "func": "PXA2xxState *pxa255_init(unsigned int sdram_size) { PXA2xxState *s; int iomemtype, i; DriveInfo *dinfo; s = (PXA2xxState *) qemu_mallocz(sizeof(PXA2xxState)); s->env = cpu_init(\"pxa255\"); if (!s->env) { fprintf(stderr, \"Unable to find CPU definition\\n\"); exit(1); } s->reset = qemu_allocate_irqs(pxa2xx_reset, s, 1)[0]; /* SDRAM & Internal Memory Storage */ cpu_register_physical_memory(PXA2XX_SDRAM_BASE, 
sdram_size, qemu_ram_alloc(NULL, \"pxa255.sdram\", sdram_size) | IO_MEM_RAM); cpu_register_physical_memory(PXA2XX_INTERNAL_BASE, PXA2XX_INTERNAL_SIZE, qemu_ram_alloc(NULL, \"pxa255.internal\", PXA2XX_INTERNAL_SIZE) | IO_MEM_RAM); s->pic = pxa2xx_pic_init(0x40d00000, s->env); s->dma = pxa255_dma_init(0x40000000, s->pic[PXA2XX_PIC_DMA]); pxa25x_timer_init(0x40a00000, &s->pic[PXA2XX_PIC_OST_0]); s->gpio = pxa2xx_gpio_init(0x40e00000, s->env, s->pic, 85); dinfo = drive_get(IF_SD, 0, 0); if (!dinfo) { fprintf(stderr, \"qemu: missing SecureDigital device\\n\"); exit(1); } s->mmc = pxa2xx_mmci_init(0x41100000, dinfo->bdrv, s->pic[PXA2XX_PIC_MMC], s->dma); for (i = 0; pxa255_serial[i].io_base; i ++) if (serial_hds[i]) { #ifdef TARGET_WORDS_BIGENDIAN serial_mm_init(pxa255_serial[i].io_base, 2, s->pic[pxa255_serial[i].irqn], 14745600/16, serial_hds[i], 1, 1); #else serial_mm_init(pxa255_serial[i].io_base, 2, s->pic[pxa255_serial[i].irqn], 14745600/16, serial_hds[i], 1, 0); #endif } else { break; } if (serial_hds[i]) s->fir = pxa2xx_fir_init(0x40800000, s->pic[PXA2XX_PIC_ICP], s->dma, serial_hds[i]); s->lcd = pxa2xx_lcdc_init(0x44000000, s->pic[PXA2XX_PIC_LCD]); s->cm_base = 0x41300000; s->cm_regs[CCCR >> 2] = 0x02000210; /* 416.0 MHz */ s->clkcfg = 0x00000009; /* Turbo mode active */ iomemtype = cpu_register_io_memory(pxa2xx_cm_readfn, pxa2xx_cm_writefn, s, DEVICE_NATIVE_ENDIAN); cpu_register_physical_memory(s->cm_base, 0x1000, iomemtype); register_savevm(NULL, \"pxa2xx_cm\", 0, 0, pxa2xx_cm_save, pxa2xx_cm_load, s); cpu_arm_set_cp_io(s->env, 14, pxa2xx_cp14_read, pxa2xx_cp14_write, s); s->mm_base = 0x48000000; s->mm_regs[MDMRS >> 2] = 0x00020002; s->mm_regs[MDREFR >> 2] = 0x03ca4000; s->mm_regs[MECR >> 2] = 0x00000001; /* Two PC Card sockets */ iomemtype = cpu_register_io_memory(pxa2xx_mm_readfn, pxa2xx_mm_writefn, s, DEVICE_NATIVE_ENDIAN); cpu_register_physical_memory(s->mm_base, 0x1000, iomemtype); register_savevm(NULL, \"pxa2xx_mm\", 0, 0, pxa2xx_mm_save, pxa2xx_mm_load, s); s->pm_base = 0x40f00000; iomemtype = cpu_register_io_memory(pxa2xx_pm_readfn, pxa2xx_pm_writefn, s, DEVICE_NATIVE_ENDIAN); cpu_register_physical_memory(s->pm_base, 0x100, iomemtype); register_savevm(NULL, \"pxa2xx_pm\", 0, 0, pxa2xx_pm_save, pxa2xx_pm_load, s); for (i = 0; pxa255_ssp[i].io_base; i ++); s->ssp = (SSIBus **)qemu_mallocz(sizeof(SSIBus *) * i); for (i = 0; pxa255_ssp[i].io_base; i ++) { DeviceState *dev; dev = sysbus_create_simple(\"pxa2xx-ssp\", pxa255_ssp[i].io_base, s->pic[pxa255_ssp[i].irqn]); s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, \"ssi\"); } if (usb_enabled) { sysbus_create_simple(\"sysbus-ohci\", 0x4c000000, s->pic[PXA2XX_PIC_USBH1]); } s->pcmcia[0] = pxa2xx_pcmcia_init(0x20000000); s->pcmcia[1] = pxa2xx_pcmcia_init(0x30000000); s->rtc_base = 0x40900000; iomemtype = cpu_register_io_memory(pxa2xx_rtc_readfn, pxa2xx_rtc_writefn, s, DEVICE_NATIVE_ENDIAN); cpu_register_physical_memory(s->rtc_base, 0x1000, iomemtype); pxa2xx_rtc_init(s); register_savevm(NULL, \"pxa2xx_rtc\", 0, 0, pxa2xx_rtc_save, pxa2xx_rtc_load, s); s->i2c[0] = pxa2xx_i2c_init(0x40301600, s->pic[PXA2XX_PIC_I2C], 0xffff); s->i2c[1] = pxa2xx_i2c_init(0x40f00100, s->pic[PXA2XX_PIC_PWRI2C], 0xff); s->i2s = pxa2xx_i2s_init(0x40400000, s->pic[PXA2XX_PIC_I2S], s->dma); /* GPIO1 resets the processor */ /* The handler can be overridden by board-specific code */ qdev_connect_gpio_out(s->gpio, 1, s->reset); return s; }"} {"target": 0, "idx": 1121, "func": "int usb_device_delete_addr(int busnr, int addr) { USBBus *bus; USBPort *port; USBDevice 
*dev; bus = usb_bus_find(busnr); if (!bus) return -1; TAILQ_FOREACH(port, &bus->used, next) { if (port->dev->addr == addr) break; } if (!port) return -1; dev = port->dev; TAILQ_REMOVE(&bus->used, port, next); bus->nused--; usb_attach(port, NULL); dev->info->handle_destroy(dev); TAILQ_INSERT_TAIL(&bus->free, port, next); bus->nfree++; return 0; }"} {"target": 0, "idx": 1122, "func": "static void qmp_output_type_number(Visitor *v, const char *name, double *obj, Error **errp) { QmpOutputVisitor *qov = to_qov(v); qmp_output_add(qov, name, qfloat_from_double(*obj)); }"} {"target": 1, "idx": 1130, "func": "static int aio_write_f(BlockBackend *blk, int argc, char **argv) { int nr_iov, c; int pattern = 0xcd; struct aio_ctx *ctx = g_new0(struct aio_ctx, 1); ctx->blk = blk; while ((c = getopt(argc, argv, \"CqP:z\")) != -1) { switch (c) { case 'C': ctx->Cflag = 1; break; case 'q': ctx->qflag = 1; break; case 'P': pattern = parse_pattern(optarg); if (pattern < 0) { return 0; } break; case 'z': ctx->zflag = 1; break; default: return qemuio_command_usage(&aio_write_cmd); } } if (optind > argc - 2) { return qemuio_command_usage(&aio_write_cmd); } if (ctx->zflag && optind != argc - 2) { printf(\"-z supports only a single length parameter\\n\"); return 0; } if (ctx->zflag && ctx->Pflag) { printf(\"-z and -P cannot be specified at the same time\\n\"); return 0; } ctx->offset = cvtnum(argv[optind]); if (ctx->offset < 0) { print_cvtnum_err(ctx->offset, argv[optind]); return 0; } optind++; if (ctx->offset & 0x1ff) { printf(\"offset %\" PRId64 \" is not sector aligned\\n\", ctx->offset); block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE); return 0; } if (ctx->zflag) { int64_t count = cvtnum(argv[optind]); if (count < 0) { print_cvtnum_err(count, argv[optind]); return 0; } ctx->qiov.size = count; blk_aio_write_zeroes(blk, ctx->offset >> 9, count >> 9, 0, aio_write_done, ctx); } else { nr_iov = argc - optind; ctx->buf = create_iovec(blk, &ctx->qiov, &argv[optind], nr_iov, pattern); if (ctx->buf == NULL) { block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE); return 0; } gettimeofday(&ctx->t1, NULL); block_acct_start(blk_get_stats(blk), &ctx->acct, ctx->qiov.size, BLOCK_ACCT_WRITE); blk_aio_writev(blk, ctx->offset >> 9, &ctx->qiov, ctx->qiov.size >> 9, aio_write_done, ctx); } return 0; }"} {"target": 1, "idx": 1136, "func": "static int alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index, uint16_t **refcount_block) { BDRVQcowState *s = bs->opaque; unsigned int refcount_table_index; int ret; BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); /* Find the refcount block for the given cluster */ refcount_table_index = cluster_index >> s->refcount_block_bits; if (refcount_table_index < s->refcount_table_size) { uint64_t refcount_block_offset = s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK; /* If it's already there, we're done */ if (refcount_block_offset) { if (offset_into_cluster(s, refcount_block_offset)) { qcow2_signal_corruption(bs, true, -1, -1, \"Refblock offset %#\" PRIx64 \" unaligned (reftable index: \" \"%#x)\", refcount_block_offset, refcount_table_index); return -EIO; } return load_refcount_block(bs, refcount_block_offset, (void**) refcount_block); } } /* * If we came here, we need to allocate something. Something is at least * a cluster for the new refcount block. It may also include a new refcount * table if the old refcount table is too small. 
* * Note that allocating clusters here needs some special care: * * - We can't use the normal qcow2_alloc_clusters(), it would try to * increase the refcount and very likely we would end up with an endless * recursion. Instead we must place the refcount blocks in a way that * they can describe them themselves. * * - We need to consider that at this point we are inside update_refcounts * and potentially doing an initial refcount increase. This means that * some clusters have already been allocated by the caller, but their * refcount isn't accurate yet. If we allocate clusters for metadata, we * need to return -EAGAIN to signal the caller that it needs to restart * the search for free clusters. * * - alloc_clusters_noref and qcow2_free_clusters may load a different * refcount block into the cache */ *refcount_block = NULL; /* We write to the refcount table, so we might depend on L2 tables */ ret = qcow2_cache_flush(bs, s->l2_table_cache); if (ret < 0) { return ret; } /* Allocate the refcount block itself and mark it as used */ int64_t new_block = alloc_clusters_noref(bs, s->cluster_size); if (new_block < 0) { return new_block; } #ifdef DEBUG_ALLOC2 fprintf(stderr, \"qcow2: Allocate refcount block %d for %\" PRIx64 \" at %\" PRIx64 \"\\n\", refcount_table_index, cluster_index << s->cluster_bits, new_block); #endif if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) { /* Zero the new refcount block before updating it */ ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block, (void**) refcount_block); if (ret < 0) { goto fail_block; } memset(*refcount_block, 0, s->cluster_size); /* The block describes itself, need to update the cache */ int block_index = (new_block >> s->cluster_bits) & (s->refcount_block_size - 1); (*refcount_block)[block_index] = cpu_to_be16(1); } else { /* Described somewhere else. This can recurse at most twice before we * arrive at a block that describes itself. */ ret = update_refcount(bs, new_block, s->cluster_size, 1, false, QCOW2_DISCARD_NEVER); if (ret < 0) { goto fail_block; } ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { goto fail_block; } /* Initialize the new refcount block only after updating its refcount, * update_refcount uses the refcount cache itself */ ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block, (void**) refcount_block); if (ret < 0) { goto fail_block; } memset(*refcount_block, 0, s->cluster_size); } /* Now the new refcount block needs to be written to disk */ BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE); qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block); ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { goto fail_block; } /* If the refcount table is big enough, just hook the block up there */ if (refcount_table_index < s->refcount_table_size) { uint64_t data64 = cpu_to_be64(new_block); BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP); ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset + refcount_table_index * sizeof(uint64_t), &data64, sizeof(data64)); if (ret < 0) { goto fail_block; } s->refcount_table[refcount_table_index] = new_block; /* The new refcount block may be where the caller intended to put its * data, so let it restart the search. */ return -EAGAIN; } ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block); if (ret < 0) { goto fail_block; } /* * If we come here, we need to grow the refcount table. 
Again, a new * refcount table needs some space and we can't simply allocate to avoid * endless recursion. * * Therefore let's grab new refcount blocks at the end of the image, which * will describe themselves and the new refcount table. This way we can * reference them only in the new table and do the switch to the new * refcount table at once without producing an inconsistent state in * between. */ BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW); /* Calculate the number of refcount blocks needed so far */ uint64_t blocks_used = DIV_ROUND_UP(cluster_index, s->refcount_block_size); if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) { return -EFBIG; } /* And now we need at least one block more for the new metadata */ uint64_t table_size = next_refcount_table_size(s, blocks_used + 1); uint64_t last_table_size; uint64_t blocks_clusters; do { uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t)); blocks_clusters = 1 + ((table_clusters + s->refcount_block_size - 1) / s->refcount_block_size); uint64_t meta_clusters = table_clusters + blocks_clusters; last_table_size = table_size; table_size = next_refcount_table_size(s, blocks_used + ((meta_clusters + s->refcount_block_size - 1) / s->refcount_block_size)); } while (last_table_size != table_size); #ifdef DEBUG_ALLOC2 fprintf(stderr, \"qcow2: Grow refcount table %\" PRId32 \" => %\" PRId64 \"\\n\", s->refcount_table_size, table_size); #endif /* Create the new refcount table and blocks */ uint64_t meta_offset = (blocks_used * s->refcount_block_size) * s->cluster_size; uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size; uint64_t *new_table = g_try_new0(uint64_t, table_size); uint16_t *new_blocks = g_try_malloc0(blocks_clusters * s->cluster_size); assert(table_size > 0 && blocks_clusters > 0); if (new_table == NULL || new_blocks == NULL) { ret = -ENOMEM; goto fail_table; } /* Fill the new refcount table */ memcpy(new_table, s->refcount_table, s->refcount_table_size * sizeof(uint64_t)); new_table[refcount_table_index] = new_block; int i; for (i = 0; i < blocks_clusters; i++) { new_table[blocks_used + i] = meta_offset + (i * s->cluster_size); } /* Fill the refcount blocks */ uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t)); int block = 0; for (i = 0; i < table_clusters + blocks_clusters; i++) { new_blocks[block++] = cpu_to_be16(1); } /* Write refcount blocks to disk */ BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS); ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks, blocks_clusters * s->cluster_size); g_free(new_blocks); new_blocks = NULL; if (ret < 0) { goto fail_table; } /* Write refcount table to disk */ for(i = 0; i < table_size; i++) { cpu_to_be64s(&new_table[i]); } BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE); ret = bdrv_pwrite_sync(bs->file, table_offset, new_table, table_size * sizeof(uint64_t)); if (ret < 0) { goto fail_table; } for(i = 0; i < table_size; i++) { be64_to_cpus(&new_table[i]); } /* Hook up the new refcount table in the qcow2 header */ uint8_t data[12]; cpu_to_be64w((uint64_t*)data, table_offset); cpu_to_be32w((uint32_t*)(data + 8), table_clusters); BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE); ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset), data, sizeof(data)); if (ret < 0) { goto fail_table; } /* And switch it in memory */ uint64_t old_table_offset = s->refcount_table_offset; uint64_t old_table_size = s->refcount_table_size; g_free(s->refcount_table); s->refcount_table = 
new_table; s->refcount_table_size = table_size; s->refcount_table_offset = table_offset; /* Free old table. */ qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t), QCOW2_DISCARD_OTHER); ret = load_refcount_block(bs, new_block, (void**) refcount_block); if (ret < 0) { return ret; } /* If we were trying to do the initial refcount update for some cluster * allocation, we might have used the same clusters to store newly * allocated metadata. Make the caller search some new space. */ return -EAGAIN; fail_table: g_free(new_blocks); g_free(new_table); fail_block: if (*refcount_block != NULL) { qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block); } return ret; }"} {"target": 1, "idx": 1137, "func": "static int alac_decode_frame(AVCodecContext *avctx, void *outbuffer, int *outputsize, const uint8_t *inbuffer, int input_buffer_size) { ALACContext *alac = avctx->priv_data; int channels; unsigned int outputsamples; int hassize; int readsamplesize; int wasted_bytes; int isnotcompressed; uint8_t interlacing_shift; uint8_t interlacing_leftweight; /* short-circuit null buffers */ if (!inbuffer || !input_buffer_size) return input_buffer_size; /* initialize from the extradata */ if (!alac->context_initialized) { if (alac->avctx->extradata_size != ALAC_EXTRADATA_SIZE) { av_log(avctx, AV_LOG_ERROR, \"alac: expected %d extradata bytes\\n\", ALAC_EXTRADATA_SIZE); return input_buffer_size; } if (alac_set_info(alac)) { av_log(avctx, AV_LOG_ERROR, \"alac: set_info failed\\n\"); return input_buffer_size; } alac->context_initialized = 1; } init_get_bits(&alac->gb, inbuffer, input_buffer_size * 8); channels = get_bits(&alac->gb, 3) + 1; if (channels > MAX_CHANNELS) { av_log(avctx, AV_LOG_ERROR, \"channels > %d not supported\\n\", MAX_CHANNELS); return input_buffer_size; } /* 2^result = something to do with output waiting. * perhaps matters if we read > 1 frame in a pass? */ skip_bits(&alac->gb, 4); skip_bits(&alac->gb, 12); /* unknown, skip 12 bits */ /* the output sample size is stored soon */ hassize = get_bits1(&alac->gb); wasted_bytes = get_bits(&alac->gb, 2); /* unknown ? 
*/ /* whether the frame is compressed */ isnotcompressed = get_bits1(&alac->gb); if (hassize) { /* now read the number of samples as a 32bit integer */ outputsamples = get_bits_long(&alac->gb, 32); if(outputsamples > alac->setinfo_max_samples_per_frame){ av_log(avctx, AV_LOG_ERROR, \"outputsamples %d > %d\\n\", outputsamples, alac->setinfo_max_samples_per_frame); return -1; } } else outputsamples = alac->setinfo_max_samples_per_frame; if(outputsamples > *outputsize / alac->bytespersample){ av_log(avctx, AV_LOG_ERROR, \"sample buffer too small\\n\"); return -1; } *outputsize = outputsamples * alac->bytespersample; readsamplesize = alac->setinfo_sample_size - (wasted_bytes * 8) + channels - 1; if (!isnotcompressed) { /* so it is compressed */ int16_t predictor_coef_table[channels][32]; int predictor_coef_num[channels]; int prediction_type[channels]; int prediction_quantitization[channels]; int ricemodifier[channels]; int i, chan; interlacing_shift = get_bits(&alac->gb, 8); interlacing_leftweight = get_bits(&alac->gb, 8); for (chan = 0; chan < channels; chan++) { prediction_type[chan] = get_bits(&alac->gb, 4); prediction_quantitization[chan] = get_bits(&alac->gb, 4); ricemodifier[chan] = get_bits(&alac->gb, 3); predictor_coef_num[chan] = get_bits(&alac->gb, 5); /* read the predictor table */ for (i = 0; i < predictor_coef_num[chan]; i++) predictor_coef_table[chan][i] = (int16_t)get_bits(&alac->gb, 16); } if (wasted_bytes) av_log(avctx, AV_LOG_ERROR, \"FIXME: unimplemented, unhandling of wasted_bytes\\n\"); for (chan = 0; chan < channels; chan++) { bastardized_rice_decompress(alac, alac->predicterror_buffer[chan], outputsamples, readsamplesize, alac->setinfo_rice_initialhistory, alac->setinfo_rice_kmodifier, ricemodifier[chan] * alac->setinfo_rice_historymult / 4, (1 << alac->setinfo_rice_kmodifier) - 1); if (prediction_type[chan] == 0) { /* adaptive fir */ predictor_decompress_fir_adapt(alac->predicterror_buffer[chan], alac->outputsamples_buffer[chan], outputsamples, readsamplesize, predictor_coef_table[chan], predictor_coef_num[chan], prediction_quantitization[chan]); } else { av_log(avctx, AV_LOG_ERROR, \"FIXME: unhandled prediction type: %i\\n\", prediction_type[chan]); /* I think the only other prediction type (or perhaps this is * just a boolean?) runs adaptive fir twice.. like: * predictor_decompress_fir_adapt(predictor_error, tempout, ...) * predictor_decompress_fir_adapt(predictor_error, outputsamples ...) * little strange.. */ } } } else { /* not compressed, easy case */ int i, chan; for (i = 0; i < outputsamples; i++) for (chan = 0; chan < channels; chan++) { int32_t audiobits; audiobits = get_bits_long(&alac->gb, alac->setinfo_sample_size); audiobits = extend_sign32(audiobits, alac->setinfo_sample_size); alac->outputsamples_buffer[chan][i] = audiobits; } /* wasted_bytes = 0; */ interlacing_shift = 0; interlacing_leftweight = 0; } if (get_bits(&alac->gb, 3) != 7) av_log(avctx, AV_LOG_ERROR, \"Error : Wrong End Of Frame\\n\"); switch(alac->setinfo_sample_size) { case 16: if (channels == 2) { reconstruct_stereo_16(alac->outputsamples_buffer, (int16_t*)outbuffer, alac->numchannels, outputsamples, interlacing_shift, interlacing_leftweight); } else { int i; for (i = 0; i < outputsamples; i++) { int16_t sample = alac->outputsamples_buffer[0][i]; ((int16_t*)outbuffer)[i * alac->numchannels] = sample; } } break; case 20: case 24: // It is not clear if there exist any encoder that creates 24 bit ALAC // files. iTunes convert 24 bit raw files to 16 bit before encoding. 
case 32: av_log(avctx, AV_LOG_ERROR, \"FIXME: unimplemented sample size %i\\n\", alac->setinfo_sample_size); break; default: break; } if (input_buffer_size * 8 - get_bits_count(&alac->gb) > 8) av_log(avctx, AV_LOG_ERROR, \"Error : %d bits left\\n\", input_buffer_size * 8 - get_bits_count(&alac->gb)); return input_buffer_size; }"} {"target": 0, "idx": 1145, "func": "static int getopt(int argc, char *argv[], char *opts) { static int sp = 1; int c; char *cp; if (sp == 1) if (optind >= argc || argv[optind][0] != '-' || argv[optind][1] == '\\0') return EOF; else if (!strcmp(argv[optind], \"--\")) { optind++; return EOF; } optopt = c = argv[optind][sp]; if (c == ':' || (cp = strchr(opts, c)) == NULL) { fprintf(stderr, \": illegal option -- %c\\n\", c); if (argv[optind][++sp] == '\\0') { optind++; sp = 1; } return '?'; } if (*++cp == ':') { if (argv[optind][sp+1] != '\\0') optarg = &argv[optind++][sp+1]; else if(++optind >= argc) { fprintf(stderr, \": option requires an argument -- %c\\n\", c); sp = 1; return '?'; } else optarg = argv[optind++]; sp = 1; } else { if (argv[optind][++sp] == '\\0') { sp = 1; optind++; } optarg = NULL; } return c; }"} {"target": 0, "idx": 1167, "func": "static void slavio_timer_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { SLAVIO_TIMERState *s = opaque; uint32_t saddr; int reload = 0; DPRINTF(\"write \" TARGET_FMT_plx \" %08x\\n\", addr, val); saddr = (addr & TIMER_MAXADDR) >> 2; switch (saddr) { case TIMER_LIMIT: if (slavio_timer_is_user(s)) { // set user counter MSW, reset counter qemu_irq_lower(s->irq); s->limit = TIMER_MAX_COUNT64; DPRINTF(\"processor %d user timer reset\\n\", s->slave_index); ptimer_set_limit(s->timer, LIMIT_TO_PERIODS(s->limit), 1); } else { // set limit, reset counter qemu_irq_lower(s->irq); s->limit = val & TIMER_MAX_COUNT32; if (!s->limit) s->limit = TIMER_MAX_COUNT32; ptimer_set_limit(s->timer, s->limit >> 9, 1); } break; case TIMER_COUNTER: if (slavio_timer_is_user(s)) { // set user counter LSW, reset counter qemu_irq_lower(s->irq); s->limit = TIMER_MAX_COUNT64; DPRINTF(\"processor %d user timer reset\\n\", s->slave_index); ptimer_set_limit(s->timer, LIMIT_TO_PERIODS(s->limit), 1); } else DPRINTF(\"not user timer\\n\"); break; case TIMER_COUNTER_NORST: // set limit without resetting counter s->limit = val & TIMER_MAX_COUNT32; if (!s->limit) s->limit = TIMER_MAX_COUNT32; ptimer_set_limit(s->timer, LIMIT_TO_PERIODS(s->limit), reload); break; case TIMER_STATUS: if (slavio_timer_is_user(s)) { // start/stop user counter if ((val & 1) && !s->running) { DPRINTF(\"processor %d user timer started\\n\", s->slave_index); ptimer_run(s->timer, 0); s->running = 1; } else if (!(val & 1) && s->running) { DPRINTF(\"processor %d user timer stopped\\n\", s->slave_index); ptimer_stop(s->timer); s->running = 0; } } break; case TIMER_MODE: if (s->master == NULL) { unsigned int i; for (i = 0; i < s->num_slaves; i++) { if (val & (1 << i)) { qemu_irq_lower(s->slave[i]->irq); s->slave[i]->limit = -1ULL; } if ((val & (1 << i)) != (s->slave_mode & (1 << i))) { ptimer_stop(s->slave[i]->timer); ptimer_set_limit(s->slave[i]->timer, LIMIT_TO_PERIODS(s->slave[i]->limit), 1); DPRINTF(\"processor %d timer changed\\n\", s->slave[i]->slave_index); ptimer_run(s->slave[i]->timer, 0); } } s->slave_mode = val & ((1 << s->num_slaves) - 1); } else DPRINTF(\"not system timer\\n\"); break; default: DPRINTF(\"invalid write address \" TARGET_FMT_plx \"\\n\", addr); break; } }"} {"target": 0, "idx": 1194, "func": "void qemu_cpu_kick(void *env) { return; }"} {"target": 
0, "idx": 1215, "func": "static void temp_allocate_frame(TCGContext *s, int temp) { TCGTemp *ts; ts = &s->temps[temp]; s->current_frame_offset = (s->current_frame_offset + sizeof(tcg_target_long) - 1) & ~(sizeof(tcg_target_long) - 1); if (s->current_frame_offset + sizeof(tcg_target_long) > s->frame_end) tcg_abort(); ts->mem_offset = s->current_frame_offset; ts->mem_reg = s->frame_reg; ts->mem_allocated = 1; s->current_frame_offset += sizeof(tcg_target_long); }"} {"target": 0, "idx": 1227, "func": "void qmp_block_stream(const char *device, bool has_base, const char *base, Error **errp) { BlockDriverState *bs; BlockDriverState *base_bs = NULL; Error *local_err = NULL; bs = bdrv_find(device); if (!bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } if (base) { base_bs = bdrv_find_backing_image(bs, base); if (base_bs == NULL) { error_set(errp, QERR_BASE_NOT_FOUND, base); return; } } stream_start(bs, base_bs, base, block_stream_cb, bs, &local_err); if (error_is_set(&local_err)) { error_propagate(errp, local_err); return; } /* Grab a reference so hotplug does not delete the BlockDriverState from * underneath us. */ drive_get_ref(drive_get_by_blockdev(bs)); trace_qmp_block_stream(bs, bs->job); }"} {"target": 1, "idx": 1277, "func": "static void mptsas_scsi_init(PCIDevice *dev, Error **errp) { DeviceState *d = DEVICE(dev); MPTSASState *s = MPT_SAS(dev); dev->config[PCI_LATENCY_TIMER] = 0; dev->config[PCI_INTERRUPT_PIN] = 0x01; memory_region_init_io(&s->mmio_io, OBJECT(s), &mptsas_mmio_ops, s, \"mptsas-mmio\", 0x4000); memory_region_init_io(&s->port_io, OBJECT(s), &mptsas_port_ops, s, \"mptsas-io\", 256); memory_region_init_io(&s->diag_io, OBJECT(s), &mptsas_diag_ops, s, \"mptsas-diag\", 0x10000); if (s->msi != ON_OFF_AUTO_OFF && msi_init(dev, 0, 1, true, false) >= 0) { /* TODO check for errors */ s->msi_in_use = true; } pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->port_io); pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32, &s->mmio_io); pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32, &s->diag_io); if (!s->sas_addr) { s->sas_addr = ((NAA_LOCALLY_ASSIGNED_ID << 24) | IEEE_COMPANY_LOCALLY_ASSIGNED) << 36; s->sas_addr |= (pci_bus_num(dev->bus) << 16); s->sas_addr |= (PCI_SLOT(dev->devfn) << 8); s->sas_addr |= PCI_FUNC(dev->devfn); } s->max_devices = MPTSAS_NUM_PORTS; s->request_bh = qemu_bh_new(mptsas_fetch_requests, s); QTAILQ_INIT(&s->pending); scsi_bus_new(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info, NULL); if (!d->hotplugged) { scsi_bus_legacy_handle_cmdline(&s->bus, errp); } }"} {"target": 0, "idx": 1290, "func": "static inline int num_effective_busses(XilinxSPIPS *s) { return (s->regs[R_LQSPI_STS] & LQSPI_CFG_SEP_BUS && s->regs[R_LQSPI_STS] & LQSPI_CFG_TWO_MEM) ? 
s->num_busses : 1; }"} {"target": 1, "idx": 1307, "func": "FFTContext *av_fft_init(int nbits, int inverse) { FFTContext *s = av_malloc(sizeof(*s)); if (s && ff_fft_init(s, nbits, inverse)) av_freep(&s); return s; }"} {"target": 1, "idx": 1308, "func": "static void gen_set_CF_bit31(TCGv var) { TCGv tmp = new_tmp(); tcg_gen_shri_i32(tmp, var, 31); gen_set_CF(tmp); dead_tmp(tmp); }"} {"target": 1, "idx": 1318, "func": "static int get_siz(Jpeg2000DecoderContext *s) { int i; int ncomponents; uint32_t log2_chroma_wh = 0; const enum AVPixelFormat *possible_fmts = NULL; int possible_fmts_nb = 0; if (bytestream2_get_bytes_left(&s->g) < 36) { av_log(s->avctx, AV_LOG_ERROR, \"Insufficient space for SIZ\\n\"); s->avctx->profile = bytestream2_get_be16u(&s->g); // Rsiz s->width = bytestream2_get_be32u(&s->g); // Width s->height = bytestream2_get_be32u(&s->g); // Height s->image_offset_x = bytestream2_get_be32u(&s->g); // X0Siz s->image_offset_y = bytestream2_get_be32u(&s->g); // Y0Siz s->tile_width = bytestream2_get_be32u(&s->g); // XTSiz s->tile_height = bytestream2_get_be32u(&s->g); // YTSiz s->tile_offset_x = bytestream2_get_be32u(&s->g); // XT0Siz s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz ncomponents = bytestream2_get_be16u(&s->g); // CSiz if (s->image_offset_x || s->image_offset_y) { avpriv_request_sample(s->avctx, \"Support for image offsets\"); return AVERROR_PATCHWELCOME; if (av_image_check_size(s->width, s->height, 0, s->avctx)) { avpriv_request_sample(s->avctx, \"Large Dimensions\"); return AVERROR_PATCHWELCOME; if (ncomponents <= 0) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid number of components: %d\\n\", if (ncomponents > 4) { avpriv_request_sample(s->avctx, \"Support for %d components\", ncomponents); return AVERROR_PATCHWELCOME; s->ncomponents = ncomponents; if (s->tile_width <= 0 || s->tile_height <= 0) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid tile dimension %dx%d.\\n\", s->tile_width, s->tile_height); if (bytestream2_get_bytes_left(&s->g) < 3 * s->ncomponents) { av_log(s->avctx, AV_LOG_ERROR, \"Insufficient space for %d components in SIZ\\n\", s->ncomponents); for (i = 0; i < s->ncomponents; i++) { // Ssiz_i XRsiz_i, YRsiz_i uint8_t x = bytestream2_get_byteu(&s->g); s->cbps[i] = (x & 0x7f) + 1; s->precision = FFMAX(s->cbps[i], s->precision); s->sgnd[i] = !!(x & 0x80); s->cdx[i] = bytestream2_get_byteu(&s->g); s->cdy[i] = bytestream2_get_byteu(&s->g); if ( !s->cdx[i] || s->cdx[i] == 3 || s->cdx[i] > 4 || !s->cdy[i] || s->cdy[i] == 3 || s->cdy[i] > 4) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid sample separation %d/%d\\n\", s->cdx[i], s->cdy[i]); log2_chroma_wh |= s->cdy[i] >> 1 << i * 4 | s->cdx[i] >> 1 << i * 4 + 2; s->numXtiles = ff_jpeg2000_ceildiv(s->width - s->tile_offset_x, s->tile_width); s->numYtiles = ff_jpeg2000_ceildiv(s->height - s->tile_offset_y, s->tile_height); if (s->numXtiles * (uint64_t)s->numYtiles > INT_MAX/sizeof(*s->tile)) { s->numXtiles = s->numYtiles = 0; return AVERROR(EINVAL); s->tile = av_mallocz_array(s->numXtiles * s->numYtiles, sizeof(*s->tile)); if (!s->tile) { s->numXtiles = s->numYtiles = 0; return AVERROR(ENOMEM); for (i = 0; i < s->numXtiles * s->numYtiles; i++) { Jpeg2000Tile *tile = s->tile + i; tile->comp = av_mallocz(s->ncomponents * sizeof(*tile->comp)); if (!tile->comp) return AVERROR(ENOMEM); /* compute image size with reduction factor */ s->avctx->width = ff_jpeg2000_ceildivpow2(s->width - s->image_offset_x, s->reduction_factor); s->avctx->height = ff_jpeg2000_ceildivpow2(s->height - s->image_offset_y, s->reduction_factor); if 
(s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_2K || s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_4K) { possible_fmts = xyz_pix_fmts; possible_fmts_nb = FF_ARRAY_ELEMS(xyz_pix_fmts); } else { switch (s->colour_space) { case 16: possible_fmts = rgb_pix_fmts; possible_fmts_nb = FF_ARRAY_ELEMS(rgb_pix_fmts); break; case 17: possible_fmts = gray_pix_fmts; possible_fmts_nb = FF_ARRAY_ELEMS(gray_pix_fmts); break; case 18: possible_fmts = yuv_pix_fmts; possible_fmts_nb = FF_ARRAY_ELEMS(yuv_pix_fmts); break; default: possible_fmts = all_pix_fmts; possible_fmts_nb = FF_ARRAY_ELEMS(all_pix_fmts); break; for (i = 0; i < possible_fmts_nb; ++i) { if (pix_fmt_match(possible_fmts[i], ncomponents, s->precision, log2_chroma_wh, s->pal8)) { s->avctx->pix_fmt = possible_fmts[i]; break; if (i == possible_fmts_nb) { if (ncomponents == 4 && s->cdy[0] == 1 && s->cdx[0] == 1 && s->cdy[1] == 1 && s->cdx[1] == 1 && s->cdy[2] == s->cdy[3] && s->cdx[2] == s->cdx[3]) { if (s->precision == 8 && s->cdy[2] == 2 && s->cdx[2] == 2 && !s->pal8) { s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; s->cdef[0] = 0; s->cdef[1] = 1; s->cdef[2] = 2; s->cdef[3] = 3; i = 0; if (i == possible_fmts_nb) { av_log(s->avctx, AV_LOG_ERROR, \"Unknown pix_fmt, profile: %d, colour_space: %d, \" \"components: %d, precision: %d\\n\" \"cdx[0]: %d, cdy[0]: %d\\n\" \"cdx[1]: %d, cdy[1]: %d\\n\" \"cdx[2]: %d, cdy[2]: %d\\n\" \"cdx[3]: %d, cdy[3]: %d\\n\", s->avctx->profile, s->colour_space, ncomponents, s->precision, s->cdx[0], s->cdy[0], ncomponents > 1 ? s->cdx[1] : 0, ncomponents > 1 ? s->cdy[1] : 0, ncomponents > 2 ? s->cdx[2] : 0, ncomponents > 2 ? s->cdy[2] : 0, ncomponents > 3 ? s->cdx[3] : 0, ncomponents > 3 ? s->cdy[3] : 0); return AVERROR_PATCHWELCOME; s->avctx->bits_per_raw_sample = s->precision; return 0;"} {"target": 1, "idx": 1335, "func": "static void commit_set_speed(BlockJob *job, int64_t speed, Error **errp) { CommitBlockJob *s = container_of(job, CommitBlockJob, common); if (speed < 0) { error_setg(errp, QERR_INVALID_PARAMETER, \"speed\"); return; } ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME); }"} {"target": 1, "idx": 1338, "func": "static void sd_1d97_int(int *p, int i0, int i1) { int i; if (i1 <= i0 + 1) { if (i0 == 1) p[1] = (p[1] * I_LFTG_X + (1<<15)) >> 16; else p[0] = (p[0] * I_LFTG_K + (1<<15)) >> 16; return; } extend97_int(p, i0, i1); i0++; i1++; for (i = i0/2 - 2; i < i1/2 + 1; i++) p[2 * i + 1] -= (I_LFTG_ALPHA * (p[2 * i] + p[2 * i + 2]) + (1 << 15)) >> 16; for (i = i0/2 - 1; i < i1/2 + 1; i++) p[2 * i] -= (I_LFTG_BETA * (p[2 * i - 1] + p[2 * i + 1]) + (1 << 15)) >> 16; for (i = i0/2 - 1; i < i1/2; i++) p[2 * i + 1] += (I_LFTG_GAMMA * (p[2 * i] + p[2 * i + 2]) + (1 << 15)) >> 16; for (i = i0/2; i < i1/2; i++) p[2 * i] += (I_LFTG_DELTA * (p[2 * i - 1] + p[2 * i + 1]) + (1 << 15)) >> 16; }"} {"target": 1, "idx": 1341, "func": "static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width) { int i; for(i=0; i>8)&0xFF; int b= (((uint32_t*)src)[i]>>16)&0xFF; dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT); } }"} {"target": 0, "idx": 1351, "func": "static uint32_t s390_pci_generate_fid(Error **errp) { uint32_t fid = 0; while (fid <= ZPCI_MAX_FID) { if (!s390_pci_find_dev_by_fid(fid)) { return fid; } if (fid == ZPCI_MAX_FID) { break; } fid++; } error_setg(errp, \"no free fid could be found\"); return 0; }"} {"target": 0, "idx": 1360, "func": "static void virtio_9p_device_unrealize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); 
V9fsVirtioState *v = VIRTIO_9P(dev); V9fsState *s = &v->state; virtio_cleanup(vdev); v9fs_device_unrealize_common(s, errp); }"} {"target": 1, "idx": 1373, "func": "static int ipmovie_read_packet(AVFormatContext *s, AVPacket *pkt) { IPMVEContext *ipmovie = (IPMVEContext *)s->priv_data; ByteIOContext *pb = &s->pb; int ret; ret = process_ipmovie_chunk(ipmovie, pb, pkt); if (ret == CHUNK_BAD) ret = AVERROR_INVALIDDATA; else if (ret == CHUNK_EOF) ret = AVERROR_IO; else if (ret == CHUNK_NOMEM) ret = AVERROR_NOMEM; else ret = 0; return ret; }"} {"target": 1, "idx": 1377, "func": "static int opus_decode_frame(OpusStreamContext *s, const uint8_t *data, int size) { int samples = s->packet.frame_duration; int redundancy = 0; int redundancy_size, redundancy_pos; int ret, i, consumed; int delayed_samples = s->delayed_samples; ret = opus_rc_init(&s->rc, data, size); if (ret < 0) return ret; /* decode the silk frame */ if (s->packet.mode == OPUS_MODE_SILK || s->packet.mode == OPUS_MODE_HYBRID) { if (!swr_is_initialized(s->swr)) { ret = opus_init_resample(s); if (ret < 0) return ret; } samples = ff_silk_decode_superframe(s->silk, &s->rc, s->silk_output, FFMIN(s->packet.bandwidth, OPUS_BANDWIDTH_WIDEBAND), s->packet.stereo + 1, silk_frame_duration_ms[s->packet.config]); if (samples < 0) { av_log(s->avctx, AV_LOG_ERROR, \"Error decoding a SILK frame.\\n\"); return samples; } samples = swr_convert(s->swr, (uint8_t**)s->out, s->packet.frame_duration, (const uint8_t**)s->silk_output, samples); if (samples < 0) { av_log(s->avctx, AV_LOG_ERROR, \"Error resampling SILK data.\\n\"); return samples; } s->delayed_samples += s->packet.frame_duration - samples; } else ff_silk_flush(s->silk); // decode redundancy information consumed = opus_rc_tell(&s->rc); if (s->packet.mode == OPUS_MODE_HYBRID && consumed + 37 <= size * 8) redundancy = opus_rc_p2model(&s->rc, 12); else if (s->packet.mode == OPUS_MODE_SILK && consumed + 17 <= size * 8) redundancy = 1; if (redundancy) { redundancy_pos = opus_rc_p2model(&s->rc, 1); if (s->packet.mode == OPUS_MODE_HYBRID) redundancy_size = opus_rc_unimodel(&s->rc, 256) + 2; else redundancy_size = size - (consumed + 7) / 8; size -= redundancy_size; if (size < 0) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid redundancy frame size.\\n\"); return AVERROR_INVALIDDATA; } if (redundancy_pos) { ret = opus_decode_redundancy(s, data + size, redundancy_size); if (ret < 0) return ret; ff_celt_flush(s->celt); } } /* decode the CELT frame */ if (s->packet.mode == OPUS_MODE_CELT || s->packet.mode == OPUS_MODE_HYBRID) { float *out_tmp[2] = { s->out[0], s->out[1] }; float **dst = (s->packet.mode == OPUS_MODE_CELT) ? out_tmp : s->celt_output; int celt_output_samples = samples; int delay_samples = av_audio_fifo_size(s->celt_delay); if (delay_samples) { if (s->packet.mode == OPUS_MODE_HYBRID) { av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, delay_samples); for (i = 0; i < s->output_channels; i++) { s->fdsp->vector_fmac_scalar(out_tmp[i], s->celt_output[i], 1.0, delay_samples); out_tmp[i] += delay_samples; } celt_output_samples -= delay_samples; } else { av_log(s->avctx, AV_LOG_WARNING, \"Spurious CELT delay samples present.\\n\"); av_audio_fifo_drain(s->celt_delay, delay_samples); if (s->avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_BUG; } } opus_raw_init(&s->rc, data + size, size); ret = ff_celt_decode_frame(s->celt, &s->rc, dst, s->packet.stereo + 1, s->packet.frame_duration, (s->packet.mode == OPUS_MODE_HYBRID) ? 
17 : 0, celt_band_end[s->packet.bandwidth]); if (ret < 0) return ret; if (s->packet.mode == OPUS_MODE_HYBRID) { int celt_delay = s->packet.frame_duration - celt_output_samples; void *delaybuf[2] = { s->celt_output[0] + celt_output_samples, s->celt_output[1] + celt_output_samples }; for (i = 0; i < s->output_channels; i++) { s->fdsp->vector_fmac_scalar(out_tmp[i], s->celt_output[i], 1.0, celt_output_samples); } ret = av_audio_fifo_write(s->celt_delay, delaybuf, celt_delay); if (ret < 0) return ret; } } else ff_celt_flush(s->celt); if (s->redundancy_idx) { for (i = 0; i < s->output_channels; i++) opus_fade(s->out[i], s->out[i], s->redundancy_output[i] + 120 + s->redundancy_idx, ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx); s->redundancy_idx = 0; } if (redundancy) { if (!redundancy_pos) { ff_celt_flush(s->celt); ret = opus_decode_redundancy(s, data + size, redundancy_size); if (ret < 0) return ret; for (i = 0; i < s->output_channels; i++) { opus_fade(s->out[i] + samples - 120 + delayed_samples, s->out[i] + samples - 120 + delayed_samples, s->redundancy_output[i] + 120, ff_celt_window2, 120 - delayed_samples); if (delayed_samples) s->redundancy_idx = 120 - delayed_samples; } } else { for (i = 0; i < s->output_channels; i++) { memcpy(s->out[i] + delayed_samples, s->redundancy_output[i], 120 * sizeof(float)); opus_fade(s->out[i] + 120 + delayed_samples, s->redundancy_output[i] + 120, s->out[i] + 120 + delayed_samples, ff_celt_window2, 120); } } } return samples; }"} {"target": 1, "idx": 1381, "func": "bool qemu_co_enter_next(CoQueue *queue) { Coroutine *next; next = QSIMPLEQ_FIRST(&queue->entries); if (!next) { return false; } QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next); qemu_coroutine_enter(next, NULL); return true; }"} {"target": 1, "idx": 1404, "func": "static int decodeChannelSoundUnit (ATRAC3Context *q, GetBitContext *gb, channel_unit *pSnd, float *pOut, int channelNum, int codingMode) { int band, result=0, numSubbands, lastTonal, numBands; if (codingMode == JOINT_STEREO && channelNum == 1) { if (get_bits(gb,2) != 3) { av_log(NULL,AV_LOG_ERROR,\"JS mono Sound Unit id != 3.\\n\"); return -1; } } else { if (get_bits(gb,6) != 0x28) { av_log(NULL,AV_LOG_ERROR,\"Sound Unit id != 0x28.\\n\"); return -1; } } /* number of coded QMF bands */ pSnd->bandsCoded = get_bits(gb,2); result = decodeGainControl (gb, &(pSnd->gainBlock[pSnd->gcBlkSwitch]), pSnd->bandsCoded); if (result) return result; pSnd->numComponents = decodeTonalComponents (gb, pSnd->components, pSnd->bandsCoded); if (pSnd->numComponents == -1) return -1; numSubbands = decodeSpectrum (gb, pSnd->spectrum); /* Merge the decoded spectrum and tonal components. */ lastTonal = addTonalComponents (pSnd->spectrum, pSnd->numComponents, pSnd->components); /* calculate number of used MLT/QMF bands according to the amount of coded spectral lines */ numBands = (subbandTab[numSubbands] - 1) >> 8; if (lastTonal >= 0) numBands = FFMAX((lastTonal + 256) >> 8, numBands); /* Reconstruct time domain samples. */ for (band=0; band<4; band++) { /* Perform the IMDCT step without overlapping. */ if (band <= numBands) { IMLT(&(pSnd->spectrum[band*256]), pSnd->IMDCT_buf, band&1); } else memset(pSnd->IMDCT_buf, 0, 512 * sizeof(float)); /* gain compensation and overlapping */ gainCompensateAndOverlap (pSnd->IMDCT_buf, &(pSnd->prevFrame[band*256]), &(pOut[band*256]), &((pSnd->gainBlock[1 - (pSnd->gcBlkSwitch)]).gBlock[band]), &((pSnd->gainBlock[pSnd->gcBlkSwitch]).gBlock[band])); } /* Swap the gain control buffers for the next frame. 
*/ pSnd->gcBlkSwitch ^= 1; return 0; }"} {"target": 1, "idx": 1420, "func": "static target_long monitor_get_reg(const struct MonitorDef *md, int val) { CPUState *env = mon_get_cpu(); if (!env) return 0; return env->regwptr[val]; }"} {"target": 0, "idx": 1426, "func": "static void vp8_idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride) { vp8_idct_dc_add_c(dst+stride*0+0, block[0], stride); vp8_idct_dc_add_c(dst+stride*0+4, block[1], stride); vp8_idct_dc_add_c(dst+stride*4+0, block[2], stride); vp8_idct_dc_add_c(dst+stride*4+4, block[3], stride); }"} {"target": 1, "idx": 1434, "func": "static void integratorcp_init(QEMUMachineInitArgs *args) { ram_addr_t ram_size = args->ram_size; const char *cpu_model = args->cpu_model; const char *kernel_filename = args->kernel_filename; const char *kernel_cmdline = args->kernel_cmdline; const char *initrd_filename = args->initrd_filename; ARMCPU *cpu; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *ram_alias = g_new(MemoryRegion, 1); qemu_irq pic[32]; DeviceState *dev; int i; if (!cpu_model) { cpu_model = \"arm926\"; } cpu = cpu_arm_init(cpu_model); if (!cpu) { fprintf(stderr, \"Unable to find CPU definition\\n\"); exit(1); } memory_region_init_ram(ram, NULL, \"integrator.ram\", ram_size); vmstate_register_ram_global(ram); /* ??? On a real system the first 1Mb is mapped as SSRAM or boot flash. */ /* ??? RAM should repeat to fill physical memory space. */ /* SDRAM at address zero*/ memory_region_add_subregion(address_space_mem, 0, ram); /* And again at address 0x80000000 */ memory_region_init_alias(ram_alias, NULL, \"ram.alias\", ram, 0, ram_size); memory_region_add_subregion(address_space_mem, 0x80000000, ram_alias); dev = qdev_create(NULL, TYPE_INTEGRATOR_CM); qdev_prop_set_uint32(dev, \"memsz\", ram_size >> 20); qdev_init_nofail(dev); sysbus_mmio_map((SysBusDevice *)dev, 0, 0x10000000); dev = sysbus_create_varargs(TYPE_INTEGRATOR_PIC, 0x14000000, qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ), qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_FIQ), NULL); for (i = 0; i < 32; i++) { pic[i] = qdev_get_gpio_in(dev, i); } sysbus_create_simple(TYPE_INTEGRATOR_PIC, 0xca000000, pic[26]); sysbus_create_varargs(\"integrator_pit\", 0x13000000, pic[5], pic[6], pic[7], NULL); sysbus_create_simple(\"pl031\", 0x15000000, pic[8]); sysbus_create_simple(\"pl011\", 0x16000000, pic[1]); sysbus_create_simple(\"pl011\", 0x17000000, pic[2]); icp_control_init(0xcb000000); sysbus_create_simple(\"pl050_keyboard\", 0x18000000, pic[3]); sysbus_create_simple(\"pl050_mouse\", 0x19000000, pic[4]); sysbus_create_varargs(\"pl181\", 0x1c000000, pic[23], pic[24], NULL); if (nd_table[0].used) smc91c111_init(&nd_table[0], 0xc8000000, pic[27]); sysbus_create_simple(\"pl110\", 0xc0000000, pic[22]); integrator_binfo.ram_size = ram_size; integrator_binfo.kernel_filename = kernel_filename; integrator_binfo.kernel_cmdline = kernel_cmdline; integrator_binfo.initrd_filename = initrd_filename; arm_load_kernel(cpu, &integrator_binfo); }"} {"target": 1, "idx": 1435, "func": "static void set_vlan(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { DeviceState *dev = DEVICE(obj); Property *prop = opaque; NICPeers *peers_ptr = qdev_get_prop_ptr(dev, prop); NetClientState **ptr = &peers_ptr->ncs[0]; Error *local_err = NULL; int32_t id; NetClientState *hubport; if (dev->realized) { qdev_prop_set_after_realize(dev, name, errp); visit_type_int32(v, &id, name, &local_err); if (local_err) { error_propagate(errp, 
local_err); if (id == -1) { *ptr = NULL; hubport = net_hub_port_find(id); if (!hubport) { error_set(errp, QERR_INVALID_PARAMETER_VALUE, name, prop->info->name); *ptr = hubport;"} {"target": 1, "idx": 1439, "func": "static av_cold int png_dec_end(AVCodecContext *avctx) { PNGDecContext *s = avctx->priv_data; ff_thread_release_buffer(avctx, &s->previous_picture); av_frame_free(&s->previous_picture.f); ff_thread_release_buffer(avctx, &s->last_picture); av_frame_free(&s->last_picture.f); ff_thread_release_buffer(avctx, &s->picture); av_frame_free(&s->picture.f); av_freep(&s->buffer); s->buffer_size = 0; av_freep(&s->last_row); s->last_row_size = 0; av_freep(&s->tmp_row); s->tmp_row_size = 0; av_freep(&s->extra_data); s->extra_data_size = 0; return 0; }"} {"target": 1, "idx": 1457, "func": "static void fd_chr_update_read_handler(CharDriverState *chr) { FDCharDriver *s = chr->opaque; if (s->fd_in_tag) { g_source_remove(s->fd_in_tag); s->fd_in_tag = 0; } if (s->fd_in) { s->fd_in_tag = io_add_watch_poll(s->fd_in, fd_chr_read_poll, fd_chr_read, chr); } }"} {"target": 1, "idx": 1469, "func": "static int probe_file(WriterContext *wctx, const char *filename) { AVFormatContext *fmt_ctx; int ret, i; int section_id; do_read_frames = do_show_frames || do_count_frames; do_read_packets = do_show_packets || do_count_packets; ret = open_input_file(&fmt_ctx, filename); if (ret < 0) return ret; #define CHECK_END if (ret < 0) goto end nb_streams_frames = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_frames)); nb_streams_packets = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_packets)); selected_streams = av_calloc(fmt_ctx->nb_streams, sizeof(*selected_streams)); for (i = 0; i < fmt_ctx->nb_streams; i++) { if (stream_specifier) { ret = avformat_match_stream_specifier(fmt_ctx, fmt_ctx->streams[i], stream_specifier); CHECK_END; else selected_streams[i] = ret; ret = 0; } else { selected_streams[i] = 1; } } if (do_read_frames || do_read_packets) { if (do_show_frames && do_show_packets && wctx->writer->flags & WRITER_FLAG_PUT_PACKETS_AND_FRAMES_IN_SAME_CHAPTER) section_id = SECTION_ID_PACKETS_AND_FRAMES; else if (do_show_packets && !do_show_frames) section_id = SECTION_ID_PACKETS; else // (!do_show_packets && do_show_frames) section_id = SECTION_ID_FRAMES; if (do_show_frames || do_show_packets) writer_print_section_header(wctx, section_id); ret = read_packets(wctx, fmt_ctx); if (do_show_frames || do_show_packets) writer_print_section_footer(wctx); CHECK_END; } if (do_show_programs) { ret = show_programs(wctx, fmt_ctx); CHECK_END; } if (do_show_streams) { ret = show_streams(wctx, fmt_ctx); CHECK_END; } if (do_show_chapters) { ret = show_chapters(wctx, fmt_ctx); CHECK_END; } if (do_show_format) { ret = show_format(wctx, fmt_ctx); CHECK_END; } end: close_input_file(&fmt_ctx); av_freep(&nb_streams_frames); av_freep(&nb_streams_packets); av_freep(&selected_streams); return ret; }"} {"target": 1, "idx": 1473, "func": "static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr) { KVMState *s = kvm_state; unsigned long size, allocated_size = 0; KVMDirtyLog d; KVMSlot *mem; int ret = 0; d.dirty_bitmap = NULL; while (start_addr < end_addr) { mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr); if (mem == NULL) { break; } size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS), HOST_LONG_BITS) / 8; if (!d.dirty_bitmap) { d.dirty_bitmap = qemu_malloc(size); } else if (size > allocated_size) { d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size); } allocated_size = size; 
memset(d.dirty_bitmap, 0, allocated_size); d.slot = mem->slot; if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) { DPRINTF(\"ioctl failed %d\\n\", errno); ret = -1; break; } kvm_get_dirty_pages_log_range(mem->start_addr, d.dirty_bitmap, mem->start_addr, mem->memory_size); start_addr = mem->start_addr + mem->memory_size; } qemu_free(d.dirty_bitmap); return ret; }"} {"target": 1, "idx": 1476, "func": "static void filter(USPPContext *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height, uint8_t *qp_store, int qp_stride) { int x, y, i, j; const int count = 1<log2_count; for (i = 0; i < 3; i++) { int is_chroma = !!i; int w = width >> (is_chroma ? p->hsub : 0); int h = height >> (is_chroma ? p->vsub : 0); int stride = p->temp_stride[i]; int block = BLOCK >> (is_chroma ? p->hsub : 0); if (!src[i] || !dst[i]) continue; for (y = 0; y < h; y++) { int index = block + block * stride + y * stride; memcpy(p->src[i] + index, src[i] + y * src_stride[i], w ); for (x = 0; x < block; x++) { p->src[i][index - x - 1] = p->src[i][index + x ]; p->src[i][index + w + x ] = p->src[i][index + w - x - 1]; } } for (y = 0; y < block; y++) { memcpy(p->src[i] + ( block-1-y) * stride, p->src[i] + ( y+block ) * stride, stride); memcpy(p->src[i] + (h+block +y) * stride, p->src[i] + (h-y+block-1) * stride, stride); } p->frame->linesize[i] = stride; memset(p->temp[i], 0, (h + 2 * block) * stride * sizeof(int16_t)); } if (p->qp) p->frame->quality = p->qp * FF_QP2LAMBDA; else { int qpsum=0; int qpcount = (height>>4) * (height>>4); for (y = 0; y < (height>>4); y++) { for (x = 0; x < (width>>4); x++) qpsum += qp_store[x + y * qp_stride]; } p->frame->quality = norm_qscale((qpsum + qpcount/2) / qpcount, p->qscale_type) * FF_QP2LAMBDA; } // init per MB qscale stuff FIXME p->frame->height = height; p->frame->width = width; for (i = 0; i < count; i++) { const int x1 = offset[i+count-1][0]; const int y1 = offset[i+count-1][1]; const int x1c = x1 >> p->hsub; const int y1c = y1 >> p->vsub; const int BLOCKc = BLOCK >> p->hsub; int offset; AVPacket pkt; int got_pkt_ptr; av_init_packet(&pkt); pkt.data = p->outbuf; pkt.size = p->outbuf_size; p->frame->data[0] = p->src[0] + x1 + y1 * p->frame->linesize[0]; p->frame->data[1] = p->src[1] + x1c + y1c * p->frame->linesize[1]; p->frame->data[2] = p->src[2] + x1c + y1c * p->frame->linesize[2]; p->frame->format = p->avctx_enc[i]->pix_fmt; avcodec_encode_video2(p->avctx_enc[i], &pkt, p->frame, &got_pkt_ptr); p->frame_dec = p->avctx_enc[i]->coded_frame; offset = (BLOCK-x1) + (BLOCK-y1) * p->frame_dec->linesize[0]; for (y = 0; y < height; y++) for (x = 0; x < width; x++) p->temp[0][x + y * p->temp_stride[0]] += p->frame_dec->data[0][x + y * p->frame_dec->linesize[0] + offset]; if (!src[2] || !dst[2]) continue; offset = (BLOCKc-x1c) + (BLOCKc-y1c) * p->frame_dec->linesize[1]; for (y = 0; y < height>>p->vsub; y++) { for (x = 0; x < width>>p->hsub; x++) { p->temp[1][x + y * p->temp_stride[1]] += p->frame_dec->data[1][x + y * p->frame_dec->linesize[1] + offset]; p->temp[2][x + y * p->temp_stride[2]] += p->frame_dec->data[2][x + y * p->frame_dec->linesize[2] + offset]; } } } for (j = 0; j < 3; j++) { int is_chroma = !!j; if (!dst[j]) continue; store_slice_c(dst[j], p->temp[j], dst_stride[j], p->temp_stride[j], width >> (is_chroma ? p->hsub : 0), height >> (is_chroma ? 
p->vsub : 0), 8-p->log2_count); } }"} {"target": 0, "idx": 1487, "func": "int main(void) { int nf; Suite *s; SRunner *sr; s = qfloat_suite(); sr = srunner_create(s); srunner_run_all(sr, CK_NORMAL); nf = srunner_ntests_failed(sr); srunner_free(sr); return (nf == 0) ? EXIT_SUCCESS : EXIT_FAILURE; }"} {"target": 0, "idx": 1497, "func": "static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba, target_ulong tce) { IOMMUTLBEntry entry; hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift); unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift; if (index >= tcet->nb_table) { hcall_dprintf(\"spapr_vio_put_tce on out-of-bounds IOBA 0x\" TARGET_FMT_lx \"\\n\", ioba); return H_PARAMETER; } tcet->table[index] = tce; entry.target_as = &address_space_memory, entry.iova = ioba & page_mask; entry.translated_addr = tce & page_mask; entry.addr_mask = ~page_mask; entry.perm = spapr_tce_iommu_access_flags(tce); memory_region_notify_iommu(&tcet->iommu, entry); return H_SUCCESS; }"} {"target": 0, "idx": 1500, "func": "static target_ulong h_enter(CPUState *env, sPAPREnvironment *spapr, target_ulong opcode, target_ulong *args) { target_ulong flags = args[0]; target_ulong pte_index = args[1]; target_ulong pteh = args[2]; target_ulong ptel = args[3]; target_ulong i; uint8_t *hpte; /* only handle 4k and 16M pages for now */ if (pteh & HPTE_V_LARGE) { #if 0 /* We don't support 64k pages yet */ if ((ptel & 0xf000) == 0x1000) { /* 64k page */ } else #endif if ((ptel & 0xff000) == 0) { /* 16M page */ /* lowest AVA bit must be 0 for 16M pages */ if (pteh & 0x80) { return H_PARAMETER; } } else { return H_PARAMETER; } } /* FIXME: bounds check the pa? */ /* Check WIMG */ if ((ptel & HPTE_R_WIMG) != HPTE_R_M) { return H_PARAMETER; } pteh &= ~0x60ULL; if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) { return H_PARAMETER; } if (likely((flags & H_EXACT) == 0)) { pte_index &= ~7ULL; hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64); for (i = 0; ; ++i) { if (i == 8) { return H_PTEG_FULL; } if (((ldq_p(hpte) & HPTE_V_VALID) == 0) && lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) { break; } hpte += HASH_PTE_SIZE_64; } } else { i = 0; hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64); if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) { return H_PTEG_FULL; } } stq_p(hpte + (HASH_PTE_SIZE_64/2), ptel); /* eieio(); FIXME: need some sort of barrier for smp? 
*/ stq_p(hpte, pteh); assert(!(ldq_p(hpte) & HPTE_V_HVLOCK)); args[0] = pte_index + i; return H_SUCCESS; }"} {"target": 1, "idx": 1507, "func": "static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, int add_ca, int compute_ca, int compute_ov) { TCGv t0, t1; if ((!compute_ca && !compute_ov) || (!TCGV_EQUAL(ret,arg1) && !TCGV_EQUAL(ret, arg2))) { t0 = ret; } else { t0 = tcg_temp_local_new(); } if (add_ca) { t1 = tcg_temp_local_new(); tcg_gen_mov_tl(t1, cpu_ca); } else { TCGV_UNUSED(t1); } if (compute_ca) { /* Start with XER CA disabled, the most likely case */ tcg_gen_movi_tl(cpu_ca, 0); } if (compute_ov) { /* Start with XER OV disabled, the most likely case */ tcg_gen_movi_tl(cpu_ov, 0); } tcg_gen_add_tl(t0, arg1, arg2); if (compute_ca) { gen_op_arith_compute_ca(ctx, t0, arg1, 0); } if (add_ca) { tcg_gen_add_tl(t0, t0, t1); gen_op_arith_compute_ca(ctx, t0, t1, 0); tcg_temp_free(t1); } if (compute_ov) { gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0); } if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, t0); if (!TCGV_EQUAL(t0, ret)) { tcg_gen_mov_tl(ret, t0); tcg_temp_free(t0); } }"} {"target": 1, "idx": 1508, "func": "static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, int src_size) { const uint16_t *end; const uint16_t *mm_end; uint8_t *d = dst; const uint16_t *s = (const uint16_t*)src; end = s + src_size/2; __asm__ volatile(PREFETCH\" %0\"::\"m\"(*s):\"memory\"); mm_end = end - 7; while (s < mm_end) { __asm__ volatile( PREFETCH\" 32%1 \\n\\t\" \"movq %1, %%mm0 \\n\\t\" \"movq %1, %%mm1 \\n\\t\" \"movq %1, %%mm2 \\n\\t\" \"pand %2, %%mm0 \\n\\t\" \"pand %3, %%mm1 \\n\\t\" \"pand %4, %%mm2 \\n\\t\" \"psllq $3, %%mm0 \\n\\t\" \"psrlq $2, %%mm1 \\n\\t\" \"psrlq $7, %%mm2 \\n\\t\" \"movq %%mm0, %%mm3 \\n\\t\" \"movq %%mm1, %%mm4 \\n\\t\" \"movq %%mm2, %%mm5 \\n\\t\" \"punpcklwd %5, %%mm0 \\n\\t\" \"punpcklwd %5, %%mm1 \\n\\t\" \"punpcklwd %5, %%mm2 \\n\\t\" \"punpckhwd %5, %%mm3 \\n\\t\" \"punpckhwd %5, %%mm4 \\n\\t\" \"punpckhwd %5, %%mm5 \\n\\t\" \"psllq $8, %%mm1 \\n\\t\" \"psllq $16, %%mm2 \\n\\t\" \"por %%mm1, %%mm0 \\n\\t\" \"por %%mm2, %%mm0 \\n\\t\" \"psllq $8, %%mm4 \\n\\t\" \"psllq $16, %%mm5 \\n\\t\" \"por %%mm4, %%mm3 \\n\\t\" \"por %%mm5, %%mm3 \\n\\t\" \"movq %%mm0, %%mm6 \\n\\t\" \"movq %%mm3, %%mm7 \\n\\t\" \"movq 8%1, %%mm0 \\n\\t\" \"movq 8%1, %%mm1 \\n\\t\" \"movq 8%1, %%mm2 \\n\\t\" \"pand %2, %%mm0 \\n\\t\" \"pand %3, %%mm1 \\n\\t\" \"pand %4, %%mm2 \\n\\t\" \"psllq $3, %%mm0 \\n\\t\" \"psrlq $2, %%mm1 \\n\\t\" \"psrlq $7, %%mm2 \\n\\t\" \"movq %%mm0, %%mm3 \\n\\t\" \"movq %%mm1, %%mm4 \\n\\t\" \"movq %%mm2, %%mm5 \\n\\t\" \"punpcklwd %5, %%mm0 \\n\\t\" \"punpcklwd %5, %%mm1 \\n\\t\" \"punpcklwd %5, %%mm2 \\n\\t\" \"punpckhwd %5, %%mm3 \\n\\t\" \"punpckhwd %5, %%mm4 \\n\\t\" \"punpckhwd %5, %%mm5 \\n\\t\" \"psllq $8, %%mm1 \\n\\t\" \"psllq $16, %%mm2 \\n\\t\" \"por %%mm1, %%mm0 \\n\\t\" \"por %%mm2, %%mm0 \\n\\t\" \"psllq $8, %%mm4 \\n\\t\" \"psllq $16, %%mm5 \\n\\t\" \"por %%mm4, %%mm3 \\n\\t\" \"por %%mm5, %%mm3 \\n\\t\" :\"=m\"(*d) :\"m\"(*s),\"m\"(mask15b),\"m\"(mask15g),\"m\"(mask15r), \"m\"(mmx_null) :\"memory\"); /* borrowed 32 to 24 */ __asm__ volatile( \"movq %%mm0, %%mm4 \\n\\t\" \"movq %%mm3, %%mm5 \\n\\t\" \"movq %%mm6, %%mm0 \\n\\t\" \"movq %%mm7, %%mm1 \\n\\t\" \"movq %%mm4, %%mm6 \\n\\t\" \"movq %%mm5, %%mm7 \\n\\t\" \"movq %%mm0, %%mm2 \\n\\t\" \"movq %%mm1, %%mm3 \\n\\t\" STORE_BGR24_MMX :\"=m\"(*d) :\"m\"(*s) :\"memory\"); d += 24; s += 8; } __asm__ volatile(SFENCE:::\"memory\"); __asm__ 
volatile(EMMS:::\"memory\"); while (s < end) { register uint16_t bgr; bgr = *s++; *d++ = (bgr&0x1F)<<3; *d++ = (bgr&0x3E0)>>2; *d++ = (bgr&0x7C00)>>7; } }"} {"target": 1, "idx": 1519, "func": "int inet_dgram_opts(QemuOpts *opts) { struct addrinfo ai, *peer = NULL, *local = NULL; const char *addr; const char *port; char uaddr[INET6_ADDRSTRLEN+1]; char uport[33]; int sock = -1, rc; /* lookup peer addr */ memset(&ai,0, sizeof(ai)); ai.ai_flags = AI_CANONNAME | AI_ADDRCONFIG; ai.ai_family = PF_UNSPEC; ai.ai_socktype = SOCK_DGRAM; addr = qemu_opt_get(opts, \"host\"); port = qemu_opt_get(opts, \"port\"); if (addr == NULL || strlen(addr) == 0) { addr = \"localhost\"; } if (port == NULL || strlen(port) == 0) { fprintf(stderr, \"inet_dgram: port not specified\\n\"); return -1; } if (qemu_opt_get_bool(opts, \"ipv4\", 0)) ai.ai_family = PF_INET; if (qemu_opt_get_bool(opts, \"ipv6\", 0)) ai.ai_family = PF_INET6; if (0 != (rc = getaddrinfo(addr, port, &ai, &peer))) { fprintf(stderr,\"getaddrinfo(%s,%s): %s\\n\", addr, port, gai_strerror(rc)); return -1; } if (sockets_debug) { fprintf(stderr, \"%s: peer (%s:%s)\\n\", __FUNCTION__, addr, port); inet_print_addrinfo(__FUNCTION__, peer); } /* lookup local addr */ memset(&ai,0, sizeof(ai)); ai.ai_flags = AI_PASSIVE; ai.ai_family = peer->ai_family; ai.ai_socktype = SOCK_DGRAM; addr = qemu_opt_get(opts, \"localaddr\"); port = qemu_opt_get(opts, \"localport\"); if (addr == NULL || strlen(addr) == 0) { addr = NULL; } if (!port || strlen(port) == 0) port = \"0\"; if (0 != (rc = getaddrinfo(addr, port, &ai, &local))) { fprintf(stderr,\"getaddrinfo(%s,%s): %s\\n\", addr, port, gai_strerror(rc)); return -1; } if (sockets_debug) { fprintf(stderr, \"%s: local (%s:%s)\\n\", __FUNCTION__, addr, port); inet_print_addrinfo(__FUNCTION__, local); } /* create socket */ sock = socket(peer->ai_family, peer->ai_socktype, peer->ai_protocol); if (sock < 0) { fprintf(stderr,\"%s: socket(%s): %s\\n\", __FUNCTION__, inet_strfamily(peer->ai_family), strerror(errno)); goto err; } setsockopt(sock,SOL_SOCKET,SO_REUSEADDR,(void*)&on,sizeof(on)); /* bind socket */ if (getnameinfo((struct sockaddr*)local->ai_addr,local->ai_addrlen, uaddr,INET6_ADDRSTRLEN,uport,32, NI_NUMERICHOST | NI_NUMERICSERV) != 0) { fprintf(stderr, \"%s: getnameinfo: oops\\n\", __FUNCTION__); goto err; } if (bind(sock, local->ai_addr, local->ai_addrlen) < 0) { fprintf(stderr,\"%s: bind(%s,%s,%d): OK\\n\", __FUNCTION__, inet_strfamily(local->ai_family), uaddr, inet_getport(local)); goto err; } /* connect to peer */ if (getnameinfo((struct sockaddr*)peer->ai_addr, peer->ai_addrlen, uaddr, INET6_ADDRSTRLEN, uport, 32, NI_NUMERICHOST | NI_NUMERICSERV) != 0) { fprintf(stderr, \"%s: getnameinfo: oops\\n\", __FUNCTION__); goto err; } if (connect(sock,peer->ai_addr,peer->ai_addrlen) < 0) { fprintf(stderr, \"%s: connect(%s,%s,%s,%s): %s\\n\", __FUNCTION__, inet_strfamily(peer->ai_family), peer->ai_canonname, uaddr, uport, strerror(errno)); goto err; } freeaddrinfo(local); freeaddrinfo(peer); return sock; err: if (-1 != sock) closesocket(sock); if (local) freeaddrinfo(local); if (peer) freeaddrinfo(peer); return -1; }"} {"target": 0, "idx": 1525, "func": "static int ftp_file_size(FTPContext *s) { char command[CONTROL_BUFFER_SIZE]; char *res = NULL; const int size_codes[] = {213, 0}; snprintf(command, sizeof(command), \"SIZE %s\\r\\n\", s->path); if (ftp_send_command(s, command, size_codes, &res)) { s->filesize = strtoll(&res[4], NULL, 10); } else { s->filesize = -1; av_free(res); return AVERROR(EIO); } av_free(res); return 0; 
}"} {"target": 1, "idx": 1533, "func": "static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; TCGMemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; #endif datalo = *args++; datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); addrlo = *args++; addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); oi = *args++; opc = get_memop(oi); s_bits = opc & MO_SIZE; #ifdef CONFIG_SOFTMMU mem_index = get_mmuidx(oi); addrlo = tcg_out_tlb_read(s, s_bits, addrlo, addrhi, mem_index, true); /* Load a pointer into the current opcode w/conditional branch-link. */ label_ptr = s->code_ptr; tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); rbase = TCG_REG_R3; #else /* !CONFIG_SOFTMMU */ rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0; if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); addrlo = TCG_REG_TMP1; } #endif if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) { if (opc & MO_BSWAP) { tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo)); tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0)); } else if (rbase != 0) { tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo)); tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0)); } else if (addrlo == datahi) { tcg_out32(s, LWZ | TAI(datalo, addrlo, 4)); tcg_out32(s, LWZ | TAI(datahi, addrlo, 0)); } else { tcg_out32(s, LWZ | TAI(datahi, addrlo, 0)); tcg_out32(s, LWZ | TAI(datalo, addrlo, 4)); } } else { uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)]; if (!HAVE_ISA_2_06 && insn == LDBRX) { tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo)); tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0)); tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0); } else if (insn) { tcg_out32(s, insn | TAB(datalo, rbase, addrlo)); } else { insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)]; tcg_out32(s, insn | TAB(datalo, rbase, addrlo)); insn = qemu_exts_opc[s_bits]; tcg_out32(s, insn | RA(datalo) | RS(datalo)); } } #ifdef CONFIG_SOFTMMU add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, s->code_ptr, label_ptr); #endif }"} {"target": 0, "idx": 1540, "func": "int ff_h264_decode_ref_pic_marking(const H264Context *h, H264SliceContext *sl, GetBitContext *gb) { int i; MMCO *mmco = sl->mmco; int nb_mmco = 0; if (h->nal_unit_type == NAL_IDR_SLICE) { // FIXME fields skip_bits1(gb); // broken_link if (get_bits1(gb)) { mmco[0].opcode = MMCO_LONG; mmco[0].long_arg = 0; nb_mmco = 1; } sl->explicit_ref_marking = 1; } else { sl->explicit_ref_marking = get_bits1(gb); if (sl->explicit_ref_marking) { for (i = 0; i < MAX_MMCO_COUNT; i++) { MMCOOpcode opcode = get_ue_golomb_31(gb); mmco[i].opcode = opcode; if (opcode == MMCO_SHORT2UNUSED || opcode == MMCO_SHORT2LONG) { mmco[i].short_pic_num = (sl->curr_pic_num - get_ue_golomb(gb) - 1) & (sl->max_pic_num - 1); #if 0 if (mmco[i].short_pic_num >= h->short_ref_count || !h->short_ref[mmco[i].short_pic_num]) { av_log(s->avctx, AV_LOG_ERROR, \"illegal short ref in memory management control \" \"operation %d\\n\", mmco); return -1; } #endif } if (opcode == MMCO_SHORT2LONG || opcode == MMCO_LONG2UNUSED || opcode == MMCO_LONG || opcode == MMCO_SET_MAX_LONG) { unsigned int long_arg = get_ue_golomb_31(gb); if (long_arg >= 32 || (long_arg >= 16 && !(opcode == MMCO_SET_MAX_LONG && long_arg == 16) && 
!(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE(h)))) { av_log(h->avctx, AV_LOG_ERROR, \"illegal long ref in memory management control \" \"operation %d\\n\", opcode); return -1; } mmco[i].long_arg = long_arg; } if (opcode > (unsigned) MMCO_LONG) { av_log(h->avctx, AV_LOG_ERROR, \"illegal memory management control operation %d\\n\", opcode); return -1; } if (opcode == MMCO_END) break; } nb_mmco = i; } } sl->nb_mmco = nb_mmco; return 0; }"} {"target": 0, "idx": 1557, "func": "void cpu_sh4_invalidate_tlb(CPUSH4State *s) { int i; /* UTLB */ for (i = 0; i < UTLB_SIZE; i++) { tlb_t * entry = &s->utlb[i]; entry->v = 0; } /* ITLB */ for (i = 0; i < UTLB_SIZE; i++) { tlb_t * entry = &s->utlb[i]; entry->v = 0; } tlb_flush(s, 1); }"} {"target": 0, "idx": 1573, "func": "uint16_t net_checksum_finish(uint32_t sum) { while (sum>>16) sum = (sum & 0xFFFF)+(sum >> 16); return ~sum; }"} {"target": 0, "idx": 1616, "func": "static void pci_init_mask_bridge(PCIDevice *d) { /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and PCI_SEC_LETENCY_TIMER */ memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4); /* base and limit */ d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff; d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff; pci_set_word(d->wmask + PCI_MEMORY_BASE, PCI_MEMORY_RANGE_MASK & 0xffff); pci_set_word(d->wmask + PCI_MEMORY_LIMIT, PCI_MEMORY_RANGE_MASK & 0xffff); pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE, PCI_PREF_RANGE_MASK & 0xffff); pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT, PCI_PREF_RANGE_MASK & 0xffff); /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */ memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8); /* Supported memory and i/o types */ d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16; d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16; pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE, PCI_PREF_RANGE_TYPE_64); pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT, PCI_PREF_RANGE_TYPE_64); /* TODO: add this define to pci_regs.h in linux and then in qemu. */ #define PCI_BRIDGE_CTL_VGA_16BIT 0x10 /* VGA 16-bit decode */ #define PCI_BRIDGE_CTL_DISCARD 0x100 /* Primary discard timer */ #define PCI_BRIDGE_CTL_SEC_DISCARD 0x200 /* Secondary discard timer */ #define PCI_BRIDGE_CTL_DISCARD_STATUS 0x400 /* Discard timer status */ #define PCI_BRIDGE_CTL_DISCARD_SERR 0x800 /* Discard timer SERR# enable */ /* * TODO: Bridges default to 10-bit VGA decoding but we currently only * implement 16-bit decoding (no alias support). */ pci_set_word(d->wmask + PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR | PCI_BRIDGE_CTL_ISA | PCI_BRIDGE_CTL_VGA | PCI_BRIDGE_CTL_VGA_16BIT | PCI_BRIDGE_CTL_MASTER_ABORT | PCI_BRIDGE_CTL_BUS_RESET | PCI_BRIDGE_CTL_FAST_BACK | PCI_BRIDGE_CTL_DISCARD | PCI_BRIDGE_CTL_SEC_DISCARD | PCI_BRIDGE_CTL_DISCARD_SERR); /* Below does not do anything as we never set this bit, put here for * completeness. 
*/ pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_DISCARD_STATUS); d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK; d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK; pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE, PCI_PREF_RANGE_TYPE_MASK); pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT, PCI_PREF_RANGE_TYPE_MASK); }"} {"target": 0, "idx": 1621, "func": "static TranslationBlock *tb_alloc(target_ulong pc) { TranslationBlock *tb; TBContext *ctx; assert_tb_locked(); tb = tcg_tb_alloc(&tcg_ctx); if (unlikely(tb == NULL)) { return NULL; } ctx = &tcg_ctx.tb_ctx; if (unlikely(ctx->nb_tbs == ctx->tbs_size)) { ctx->tbs_size *= 2; ctx->tbs = g_renew(TranslationBlock *, ctx->tbs, ctx->tbs_size); } ctx->tbs[ctx->nb_tbs++] = tb; tb->pc = pc; tb->cflags = 0; tb->invalid = false; return tb; }"} {"target": 0, "idx": 1626, "func": "static void gpollfds_from_select(void) { int fd; for (fd = 0; fd <= nfds; fd++) { int events = 0; if (FD_ISSET(fd, &rfds)) { events |= G_IO_IN | G_IO_HUP | G_IO_ERR; } if (FD_ISSET(fd, &wfds)) { events |= G_IO_OUT | G_IO_ERR; } if (FD_ISSET(fd, &xfds)) { events |= G_IO_PRI; } if (events) { GPollFD pfd = { .fd = fd, .events = events, }; g_array_append_val(gpollfds, pfd); } } }"} {"target": 0, "idx": 1628, "func": "static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { TiffContext *const s = avctx->priv_data; AVFrame *const p = data; ThreadFrame frame = { .f = data }; unsigned off; int le, ret, plane, planes; int i, j, entries, stride; unsigned soff, ssize; uint8_t *dst; GetByteContext stripsizes; GetByteContext stripdata; bytestream2_init(&s->gb, avpkt->data, avpkt->size); // parse image header if ((ret = ff_tdecode_header(&s->gb, &le, &off))) { av_log(avctx, AV_LOG_ERROR, \"Invalid TIFF header\\n\"); return ret; } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) { av_log(avctx, AV_LOG_ERROR, \"IFD offset is greater than image size\\n\"); return AVERROR_INVALIDDATA; } s->le = le; // TIFF_BPP is not a required tag and defaults to 1 s->bppcount = s->bpp = 1; s->photometric = TIFF_PHOTOMETRIC_NONE; s->compr = TIFF_RAW; s->fill_order = 0; free_geotags(s); // Reset these offsets so we can tell if they were set this frame s->stripsizesoff = s->strippos = 0; /* parse image file directory */ bytestream2_seek(&s->gb, off, SEEK_SET); entries = ff_tget_short(&s->gb, le); if (bytestream2_get_bytes_left(&s->gb) < entries * 12) return AVERROR_INVALIDDATA; for (i = 0; i < entries; i++) { if ((ret = tiff_decode_tag(s, p)) < 0) return ret; } for (i = 0; igeotag_count; i++) { const char *keyname = get_geokey_name(s->geotags[i].key); if (!keyname) { av_log(avctx, AV_LOG_WARNING, \"Unknown or unsupported GeoTIFF key %d\\n\", s->geotags[i].key); continue; } if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) { av_log(avctx, AV_LOG_WARNING, \"Type of GeoTIFF key %d is wrong\\n\", s->geotags[i].key); continue; } ret = av_dict_set(avpriv_frame_get_metadatap(p), keyname, s->geotags[i].val, 0); if (ret<0) { av_log(avctx, AV_LOG_ERROR, \"Writing metadata with key '%s' failed\\n\", keyname); return ret; } } if (!s->strippos && !s->stripoff) { av_log(avctx, AV_LOG_ERROR, \"Image data is missing\\n\"); return AVERROR_INVALIDDATA; } /* now we have the data and may start decoding */ if ((ret = init_image(s, &frame)) < 0) return ret; if (s->strips == 1 && !s->stripsize) { av_log(avctx, AV_LOG_WARNING, \"Image data size missing\\n\"); s->stripsize = avpkt->size - s->stripoff; } if (s->stripsizesoff) { if 
(s->stripsizesoff >= (unsigned)avpkt->size) return AVERROR_INVALIDDATA; bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff, avpkt->size - s->stripsizesoff); } if (s->strippos) { if (s->strippos >= (unsigned)avpkt->size) return AVERROR_INVALIDDATA; bytestream2_init(&stripdata, avpkt->data + s->strippos, avpkt->size - s->strippos); } if (s->rps <= 0) { av_log(avctx, AV_LOG_ERROR, \"rps %d invalid\\n\", s->rps); return AVERROR_INVALIDDATA; } planes = s->planar ? s->bppcount : 1; for (plane = 0; plane < planes; plane++) { stride = p->linesize[plane]; dst = p->data[plane]; for (i = 0; i < s->height; i += s->rps) { if (s->stripsizesoff) ssize = ff_tget(&stripsizes, s->sstype, le); else ssize = s->stripsize; if (s->strippos) soff = ff_tget(&stripdata, s->sot, le); else soff = s->stripoff; if (soff > avpkt->size || ssize > avpkt->size - soff) { av_log(avctx, AV_LOG_ERROR, \"Invalid strip size/offset\\n\"); return AVERROR_INVALIDDATA; } if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i, FFMIN(s->rps, s->height - i))) < 0) { if (avctx->err_recognition & AV_EF_EXPLODE) return ret; break; } dst += s->rps * stride; } if (s->predictor == 2) { if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) { av_log(s->avctx, AV_LOG_ERROR, \"predictor == 2 with YUV is unsupported\"); return AVERROR_PATCHWELCOME; } dst = p->data[plane]; soff = s->bpp >> 3; if (s->planar) soff = FFMAX(soff / s->bppcount, 1); ssize = s->width * soff; if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE || s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE || s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE || s->avctx->pix_fmt == AV_PIX_FMT_YA16LE || s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE || s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) { for (i = 0; i < s->height; i++) { for (j = soff; j < ssize; j += 2) AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff)); dst += stride; } } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE || s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE || s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE || s->avctx->pix_fmt == AV_PIX_FMT_YA16BE || s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE || s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) { for (i = 0; i < s->height; i++) { for (j = soff; j < ssize; j += 2) AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff)); dst += stride; } } else { for (i = 0; i < s->height; i++) { for (j = soff; j < ssize; j++) dst[j] += dst[j - soff]; dst += stride; } } } if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) { dst = p->data[plane]; for (i = 0; i < s->height; i++) { for (j = 0; j < stride; j++) dst[j] = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? 
(1<bpp) - 1 : 255) - dst[j]; dst += stride; } } } if (s->planar && s->bppcount > 2) { FFSWAP(uint8_t*, p->data[0], p->data[2]); FFSWAP(int, p->linesize[0], p->linesize[2]); FFSWAP(uint8_t*, p->data[0], p->data[1]); FFSWAP(int, p->linesize[0], p->linesize[1]); } *got_frame = 1; return avpkt->size; }"} {"target": 0, "idx": 1657, "func": "int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt) { int ret; *got_picture_ptr = 0; if ((avctx->coded_width || avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx)) return -1; avctx->pkt = avpkt; apply_param_change(avctx, avpkt); avcodec_get_frame_defaults(picture); if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) { if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr, avpkt); else { ret = avctx->codec->decode(avctx, picture, got_picture_ptr, avpkt); picture->pkt_dts = avpkt->dts; picture->sample_aspect_ratio = avctx->sample_aspect_ratio; picture->width = avctx->width; picture->height = avctx->height; picture->format = avctx->pix_fmt; } emms_c(); //needed to avoid an emms_c() call before every return; if (*got_picture_ptr) avctx->frame_number++; } else ret = 0; /* many decoders assign whole AVFrames, thus overwriting extended_data; * make sure it's set correctly */ picture->extended_data = picture->data; return ret; }"} {"target": 1, "idx": 1672, "func": "static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun, MegasasCmd *cmd) { struct mfi_pd_info *info = cmd->iov_buf; size_t dcmd_size = sizeof(struct mfi_pd_info); uint64_t pd_size; uint16_t pd_id = ((sdev->id & 0xFF) << 8) | (lun & 0xFF); uint8_t cmdbuf[6]; SCSIRequest *req; size_t len, resid; if (!cmd->iov_buf) { cmd->iov_buf = g_malloc0(dcmd_size); info = cmd->iov_buf; info->inquiry_data[0] = 0x7f; /* Force PQual 0x3, PType 0x1f */ info->vpd_page83[0] = 0x7f; megasas_setup_inquiry(cmdbuf, 0, sizeof(info->inquiry_data)); req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd); if (!req) { trace_megasas_dcmd_req_alloc_failed(cmd->index, \"PD get info std inquiry\"); g_free(cmd->iov_buf); cmd->iov_buf = NULL; return MFI_STAT_FLASH_ALLOC_FAIL; } trace_megasas_dcmd_internal_submit(cmd->index, \"PD get info std inquiry\", lun); len = scsi_req_enqueue(req); if (len > 0) { cmd->iov_size = len; scsi_req_continue(req); } return MFI_STAT_INVALID_STATUS; } else if (info->inquiry_data[0] != 0x7f && info->vpd_page83[0] == 0x7f) { megasas_setup_inquiry(cmdbuf, 0x83, sizeof(info->vpd_page83)); req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd); if (!req) { trace_megasas_dcmd_req_alloc_failed(cmd->index, \"PD get info vpd inquiry\"); return MFI_STAT_FLASH_ALLOC_FAIL; } trace_megasas_dcmd_internal_submit(cmd->index, \"PD get info vpd inquiry\", lun); len = scsi_req_enqueue(req); if (len > 0) { cmd->iov_size = len; scsi_req_continue(req); } return MFI_STAT_INVALID_STATUS; } /* Finished, set FW state */ if ((info->inquiry_data[0] >> 5) == 0) { if (megasas_is_jbod(cmd->state)) { info->fw_state = cpu_to_le16(MFI_PD_STATE_SYSTEM); } else { info->fw_state = cpu_to_le16(MFI_PD_STATE_ONLINE); } } else { info->fw_state = cpu_to_le16(MFI_PD_STATE_OFFLINE); } info->ref.v.device_id = cpu_to_le16(pd_id); info->state.ddf.pd_type = cpu_to_le16(MFI_PD_DDF_TYPE_IN_VD| MFI_PD_DDF_TYPE_INTF_SAS); blk_get_geometry(sdev->conf.blk, &pd_size); info->raw_size = cpu_to_le64(pd_size); 
info->non_coerced_size = cpu_to_le64(pd_size); info->coerced_size = cpu_to_le64(pd_size); info->encl_device_id = 0xFFFF; info->slot_number = (sdev->id & 0xFF); info->path_info.count = 1; info->path_info.sas_addr[0] = cpu_to_le64(megasas_get_sata_addr(pd_id)); info->connected_port_bitmap = 0x1; info->device_speed = 1; info->link_speed = 1; resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg); g_free(cmd->iov_buf); cmd->iov_size = dcmd_size - resid; cmd->iov_buf = NULL; return MFI_STAT_OK; }"} {"target": 0, "idx": 1710, "func": "static void gen_isel(DisasContext *ctx) { int l1, l2; uint32_t bi = rC(ctx->opcode); uint32_t mask; TCGv_i32 t0; l1 = gen_new_label(); l2 = gen_new_label(); mask = 0x08 >> (bi & 0x03); t0 = tcg_temp_new_i32(); tcg_gen_andi_i32(t0, cpu_crf[bi >> 2], mask); tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1); if (rA(ctx->opcode) == 0) tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0); else tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); gen_set_label(l2); tcg_temp_free_i32(t0); }"} {"target": 0, "idx": 1712, "func": "static void stream_process_mem2s(struct Stream *s, StreamSlave *tx_dev) { uint32_t prev_d; unsigned char txbuf[16 * 1024]; unsigned int txlen; uint32_t app[6]; if (!stream_running(s) || stream_idle(s)) { return; } while (1) { stream_desc_load(s, s->regs[R_CURDESC]); if (s->desc.status & SDESC_STATUS_COMPLETE) { s->regs[R_DMASR] |= DMASR_HALTED; break; } if (stream_desc_sof(&s->desc)) { s->pos = 0; memcpy(app, s->desc.app, sizeof app); } txlen = s->desc.control & SDESC_CTRL_LEN_MASK; if ((txlen + s->pos) > sizeof txbuf) { hw_error(\"%s: too small internal txbuf! %d\\n\", __func__, txlen + s->pos); } cpu_physical_memory_read(s->desc.buffer_address, txbuf + s->pos, txlen); s->pos += txlen; if (stream_desc_eof(&s->desc)) { stream_push(tx_dev, txbuf, s->pos, app); s->pos = 0; stream_complete(s); } /* Update the descriptor. */ s->desc.status = txlen | SDESC_STATUS_COMPLETE; stream_desc_store(s, s->regs[R_CURDESC]); /* Advance. 
*/ prev_d = s->regs[R_CURDESC]; s->regs[R_CURDESC] = s->desc.nxtdesc; if (prev_d == s->regs[R_TAILDESC]) { s->regs[R_DMASR] |= DMASR_IDLE; break; } } }"} {"target": 0, "idx": 1713, "func": "static int adpcm_decode_init(AVCodecContext * avctx) { ADPCMContext *c = avctx->priv_data; if(avctx->channels > 2U){ return -1; } c->channel = 0; c->status[0].predictor = c->status[1].predictor = 0; c->status[0].step_index = c->status[1].step_index = 0; c->status[0].step = c->status[1].step = 0; switch(avctx->codec->id) { case CODEC_ID_ADPCM_CT: c->status[0].step = c->status[1].step = 511; break; case CODEC_ID_ADPCM_IMA_WS: if (avctx->extradata && avctx->extradata_size == 2 * 4) { c->status[0].predictor = AV_RL32(avctx->extradata); c->status[1].predictor = AV_RL32(avctx->extradata + 4); } break; default: break; } return 0; }"} {"target": 1, "idx": 1721, "func": "static inline void bt_hci_event_complete_read_local_name(struct bt_hci_s *hci) { read_local_name_rp params; params.status = HCI_SUCCESS; memset(params.name, 0, sizeof(params.name)); if (hci->device.lmp_name) strncpy(params.name, hci->device.lmp_name, sizeof(params.name)); bt_hci_event_complete(hci, ¶ms, READ_LOCAL_NAME_RP_SIZE); }"} {"target": 1, "idx": 1726, "func": "static bool vnc_should_update(VncState *vs) { switch (vs->update) { case VNC_STATE_UPDATE_NONE: break; case VNC_STATE_UPDATE_INCREMENTAL: /* Only allow incremental updates if the output buffer * is empty, or if audio capture is enabled. */ if (!vs->output.offset || vs->audio_cap) { return true; } break; case VNC_STATE_UPDATE_FORCE: return true; } return false; }"} {"target": 1, "idx": 1736, "func": "static void test_qemu_strtoll_whitespace(void) { const char *str = \" \\t \"; char f = 'X'; const char *endptr = &f; int64_t res = 999; int err; err = qemu_strtoll(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 0); g_assert(endptr == str); }"} {"target": 0, "idx": 1760, "func": "static void test_migrate(void) { char *uri = g_strdup_printf(\"unix:%s/migsocket\", tmpfs); QTestState *global = global_qtest, *from, *to; unsigned char dest_byte_a, dest_byte_b, dest_byte_c, dest_byte_d; gchar *cmd; QDict *rsp; char *bootpath = g_strdup_printf(\"%s/bootsect\", tmpfs); FILE *bootfile = fopen(bootpath, \"wb\"); got_stop = false; g_assert_cmpint(fwrite(bootsect, 512, 1, bootfile), ==, 1); fclose(bootfile); cmd = g_strdup_printf(\"-machine accel=kvm:tcg -m 150M\" \" -name pcsource,debug-threads=on\" \" -serial file:%s/src_serial\" \" -drive file=%s,format=raw\", tmpfs, bootpath); from = qtest_start(cmd); g_free(cmd); cmd = g_strdup_printf(\"-machine accel=kvm:tcg -m 150M\" \" -name pcdest,debug-threads=on\" \" -serial file:%s/dest_serial\" \" -drive file=%s,format=raw\" \" -incoming %s\", tmpfs, bootpath, uri); to = qtest_init(cmd); g_free(cmd); global_qtest = from; rsp = qmp(\"{ 'execute': 'migrate-set-capabilities',\" \"'arguments': { \" \"'capabilities': [ {\" \"'capability': 'postcopy-ram',\" \"'state': true } ] } }\"); g_assert(qdict_haskey(rsp, \"return\")); QDECREF(rsp); global_qtest = to; rsp = qmp(\"{ 'execute': 'migrate-set-capabilities',\" \"'arguments': { \" \"'capabilities': [ {\" \"'capability': 'postcopy-ram',\" \"'state': true } ] } }\"); g_assert(qdict_haskey(rsp, \"return\")); QDECREF(rsp); /* We want to pick a speed slow enough that the test completes * quickly, but that it doesn't complete precopy even on a slow * machine, so also set the downtime. 
*/ global_qtest = from; rsp = qmp(\"{ 'execute': 'migrate_set_speed',\" \"'arguments': { 'value': 100000000 } }\"); g_assert(qdict_haskey(rsp, \"return\")); QDECREF(rsp); /* 1ms downtime - it should never finish precopy */ rsp = qmp(\"{ 'execute': 'migrate_set_downtime',\" \"'arguments': { 'value': 0.001 } }\"); g_assert(qdict_haskey(rsp, \"return\")); QDECREF(rsp); /* Wait for the first serial output from the source */ wait_for_serial(\"src_serial\"); cmd = g_strdup_printf(\"{ 'execute': 'migrate',\" \"'arguments': { 'uri': '%s' } }\", uri); rsp = qmp(cmd); g_free(cmd); g_assert(qdict_haskey(rsp, \"return\")); QDECREF(rsp); wait_for_migration_pass(); rsp = return_or_event(qmp(\"{ 'execute': 'migrate-start-postcopy' }\")); g_assert(qdict_haskey(rsp, \"return\")); QDECREF(rsp); if (!got_stop) { qmp_eventwait(\"STOP\"); } global_qtest = to; qmp_eventwait(\"RESUME\"); wait_for_serial(\"dest_serial\"); global_qtest = from; wait_for_migration_complete(); qtest_quit(from); global_qtest = to; qtest_memread(to, start_address, &dest_byte_a, 1); /* Destination still running, wait for a byte to change */ do { qtest_memread(to, start_address, &dest_byte_b, 1); usleep(10 * 1000); } while (dest_byte_a == dest_byte_b); qmp(\"{ 'execute' : 'stop'}\"); /* With it stopped, check nothing changes */ qtest_memread(to, start_address, &dest_byte_c, 1); sleep(1); qtest_memread(to, start_address, &dest_byte_d, 1); g_assert_cmpint(dest_byte_c, ==, dest_byte_d); check_guests_ram(); qtest_quit(to); g_free(uri); global_qtest = global; cleanup(\"bootsect\"); cleanup(\"migsocket\"); cleanup(\"src_serial\"); cleanup(\"dest_serial\"); }"} {"target": 0, "idx": 1770, "func": "static uint8_t net_tx_pkt_get_gso_type(struct NetTxPkt *pkt, bool tso_enable) { uint8_t rc = VIRTIO_NET_HDR_GSO_NONE; uint16_t l3_proto; l3_proto = eth_get_l3_proto(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base, pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len); if (!tso_enable) { goto func_exit; } rc = eth_get_gso_type(l3_proto, pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base, pkt->l4proto); func_exit: return rc; }"} {"target": 1, "idx": 1783, "func": "static int scsi_req_length(SCSIRequest *req, uint8_t *cmd) { switch (cmd[0] >> 5) { case 0: req->cmd.xfer = cmd[4]; req->cmd.len = 6; /* length 0 means 256 blocks */ if (req->cmd.xfer == 0) req->cmd.xfer = 256; break; case 1: case 2: req->cmd.xfer = cmd[8] | (cmd[7] << 8); req->cmd.len = 10; break; case 4: req->cmd.xfer = cmd[13] | (cmd[12] << 8) | (cmd[11] << 16) | (cmd[10] << 24); req->cmd.len = 16; break; case 5: req->cmd.xfer = cmd[9] | (cmd[8] << 8) | (cmd[7] << 16) | (cmd[6] << 24); req->cmd.len = 12; break; default: trace_scsi_req_parse_bad(req->dev->id, req->lun, req->tag, cmd[0]); return -1; } switch(cmd[0]) { case TEST_UNIT_READY: case START_STOP: case SEEK_6: case WRITE_FILEMARKS: case SPACE: case RESERVE: case RELEASE: case ERASE: case ALLOW_MEDIUM_REMOVAL: case VERIFY: case SEEK_10: case SYNCHRONIZE_CACHE: case LOCK_UNLOCK_CACHE: case LOAD_UNLOAD: case SET_CD_SPEED: case SET_LIMITS: case WRITE_LONG: case MOVE_MEDIUM: case UPDATE_BLOCK: req->cmd.xfer = 0; break; case MODE_SENSE: break; case WRITE_SAME: req->cmd.xfer = 1; break; case READ_CAPACITY: req->cmd.xfer = 8; break; case READ_BLOCK_LIMITS: req->cmd.xfer = 6; break; case READ_POSITION: req->cmd.xfer = 20; break; case SEND_VOLUME_TAG: req->cmd.xfer *= 40; break; case MEDIUM_SCAN: req->cmd.xfer *= 8; break; case WRITE_10: case WRITE_VERIFY: case WRITE_6: case WRITE_12: case WRITE_VERIFY_12: case WRITE_16: case WRITE_VERIFY_16: req->cmd.xfer *= 
req->dev->blocksize; break; case READ_10: case READ_6: case READ_REVERSE: case RECOVER_BUFFERED_DATA: case READ_12: case READ_16: req->cmd.xfer *= req->dev->blocksize; break; case INQUIRY: req->cmd.xfer = cmd[4] | (cmd[3] << 8); break; case MAINTENANCE_OUT: case MAINTENANCE_IN: if (req->dev->type == TYPE_ROM) { /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */ req->cmd.xfer = cmd[9] | (cmd[8] << 8); } break; } return 0; }"} {"target": 1, "idx": 1791, "func": "int ff_schro_queue_push_back(FFSchroQueue *queue, void *p_data) { FFSchroQueueElement *p_new = av_mallocz(sizeof(FFSchroQueueElement)); if (!p_new) return -1; p_new->data = p_data; if (!queue->p_head) queue->p_head = p_new; else queue->p_tail->next = p_new; queue->p_tail = p_new; ++queue->size; return 0; }"} {"target": 1, "idx": 1802, "func": "int kvmppc_reset_htab(int shift_hint) { uint32_t shift = shift_hint; if (!kvm_enabled()) { /* Full emulation, tell caller to allocate htab itself */ return 0; } if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) { int ret; ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift); if (ret == -ENOTTY) { /* At least some versions of PR KVM advertise the * capability, but don't implement the ioctl(). Oops. * Return 0 so that we allocate the htab in qemu, as is * correct for PR. */ return 0; } else if (ret < 0) { return ret; } return shift; } /* We have a kernel that predates the htab reset calls. For PR * KVM, we need to allocate the htab ourselves, for an HV KVM of * this era, it has allocated a 16MB fixed size hash table already. */ if (kvmppc_is_pr(kvm_state)) { /* PR - tell caller to allocate htab */ return 0; } else { /* HV - assume 16MB kernel allocated htab */ return 24; } }"} {"target": 1, "idx": 1804, "func": "static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; const uint8_t *buf_end = avpkt->data + avpkt->size; PTXContext * const s = avctx->priv_data; AVFrame *picture = data; AVFrame * const p = &s->picture; unsigned int offset, w, h, y, stride, bytes_per_pixel; uint8_t *ptr; offset = AV_RL16(buf); w = AV_RL16(buf+8); h = AV_RL16(buf+10); bytes_per_pixel = AV_RL16(buf+12) >> 3; if (bytes_per_pixel != 2) { av_log_ask_for_sample(avctx, \"Image format is not RGB15.\\n\"); return -1; } avctx->pix_fmt = PIX_FMT_RGB555; if (buf_end - buf < offset) return AVERROR_INVALIDDATA; if (offset != 0x2c) av_log_ask_for_sample(avctx, \"offset != 0x2c\\n\"); buf += offset; if (p->data[0]) avctx->release_buffer(avctx, p); if (av_image_check_size(w, h, 0, avctx)) return -1; if (w != avctx->width || h != avctx->height) avcodec_set_dimensions(avctx, w, h); if (avctx->get_buffer(avctx, p) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return -1; } p->pict_type = AV_PICTURE_TYPE_I; ptr = p->data[0]; stride = p->linesize[0]; for (y=0; y<h; y++) { memcpy(ptr, buf, w*bytes_per_pixel); ptr += stride; buf += w*bytes_per_pixel; } *picture = s->picture; *data_size = sizeof(AVPicture); return offset + w*h*bytes_per_pixel; }"} {"target": 1, "idx": 1817, "func": "static void vpc_close(BlockDriverState *bs) { BDRVVPCState *s = bs->opaque; g_free(s->pagetable); #ifdef CACHE g_free(s->pageentry_u8); #endif migrate_del_blocker(s->migration_blocker); error_free(s->migration_blocker); }"} {"target": 1, "idx": 1822, "func": "POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); dc->fw_name = \"PowerPC,POWER9\"; dc->desc = \"POWER9\"; dc->props = powerpc_servercpu_properties; pcc->pvr_match = ppc_pvr_match_power9; pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | 
PCR_COMPAT_2_07; pcc->pcr_supported = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER9; pcc->check_pow = check_pow_nocheck; cc->has_work = cpu_has_work_POWER9; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | PPC_CILDST; pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206 | PPC2_ISA300; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_TM) | (1ull << MSR_VR) | (1ull << MSR_VSX) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_3_00; #if defined(CONFIG_SOFTMMU) pcc->handle_mmu_fault = ppc64_v3_handle_mmu_fault; /* segment page size remain the same */ pcc->sps = &POWER7_POWER8_sps; pcc->radix_page_info = &POWER9_radix_page_info; #endif pcc->excp_model = POWERPC_EXCP_POWER8; pcc->bus_model = PPC_FLAGS_INPUT_POWER7; pcc->bfd_mach = bfd_mach_ppc64; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | POWERPC_FLAG_VSX | POWERPC_FLAG_TM; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr; }"} {"target": 1, "idx": 1878, "func": "static int mxf_read_header(AVFormatContext *s, AVFormatParameters *ap) { MXFContext *mxf = s->priv_data; KLVPacket klv; int64_t essence_offset = 0; mxf->last_forward_tell = INT64_MAX; if (!mxf_read_sync(s->pb, mxf_header_partition_pack_key, 14)) { av_log(s, AV_LOG_ERROR, \"could not find header partition pack key\\n\"); return AVERROR_INVALIDDATA; } avio_seek(s->pb, -14, SEEK_CUR); mxf->fc = s; mxf->run_in = avio_tell(s->pb); while (!s->pb->eof_reached) { const MXFMetadataReadTableEntry *metadata; if (klv_read_packet(&klv, s->pb) < 0) { /* EOF - seek to previous partition or stop */ if(mxf_parse_handle_partition_or_eof(mxf) <= 0) break; else continue; } PRINT_KEY(s, \"read header\", klv.key); av_dlog(s, \"size %\"PRIu64\" offset %#\"PRIx64\"\\n\", klv.length, klv.offset); if (IS_KLV_KEY(klv.key, mxf_encrypted_triplet_key) || IS_KLV_KEY(klv.key, mxf_essence_element_key) || IS_KLV_KEY(klv.key, mxf_avid_essence_element_key) || IS_KLV_KEY(klv.key, mxf_system_item_key)) { if (!mxf->current_partition->essence_offset) { compute_partition_essence_offset(s, mxf, &klv); } if (!essence_offset) essence_offset = klv.offset; /* seek to footer, previous partition or stop */ if (mxf_parse_handle_essence(mxf) <= 0) break; continue; } else if (!memcmp(klv.key, mxf_header_partition_pack_key, 13) && klv.key[13] >= 2 && klv.key[13] <= 4 && mxf->current_partition) { /* next partition pack - keep going, seek to previous partition or stop */ if(mxf_parse_handle_partition_or_eof(mxf) <= 0) break; } for (metadata = mxf_metadata_read_table; metadata->read; metadata++) { if 
(IS_KLV_KEY(klv.key, metadata->key)) { int res; if (klv.key[5] == 0x53) { res = mxf_read_local_tags(mxf, &klv, metadata->read, metadata->ctx_size, metadata->type); } else { uint64_t next = avio_tell(s->pb) + klv.length; res = metadata->read(mxf, s->pb, 0, klv.length, klv.key, klv.offset); avio_seek(s->pb, next, SEEK_SET); } if (res < 0) { av_log(s, AV_LOG_ERROR, \"error reading header metadata\\n\"); return res; } break; } } if (!metadata->read) avio_skip(s->pb, klv.length); } /* FIXME avoid seek */ if (!essence_offset) { av_log(s, AV_LOG_ERROR, \"no essence\\n\"); return AVERROR_INVALIDDATA; } avio_seek(s->pb, essence_offset, SEEK_SET); mxf_compute_essence_containers(mxf); return mxf_parse_structural_metadata(mxf); }"} {"target": 1, "idx": 1883, "func": "static opj_image_t *mj2_create_image(AVCodecContext *avctx, opj_cparameters_t *parameters) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt); opj_image_cmptparm_t cmptparm[4] = {{0}}; opj_image_t *img; int i; int sub_dx[4]; int sub_dy[4]; int numcomps; OPJ_COLOR_SPACE color_space = CLRSPC_UNKNOWN; sub_dx[0] = sub_dx[3] = 1; sub_dy[0] = sub_dy[3] = 1; sub_dx[1] = sub_dx[2] = 1 << desc->log2_chroma_w; sub_dy[1] = sub_dy[2] = 1 << desc->log2_chroma_h; numcomps = desc->nb_components; switch (avctx->pix_fmt) { case AV_PIX_FMT_GRAY8: case AV_PIX_FMT_YA8: case AV_PIX_FMT_GRAY16: case AV_PIX_FMT_YA16: color_space = CLRSPC_GRAY; break; case AV_PIX_FMT_RGB24: case AV_PIX_FMT_RGBA: case AV_PIX_FMT_RGB48: case AV_PIX_FMT_RGBA64: case AV_PIX_FMT_GBR24P: case AV_PIX_FMT_GBRP9: case AV_PIX_FMT_GBRP10: case AV_PIX_FMT_GBRP12: case AV_PIX_FMT_GBRP14: case AV_PIX_FMT_GBRP16: case AV_PIX_FMT_XYZ12: color_space = CLRSPC_SRGB; break; case AV_PIX_FMT_YUV410P: case AV_PIX_FMT_YUV411P: case AV_PIX_FMT_YUV420P: case AV_PIX_FMT_YUV422P: case AV_PIX_FMT_YUV440P: case AV_PIX_FMT_YUV444P: case AV_PIX_FMT_YUVA420P: case AV_PIX_FMT_YUVA422P: case AV_PIX_FMT_YUVA444P: case AV_PIX_FMT_YUV420P9: case AV_PIX_FMT_YUV422P9: case AV_PIX_FMT_YUV444P9: case AV_PIX_FMT_YUVA420P9: case AV_PIX_FMT_YUVA422P9: case AV_PIX_FMT_YUVA444P9: case AV_PIX_FMT_YUV420P10: case AV_PIX_FMT_YUV422P10: case AV_PIX_FMT_YUV444P10: case AV_PIX_FMT_YUVA420P10: case AV_PIX_FMT_YUVA422P10: case AV_PIX_FMT_YUVA444P10: case AV_PIX_FMT_YUV420P12: case AV_PIX_FMT_YUV422P12: case AV_PIX_FMT_YUV444P12: case AV_PIX_FMT_YUV420P14: case AV_PIX_FMT_YUV422P14: case AV_PIX_FMT_YUV444P14: case AV_PIX_FMT_YUV420P16: case AV_PIX_FMT_YUV422P16: case AV_PIX_FMT_YUV444P16: case AV_PIX_FMT_YUVA420P16: case AV_PIX_FMT_YUVA422P16: case AV_PIX_FMT_YUVA444P16: color_space = CLRSPC_SYCC; break; default: av_log(avctx, AV_LOG_ERROR, \"The requested pixel format '%s' is not supported\\n\", av_get_pix_fmt_name(avctx->pix_fmt)); } for (i = 0; i < numcomps; i++) { cmptparm[i].prec = desc->comp[i].depth_minus1 + 1; cmptparm[i].bpp = desc->comp[i].depth_minus1 + 1; cmptparm[i].sgnd = 0; cmptparm[i].dx = sub_dx[i]; cmptparm[i].dy = sub_dy[i]; cmptparm[i].w = (avctx->width + sub_dx[i] - 1) / sub_dx[i]; cmptparm[i].h = (avctx->height + sub_dy[i] - 1) / sub_dy[i]; } img = opj_image_create(numcomps, cmptparm, color_space); // x0, y0 is the top left corner of the image // x1, y1 is the width, height of the reference grid img->x0 = 0; img->y0 = 0; img->x1 = (avctx->width - 1) * parameters->subsampling_dx + 1; img->y1 = (avctx->height - 1) * parameters->subsampling_dy + 1; return img; }"} {"target": 0, "idx": 1894, "func": "static int xen_platform_initfn(PCIDevice *dev) { PCIXenPlatformState *d = 
DO_UPCAST(PCIXenPlatformState, pci_dev, dev); uint8_t *pci_conf; pci_conf = d->pci_dev.config; pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY); pci_config_set_prog_interface(pci_conf, 0); pci_conf[PCI_INTERRUPT_PIN] = 1; pci_register_bar(&d->pci_dev, 0, 0x100, PCI_BASE_ADDRESS_SPACE_IO, platform_ioport_map); /* reserve 16MB mmio address for share memory*/ pci_register_bar(&d->pci_dev, 1, 0x1000000, PCI_BASE_ADDRESS_MEM_PREFETCH, platform_mmio_map); platform_fixed_ioport_init(d); return 0; }"} {"target": 0, "idx": 1896, "func": "static int proxy_symlink(FsContext *fs_ctx, const char *oldpath, V9fsPath *dir_path, const char *name, FsCred *credp) { int retval; V9fsString fullname, target; v9fs_string_init(&fullname); v9fs_string_init(&target); v9fs_string_sprintf(&fullname, \"%s/%s\", dir_path->data, name); v9fs_string_sprintf(&target, \"%s\", oldpath); retval = v9fs_request(fs_ctx->private, T_SYMLINK, NULL, \"ssdd\", &target, &fullname, credp->fc_uid, credp->fc_gid); v9fs_string_free(&fullname); v9fs_string_free(&target); if (retval < 0) { errno = -retval; retval = -1; } return retval; }"} {"target": 0, "idx": 1900, "func": "static int vfio_enable_intx(VFIODevice *vdev) { VFIOIRQSetFD irq_set_fd = { .irq_set = { .argsz = sizeof(irq_set_fd), .flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER, .index = VFIO_PCI_INTX_IRQ_INDEX, .start = 0, .count = 1, }, }; uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1); int ret; if (vdev->intx.disabled || !pin) { return 0; } vfio_disable_interrupts(vdev); vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */ ret = event_notifier_init(&vdev->intx.interrupt, 0); if (ret) { error_report(\"vfio: Error: event_notifier_init failed\\n\"); return ret; } irq_set_fd.fd = event_notifier_get_fd(&vdev->intx.interrupt); qemu_set_fd_handler(irq_set_fd.fd, vfio_intx_interrupt, NULL, vdev); if (ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set_fd)) { error_report(\"vfio: Error: Failed to setup INTx fd: %m\\n\"); return -errno; } /* * Disable mmaps so we can trap on BAR accesses. We interpret any * access as a response to an interrupt and unmask the physical * device. The device will re-assert if the interrupt is still * pending. We'll likely retrigger on the host multiple times per * guest interrupt, but without EOI notification it's better than * nothing. Acceleration paths through KVM will avoid this. */ vfio_mmap_set_enabled(vdev, false); vdev->interrupt = VFIO_INT_INTx; DPRINTF(\"%s(%04x:%02x:%02x.%x)\\n\", __func__, vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function); return 0; }"} {"target": 0, "idx": 1902, "func": "void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { int i = 0; int x = 0; uint32_t l_64 = (l + 1) / 8; HELPER_LOG(\"%s l %d dest %\" PRIx64 \" src %\" PRIx64 \"\\n\", __func__, l, dest, src); #ifndef CONFIG_USER_ONLY if ((l > 32) && (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) && (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) { if (dest == (src + 1)) { mvc_fast_memset(env, l + 1, dest, cpu_ldub_data(env, src)); return; } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) { mvc_fast_memmove(env, l + 1, dest, src); return; } } #else if (dest == (src + 1)) { memset(g2h(dest), cpu_ldub_data(env, src), l + 1); return; /* mvc and memmove do not behave the same when areas overlap! 
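The memmove shortcut below is therefore only used when dest lies below src or the two ranges do not overlap at all.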
*/ } else if ((dest < src) || (src + l < dest)) { memmove(g2h(dest), g2h(src), l + 1); return; } #endif /* handle the parts that fit into 8-byte loads/stores */ if ((dest + 8 <= src) || (src + 8 <= dest)) { for (i = 0; i < l_64; i++) { cpu_stq_data(env, dest + x, cpu_ldq_data(env, src + x)); x += 8; } } /* slow version with byte accesses which always work */ for (i = x; i <= l; i++) { cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i)); } }"} {"target": 0, "idx": 1905, "func": "void helper_store_fpcr (uint64_t val) { #ifdef CONFIG_SOFTFLOAT set_float_exception_flags((val >> 52) & 0x3F, &FP_STATUS); #endif switch ((val >> 58) & 3) { case 0: set_float_rounding_mode(float_round_to_zero, &FP_STATUS); break; case 1: set_float_rounding_mode(float_round_down, &FP_STATUS); break; case 2: set_float_rounding_mode(float_round_nearest_even, &FP_STATUS); break; case 3: set_float_rounding_mode(float_round_up, &FP_STATUS); break; } }"} {"target": 0, "idx": 1909, "func": "void spapr_tce_set_bypass(sPAPRTCETable *tcet, bool bypass) { tcet->bypass = bypass; }"} {"target": 0, "idx": 1917, "func": "void qemu_system_wakeup_request(WakeupReason reason) { if (!is_suspended) { return; } if (!(wakeup_reason_mask & (1 << reason))) { return; } runstate_set(RUN_STATE_RUNNING); monitor_protocol_event(QEVENT_WAKEUP, NULL); notifier_list_notify(&wakeup_notifiers, &reason); reset_requested = 1; qemu_notify_event(); is_suspended = false; }"} {"target": 1, "idx": 1937, "func": "static int rtsp_listen(AVFormatContext *s) { RTSPState *rt = s->priv_data; char proto[128], host[128], path[512], auth[128]; char uri[500]; int port; int default_port = RTSP_DEFAULT_PORT; char tcpname[500]; const char *lower_proto = \"tcp\"; unsigned char rbuf[4096]; unsigned char method[10]; int rbuflen = 0; int ret; enum RTSPMethod methodcode; if (!rt->protocols) { rt->protocols = ffurl_get_protocols(NULL, NULL); if (!rt->protocols) return AVERROR(ENOMEM); } /* extract hostname and port */ av_url_split(proto, sizeof(proto), auth, sizeof(auth), host, sizeof(host), &port, path, sizeof(path), s->filename); /* ff_url_join. 
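Rebuild the control URI from the pieces split out of s->filename above.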
No authorization by now (NULL) */ ff_url_join(rt->control_uri, sizeof(rt->control_uri), proto, NULL, host, port, \"%s\", path); if (!strcmp(proto, \"rtsps\")) { lower_proto = \"tls\"; default_port = RTSPS_DEFAULT_PORT; } if (port < 0) port = default_port; /* Create TCP connection */ ff_url_join(tcpname, sizeof(tcpname), lower_proto, NULL, host, port, \"?listen&listen_timeout=%d\", rt->initial_timeout * 1000); if (ret = ffurl_open(&rt->rtsp_hd, tcpname, AVIO_FLAG_READ_WRITE, &s->interrupt_callback, NULL, rt->protocols)) { av_log(s, AV_LOG_ERROR, \"Unable to open RTSP for listening\\n\"); return ret; } rt->state = RTSP_STATE_IDLE; rt->rtsp_hd_out = rt->rtsp_hd; for (;;) { /* Wait for incoming RTSP messages */ ret = read_line(s, rbuf, sizeof(rbuf), &rbuflen); if (ret < 0) return ret; ret = parse_command_line(s, rbuf, rbuflen, uri, sizeof(uri), method, sizeof(method), &methodcode); if (ret) { av_log(s, AV_LOG_ERROR, \"RTSP: Unexpected Command\\n\"); return ret; } if (methodcode == ANNOUNCE) { ret = rtsp_read_announce(s); rt->state = RTSP_STATE_PAUSED; } else if (methodcode == OPTIONS) { ret = rtsp_read_options(s); } else if (methodcode == RECORD) { ret = rtsp_read_record(s); if (!ret) return 0; // We are ready for streaming } else if (methodcode == SETUP) ret = rtsp_read_setup(s, host, uri); if (ret) { ffurl_close(rt->rtsp_hd); return AVERROR_INVALIDDATA; } } return 0; }"} {"target": 1, "idx": 1945, "func": "int ff_wma_run_level_decode(AVCodecContext *avctx, GetBitContext *gb, VLC *vlc, const float *level_table, const uint16_t *run_table, int version, WMACoef *ptr, int offset, int num_coefs, int block_len, int frame_len_bits, int coef_nb_bits) { int code, level, sign; const uint32_t *ilvl = (const uint32_t *) level_table; uint32_t *iptr = (uint32_t *) ptr; const unsigned int coef_mask = block_len - 1; for (; offset < num_coefs; offset++) { code = get_vlc2(gb, vlc->table, VLCBITS, VLCMAX); if (code > 1) { /** normal code */ offset += run_table[code]; sign = get_bits1(gb) - 1; iptr[offset & coef_mask] = ilvl[code] ^ sign << 31; } else if (code == 1) { /** EOB */ break; } else { /** escape */ if (!version) { level = get_bits(gb, coef_nb_bits); /** NOTE: this is rather suboptimal. 
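The escape run length is read with frame_len_bits here, so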
reading * block_len_bits would be better */ offset += get_bits(gb, frame_len_bits); } else { level = ff_wma_get_large_val(gb); /** escape decode */ if (get_bits1(gb)) { if (get_bits1(gb)) { if (get_bits1(gb)) { av_log(avctx, AV_LOG_ERROR, \"broken escape sequence\\n\"); return -1; } else offset += get_bits(gb, frame_len_bits) + 4; } else offset += get_bits(gb, 2) + 1; } } sign = get_bits1(gb) - 1; ptr[offset & coef_mask] = (level ^ sign) - sign; } } /** NOTE: EOB can be omitted */ if (offset > num_coefs) { av_log(avctx, AV_LOG_ERROR, \"overflow in spectral RLE, ignoring\\n\"); return -1; } return 0; }"} {"target": 0, "idx": 1946, "func": "int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, AVFilterInOut *open_inputs, AVFilterInOut *open_outputs, AVClass *log_ctx) { int index = 0, ret; char chr = 0; AVFilterInOut *curr_inputs = NULL; do { AVFilterContext *filter; filters += strspn(filters, WHITESPACES); if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, log_ctx)) < 0) goto fail; if ((ret = parse_filter(&filter, &filters, graph, index, log_ctx)) < 0) goto fail; if (filter->input_count == 1 && !curr_inputs && !index) { /* First input can be omitted if it is \"[in]\" */ const char *tmp = \"[in]\"; if ((ret = parse_inputs(&tmp, &curr_inputs, &open_outputs, log_ctx)) < 0) goto fail; } if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, log_ctx)) < 0) goto fail; if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs, log_ctx)) < 0) goto fail; filters += strspn(filters, WHITESPACES); chr = *filters++; if (chr == ';' && curr_inputs) { av_log(log_ctx, AV_LOG_ERROR, \"Could not find a output to link when parsing \\\"%s\\\"\\n\", filters - 1); ret = AVERROR(EINVAL); goto fail; } index++; } while (chr == ',' || chr == ';'); if (chr) { av_log(log_ctx, AV_LOG_ERROR, \"Unable to parse graph description substring: \\\"%s\\\"\\n\", filters - 1); ret = AVERROR(EINVAL); goto fail; } if (open_inputs && !strcmp(open_inputs->name, \"out\") && curr_inputs) { /* Last output can be omitted if it is \"[out]\" */ const char *tmp = \"[out]\"; if ((ret = parse_outputs(&tmp, &curr_inputs, &open_inputs, &open_outputs, log_ctx)) < 0) goto fail; } return 0; fail: avfilter_graph_free(graph); free_inout(open_inputs); free_inout(open_outputs); free_inout(curr_inputs); return ret; }"} {"target": 0, "idx": 1981, "func": "static int virtio_net_device_exit(DeviceState *qdev) { VirtIONet *n = VIRTIO_NET(qdev); VirtIODevice *vdev = VIRTIO_DEVICE(qdev); int i; /* This will stop vhost backend if appropriate. 
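Dropping the status to 0 brings the device down before the queues, timers and NIC below are torn down.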
*/ virtio_net_set_status(vdev, 0); unregister_savevm(qdev, \"virtio-net\", n); if (n->netclient_name) { g_free(n->netclient_name); n->netclient_name = NULL; } if (n->netclient_type) { g_free(n->netclient_type); n->netclient_type = NULL; } g_free(n->mac_table.macs); g_free(n->vlans); for (i = 0; i < n->max_queues; i++) { VirtIONetQueue *q = &n->vqs[i]; NetClientState *nc = qemu_get_subqueue(n->nic, i); qemu_purge_queued_packets(nc); if (q->tx_timer) { timer_del(q->tx_timer); timer_free(q->tx_timer); } else { qemu_bh_delete(q->tx_bh); } } g_free(n->vqs); qemu_del_nic(n->nic); virtio_cleanup(vdev); return 0; }"} {"target": 0, "idx": 1983, "func": "static inline void gen_neon_widen(TCGv dest, TCGv src, int size, int u) { if (u) { switch (size) { case 0: gen_helper_neon_widen_u8(dest, src); break; case 1: gen_helper_neon_widen_u16(dest, src); break; case 2: tcg_gen_extu_i32_i64(dest, src); break; default: abort(); } } else { switch (size) { case 0: gen_helper_neon_widen_s8(dest, src); break; case 1: gen_helper_neon_widen_s16(dest, src); break; case 2: tcg_gen_ext_i32_i64(dest, src); break; default: abort(); } } dead_tmp(src); }"} {"target": 0, "idx": 1990, "func": "static int sd_create_branch(BDRVSheepdogState *s) { int ret, fd; uint32_t vid; char *buf; dprintf(\"%\" PRIx32 \" is snapshot.\\n\", s->inode.vdi_id); buf = g_malloc(SD_INODE_SIZE); ret = do_sd_create(s->name, s->inode.vdi_size, s->inode.vdi_id, &vid, 1, s->addr, s->port); if (ret) { goto out; } dprintf(\"%\" PRIx32 \" is created.\\n\", vid); fd = connect_to_sdog(s->addr, s->port); if (fd < 0) { error_report(\"failed to connect\"); ret = fd; goto out; } ret = read_object(fd, buf, vid_to_vdi_oid(vid), s->inode.nr_copies, SD_INODE_SIZE, 0, s->cache_enabled); closesocket(fd); if (ret < 0) { goto out; } memcpy(&s->inode, buf, sizeof(s->inode)); s->is_snapshot = false; ret = 0; dprintf(\"%\" PRIx32 \" was newly created.\\n\", s->inode.vdi_id); out: g_free(buf); return ret; }"} {"target": 1, "idx": 1994, "func": "static int parse_uint8(DeviceState *dev, Property *prop, const char *str) { uint8_t *ptr = qdev_get_prop_ptr(dev, prop); const char *fmt; /* accept both hex and decimal */ fmt = strncasecmp(str, \"0x\",2) == 0 ? 
\"%\" PRIx8 : \"%\" PRIu8; if (sscanf(str, fmt, ptr) != 1) return -EINVAL; return 0; }"} {"target": 1, "idx": 1998, "func": "av_cold int swri_rematrix_init(SwrContext *s){ int i, j; int nb_in = av_get_channel_layout_nb_channels(s->in_ch_layout); int nb_out = av_get_channel_layout_nb_channels(s->out_ch_layout); s->mix_any_f = NULL; if (!s->rematrix_custom) { int r = auto_matrix(s); if (r) return r; } if (s->midbuf.fmt == AV_SAMPLE_FMT_S16P){ s->native_matrix = av_calloc(nb_in * nb_out, sizeof(int)); s->native_one = av_mallocz(sizeof(int)); if (!s->native_matrix || !s->native_one) return AVERROR(ENOMEM); for (i = 0; i < nb_out; i++) { double rem = 0; for (j = 0; j < nb_in; j++) { double target = s->matrix[i][j] * 32768 + rem; ((int*)s->native_matrix)[i * nb_in + j] = lrintf(target); rem += target - ((int*)s->native_matrix)[i * nb_in + j]; } } *((int*)s->native_one) = 32768; s->mix_1_1_f = (mix_1_1_func_type*)copy_s16; s->mix_2_1_f = (mix_2_1_func_type*)sum2_s16; s->mix_any_f = (mix_any_func_type*)get_mix_any_func_s16(s); }else if(s->midbuf.fmt == AV_SAMPLE_FMT_FLTP){ s->native_matrix = av_calloc(nb_in * nb_out, sizeof(float)); s->native_one = av_mallocz(sizeof(float)); if (!s->native_matrix || !s->native_one) return AVERROR(ENOMEM); for (i = 0; i < nb_out; i++) for (j = 0; j < nb_in; j++) ((float*)s->native_matrix)[i * nb_in + j] = s->matrix[i][j]; *((float*)s->native_one) = 1.0; s->mix_1_1_f = (mix_1_1_func_type*)copy_float; s->mix_2_1_f = (mix_2_1_func_type*)sum2_float; s->mix_any_f = (mix_any_func_type*)get_mix_any_func_float(s); }else if(s->midbuf.fmt == AV_SAMPLE_FMT_DBLP){ s->native_matrix = av_calloc(nb_in * nb_out, sizeof(double)); s->native_one = av_mallocz(sizeof(double)); if (!s->native_matrix || !s->native_one) return AVERROR(ENOMEM); for (i = 0; i < nb_out; i++) for (j = 0; j < nb_in; j++) ((double*)s->native_matrix)[i * nb_in + j] = s->matrix[i][j]; *((double*)s->native_one) = 1.0; s->mix_1_1_f = (mix_1_1_func_type*)copy_double; s->mix_2_1_f = (mix_2_1_func_type*)sum2_double; s->mix_any_f = (mix_any_func_type*)get_mix_any_func_double(s); }else if(s->midbuf.fmt == AV_SAMPLE_FMT_S32P){ // Only for dithering currently // s->native_matrix = av_calloc(nb_in * nb_out, sizeof(double)); s->native_one = av_mallocz(sizeof(int)); if (!s->native_one) return AVERROR(ENOMEM); // for (i = 0; i < nb_out; i++) // for (j = 0; j < nb_in; j++) // ((double*)s->native_matrix)[i * nb_in + j] = s->matrix[i][j]; *((int*)s->native_one) = 32768; s->mix_1_1_f = (mix_1_1_func_type*)copy_s32; s->mix_2_1_f = (mix_2_1_func_type*)sum2_s32; s->mix_any_f = (mix_any_func_type*)get_mix_any_func_s32(s); }else av_assert0(0); //FIXME quantize for integeres for (i = 0; i < SWR_CH_MAX; i++) { int ch_in=0; for (j = 0; j < SWR_CH_MAX; j++) { s->matrix32[i][j]= lrintf(s->matrix[i][j] * 32768); if(s->matrix[i][j]) s->matrix_ch[i][++ch_in]= j; } s->matrix_ch[i][0]= ch_in; } if(HAVE_YASM && HAVE_MMX) return swri_rematrix_init_x86(s); return 0; }"} {"target": 1, "idx": 2009, "func": "void s390_machine_reset(void) { S390CPU *ipl_cpu = S390_CPU(qemu_get_cpu(0)); qemu_devices_reset(); s390_cmma_reset(); s390_crypto_reset(); /* all cpus are stopped - configure and start the ipl cpu only */ s390_ipl_prepare_cpu(ipl_cpu); s390_cpu_set_state(CPU_STATE_OPERATING, ipl_cpu); }"} {"target": 1, "idx": 2010, "func": "static int fic_decode_block(FICContext *ctx, GetBitContext *gb, uint8_t *dst, int stride, int16_t *block) { int i, num_coeff; /* Is it a skip block? */ if (get_bits1(gb)) { /* This is a P-frame. 
*/ ctx->frame->key_frame = 0; ctx->frame->pict_type = AV_PICTURE_TYPE_P; return 0; } memset(block, 0, sizeof(*block) * 64); num_coeff = get_bits(gb, 7); if (num_coeff > 64) return AVERROR_INVALIDDATA; for (i = 0; i < num_coeff; i++) block[ff_zigzag_direct[i]] = get_se_golomb(gb) * ctx->qmat[ff_zigzag_direct[i]]; fic_idct_put(dst, stride, block); return 0; }"} {"target": 1, "idx": 2027, "func": "void ff_rtsp_undo_setup(AVFormatContext *s, int send_packets) { RTSPState *rt = s->priv_data; int i; for (i = 0; i < rt->nb_rtsp_streams; i++) { RTSPStream *rtsp_st = rt->rtsp_streams[i]; if (!rtsp_st) continue; if (rtsp_st->transport_priv) { if (s->oformat) { AVFormatContext *rtpctx = rtsp_st->transport_priv; av_write_trailer(rtpctx); if (rt->lower_transport == RTSP_LOWER_TRANSPORT_TCP) { uint8_t *ptr; if (CONFIG_RTSP_MUXER && rtpctx->pb && send_packets) ff_rtsp_tcp_write_packet(s, rtsp_st); avio_close_dyn_buf(rtpctx->pb, &ptr); av_free(ptr); } else { avio_close(rtpctx->pb); } avformat_free_context(rtpctx); } else if (rt->transport == RTSP_TRANSPORT_RDT && CONFIG_RTPDEC) ff_rdt_parse_close(rtsp_st->transport_priv); else if (rt->transport == RTSP_TRANSPORT_RTP && CONFIG_RTPDEC) ff_rtp_parse_close(rtsp_st->transport_priv); } rtsp_st->transport_priv = NULL; if (rtsp_st->rtp_handle) ffurl_close(rtsp_st->rtp_handle); rtsp_st->rtp_handle = NULL; } }"} {"target": 0, "idx": 2047, "func": "static void do_token_in(USBDevice *s, USBPacket *p) { int request, value, index; assert(p->ep->nr == 0); request = (s->setup_buf[0] << 8) | s->setup_buf[1]; value = (s->setup_buf[3] << 8) | s->setup_buf[2]; index = (s->setup_buf[5] << 8) | s->setup_buf[4]; switch(s->setup_state) { case SETUP_STATE_ACK: if (!(s->setup_buf[0] & USB_DIR_IN)) { usb_device_handle_control(s, p, request, value, index, s->setup_len, s->data_buf); if (p->status == USB_RET_ASYNC) { return; } s->setup_state = SETUP_STATE_IDLE; p->actual_length = 0; } break; case SETUP_STATE_DATA: if (s->setup_buf[0] & USB_DIR_IN) { int len = s->setup_len - s->setup_index; if (len > p->iov.size) { len = p->iov.size; } usb_packet_copy(p, s->data_buf + s->setup_index, len); s->setup_index += len; if (s->setup_index >= s->setup_len) { s->setup_state = SETUP_STATE_ACK; } return; } s->setup_state = SETUP_STATE_IDLE; p->status = USB_RET_STALL; break; default: p->status = USB_RET_STALL; } }"} {"target": 0, "idx": 2049, "func": "static int get_video_buffer(AVFrame *frame, int align) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); int ret, i; if (!desc) return AVERROR(EINVAL); if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0) return ret; if (!frame->linesize[0]) { ret = av_image_fill_linesizes(frame->linesize, frame->format, frame->width); if (ret < 0) return ret; for (i = 0; i < 4 && frame->linesize[i]; i++) frame->linesize[i] = FFALIGN(frame->linesize[i], align); } for (i = 0; i < 4 && frame->linesize[i]; i++) { int h = FFALIGN(frame->height, 32); if (i == 1 || i == 2) h = -((-h) >> desc->log2_chroma_h); frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h); if (!frame->buf[i]) goto fail; frame->data[i] = frame->buf[i]->data; } if (desc->flags & PIX_FMT_PAL || desc->flags & PIX_FMT_PSEUDOPAL) { av_buffer_unref(&frame->buf[1]); frame->buf[1] = av_buffer_alloc(1024); if (!frame->buf[1]) goto fail; frame->data[1] = frame->buf[1]->data; } frame->extended_data = frame->data; return 0; fail: av_frame_unref(frame); return AVERROR(ENOMEM); }"} {"target": 0, "idx": 2053, "func": "static int kvm_get_msrs(X86CPU *cpu) { CPUX86State 
*env = &cpu->env; struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries; int ret, i; uint64_t mtrr_top_bits; kvm_msr_buf_reset(cpu); kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0); kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0); kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0); kvm_msr_entry_add(cpu, MSR_PAT, 0); if (has_msr_star) { kvm_msr_entry_add(cpu, MSR_STAR, 0); } if (has_msr_hsave_pa) { kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0); } if (has_msr_tsc_aux) { kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0); } if (has_msr_tsc_adjust) { kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0); } if (has_msr_tsc_deadline) { kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0); } if (has_msr_misc_enable) { kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0); } if (has_msr_smbase) { kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0); } if (has_msr_feature_control) { kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0); } if (has_msr_bndcfgs) { kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0); } if (has_msr_xss) { kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0); } if (!env->tsc_valid) { kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0); env->tsc_valid = !runstate_is_running(); } #ifdef TARGET_X86_64 if (lm_capable_kernel) { kvm_msr_entry_add(cpu, MSR_CSTAR, 0); kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0); kvm_msr_entry_add(cpu, MSR_FMASK, 0); kvm_msr_entry_add(cpu, MSR_LSTAR, 0); } #endif kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0); kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0); if (has_msr_async_pf_en) { kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0); } if (has_msr_pv_eoi_en) { kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0); } if (has_msr_kvm_steal_time) { kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0); } if (has_msr_architectural_pmu) { kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0); kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0); for (i = 0; i < MAX_FIXED_COUNTERS; i++) { kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0); } for (i = 0; i < num_architectural_pmu_counters; i++) { kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0); kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0); } } if (env->mcg_cap) { kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0); kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0); if (has_msr_mcg_ext_ctl) { kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0); } for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0); } } if (has_msr_hv_hypercall) { kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0); kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0); } if (has_msr_hv_vapic) { kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0); } if (has_msr_hv_tsc) { kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0); } if (has_msr_hv_crash) { int j; for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) { kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0); } } if (has_msr_hv_runtime) { kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0); } if (cpu->hyperv_synic) { uint32_t msr; kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0); kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0); kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0); kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0); for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) { kvm_msr_entry_add(cpu, msr, 0); } } if (has_msr_hv_stimer) { uint32_t msr; for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT; msr++) { kvm_msr_entry_add(cpu, msr, 0); } } if (has_msr_mtrr) { kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0); kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0); 
kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0); kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0); for (i = 0; i < MSR_MTRRcap_VCNT; i++) { kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0); kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0); } } ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf); if (ret < 0) { return ret; } assert(ret == cpu->kvm_msr_buf->nmsrs); /* * MTRR masks: Each mask consists of 5 parts * a 10..0: must be zero * b 11 : valid bit * c n-1.12: actual mask bits * d 51..n: reserved must be zero * e 63.52: reserved must be zero * * 'n' is the number of physical bits supported by the CPU and is * apparently always <= 52. We know our 'n' but don't know what * the destinations 'n' is; it might be smaller, in which case * it masks (c) on loading. It might be larger, in which case * we fill 'd' so that d..c is consistent irrespetive of the 'n' * we're migrating to. */ if (cpu->fill_mtrr_mask) { QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52); assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS); mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits); } else { mtrr_top_bits = 0; } for (i = 0; i < ret; i++) { uint32_t index = msrs[i].index; switch (index) { case MSR_IA32_SYSENTER_CS: env->sysenter_cs = msrs[i].data; break; case MSR_IA32_SYSENTER_ESP: env->sysenter_esp = msrs[i].data; break; case MSR_IA32_SYSENTER_EIP: env->sysenter_eip = msrs[i].data; break; case MSR_PAT: env->pat = msrs[i].data; break; case MSR_STAR: env->star = msrs[i].data; break; #ifdef TARGET_X86_64 case MSR_CSTAR: env->cstar = msrs[i].data; break; case MSR_KERNELGSBASE: env->kernelgsbase = msrs[i].data; break; case MSR_FMASK: env->fmask = msrs[i].data; break; case MSR_LSTAR: env->lstar = msrs[i].data; break; #endif case MSR_IA32_TSC: env->tsc = msrs[i].data; break; case MSR_TSC_AUX: env->tsc_aux = msrs[i].data; break; case MSR_TSC_ADJUST: env->tsc_adjust = msrs[i].data; break; case MSR_IA32_TSCDEADLINE: env->tsc_deadline = msrs[i].data; break; case MSR_VM_HSAVE_PA: env->vm_hsave = msrs[i].data; break; case MSR_KVM_SYSTEM_TIME: env->system_time_msr = msrs[i].data; break; case MSR_KVM_WALL_CLOCK: env->wall_clock_msr = msrs[i].data; break; case MSR_MCG_STATUS: env->mcg_status = msrs[i].data; break; case MSR_MCG_CTL: env->mcg_ctl = msrs[i].data; break; case MSR_MCG_EXT_CTL: env->mcg_ext_ctl = msrs[i].data; break; case MSR_IA32_MISC_ENABLE: env->msr_ia32_misc_enable = msrs[i].data; break; case MSR_IA32_SMBASE: env->smbase = msrs[i].data; break; case MSR_IA32_FEATURE_CONTROL: env->msr_ia32_feature_control = msrs[i].data; break; case MSR_IA32_BNDCFGS: env->msr_bndcfgs = msrs[i].data; break; case MSR_IA32_XSS: env->xss = msrs[i].data; break; default: if (msrs[i].index >= MSR_MC0_CTL && msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) { env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data; } break; case MSR_KVM_ASYNC_PF_EN: env->async_pf_en_msr = msrs[i].data; break; case MSR_KVM_PV_EOI_EN: env->pv_eoi_en_msr = msrs[i].data; break; case MSR_KVM_STEAL_TIME: env->steal_time_msr = msrs[i].data; break; case MSR_CORE_PERF_FIXED_CTR_CTRL: env->msr_fixed_ctr_ctrl = msrs[i].data; break; case 
MSR_CORE_PERF_GLOBAL_CTRL: env->msr_global_ctrl = msrs[i].data; break; case MSR_CORE_PERF_GLOBAL_STATUS: env->msr_global_status = msrs[i].data; break; case MSR_CORE_PERF_GLOBAL_OVF_CTRL: env->msr_global_ovf_ctrl = msrs[i].data; break; case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1: env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data; break; case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1: env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data; break; case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1: env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data; break; case HV_X64_MSR_HYPERCALL: env->msr_hv_hypercall = msrs[i].data; break; case HV_X64_MSR_GUEST_OS_ID: env->msr_hv_guest_os_id = msrs[i].data; break; case HV_X64_MSR_APIC_ASSIST_PAGE: env->msr_hv_vapic = msrs[i].data; break; case HV_X64_MSR_REFERENCE_TSC: env->msr_hv_tsc = msrs[i].data; break; case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data; break; case HV_X64_MSR_VP_RUNTIME: env->msr_hv_runtime = msrs[i].data; break; case HV_X64_MSR_SCONTROL: env->msr_hv_synic_control = msrs[i].data; break; case HV_X64_MSR_SVERSION: env->msr_hv_synic_version = msrs[i].data; break; case HV_X64_MSR_SIEFP: env->msr_hv_synic_evt_page = msrs[i].data; break; case HV_X64_MSR_SIMP: env->msr_hv_synic_msg_page = msrs[i].data; break; case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data; break; case HV_X64_MSR_STIMER0_CONFIG: case HV_X64_MSR_STIMER1_CONFIG: case HV_X64_MSR_STIMER2_CONFIG: case HV_X64_MSR_STIMER3_CONFIG: env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] = msrs[i].data; break; case HV_X64_MSR_STIMER0_COUNT: case HV_X64_MSR_STIMER1_COUNT: case HV_X64_MSR_STIMER2_COUNT: case HV_X64_MSR_STIMER3_COUNT: env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] = msrs[i].data; break; case MSR_MTRRdefType: env->mtrr_deftype = msrs[i].data; break; case MSR_MTRRfix64K_00000: env->mtrr_fixed[0] = msrs[i].data; break; case MSR_MTRRfix16K_80000: env->mtrr_fixed[1] = msrs[i].data; break; case MSR_MTRRfix16K_A0000: env->mtrr_fixed[2] = msrs[i].data; break; case MSR_MTRRfix4K_C0000: env->mtrr_fixed[3] = msrs[i].data; break; case MSR_MTRRfix4K_C8000: env->mtrr_fixed[4] = msrs[i].data; break; case MSR_MTRRfix4K_D0000: env->mtrr_fixed[5] = msrs[i].data; break; case MSR_MTRRfix4K_D8000: env->mtrr_fixed[6] = msrs[i].data; break; case MSR_MTRRfix4K_E0000: env->mtrr_fixed[7] = msrs[i].data; break; case MSR_MTRRfix4K_E8000: env->mtrr_fixed[8] = msrs[i].data; break; case MSR_MTRRfix4K_F0000: env->mtrr_fixed[9] = msrs[i].data; break; case MSR_MTRRfix4K_F8000: env->mtrr_fixed[10] = msrs[i].data; break; case MSR_MTRRphysBase(0) ... 
MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1): if (index & 1) { env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data | mtrr_top_bits; } else { env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data; } break; } } return 0; }"} {"target": 0, "idx": 2063, "func": "host_memory_backend_can_be_deleted(UserCreatable *uc, Error **errp) { MemoryRegion *mr; mr = host_memory_backend_get_memory(MEMORY_BACKEND(uc), errp); if (memory_region_is_mapped(mr)) { return false; } else { return true; } }"} {"target": 0, "idx": 2083, "func": "av_cold int ffv1_init_slice_contexts(FFV1Context *f) { int i; f->slice_count = f->num_h_slices * f->num_v_slices; if (f->slice_count <= 0) { av_log(f->avctx, AV_LOG_ERROR, \"Invalid number of slices\\n\"); return AVERROR(EINVAL); } for (i = 0; i < f->slice_count; i++) { FFV1Context *fs = av_mallocz(sizeof(*fs)); int sx = i % f->num_h_slices; int sy = i / f->num_h_slices; int sxs = f->avctx->width * sx / f->num_h_slices; int sxe = f->avctx->width * (sx + 1) / f->num_h_slices; int sys = f->avctx->height * sy / f->num_v_slices; int sye = f->avctx->height * (sy + 1) / f->num_v_slices; f->slice_context[i] = fs; memcpy(fs, f, sizeof(*fs)); memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2)); fs->slice_width = sxe - sxs; fs->slice_height = sye - sys; fs->slice_x = sxs; fs->slice_y = sys; fs->sample_buffer = av_malloc(3 * MAX_PLANES * (fs->width + 6) * sizeof(*fs->sample_buffer)); if (!fs->sample_buffer) return AVERROR(ENOMEM); } return 0; }"} {"target": 0, "idx": 2084, "func": "static void omap_pwt_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { struct omap_pwt_s *s = (struct omap_pwt_s *) opaque; int offset = addr & OMAP_MPUI_REG_MASK; if (size != 1) { return omap_badwidth_write8(opaque, addr, value); } switch (offset) { case 0x00: /* FRC */ s->frc = value & 0x3f; break; case 0x04: /* VRC */ if ((value ^ s->vrc) & 1) { if (value & 1) printf(\"%s: %iHz buzz on\\n\", __FUNCTION__, (int) /* 1.5 MHz from a 12-MHz or 13-MHz PWT_CLK */ ((omap_clk_getrate(s->clk) >> 3) / /* Pre-multiplexer divider */ ((s->gcr & 2) ? 1 : 154) / /* Octave multiplexer */ (2 << (value & 3)) * /* 101/107 divider */ ((value & (1 << 2)) ? 101 : 107) * /* 49/55 divider */ ((value & (1 << 3)) ? 49 : 55) * /* 50/63 divider */ ((value & (1 << 4)) ? 50 : 63) * /* 80/127 divider */ ((value & (1 << 5)) ? 80 : 127) / (107 * 55 * 63 * 127))); else printf(\"%s: silence!\\n\", __FUNCTION__); } s->vrc = value & 0x7f; break; case 0x08: /* GCR */ s->gcr = value & 3; break; default: OMAP_BAD_REG(addr); return; } }"} {"target": 0, "idx": 2093, "func": "static void predictor_decode_mono(APEContext *ctx, int count) { APEPredictor *p = &ctx->predictor; int32_t *decoded0 = ctx->decoded[0]; int32_t predictionA, currentA, A, sign; currentA = p->lastA[0]; while (count--) { A = *decoded0; p->buf[YDELAYA] = currentA; p->buf[YDELAYA - 1] = p->buf[YDELAYA] - p->buf[YDELAYA - 1]; predictionA = p->buf[YDELAYA ] * p->coeffsA[0][0] + p->buf[YDELAYA - 1] * p->coeffsA[0][1] + p->buf[YDELAYA - 2] * p->coeffsA[0][2] + p->buf[YDELAYA - 3] * p->coeffsA[0][3]; currentA = A + (predictionA >> 10); p->buf[YADAPTCOEFFSA] = APESIGN(p->buf[YDELAYA ]); p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]); sign = APESIGN(A); p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA ] * sign; p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1] * sign; p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2] * sign; p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3] * sign; p->buf++; /* Have we filled the history buffer? 
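When the write pointer reaches the end, the most recent PREDICTOR_SIZE entries are copied back to the start so the window keeps sliding in place.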
*/ if (p->buf == p->historybuffer + HISTORY_SIZE) { memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(*p->historybuffer)); p->buf = p->historybuffer; } p->filterA[0] = currentA + ((p->filterA[0] * 31) >> 5); *(decoded0++) = p->filterA[0]; } p->lastA[0] = currentA; }"} {"target": 0, "idx": 2110, "func": "static void print_report(AVFormatContext **output_files, AVOutputStream **ost_table, int nb_ostreams, int is_last_report) { char buf[1024]; AVOutputStream *ost; AVFormatContext *oc; int64_t total_size; AVCodecContext *enc; int frame_number, vid, i; double bitrate, ti1, pts; static int64_t last_time = -1; static int qp_histogram[52]; if (!is_last_report) { int64_t cur_time; /* display the report every 0.5 seconds */ cur_time = av_gettime(); if (last_time == -1) { last_time = cur_time; return; } if ((cur_time - last_time) < 500000) return; last_time = cur_time; } oc = output_files[0]; total_size = avio_size(oc->pb); if(total_size<0) // FIXME improve avio_size() so it works with non seekable output too total_size= avio_tell(oc->pb); buf[0] = '\\0'; ti1 = 1e10; vid = 0; for(i=0;i<nb_ostreams;i++) { float q = -1; ost = ost_table[i]; enc = ost->st->codec; if(!ost->st->stream_copy && enc->coded_frame) q= enc->coded_frame->quality/(float)FF_QP2LAMBDA; if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) { snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), \"q=%2.1f \", q); } if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) { float t = (av_gettime()-timer_start) / 1000000.0; frame_number = ost->frame_number; snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), \"frame=%5d fps=%3d q=%3.1f \", frame_number, (t>1)?(int)(frame_number/t+0.5) : 0, q); if(is_last_report) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), \"L\"); if(qp_hist){ int j; int qp= lrintf(q); if(qp>=0 && qp<FF_ARRAY_ELEMS(qp_histogram)) qp_histogram[qp]++; for(j=0; j<32; j++) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), \"%X\", (int)lrintf(log2(qp_histogram[j]+1))); } if (enc->flags&CODEC_FLAG_PSNR){ int j; double error, error_sum=0; double scale, scale_sum=0; char type[3]= {'Y','U','V'}; snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), \"PSNR=\"); for(j=0; j<3; j++){ if(is_last_report){ error= enc->error[j]; scale= enc->width*enc->height*255.0*255.0*frame_number; }else{ error= enc->coded_frame->error[j]; scale= enc->width*enc->height*255.0*255.0; } if(j) scale/=4; error_sum += error; scale_sum += scale; snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), \"%c:%2.2f \", type[j], psnr(error/scale)); } snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), \"*:%2.2f \", psnr(error_sum/scale_sum)); } vid = 1; } /* compute min output value */ pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base); if ((pts < ti1) && (pts > 0)) ti1 = pts; } if (ti1 < 0.01) ti1 = 0.01; if (verbose || is_last_report) { bitrate = (double)(total_size * 8) / ti1 / 1000.0; snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), \"size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s\", (double)total_size / 1024, ti1, bitrate); if (nb_frames_dup || nb_frames_drop) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), \" dup=%d drop=%d\", nb_frames_dup, nb_frames_drop); if (verbose >= 0) fprintf(stderr, \"%s \\r\", buf); fflush(stderr); } if (is_last_report && verbose >= 0){ int64_t raw= audio_size + video_size + extra_size; fprintf(stderr, \"\\n\"); fprintf(stderr, \"video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\\n\", video_size/1024.0, audio_size/1024.0, extra_size/1024.0, 100.0*(total_size - raw)/raw ); } }"} {"target": 1, "idx": 2115, "func": "static av_always_inline void rv40_strong_loop_filter(uint8_t *src, const int step, const int stride, const int alpha, const int lims, const int dmode, const int chroma) { int i; for(i = 0; i < 4; i++, src 
+= stride){ int sflag, p0, q0, p1, q1; int t = src[0*step] - src[-1*step]; if (!t) continue; sflag = (alpha * FFABS(t)) >> 7; if (sflag > 1) continue; p0 = (25*src[-3*step] + 26*src[-2*step] + 26*src[-1*step] + 26*src[ 0*step] + 25*src[ 1*step] + rv40_dither_l[dmode + i]) >> 7; q0 = (25*src[-2*step] + 26*src[-1*step] + 26*src[ 0*step] + 26*src[ 1*step] + 25*src[ 2*step] + rv40_dither_r[dmode + i]) >> 7; if (sflag) { p0 = av_clip(p0, src[-1*step] - lims, src[-1*step] + lims); q0 = av_clip(q0, src[ 0*step] - lims, src[ 0*step] + lims); } p1 = (25*src[-4*step] + 26*src[-3*step] + 26*src[-2*step] + 26*p0 + 25*src[ 0*step] + rv40_dither_l[dmode + i]) >> 7; q1 = (25*src[-1*step] + 26*q0 + 26*src[ 1*step] + 26*src[ 2*step] + 25*src[ 3*step] + rv40_dither_r[dmode + i]) >> 7; if (sflag) { p1 = av_clip(p1, src[-2*step] - lims, src[-2*step] + lims); q1 = av_clip(q1, src[ 1*step] - lims, src[ 1*step] + lims); } src[-2*step] = p1; src[-1*step] = p0; src[ 0*step] = q0; src[ 1*step] = q1; if(!chroma){ src[-3*step] = (25*src[-1*step] + 26*src[-2*step] + 51*src[-3*step] + 26*src[-4*step] + 64) >> 7; src[ 2*step] = (25*src[ 0*step] + 26*src[ 1*step] + 51*src[ 2*step] + 26*src[ 3*step] + 64) >> 7; } } }"} {"target": 1, "idx": 2124, "func": "DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) { DeviceClass *dc; const char *driver, *path; DeviceState *dev; BusState *bus = NULL; Error *err = NULL; driver = qemu_opt_get(opts, \"driver\"); if (!driver) { error_setg(errp, QERR_MISSING_PARAMETER, \"driver\"); return NULL; } /* find driver */ dc = qdev_get_device_class(&driver, errp); if (!dc) { return NULL; } /* find bus */ path = qemu_opt_get(opts, \"bus\"); if (path != NULL) { bus = qbus_find(path, errp); if (!bus) { return NULL; } if (!object_dynamic_cast(OBJECT(bus), dc->bus_type)) { error_setg(errp, \"Device '%s' can't go on %s bus\", driver, object_get_typename(OBJECT(bus))); return NULL; } } else if (dc->bus_type != NULL) { bus = qbus_find_recursive(sysbus_get_default(), NULL, dc->bus_type); if (!bus || qbus_is_full(bus)) { error_setg(errp, \"No '%s' bus found for device '%s'\", dc->bus_type, driver); return NULL; } } if (qdev_hotplug && bus && !qbus_is_hotpluggable(bus)) { error_setg(errp, QERR_BUS_NO_HOTPLUG, bus->name); return NULL; } if (!migration_is_idle()) { error_setg(errp, \"device_add not allowed while migrating\"); return NULL; } /* create device */ dev = DEVICE(object_new(driver)); if (bus) { qdev_set_parent_bus(dev, bus); } qdev_set_id(dev, qemu_opts_id(opts)); /* set properties */ if (qemu_opt_foreach(opts, set_property, dev, &err)) { goto err_del_dev; } dev->opts = opts; object_property_set_bool(OBJECT(dev), true, \"realized\", &err); if (err != NULL) { dev->opts = NULL; goto err_del_dev; } return dev; err_del_dev: error_propagate(errp, err); object_unparent(OBJECT(dev)); object_unref(OBJECT(dev)); return NULL; }"} {"target": 1, "idx": 2130, "func": "static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int buf_size2) { RVDecContext *rv = avctx->priv_data; MpegEncContext *s = &rv->m; int mb_count, mb_pos, left, start_mb_x, active_bits_size, ret; active_bits_size = buf_size * 8; init_get_bits(&s->gb, buf, FFMAX(buf_size, buf_size2) * 8); if (s->codec_id == AV_CODEC_ID_RV10) mb_count = rv10_decode_picture_header(s); else mb_count = rv20_decode_picture_header(rv); if (mb_count < 0) { av_log(s->avctx, AV_LOG_ERROR, \"HEADER ERROR\\n\"); return AVERROR_INVALIDDATA; } if (s->mb_x >= s->mb_width || s->mb_y >= s->mb_height) { av_log(s->avctx, AV_LOG_ERROR, \"POS ERROR %d %d\\n\", s->mb_x, 
s->mb_y); return AVERROR_INVALIDDATA; } mb_pos = s->mb_y * s->mb_width + s->mb_x; left = s->mb_width * s->mb_height - mb_pos; if (mb_count > left) { av_log(s->avctx, AV_LOG_ERROR, \"COUNT ERROR\\n\"); return AVERROR_INVALIDDATA; } if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr == NULL) { // FIXME write parser so we always have complete frames? if (s->current_picture_ptr) { ff_er_frame_end(&s->er); ff_MPV_frame_end(s); s->mb_x = s->mb_y = s->resync_mb_x = s->resync_mb_y = 0; } if ((ret = ff_MPV_frame_start(s, avctx)) < 0) return ret; ff_mpeg_er_frame_start(s); } else { if (s->current_picture_ptr->f.pict_type != s->pict_type) { av_log(s->avctx, AV_LOG_ERROR, \"Slice type mismatch\\n\"); return AVERROR_INVALIDDATA; } } av_dlog(avctx, \"qscale=%d\\n\", s->qscale); /* default quantization values */ if (s->codec_id == AV_CODEC_ID_RV10) { if (s->mb_y == 0) s->first_slice_line = 1; } else { s->first_slice_line = 1; s->resync_mb_x = s->mb_x; } start_mb_x = s->mb_x; s->resync_mb_y = s->mb_y; if (s->h263_aic) { s->y_dc_scale_table = s->c_dc_scale_table = ff_aic_dc_scale_table; } else { s->y_dc_scale_table = s->c_dc_scale_table = ff_mpeg1_dc_scale_table; } if (s->modified_quant) s->chroma_qscale_table = ff_h263_chroma_qscale_table; ff_set_qscale(s, s->qscale); s->rv10_first_dc_coded[0] = 0; s->rv10_first_dc_coded[1] = 0; s->rv10_first_dc_coded[2] = 0; s->block_wrap[0] = s->block_wrap[1] = s->block_wrap[2] = s->block_wrap[3] = s->b8_stride; s->block_wrap[4] = s->block_wrap[5] = s->mb_stride; ff_init_block_index(s); /* decode each macroblock */ for (s->mb_num_left = mb_count; s->mb_num_left > 0; s->mb_num_left--) { int ret; ff_update_block_index(s); av_dlog(avctx, \"**mb x=%d y=%d\\n\", s->mb_x, s->mb_y); s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; ret = ff_h263_decode_mb(s, s->block); // Repeat the slice end check from ff_h263_decode_mb with our active // bitstream size if (ret != SLICE_ERROR) { int v = show_bits(&s->gb, 16); if (get_bits_count(&s->gb) + 16 > active_bits_size) v >>= get_bits_count(&s->gb) + 16 - active_bits_size; if (!v) ret = SLICE_END; } if (ret != SLICE_ERROR && active_bits_size < get_bits_count(&s->gb) && 8 * buf_size2 >= get_bits_count(&s->gb)) { active_bits_size = buf_size2 * 8; av_log(avctx, AV_LOG_DEBUG, \"update size from %d to %d\\n\", 8 * buf_size, active_bits_size); ret = SLICE_OK; } if (ret == SLICE_ERROR || active_bits_size < get_bits_count(&s->gb)) { av_log(s->avctx, AV_LOG_ERROR, \"ERROR at MB %d %d\\n\", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } if (s->pict_type != AV_PICTURE_TYPE_B) ff_h263_update_motion_val(s); ff_MPV_decode_mb(s, s->block); if (s->loop_filter) ff_h263_loop_filter(s); if (++s->mb_x == s->mb_width) { s->mb_x = 0; s->mb_y++; ff_init_block_index(s); } if (s->mb_x == s->resync_mb_x) s->first_slice_line = 0; if (ret == SLICE_END) break; } ff_er_add_slice(&s->er, start_mb_x, s->resync_mb_y, s->mb_x - 1, s->mb_y, ER_MB_END); return active_bits_size; }"} {"target": 1, "idx": 2133, "func": "static inline int cpu_gdb_index(CPUState *cpu) { #if defined(CONFIG_USER_ONLY) return cpu->host_tid; #else return cpu->cpu_index + 1; #endif }"} {"target": 1, "idx": 2142, "func": "static inline int *DEC_UQUAD(int *dst, unsigned idx, unsigned sign) { unsigned nz = idx >> 12; dst[0] = (idx & 3) * (1 + (((int)sign >> 31) << 1)); sign <<= nz & 1; nz >>= 1; dst[1] = (idx >> 2 & 3) * (1 + (((int)sign >> 31) << 1)); sign <<= nz & 1; nz >>= 1; dst[2] = (idx >> 4 & 3) * (1 + (((int)sign >> 31) << 1)); sign <<= nz & 1; nz >>= 1; dst[3] = (idx >> 6 & 
3) * (1 + (((int)sign >> 31) << 1)); return dst + 4; }"} {"target": 1, "idx": 2148, "func": "void hmp_info_block_jobs(Monitor *mon, const QDict *qdict) { BlockJobInfoList *list; Error *err = NULL; list = qmp_query_block_jobs(&err); assert(!err); if (!list) { monitor_printf(mon, \"No active jobs\\n\"); return; } while (list) { if (strcmp(list->value->type, \"stream\") == 0) { monitor_printf(mon, \"Streaming device %s: Completed %\" PRId64 \" of %\" PRId64 \" bytes, speed limit %\" PRId64 \" bytes/s\\n\", list->value->device, list->value->offset, list->value->len, list->value->speed); } else { monitor_printf(mon, \"Type %s, device %s: Completed %\" PRId64 \" of %\" PRId64 \" bytes, speed limit %\" PRId64 \" bytes/s\\n\", list->value->type, list->value->device, list->value->offset, list->value->len, list->value->speed); } list = list->next; } }"} {"target": 0, "idx": 2157, "func": "static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame) { AVFilterContext *ctx = inlink->dst; TrimContext *s = ctx->priv; int64_t start_sample, end_sample = frame->nb_samples; int64_t pts; int drop; /* drop everything if EOF has already been returned */ if (s->eof) { av_frame_free(&frame); return 0; } if (frame->pts != AV_NOPTS_VALUE) pts = av_rescale_q(frame->pts, inlink->time_base, (AVRational){ 1, inlink->sample_rate }); else pts = s->next_pts; s->next_pts = pts + frame->nb_samples; /* check if at least a part of the frame is after the start time */ if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) { start_sample = 0; } else { drop = 1; start_sample = frame->nb_samples; if (s->start_sample >= 0 && s->nb_samples + frame->nb_samples > s->start_sample) { drop = 0; start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples); } if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE && pts + frame->nb_samples > s->start_pts) { drop = 0; start_sample = FFMIN(start_sample, s->start_pts - pts); } if (drop) goto drop; } if (s->first_pts == AV_NOPTS_VALUE) s->first_pts = pts + start_sample; /* check if at least a part of the frame is before the end time */ if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) { end_sample = frame->nb_samples; } else { drop = 1; end_sample = 0; if (s->end_sample != INT64_MAX && s->nb_samples < s->end_sample) { drop = 0; end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples); } if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE && pts < s->end_pts) { drop = 0; end_sample = FFMAX(end_sample, s->end_pts - pts); } if (s->duration_tb && pts - s->first_pts < s->duration_tb) { drop = 0; end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts); } if (drop) { s->eof = 1; goto drop; } } s->nb_samples += frame->nb_samples; start_sample = FFMAX(0, start_sample); end_sample = FFMIN(frame->nb_samples, end_sample); av_assert0(start_sample < end_sample); if (start_sample) { AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample); if (!out) { av_frame_free(&frame); return AVERROR(ENOMEM); } av_frame_copy_props(out, frame); av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample, out->nb_samples, av_get_channel_layout_nb_channels(frame->channel_layout), frame->format); if (out->pts != AV_NOPTS_VALUE) out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate }, inlink->time_base); av_frame_free(&frame); frame = out; } else frame->nb_samples = end_sample; s->got_output = 1; return ff_filter_frame(ctx->outputs[0], frame); drop: s->nb_samples += frame->nb_samples; 
av_frame_free(&frame); return 0; }"} {"target": 0, "idx": 2182, "func": "static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs, const TCGArg * const args, uint16_t dead_args, uint8_t sync_args) { int flags, nb_regs, i; TCGReg reg; TCGArg arg; TCGTemp *ts; intptr_t stack_offset; size_t call_stack_size; tcg_insn_unit *func_addr; int allocate_args; TCGRegSet allocated_regs; func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs]; flags = args[nb_oargs + nb_iargs + 1]; nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs); if (nb_regs > nb_iargs) { nb_regs = nb_iargs; } /* assign stack slots first */ call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long); call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) & ~(TCG_TARGET_STACK_ALIGN - 1); allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE); if (allocate_args) { /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed, preallocate call stack */ tcg_abort(); } stack_offset = TCG_TARGET_CALL_STACK_OFFSET; for(i = nb_regs; i < nb_iargs; i++) { arg = args[nb_oargs + i]; #ifdef TCG_TARGET_STACK_GROWSUP stack_offset -= sizeof(tcg_target_long); #endif if (arg != TCG_CALL_DUMMY_ARG) { ts = &s->temps[arg]; temp_load(s, ts, tcg_target_available_regs[ts->type], s->reserved_regs); tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset); } #ifndef TCG_TARGET_STACK_GROWSUP stack_offset += sizeof(tcg_target_long); #endif } /* assign input registers */ tcg_regset_set(allocated_regs, s->reserved_regs); for(i = 0; i < nb_regs; i++) { arg = args[nb_oargs + i]; if (arg != TCG_CALL_DUMMY_ARG) { ts = &s->temps[arg]; reg = tcg_target_call_iarg_regs[i]; tcg_reg_free(s, reg, allocated_regs); if (ts->val_type == TEMP_VAL_REG) { if (ts->reg != reg) { tcg_out_mov(s, ts->type, reg, ts->reg); } } else { TCGRegSet arg_set; tcg_regset_clear(arg_set); tcg_regset_set_reg(arg_set, reg); temp_load(s, ts, arg_set, allocated_regs); } tcg_regset_set_reg(allocated_regs, reg); } } /* mark dead temporaries and free the associated registers */ for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) { if (IS_DEAD_ARG(i)) { temp_dead(s, &s->temps[args[i]]); } } /* clobber call registers */ for (i = 0; i < TCG_TARGET_NB_REGS; i++) { if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) { tcg_reg_free(s, i, allocated_regs); } } /* Save globals if they might be written by the helper, sync them if they might be read. 
*/ if (flags & TCG_CALL_NO_READ_GLOBALS) { /* Nothing to do */ } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) { sync_globals(s, allocated_regs); } else { save_globals(s, allocated_regs); } tcg_out_call(s, func_addr); /* assign output registers and emit moves if needed */ for(i = 0; i < nb_oargs; i++) { arg = args[i]; ts = &s->temps[arg]; reg = tcg_target_call_oarg_regs[i]; assert(s->reg_to_temp[reg] == NULL); if (ts->fixed_reg) { if (ts->reg != reg) { tcg_out_mov(s, ts->type, ts->reg, reg); } } else { if (ts->val_type == TEMP_VAL_REG) { s->reg_to_temp[ts->reg] = NULL; } ts->val_type = TEMP_VAL_REG; ts->reg = reg; ts->mem_coherent = 0; s->reg_to_temp[reg] = ts; if (NEED_SYNC_ARG(i)) { tcg_reg_sync(s, reg, allocated_regs); } if (IS_DEAD_ARG(i)) { temp_dead(s, ts); } } } }"} {"target": 0, "idx": 2187, "func": "static void curl_multi_do(void *arg) { BDRVCURLState *s = (BDRVCURLState *)arg; int running; int r; if (!s->multi) { return; } do { r = curl_multi_socket_all(s->multi, &running); } while(r == CURLM_CALL_MULTI_PERFORM); curl_multi_read(s); }"} {"target": 0, "idx": 2188, "func": "static int ffserver_save_avoption(const char *opt, const char *arg, int type, FFServerConfig *config) { static int hinted = 0; int ret = 0; AVDictionaryEntry *e; const AVOption *o = NULL; const char *option = NULL; const char *codec_name = NULL; char buff[1024]; AVCodecContext *ctx; AVDictionary **dict; enum AVCodecID guessed_codec_id; switch (type) { case AV_OPT_FLAG_VIDEO_PARAM: ctx = config->dummy_vctx; dict = &config->video_opts; guessed_codec_id = config->guessed_video_codec_id != AV_CODEC_ID_NONE ? config->guessed_video_codec_id : AV_CODEC_ID_H264; break; case AV_OPT_FLAG_AUDIO_PARAM: ctx = config->dummy_actx; dict = &config->audio_opts; guessed_codec_id = config->guessed_audio_codec_id != AV_CODEC_ID_NONE ? 
config->guessed_audio_codec_id : AV_CODEC_ID_AAC; break; default: av_assert0(0); } if (strchr(opt, ':')) { //explicit private option snprintf(buff, sizeof(buff), \"%s\", opt); codec_name = buff; option = strchr(buff, ':'); buff[option - buff] = '\\0'; option++; if ((ret = ffserver_set_codec(ctx, codec_name, config)) < 0) return ret; if (!ctx->codec || !ctx->priv_data) return -1; } else { option = opt; } o = av_opt_find(ctx, option, NULL, type | AV_OPT_FLAG_ENCODING_PARAM, AV_OPT_SEARCH_CHILDREN); if (!o && (!strcmp(option, \"time_base\") || !strcmp(option, \"pixel_format\") || !strcmp(option, \"video_size\") || !strcmp(option, \"codec_tag\"))) o = av_opt_find(ctx, option, NULL, 0, 0); if (!o) { report_config_error(config->filename, config->line_num, AV_LOG_ERROR, &config->errors, \"Option not found: %s\\n\", opt); if (!hinted && ctx->codec_id == AV_CODEC_ID_NONE) { hinted = 1; report_config_error(config->filename, config->line_num, AV_LOG_ERROR, NULL, \"If '%s' is a codec private option, then prefix it with codec name, \" \"for example '%s:%s %s' or define codec earlier.\\n\", opt, avcodec_get_name(guessed_codec_id) ,opt, arg); } } else if ((ret = av_opt_set(ctx, option, arg, AV_OPT_SEARCH_CHILDREN)) < 0) { report_config_error(config->filename, config->line_num, AV_LOG_ERROR, &config->errors, \"Invalid value for option %s (%s): %s\\n\", opt, arg, av_err2str(ret)); } else if ((e = av_dict_get(*dict, option, NULL, 0))) { if ((o->type == AV_OPT_TYPE_FLAGS) && arg && (arg[0] == '+' || arg[0] == '-')) return av_dict_set(dict, option, arg, AV_DICT_APPEND); report_config_error(config->filename, config->line_num, AV_LOG_ERROR, &config->errors, \"Redeclaring value of the option %s, previous value: %s\\n\", opt, e->value); } else if (av_dict_set(dict, option, arg, 0) < 0) { return AVERROR(ENOMEM); } return 0; }"} {"target": 0, "idx": 2190, "func": "static int decode_opc(MoxieCPU *cpu, DisasContext *ctx) { CPUMoxieState *env = &cpu->env; /* Local cache for the instruction opcode. */ int opcode; /* Set the default instruction length. */ int length = 2; if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { tcg_gen_debug_insn_start(ctx->pc); } /* Examine the 16-bit opcode. */ opcode = ctx->opcode; /* Decode instruction. */ if (opcode & (1 << 15)) { if (opcode & (1 << 14)) { /* This is a Form 3 instruction. */ int inst = (opcode >> 10 & 0xf); #define BRANCH(cond) \\ do { \\ int l1 = gen_new_label(); \\ tcg_gen_brcond_i32(cond, cc_a, cc_b, l1); \\ gen_goto_tb(env, ctx, 1, ctx->pc+2); \\ gen_set_label(l1); \\ gen_goto_tb(env, ctx, 0, extract_branch_offset(opcode) + ctx->pc+2); \\ ctx->bstate = BS_BRANCH; \\ } while (0) switch (inst) { case 0x00: /* beq */ BRANCH(TCG_COND_EQ); break; case 0x01: /* bne */ BRANCH(TCG_COND_NE); break; case 0x02: /* blt */ BRANCH(TCG_COND_LT); break; case 0x03: /* bgt */ BRANCH(TCG_COND_GT); break; case 0x04: /* bltu */ BRANCH(TCG_COND_LTU); break; case 0x05: /* bgtu */ BRANCH(TCG_COND_GTU); break; case 0x06: /* bge */ BRANCH(TCG_COND_GE); break; case 0x07: /* ble */ BRANCH(TCG_COND_LE); break; case 0x08: /* bgeu */ BRANCH(TCG_COND_GEU); break; case 0x09: /* bleu */ BRANCH(TCG_COND_LEU); break; default: { TCGv temp = tcg_temp_new_i32(); tcg_gen_movi_i32(cpu_pc, ctx->pc); tcg_gen_movi_i32(temp, MOXIE_EX_BAD); gen_helper_raise_exception(cpu_env, temp); tcg_temp_free_i32(temp); } break; } } else { /* This is a Form 2 instruction. 
*/ int inst = (opcode >> 12 & 0x3); switch (inst) { case 0x00: /* inc */ { int a = (opcode >> 8) & 0xf; unsigned int v = (opcode & 0xff); tcg_gen_addi_i32(REG(a), REG(a), v); } break; case 0x01: /* dec */ { int a = (opcode >> 8) & 0xf; unsigned int v = (opcode & 0xff); tcg_gen_subi_i32(REG(a), REG(a), v); } break; case 0x02: /* gsr */ { int a = (opcode >> 8) & 0xf; unsigned v = (opcode & 0xff); tcg_gen_ld_i32(REG(a), cpu_env, offsetof(CPUMoxieState, sregs[v])); } break; case 0x03: /* ssr */ { int a = (opcode >> 8) & 0xf; unsigned v = (opcode & 0xff); tcg_gen_st_i32(REG(a), cpu_env, offsetof(CPUMoxieState, sregs[v])); } break; default: { TCGv temp = tcg_temp_new_i32(); tcg_gen_movi_i32(cpu_pc, ctx->pc); tcg_gen_movi_i32(temp, MOXIE_EX_BAD); gen_helper_raise_exception(cpu_env, temp); tcg_temp_free_i32(temp); } break; } } } else { /* This is a Form 1 instruction. */ int inst = opcode >> 8; switch (inst) { case 0x00: /* nop */ break; case 0x01: /* ldi.l (immediate) */ { int reg = (opcode >> 4) & 0xf; int val = cpu_ldl_code(env, ctx->pc+2); tcg_gen_movi_i32(REG(reg), val); length = 6; } break; case 0x02: /* mov (register-to-register) */ { int dest = (opcode >> 4) & 0xf; int src = opcode & 0xf; tcg_gen_mov_i32(REG(dest), REG(src)); } break; case 0x03: /* jsra */ { TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_movi_i32(t1, ctx->pc + 6); /* Make space for the static chain and return address. */ tcg_gen_subi_i32(t2, REG(1), 8); tcg_gen_mov_i32(REG(1), t2); tcg_gen_qemu_st32(t1, REG(1), ctx->memidx); /* Push the current frame pointer. */ tcg_gen_subi_i32(t2, REG(1), 4); tcg_gen_mov_i32(REG(1), t2); tcg_gen_qemu_st32(REG(0), REG(1), ctx->memidx); /* Set the pc and $fp. */ tcg_gen_mov_i32(REG(0), REG(1)); gen_goto_tb(env, ctx, 0, cpu_ldl_code(env, ctx->pc+2)); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); ctx->bstate = BS_BRANCH; length = 6; } break; case 0x04: /* ret */ { TCGv t1 = tcg_temp_new_i32(); /* The new $sp is the old $fp. */ tcg_gen_mov_i32(REG(1), REG(0)); /* Pop the frame pointer. */ tcg_gen_qemu_ld32u(REG(0), REG(1), ctx->memidx); tcg_gen_addi_i32(t1, REG(1), 4); tcg_gen_mov_i32(REG(1), t1); /* Pop the return address and skip over the static chain slot. */ tcg_gen_qemu_ld32u(cpu_pc, REG(1), ctx->memidx); tcg_gen_addi_i32(t1, REG(1), 8); tcg_gen_mov_i32(REG(1), t1); tcg_temp_free_i32(t1); /* Jump... 
*/ tcg_gen_exit_tb(0); ctx->bstate = BS_BRANCH; } break; case 0x05: /* add.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_add_i32(REG(a), REG(a), REG(b)); } break; case 0x06: /* push */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); tcg_gen_subi_i32(t1, REG(a), 4); tcg_gen_mov_i32(REG(a), t1); tcg_gen_qemu_st32(REG(b), REG(a), ctx->memidx); tcg_temp_free_i32(t1); } break; case 0x07: /* pop */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); tcg_gen_qemu_ld32u(REG(b), REG(a), ctx->memidx); tcg_gen_addi_i32(t1, REG(a), 4); tcg_gen_mov_i32(REG(a), t1); tcg_temp_free_i32(t1); } break; case 0x08: /* lda.l */ { int reg = (opcode >> 4) & 0xf; TCGv ptr = tcg_temp_new_i32(); tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_ld32u(REG(reg), ptr, ctx->memidx); tcg_temp_free_i32(ptr); length = 6; } break; case 0x09: /* sta.l */ { int val = (opcode >> 4) & 0xf; TCGv ptr = tcg_temp_new_i32(); tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_st32(REG(val), ptr, ctx->memidx); tcg_temp_free_i32(ptr); length = 6; } break; case 0x0a: /* ld.l (register indirect) */ { int src = opcode & 0xf; int dest = (opcode >> 4) & 0xf; tcg_gen_qemu_ld32u(REG(dest), REG(src), ctx->memidx); } break; case 0x0b: /* st.l */ { int dest = (opcode >> 4) & 0xf; int val = opcode & 0xf; tcg_gen_qemu_st32(REG(val), REG(dest), ctx->memidx); } break; case 0x0c: /* ldo.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_ld32u(t2, t1, ctx->memidx); tcg_gen_mov_i32(REG(a), t2); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); length = 6; } break; case 0x0d: /* sto.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_st32(REG(b), t1, ctx->memidx); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); length = 6; } break; case 0x0e: /* cmp */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_mov_i32(cc_a, REG(a)); tcg_gen_mov_i32(cc_b, REG(b)); } break; case 0x19: /* jsr */ { int fnreg = (opcode >> 4) & 0xf; /* Load the stack pointer into T0. */ TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_movi_i32(t1, ctx->pc+2); /* Make space for the static chain and return address. */ tcg_gen_subi_i32(t2, REG(1), 8); tcg_gen_mov_i32(REG(1), t2); tcg_gen_qemu_st32(t1, REG(1), ctx->memidx); /* Push the current frame pointer. */ tcg_gen_subi_i32(t2, REG(1), 4); tcg_gen_mov_i32(REG(1), t2); tcg_gen_qemu_st32(REG(0), REG(1), ctx->memidx); /* Set the pc and $fp. 
*/ tcg_gen_mov_i32(REG(0), REG(1)); tcg_gen_mov_i32(cpu_pc, REG(fnreg)); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); tcg_gen_exit_tb(0); ctx->bstate = BS_BRANCH; } break; case 0x1a: /* jmpa */ { tcg_gen_movi_i32(cpu_pc, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_exit_tb(0); ctx->bstate = BS_BRANCH; length = 6; } break; case 0x1b: /* ldi.b (immediate) */ { int reg = (opcode >> 4) & 0xf; int val = cpu_ldl_code(env, ctx->pc+2); tcg_gen_movi_i32(REG(reg), val); length = 6; } break; case 0x1c: /* ld.b (register indirect) */ { int src = opcode & 0xf; int dest = (opcode >> 4) & 0xf; tcg_gen_qemu_ld8u(REG(dest), REG(src), ctx->memidx); } break; case 0x1d: /* lda.b */ { int reg = (opcode >> 4) & 0xf; TCGv ptr = tcg_temp_new_i32(); tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_ld8u(REG(reg), ptr, ctx->memidx); tcg_temp_free_i32(ptr); length = 6; } break; case 0x1e: /* st.b */ { int dest = (opcode >> 4) & 0xf; int val = opcode & 0xf; tcg_gen_qemu_st8(REG(val), REG(dest), ctx->memidx); } break; case 0x1f: /* sta.b */ { int val = (opcode >> 4) & 0xf; TCGv ptr = tcg_temp_new_i32(); tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_st8(REG(val), ptr, ctx->memidx); tcg_temp_free_i32(ptr); length = 6; } break; case 0x20: /* ldi.s (immediate) */ { int reg = (opcode >> 4) & 0xf; int val = cpu_ldl_code(env, ctx->pc+2); tcg_gen_movi_i32(REG(reg), val); length = 6; } break; case 0x21: /* ld.s (register indirect) */ { int src = opcode & 0xf; int dest = (opcode >> 4) & 0xf; tcg_gen_qemu_ld16u(REG(dest), REG(src), ctx->memidx); } break; case 0x22: /* lda.s */ { int reg = (opcode >> 4) & 0xf; TCGv ptr = tcg_temp_new_i32(); tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_ld16u(REG(reg), ptr, ctx->memidx); tcg_temp_free_i32(ptr); length = 6; } break; case 0x23: /* st.s */ { int dest = (opcode >> 4) & 0xf; int val = opcode & 0xf; tcg_gen_qemu_st16(REG(val), REG(dest), ctx->memidx); } break; case 0x24: /* sta.s */ { int val = (opcode >> 4) & 0xf; TCGv ptr = tcg_temp_new_i32(); tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_st16(REG(val), ptr, ctx->memidx); tcg_temp_free_i32(ptr); length = 6; } break; case 0x25: /* jmp */ { int reg = (opcode >> 4) & 0xf; tcg_gen_mov_i32(cpu_pc, REG(reg)); tcg_gen_exit_tb(0); ctx->bstate = BS_BRANCH; } break; case 0x26: /* and */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_and_i32(REG(a), REG(a), REG(b)); } break; case 0x27: /* lshr */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv sv = tcg_temp_new_i32(); tcg_gen_andi_i32(sv, REG(b), 0x1f); tcg_gen_shr_i32(REG(a), REG(a), sv); tcg_temp_free_i32(sv); } break; case 0x28: /* ashl */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv sv = tcg_temp_new_i32(); tcg_gen_andi_i32(sv, REG(b), 0x1f); tcg_gen_shl_i32(REG(a), REG(a), sv); tcg_temp_free_i32(sv); } break; case 0x29: /* sub.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_sub_i32(REG(a), REG(a), REG(b)); } break; case 0x2a: /* neg */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_neg_i32(REG(a), REG(b)); } break; case 0x2b: /* or */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_or_i32(REG(a), REG(a), REG(b)); } break; case 0x2c: /* not */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_not_i32(REG(a), REG(b)); } break; case 0x2d: /* ashr */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv sv = tcg_temp_new_i32(); tcg_gen_andi_i32(sv, REG(b), 0x1f); tcg_gen_sar_i32(REG(a), REG(a), sv); tcg_temp_free_i32(sv); } 
break; case 0x2e: /* xor */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_xor_i32(REG(a), REG(a), REG(b)); } break; case 0x2f: /* mul.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_mul_i32(REG(a), REG(a), REG(b)); } break; case 0x30: /* swi */ { int val = cpu_ldl_code(env, ctx->pc+2); TCGv temp = tcg_temp_new_i32(); tcg_gen_movi_i32(temp, val); tcg_gen_st_i32(temp, cpu_env, offsetof(CPUMoxieState, sregs[3])); tcg_gen_movi_i32(cpu_pc, ctx->pc); tcg_gen_movi_i32(temp, MOXIE_EX_SWI); gen_helper_raise_exception(cpu_env, temp); tcg_temp_free_i32(temp); length = 6; } break; case 0x31: /* div.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_movi_i32(cpu_pc, ctx->pc); gen_helper_div(REG(a), cpu_env, REG(a), REG(b)); } break; case 0x32: /* udiv.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_movi_i32(cpu_pc, ctx->pc); gen_helper_udiv(REG(a), cpu_env, REG(a), REG(b)); } break; case 0x33: /* mod.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_rem_i32(REG(a), REG(a), REG(b)); } break; case 0x34: /* umod.l */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; tcg_gen_remu_i32(REG(a), REG(a), REG(b)); } break; case 0x35: /* brk */ { TCGv temp = tcg_temp_new_i32(); tcg_gen_movi_i32(cpu_pc, ctx->pc); tcg_gen_movi_i32(temp, MOXIE_EX_BREAK); gen_helper_raise_exception(cpu_env, temp); tcg_temp_free_i32(temp); } break; case 0x36: /* ldo.b */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_ld8u(t2, t1, ctx->memidx); tcg_gen_mov_i32(REG(a), t2); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); length = 6; } break; case 0x37: /* sto.b */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_st8(REG(b), t1, ctx->memidx); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); length = 6; } break; case 0x38: /* ldo.s */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_ld16u(t2, t1, ctx->memidx); tcg_gen_mov_i32(REG(a), t2); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); length = 6; } break; case 0x39: /* sto.s */ { int a = (opcode >> 4) & 0xf; int b = opcode & 0xf; TCGv t1 = tcg_temp_new_i32(); TCGv t2 = tcg_temp_new_i32(); tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2)); tcg_gen_qemu_st16(REG(b), t1, ctx->memidx); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); length = 6; } break; default: { TCGv temp = tcg_temp_new_i32(); tcg_gen_movi_i32(cpu_pc, ctx->pc); tcg_gen_movi_i32(temp, MOXIE_EX_BAD); gen_helper_raise_exception(cpu_env, temp); tcg_temp_free_i32(temp); } break; } } return length; }"} {"target": 0, "idx": 2195, "func": "static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x) { int rex; if (opc & P_GS) { tcg_out8(s, 0x65); } if (opc & P_DATA16) { /* We should never be asking for both 16 and 64-bit operation. */ assert((opc & P_REXW) == 0); tcg_out8(s, 0x66); } if (opc & P_ADDR32) { tcg_out8(s, 0x67); } rex = 0; rex |= (opc & P_REXW) ? 0x8 : 0x0; /* REX.W */ rex |= (r & 8) >> 1; /* REX.R */ rex |= (x & 8) >> 2; /* REX.X */ rex |= (rm & 8) >> 3; /* REX.B */ /* P_REXB_{R,RM} indicates that the given register is the low byte. 
For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do, as otherwise the encoding indicates %[abcd]h. Note that the values that are ORed in merely indicate that the REX byte must be present; those bits get discarded in output. */ rex |= opc & (r >= 4 ? P_REXB_R : 0); rex |= opc & (rm >= 4 ? P_REXB_RM : 0); if (rex) { tcg_out8(s, (uint8_t)(rex | 0x40)); } if (opc & (P_EXT | P_EXT38)) { tcg_out8(s, 0x0f); if (opc & P_EXT38) { tcg_out8(s, 0x38); } } tcg_out8(s, opc); }"} {"target": 1, "idx": 2213, "func": "qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov) { BDRVQcow2State *s = bs->opaque; QEMUIOVector hd_qiov; struct iovec iov; z_stream strm; int ret, out_len; uint8_t *buf, *out_buf; uint64_t cluster_offset; if (bytes == 0) { /* align end of file to a sector boundary to ease reading with sector based I/Os */ cluster_offset = bdrv_getlength(bs->file->bs); return bdrv_truncate(bs->file, cluster_offset, PREALLOC_MODE_OFF, NULL); } buf = qemu_blockalign(bs, s->cluster_size); if (bytes != s->cluster_size) { if (bytes > s->cluster_size || offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS) { qemu_vfree(buf); return -EINVAL; } /* Zero-pad last write if image size is not cluster aligned */ memset(buf + bytes, 0, s->cluster_size - bytes); } qemu_iovec_to_buf(qiov, 0, buf, bytes); out_buf = g_malloc(s->cluster_size); /* best compression, small window, no zlib header */ memset(&strm, 0, sizeof(strm)); ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -12, 9, Z_DEFAULT_STRATEGY); if (ret != 0) { ret = -EINVAL; goto fail; } strm.avail_in = s->cluster_size; strm.next_in = (uint8_t *)buf; strm.avail_out = s->cluster_size; strm.next_out = out_buf; ret = deflate(&strm, Z_FINISH); if (ret != Z_STREAM_END && ret != Z_OK) { deflateEnd(&strm); ret = -EINVAL; goto fail; } out_len = strm.next_out - out_buf; deflateEnd(&strm); if (ret != Z_STREAM_END || out_len >= s->cluster_size) { /* could not compress: write normal cluster */ ret = qcow2_co_pwritev(bs, offset, bytes, qiov, 0); if (ret < 0) { goto fail; } goto success; } qemu_co_mutex_lock(&s->lock); cluster_offset = qcow2_alloc_compressed_cluster_offset(bs, offset, out_len); if (!cluster_offset) { qemu_co_mutex_unlock(&s->lock); ret = -EIO; goto fail; } cluster_offset &= s->cluster_offset_mask; ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len); qemu_co_mutex_unlock(&s->lock); if (ret < 0) { goto fail; } iov = (struct iovec) { .iov_base = out_buf, .iov_len = out_len, }; qemu_iovec_init_external(&hd_qiov, &iov, 1); BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0); if (ret < 0) { goto fail; } success: ret = 0; fail: qemu_vfree(buf); g_free(out_buf); return ret; }"} {"target": 1, "idx": 2229, "func": "static void gen_mfrom(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); #else if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } gen_helper_602_mfrom(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); #endif }"} {"target": 0, "idx": 2246, "func": "static inline void RENAME(yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, long width, long height, long lumStride, long chromStride, long dstStride) { RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1); }"} {"target": 0, "idx": 2265, "func": "static gboolean ga_channel_open(GAChannel *c, 
const gchar *path, GAChannelMethod method, int fd) { int ret; c->method = method; switch (c->method) { case GA_CHANNEL_VIRTIO_SERIAL: { assert(fd < 0); fd = qemu_open(path, O_RDWR | O_NONBLOCK #ifndef CONFIG_SOLARIS | O_ASYNC #endif ); if (fd == -1) { g_critical(\"error opening channel: %s\", strerror(errno)); return false; } #ifdef CONFIG_SOLARIS ret = ioctl(fd, I_SETSIG, S_OUTPUT | S_INPUT | S_HIPRI); if (ret == -1) { g_critical(\"error setting event mask for channel: %s\", strerror(errno)); close(fd); return false; } #endif ret = ga_channel_client_add(c, fd); if (ret) { g_critical(\"error adding channel to main loop\"); close(fd); return false; } break; } case GA_CHANNEL_ISA_SERIAL: { struct termios tio; assert(fd < 0); fd = qemu_open(path, O_RDWR | O_NOCTTY | O_NONBLOCK); if (fd == -1) { g_critical(\"error opening channel: %s\", strerror(errno)); return false; } tcgetattr(fd, &tio); /* set up serial port for non-canonical, dumb byte streaming */ tio.c_iflag &= ~(IGNBRK | BRKINT | IGNPAR | PARMRK | INPCK | ISTRIP | INLCR | IGNCR | ICRNL | IXON | IXOFF | IXANY | IMAXBEL); tio.c_oflag = 0; tio.c_lflag = 0; tio.c_cflag |= GA_CHANNEL_BAUDRATE_DEFAULT; /* 1 available byte min or reads will block (we'll set non-blocking * elsewhere, else we have to deal with read()=0 instead) */ tio.c_cc[VMIN] = 1; tio.c_cc[VTIME] = 0; /* flush everything waiting for read/xmit, it's garbage at this point */ tcflush(fd, TCIFLUSH); tcsetattr(fd, TCSANOW, &tio); ret = ga_channel_client_add(c, fd); if (ret) { g_critical(\"error adding channel to main loop\"); close(fd); return false; } break; } case GA_CHANNEL_UNIX_LISTEN: { if (fd < 0) { Error *local_err = NULL; fd = unix_listen(path, NULL, strlen(path), &local_err); if (local_err != NULL) { g_critical(\"%s\", error_get_pretty(local_err)); error_free(local_err); return false; } } ga_channel_listen_add(c, fd, true); break; } case GA_CHANNEL_VSOCK_LISTEN: { if (fd < 0) { Error *local_err = NULL; SocketAddress *addr; char *addr_str; addr_str = g_strdup_printf(\"vsock:%s\", path); addr = socket_parse(addr_str, &local_err); g_free(addr_str); if (local_err != NULL) { g_critical(\"%s\", error_get_pretty(local_err)); error_free(local_err); return false; } fd = socket_listen(addr, &local_err); qapi_free_SocketAddress(addr); if (local_err != NULL) { g_critical(\"%s\", error_get_pretty(local_err)); error_free(local_err); return false; } } ga_channel_listen_add(c, fd, true); break; } default: g_critical(\"error binding/listening to specified socket\"); return false; } return true; }"} {"target": 0, "idx": 2280, "func": "static int ram_save_block(QEMUFile *f) { RAMBlock *block = last_block; ram_addr_t offset = last_offset; int bytes_sent = -1; MemoryRegion *mr; if (!block) block = QLIST_FIRST(&ram_list.blocks); do { mr = block->mr; if (memory_region_get_dirty(mr, offset, TARGET_PAGE_SIZE, DIRTY_MEMORY_MIGRATION)) { uint8_t *p; int cont = (block == last_block) ? 
RAM_SAVE_FLAG_CONTINUE : 0; memory_region_reset_dirty(mr, offset, TARGET_PAGE_SIZE, DIRTY_MEMORY_MIGRATION); p = memory_region_get_ram_ptr(mr) + offset; if (is_dup_page(p)) { save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS); qemu_put_byte(f, *p); bytes_sent = 1; } else { save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE); qemu_put_buffer(f, p, TARGET_PAGE_SIZE); bytes_sent = TARGET_PAGE_SIZE; } break; } offset += TARGET_PAGE_SIZE; if (offset >= block->length) { offset = 0; block = QLIST_NEXT(block, next); if (!block) block = QLIST_FIRST(&ram_list.blocks); } } while (block != last_block || offset != last_offset); last_block = block; last_offset = offset; return bytes_sent; }"} {"target": 0, "idx": 2297, "func": "bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa, size_t len) { hwaddr mapped_len = 0; struct iovec *ventry; assert(pkt); assert(pkt->max_raw_frags > pkt->raw_frags); if (!len) { return true; } ventry = &pkt->raw[pkt->raw_frags]; mapped_len = len; ventry->iov_base = cpu_physical_memory_map(pa, &mapped_len, false); ventry->iov_len = mapped_len; pkt->raw_frags += !!ventry->iov_base; if ((ventry->iov_base == NULL) || (len != mapped_len)) { return false; } return true; }"} {"target": 0, "idx": 2298, "func": "yuv2422_2_c_template(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *ubuf0, const uint16_t *ubuf1, const uint16_t *vbuf0, const uint16_t *vbuf1, const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum PixelFormat target) { int yalpha1 = 4095 - yalpha; int uvalpha1 = 4095 - uvalpha; int i; for (i = 0; i < (dstW >> 1); i++) { int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19; int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19; int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19; int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19; output_pixels(i * 4, Y1, U, Y2, V); } }"} {"target": 0, "idx": 2300, "func": "static av_always_inline void emulated_edge_mc(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h, int src_x, int src_y, int w, int h, emu_edge_core_func *core_fn) { int start_y, start_x, end_y, end_x, src_y_add = 0; if (src_y >= h) { src_y_add = h - 1 - src_y; src_y = h - 1; } else if (src_y <= -block_h) { src_y_add = 1 - block_h - src_y; src_y = 1 - block_h; } if (src_x >= w) { src += w - 1 - src_x; src_x = w - 1; } else if (src_x <= -block_w) { src += 1 - block_w - src_x; src_x = 1 - block_w; } start_y = FFMAX(0, -src_y); start_x = FFMAX(0, -src_x); end_y = FFMIN(block_h, h-src_y); end_x = FFMIN(block_w, w-src_x); av_assert2(start_x < end_x && block_w > 0); av_assert2(start_y < end_y && block_h > 0); // fill in the to-be-copied part plus all above/below src += (src_y_add + start_y) * linesize + start_x; buf += start_x; core_fn(buf, src, linesize, start_y, end_y, block_h, start_x, end_x, block_w); }"} {"target": 1, "idx": 2325, "func": "static av_cold int init_bundles(BinkContext *c) { int bw, bh, blocks; int i; bw = (c->avctx->width + 7) >> 3; bh = (c->avctx->height + 7) >> 3; blocks = bw * bh; for (i = 0; i < BINKB_NB_SRC; i++) { c->bundle[i].data = av_malloc(blocks * 64); if (!c->bundle[i].data) return AVERROR(ENOMEM); c->bundle[i].data_end = c->bundle[i].data + blocks * 64; } return 0; }"} {"target": 1, "idx": 2328, "func": "static void quorum_vote(QuorumAIOCB *acb) { bool quorum = true; int i, j, ret; QuorumVoteValue hash; BDRVQuorumState *s = acb->common.bs->opaque; QuorumVoteVersion *winner; if 
(quorum_has_too_much_io_failed(acb)) { return; } /* get the index of the first successful read */ for (i = 0; i < s->num_children; i++) { if (!acb->qcrs[i].ret) { break; } } assert(i < s->num_children); /* compare this read with all other successful reads stopping at quorum * failure */ for (j = i + 1; j < s->num_children; j++) { if (acb->qcrs[j].ret) { continue; } quorum = quorum_compare(acb, &acb->qcrs[i].qiov, &acb->qcrs[j].qiov); if (!quorum) { break; } } /* Every successful read agrees */ if (quorum) { quorum_copy_qiov(acb->qiov, &acb->qcrs[i].qiov); return; } /* compute hashes for each successful read, also store indexes */ for (i = 0; i < s->num_children; i++) { if (acb->qcrs[i].ret) { continue; } ret = quorum_compute_hash(acb, i, &hash); /* if ever the hash computation failed */ if (ret < 0) { acb->vote_ret = ret; goto free_exit; } quorum_count_vote(&acb->votes, &hash, i); } /* vote to select the most represented version */ winner = quorum_get_vote_winner(&acb->votes); /* if the winner count is smaller than threshold the read fails */ if (winner->vote_count < s->threshold) { quorum_report_failure(acb); acb->vote_ret = -EIO; goto free_exit; } /* we have a winner: copy it */ quorum_copy_qiov(acb->qiov, &acb->qcrs[winner->index].qiov); /* some versions are bad print them */ quorum_report_bad_versions(s, acb, &winner->value); free_exit: /* free lists */ quorum_free_vote_list(&acb->votes); }"} {"target": 1, "idx": 2329, "func": "static void stellaris_init(const char *kernel_filename, const char *cpu_model, stellaris_board_info *board) { static const int uart_irq[] = {5, 6, 33, 34}; static const int timer_irq[] = {19, 21, 23, 35}; static const uint32_t gpio_addr[7] = { 0x40004000, 0x40005000, 0x40006000, 0x40007000, 0x40024000, 0x40025000, 0x40026000}; static const int gpio_irq[7] = {0, 1, 2, 3, 4, 30, 31}; qemu_irq *pic; DeviceState *gpio_dev[7]; qemu_irq gpio_in[7][8]; qemu_irq gpio_out[7][8]; qemu_irq adc; int sram_size; int flash_size; I2CBus *i2c; DeviceState *dev; int i; int j; MemoryRegion *sram = g_new(MemoryRegion, 1); MemoryRegion *flash = g_new(MemoryRegion, 1); MemoryRegion *system_memory = get_system_memory(); flash_size = (((board->dc0 & 0xffff) + 1) << 1) * 1024; sram_size = ((board->dc0 >> 18) + 1) * 1024; /* Flash programming is done via the SCU, so pretend it is ROM. */ memory_region_init_ram(flash, NULL, \"stellaris.flash\", flash_size, &error_abort); vmstate_register_ram_global(flash); memory_region_set_readonly(flash, true); memory_region_add_subregion(system_memory, 0, flash); memory_region_init_ram(sram, NULL, \"stellaris.sram\", sram_size, &error_abort); vmstate_register_ram_global(sram); memory_region_add_subregion(system_memory, 0x20000000, sram); pic = armv7m_init(system_memory, flash_size, NUM_IRQ_LINES, kernel_filename, cpu_model); if (board->dc1 & (1 << 16)) { dev = sysbus_create_varargs(TYPE_STELLARIS_ADC, 0x40038000, pic[14], pic[15], pic[16], pic[17], NULL); adc = qdev_get_gpio_in(dev, 0); } else { adc = NULL; } for (i = 0; i < 4; i++) { if (board->dc2 & (0x10000 << i)) { dev = sysbus_create_simple(TYPE_STELLARIS_GPTM, 0x40030000 + i * 0x1000, pic[timer_irq[i]]); /* TODO: This is incorrect, but we get away with it because the ADC output is only ever pulsed. 
*/ qdev_connect_gpio_out(dev, 0, adc); } } stellaris_sys_init(0x400fe000, pic[28], board, nd_table[0].macaddr.a); for (i = 0; i < 7; i++) { if (board->dc4 & (1 << i)) { gpio_dev[i] = sysbus_create_simple(\"pl061_luminary\", gpio_addr[i], pic[gpio_irq[i]]); for (j = 0; j < 8; j++) { gpio_in[i][j] = qdev_get_gpio_in(gpio_dev[i], j); gpio_out[i][j] = NULL; } } } if (board->dc2 & (1 << 12)) { dev = sysbus_create_simple(TYPE_STELLARIS_I2C, 0x40020000, pic[8]); i2c = (I2CBus *)qdev_get_child_bus(dev, \"i2c\"); if (board->peripherals & BP_OLED_I2C) { i2c_create_slave(i2c, \"ssd0303\", 0x3d); } } for (i = 0; i < 4; i++) { if (board->dc2 & (1 << i)) { sysbus_create_simple(\"pl011_luminary\", 0x4000c000 + i * 0x1000, pic[uart_irq[i]]); } } if (board->dc2 & (1 << 4)) { dev = sysbus_create_simple(\"pl022\", 0x40008000, pic[7]); if (board->peripherals & BP_OLED_SSI) { void *bus; DeviceState *sddev; DeviceState *ssddev; /* Some boards have both an OLED controller and SD card connected to * the same SSI port, with the SD card chip select connected to a * GPIO pin. Technically the OLED chip select is connected to the * SSI Fss pin. We do not bother emulating that as both devices * should never be selected simultaneously, and our OLED controller * ignores stray 0xff commands that occur when deselecting the SD * card. */ bus = qdev_get_child_bus(dev, \"ssi\"); sddev = ssi_create_slave(bus, \"ssi-sd\"); ssddev = ssi_create_slave(bus, \"ssd0323\"); gpio_out[GPIO_D][0] = qemu_irq_split( qdev_get_gpio_in_named(sddev, SSI_GPIO_CS, 0), qdev_get_gpio_in_named(ssddev, SSI_GPIO_CS, 0)); gpio_out[GPIO_C][7] = qdev_get_gpio_in(ssddev, 0); /* Make sure the select pin is high. */ qemu_irq_raise(gpio_out[GPIO_D][0]); } } if (board->dc4 & (1 << 28)) { DeviceState *enet; qemu_check_nic_model(&nd_table[0], \"stellaris\"); enet = qdev_create(NULL, \"stellaris_enet\"); qdev_set_nic_properties(enet, &nd_table[0]); qdev_init_nofail(enet); sysbus_mmio_map(SYS_BUS_DEVICE(enet), 0, 0x40048000); sysbus_connect_irq(SYS_BUS_DEVICE(enet), 0, pic[42]); } if (board->peripherals & BP_GAMEPAD) { qemu_irq gpad_irq[5]; static const int gpad_keycode[5] = { 0xc8, 0xd0, 0xcb, 0xcd, 0x1d }; gpad_irq[0] = qemu_irq_invert(gpio_in[GPIO_E][0]); /* up */ gpad_irq[1] = qemu_irq_invert(gpio_in[GPIO_E][1]); /* down */ gpad_irq[2] = qemu_irq_invert(gpio_in[GPIO_E][2]); /* left */ gpad_irq[3] = qemu_irq_invert(gpio_in[GPIO_E][3]); /* right */ gpad_irq[4] = qemu_irq_invert(gpio_in[GPIO_F][1]); /* select */ stellaris_gamepad_init(5, gpad_irq, gpad_keycode); } for (i = 0; i < 7; i++) { if (board->dc4 & (1 << i)) { for (j = 0; j < 8; j++) { if (gpio_out[i][j]) { qdev_connect_gpio_out(gpio_dev[i], j, gpio_out[i][j]); } } } } }"} {"target": 1, "idx": 2334, "func": "void cpu_exec_init(CPUState *env) { CPUState **penv; int cpu_index; if (!code_gen_ptr) { code_gen_ptr = code_gen_buffer; page_init(); io_mem_init(); } env->next_cpu = NULL; penv = &first_cpu; cpu_index = 0; while (*penv != NULL) { penv = (CPUState **)&(*penv)->next_cpu; cpu_index++; } env->cpu_index = cpu_index; *penv = env; }"} {"target": 1, "idx": 2348, "func": "static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, long width, long height, long lumStride, long chromStride, long dstStride) { RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1); }"} {"target": 1, "idx": 2349, "func": "static inline bool handler_is_async(const mon_cmd_t *cmd) { return cmd->flags & MONITOR_CMD_ASYNC; }"} 
{"target": 1, "idx": 2358, "func": "static int vqa_decode_init(AVCodecContext *avctx) { VqaContext *s = (VqaContext *)avctx->priv_data; unsigned char *vqa_header; int i, j, codebook_index;; s->avctx = avctx; avctx->pix_fmt = PIX_FMT_PAL8; avctx->has_b_frames = 0; dsputil_init(&s->dsp, avctx); /* make sure the extradata made it */ if (s->avctx->extradata_size != VQA_HEADER_SIZE) { av_log(s->avctx, AV_LOG_ERROR, \" VQA video: expected extradata size of %d\\n\", VQA_HEADER_SIZE); /* load up the VQA parameters from the header */ vqa_header = (unsigned char *)s->avctx->extradata; s->vqa_version = vqa_header[0]; s->width = LE_16(&vqa_header[6]); s->height = LE_16(&vqa_header[8]); s->vector_width = vqa_header[10]; s->vector_height = vqa_header[11]; s->partial_count = s->partial_countdown = vqa_header[13]; /* the vector dimensions have to meet very stringent requirements */ if ((s->vector_width != 4) || ((s->vector_height != 2) && (s->vector_height != 4))) { /* return without further initialization */ /* allocate codebooks */ s->codebook_size = MAX_CODEBOOK_SIZE; s->codebook = av_malloc(s->codebook_size); s->next_codebook_buffer = av_malloc(s->codebook_size); /* initialize the solid-color vectors */ if (s->vector_height == 4) { codebook_index = 0xFF00 * 16; for (i = 0; i < 256; i++) for (j = 0; j < 16; j++) s->codebook[codebook_index++] = i; } else { codebook_index = 0xF00 * 8; for (i = 0; i < 256; i++) for (j = 0; j < 8; j++) s->codebook[codebook_index++] = i; s->next_codebook_buffer_index = 0; /* allocate decode buffer */ s->decode_buffer_size = (s->width / s->vector_width) * (s->height / s->vector_height) * 2; s->decode_buffer = av_malloc(s->decode_buffer_size); s->frame.data[0] = NULL; return 0;"} {"target": 0, "idx": 2365, "func": "static int open_output_file(OptionsContext *o, const char *filename) { AVFormatContext *oc; int i, j, err; AVOutputFormat *file_oformat; OutputFile *of; OutputStream *ost; InputStream *ist; AVDictionary *unused_opts = NULL; AVDictionaryEntry *e = NULL; if (configure_complex_filters() < 0) { av_log(NULL, AV_LOG_FATAL, \"Error configuring filters.\\n\"); exit_program(1); } if (o->stop_time != INT64_MAX && o->recording_time != INT64_MAX) { o->stop_time = INT64_MAX; av_log(NULL, AV_LOG_WARNING, \"-t and -to cannot be used together; using -t.\\n\"); } if (o->stop_time != INT64_MAX && o->recording_time == INT64_MAX) { int64_t start_time = o->start_time == AV_NOPTS_VALUE ? 
0 : o->start_time; if (o->stop_time <= start_time) { av_log(NULL, AV_LOG_WARNING, \"-to value smaller than -ss; ignoring -to.\\n\"); o->stop_time = INT64_MAX; } else { o->recording_time = o->stop_time - start_time; } } GROW_ARRAY(output_files, nb_output_files); of = av_mallocz(sizeof(*of)); if (!of) exit_program(1); output_files[nb_output_files - 1] = of; of->ost_index = nb_output_streams; of->recording_time = o->recording_time; of->start_time = o->start_time; of->limit_filesize = o->limit_filesize; of->shortest = o->shortest; av_dict_copy(&of->opts, o->g->format_opts, 0); if (!strcmp(filename, \"-\")) filename = \"pipe:\"; err = avformat_alloc_output_context2(&oc, NULL, o->format, filename); if (!oc) { print_error(filename, err); exit_program(1); } of->ctx = oc; if (o->recording_time != INT64_MAX) oc->duration = o->recording_time; file_oformat= oc->oformat; oc->interrupt_callback = int_cb; /* create streams for all unlabeled output pads */ for (i = 0; i < nb_filtergraphs; i++) { FilterGraph *fg = filtergraphs[i]; for (j = 0; j < fg->nb_outputs; j++) { OutputFilter *ofilter = fg->outputs[j]; if (!ofilter->out_tmp || ofilter->out_tmp->name) continue; switch (avfilter_pad_get_type(ofilter->out_tmp->filter_ctx->output_pads, ofilter->out_tmp->pad_idx)) { case AVMEDIA_TYPE_VIDEO: o->video_disable = 1; break; case AVMEDIA_TYPE_AUDIO: o->audio_disable = 1; break; case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break; } init_output_filter(ofilter, o, oc); } } /* ffserver seeking with date=... needs a date reference */ if (!strcmp(file_oformat->name, \"ffm\") && av_strstart(filename, \"http:\", NULL)) { int err = parse_option(o, \"metadata\", \"creation_time=now\", options); if (err < 0) { print_error(filename, err); exit_program(1); } } if (!strcmp(file_oformat->name, \"ffm\") && !override_ffserver && av_strstart(filename, \"http:\", NULL)) { int j; /* special case for files sent to ffserver: we get the stream parameters from ffserver */ int err = read_ffserver_streams(o, oc, filename); if (err < 0) { print_error(filename, err); exit_program(1); } for(j = nb_output_streams - oc->nb_streams; j < nb_output_streams; j++) { ost = output_streams[j]; for (i = 0; i < nb_input_streams; i++) { ist = input_streams[i]; if(ist->st->codec->codec_type == ost->st->codec->codec_type){ ost->sync_ist= ist; ost->source_index= i; if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) ost->avfilter = av_strdup(\"anull\"); if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) ost->avfilter = av_strdup(\"null\"); ist->discard = 0; ist->st->discard = AVDISCARD_NONE; break; } } if(!ost->sync_ist){ av_log(NULL, AV_LOG_FATAL, \"Missing %s stream which is required by this ffm\\n\", av_get_media_type_string(ost->st->codec->codec_type)); exit_program(1); } } } else if (!o->nb_stream_maps) { char *subtitle_codec_name = NULL; /* pick the \"best\" stream of each type */ /* video: highest resolution */ if (!o->video_disable && oc->oformat->video_codec != AV_CODEC_ID_NONE) { int area = 0, idx = -1; int qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0); for (i = 0; i < nb_input_streams; i++) { int new_area; ist = input_streams[i]; new_area = ist->st->codec->width * ist->st->codec->height; if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)) new_area = 1; if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && new_area > area) { if((qcr==MKTAG('A', 'P', 'I', 'C')) && !(ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)) continue; area = new_area; idx = i; } } if (idx >= 0) 
new_video_stream(o, oc, idx); } /* audio: most channels */ if (!o->audio_disable && oc->oformat->audio_codec != AV_CODEC_ID_NONE) { int channels = 0, idx = -1; for (i = 0; i < nb_input_streams; i++) { ist = input_streams[i]; if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && ist->st->codec->channels > channels) { channels = ist->st->codec->channels; idx = i; } } if (idx >= 0) new_audio_stream(o, oc, idx); } /* subtitles: pick first */ MATCH_PER_TYPE_OPT(codec_names, str, subtitle_codec_name, oc, \"s\"); if (!o->subtitle_disable && (oc->oformat->subtitle_codec != AV_CODEC_ID_NONE || subtitle_codec_name)) { for (i = 0; i < nb_input_streams; i++) if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { new_subtitle_stream(o, oc, i); break; } } /* do something with data? */ } else { for (i = 0; i < o->nb_stream_maps; i++) { StreamMap *map = &o->stream_maps[i]; if (map->disabled) continue; if (map->linklabel) { FilterGraph *fg; OutputFilter *ofilter = NULL; int j, k; for (j = 0; j < nb_filtergraphs; j++) { fg = filtergraphs[j]; for (k = 0; k < fg->nb_outputs; k++) { AVFilterInOut *out = fg->outputs[k]->out_tmp; if (out && !strcmp(out->name, map->linklabel)) { ofilter = fg->outputs[k]; goto loop_end; } } } loop_end: if (!ofilter) { av_log(NULL, AV_LOG_FATAL, \"Output with label '%s' does not exist \" \"in any defined filter graph, or was already used elsewhere.\\n\", map->linklabel); exit_program(1); } init_output_filter(ofilter, o, oc); } else { int src_idx = input_files[map->file_index]->ist_index + map->stream_index; ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index]; if(o->subtitle_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) continue; if(o-> audio_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) continue; if(o-> video_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) continue; if(o-> data_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_DATA) continue; switch (ist->st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: ost = new_video_stream (o, oc, src_idx); break; case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream (o, oc, src_idx); break; case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream (o, oc, src_idx); break; case AVMEDIA_TYPE_DATA: ost = new_data_stream (o, oc, src_idx); break; case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc, src_idx); break; default: av_log(NULL, AV_LOG_FATAL, \"Cannot map stream #%d:%d - unsupported type.\\n\", map->file_index, map->stream_index); exit_program(1); } } } } /* handle attached files */ for (i = 0; i < o->nb_attachments; i++) { AVIOContext *pb; uint8_t *attachment; const char *p; int64_t len; if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) { av_log(NULL, AV_LOG_FATAL, \"Could not open attachment file %s.\\n\", o->attachments[i]); exit_program(1); } if ((len = avio_size(pb)) <= 0) { av_log(NULL, AV_LOG_FATAL, \"Could not get size of the attachment %s.\\n\", o->attachments[i]); exit_program(1); } if (!(attachment = av_malloc(len))) { av_log(NULL, AV_LOG_FATAL, \"Attachment %s too large to fit into memory.\\n\", o->attachments[i]); exit_program(1); } avio_read(pb, attachment, len); ost = new_attachment_stream(o, oc, -1); ost->stream_copy = 0; ost->attachment_filename = o->attachments[i]; ost->finished = 1; ost->st->codec->extradata = attachment; ost->st->codec->extradata_size = len; p = strrchr(o->attachments[i], '/'); av_dict_set(&ost->st->metadata, \"filename\", (p && *p) ? 
p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE); avio_close(pb); } for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file AVDictionaryEntry *e; ost = output_streams[i]; if ((ost->stream_copy || ost->attachment_filename) && (e = av_dict_get(o->g->codec_opts, \"flags\", NULL, AV_DICT_IGNORE_SUFFIX)) && (!e->key[5] || check_stream_specifier(oc, ost->st, e->key+6))) if (av_opt_set(ost->st->codec, \"flags\", e->value, 0) < 0) exit_program(1); } /* check if all codec options have been used */ unused_opts = strip_specifiers(o->g->codec_opts); for (i = of->ost_index; i < nb_output_streams; i++) { e = NULL; while ((e = av_dict_get(output_streams[i]->opts, \"\", e, AV_DICT_IGNORE_SUFFIX))) av_dict_set(&unused_opts, e->key, NULL, 0); } e = NULL; while ((e = av_dict_get(unused_opts, \"\", e, AV_DICT_IGNORE_SUFFIX))) { const AVClass *class = avcodec_get_class(); const AVOption *option = av_opt_find(&class, e->key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ); if (!option) continue; if (!(option->flags & AV_OPT_FLAG_ENCODING_PARAM)) { av_log(NULL, AV_LOG_ERROR, \"Codec AVOption %s (%s) specified for \" \"output file #%d (%s) is not an encoding option.\\n\", e->key, option->help ? option->help : \"\", nb_output_files - 1, filename); exit_program(1); } // gop_timecode is injected by generic code but not always used if (!strcmp(e->key, \"gop_timecode\")) continue; av_log(NULL, AV_LOG_WARNING, \"Codec AVOption %s (%s) specified for \" \"output file #%d (%s) has not been used for any stream. The most \" \"likely reason is either wrong type (e.g. a video option with \" \"no video streams) or that it is a private option of some encoder \" \"which was not actually used for any stream.\\n\", e->key, option->help ? option->help : \"\", nb_output_files - 1, filename); } av_dict_free(&unused_opts); /* check filename in case of an image number is expected */ if (oc->oformat->flags & AVFMT_NEEDNUMBER) { if (!av_filename_number_test(oc->filename)) { print_error(oc->filename, AVERROR(EINVAL)); exit_program(1); } } if (!(oc->oformat->flags & AVFMT_NOFILE)) { /* test if it already exists to avoid losing precious files */ assert_file_overwrite(filename); /* open the file */ if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE, &oc->interrupt_callback, &of->opts)) < 0) { print_error(filename, err); exit_program(1); } } else if (strcmp(oc->oformat->name, \"image2\")==0 && !av_filename_number_test(filename)) assert_file_overwrite(filename); if (o->mux_preload) { uint8_t buf[64]; snprintf(buf, sizeof(buf), \"%d\", (int)(o->mux_preload*AV_TIME_BASE)); av_dict_set(&of->opts, \"preload\", buf, 0); } oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE); /* copy metadata */ for (i = 0; i < o->nb_metadata_map; i++) { char *p; int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0); if (in_file_index >= nb_input_files) { av_log(NULL, AV_LOG_FATAL, \"Invalid input file index %d while processing metadata maps\\n\", in_file_index); exit_program(1); } copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, in_file_index >= 0 ? 
input_files[in_file_index]->ctx : NULL, o); } /* copy chapters */ if (o->chapters_input_file >= nb_input_files) { if (o->chapters_input_file == INT_MAX) { /* copy chapters from the first input file that has them*/ o->chapters_input_file = -1; for (i = 0; i < nb_input_files; i++) if (input_files[i]->ctx->nb_chapters) { o->chapters_input_file = i; break; } } else { av_log(NULL, AV_LOG_FATAL, \"Invalid input file index %d in chapter mapping.\\n\", o->chapters_input_file); exit_program(1); } } if (o->chapters_input_file >= 0) copy_chapters(input_files[o->chapters_input_file], of, !o->metadata_chapters_manual); /* copy global metadata by default */ if (!o->metadata_global_manual && nb_input_files){ av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata, AV_DICT_DONT_OVERWRITE); if(o->recording_time != INT64_MAX) av_dict_set(&oc->metadata, \"duration\", NULL, 0); av_dict_set(&oc->metadata, \"creation_time\", NULL, 0); } if (!o->metadata_streams_manual) for (i = of->ost_index; i < nb_output_streams; i++) { InputStream *ist; if (output_streams[i]->source_index < 0) /* this is true e.g. for attached files */ continue; ist = input_streams[output_streams[i]->source_index]; av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE); } /* process manually set metadata */ for (i = 0; i < o->nb_metadata; i++) { AVDictionary **m; char type, *val; const char *stream_spec; int index = 0, j, ret = 0; val = strchr(o->metadata[i].u.str, '='); if (!val) { av_log(NULL, AV_LOG_FATAL, \"No '=' character in metadata string %s.\\n\", o->metadata[i].u.str); exit_program(1); } *val++ = 0; parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec); if (type == 's') { for (j = 0; j < oc->nb_streams; j++) { if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) { av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0); } else if (ret < 0) exit_program(1); } } else { switch (type) { case 'g': m = &oc->metadata; break; case 'c': if (index < 0 || index >= oc->nb_chapters) { av_log(NULL, AV_LOG_FATAL, \"Invalid chapter index %d in metadata specifier.\\n\", index); exit_program(1); } m = &oc->chapters[index]->metadata; break; default: av_log(NULL, AV_LOG_FATAL, \"Invalid metadata specifier %s.\\n\", o->metadata[i].specifier); exit_program(1); } av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0); } } return 0; }"} {"target": 1, "idx": 2395, "func": "static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, unsigned int imms, unsigned int immr) { uint64_t mask; unsigned e, levels, s, r; int len; assert(immn < 2 && imms < 64 && immr < 64); /* The bit patterns we create here are 64 bit patterns which * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or * 64 bits each. Each element contains the same value: a run * of between 1 and e-1 non-zero bits, rotated within the * element by between 0 and e-1 bits. * * The element size and run length are encoded into immn (1 bit) * and imms (6 bits) as follows: * 64 bit elements: immn = 1, imms = * 32 bit elements: immn = 0, imms = 0 : * 16 bit elements: immn = 0, imms = 10 : * 8 bit elements: immn = 0, imms = 110 : * 4 bit elements: immn = 0, imms = 1110 : * 2 bit elements: immn = 0, imms = 11110 : * Notice that immn = 0, imms = 11111x is the only combination * not covered by one of the above options; this is reserved. * Further, all-ones is a reserved pattern. * * In all cases the rotation is by immr % e (and immr is 6 bits). 
*/ /* First determine the element size */ len = 31 - clz32((immn << 6) | (~imms & 0x3f)); if (len < 1) { /* This is the immn == 0, imms == 0x11111x case */ return false; } e = 1 << len; levels = e - 1; s = imms & levels; r = immr & levels; if (s == levels) { /* mustn't be all-ones. */ return false; } /* Create the value of one element: s+1 set bits rotated * by r within the element (which is e bits wide)... */ mask = bitmask64(s + 1); mask = (mask >> r) | (mask << (e - r)); /* ...then replicate the element over the whole 64 bit value */ mask = bitfield_replicate(mask, e); *result = mask; return true; }"} {"target": 1, "idx": 2400, "func": "static int add_candidate_ref(HEVCContext *s, RefPicList *list, int poc, int ref_flag) { HEVCFrame *ref = find_ref_idx(s, poc); if (ref == s->ref) return AVERROR_INVALIDDATA; if (!ref) { ref = generate_missing_ref(s, poc); if (!ref) return AVERROR(ENOMEM); } list->list[list->nb_refs] = ref->poc; list->ref[list->nb_refs] = ref; list->nb_refs++; mark_ref(ref, ref_flag); return 0; }"} {"target": 1, "idx": 2415, "func": "static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn) { if (r < 0 || r > 15) { fprintf(stderr, \"wrong register write $p%d\\n\", r); } if (r == PR_BZ || r == PR_WZ || r == PR_DZ) { return; } else if (r == PR_SRS) { tcg_gen_andi_tl(cpu_PR[r], tn, 3); } else { if (r == PR_PID) { gen_helper_tlb_flush_pid(cpu_env, tn); } if (dc->tb_flags & S_FLAG && r == PR_SPC) { gen_helper_spc_write(cpu_env, tn); } else if (r == PR_CCS) { dc->cpustate_changed = 1; } tcg_gen_mov_tl(cpu_PR[r], tn); } }"} {"target": 1, "idx": 2424, "func": "static int vorbis_parse_id_hdr(vorbis_context *vc){ GetBitContext *gb=&vc->gb; uint_fast8_t bl0, bl1; if ((get_bits(gb, 8)!='v') || (get_bits(gb, 8)!='o') || (get_bits(gb, 8)!='r') || (get_bits(gb, 8)!='b') || (get_bits(gb, 8)!='i') || (get_bits(gb, 8)!='s')) { av_log(vc->avccontext, AV_LOG_ERROR, \" Vorbis id header packet corrupt (no vorbis signature). \\n\"); return 1; } vc->version=get_bits_long(gb, 32); //FIXME check 0 vc->audio_channels=get_bits(gb, 8); //FIXME check >0 vc->audio_samplerate=get_bits_long(gb, 32); //FIXME check >0 vc->bitrate_maximum=get_bits_long(gb, 32); vc->bitrate_nominal=get_bits_long(gb, 32); vc->bitrate_minimum=get_bits_long(gb, 32); bl0=get_bits(gb, 4); bl1=get_bits(gb, 4); vc->blocksize[0]=(1<blocksize[1]=(1<13 || bl0<6 || bl1>13 || bl1<6 || bl1avccontext, AV_LOG_ERROR, \" Vorbis id header packet corrupt (illegal blocksize). \\n\"); return 3; } // output format int16 if (vc->blocksize[1]/2 * vc->audio_channels * 2 > AVCODEC_MAX_AUDIO_FRAME_SIZE) { av_log(vc->avccontext, AV_LOG_ERROR, \"Vorbis channel count makes \" \"output packets too large.\\n\"); return 4; } vc->win[0]=ff_vorbis_vwin[bl0-6]; vc->win[1]=ff_vorbis_vwin[bl1-6]; if(vc->exp_bias){ int i, j; for(j=0; j<2; j++){ float *win = av_malloc(vc->blocksize[j]/2 * sizeof(float)); for(i=0; iblocksize[j]/2; i++) win[i] = vc->win[j][i] * (1<<15); vc->win[j] = win; } } if ((get_bits1(gb)) == 0) { av_log(vc->avccontext, AV_LOG_ERROR, \" Vorbis id header packet corrupt (framing flag not set). 
\\n\"); return 2; } vc->channel_residues= av_malloc((vc->blocksize[1]/2)*vc->audio_channels * sizeof(float)); vc->channel_floors = av_malloc((vc->blocksize[1]/2)*vc->audio_channels * sizeof(float)); vc->saved = av_mallocz((vc->blocksize[1]/2)*vc->audio_channels * sizeof(float)); vc->ret = av_malloc((vc->blocksize[1]/2)*vc->audio_channels * sizeof(float)); vc->buf = av_malloc( vc->blocksize[1] * sizeof(float)); vc->buf_tmp = av_malloc( vc->blocksize[1] * sizeof(float)); vc->previous_window=0; ff_mdct_init(&vc->mdct[0], bl0, 1); ff_mdct_init(&vc->mdct[1], bl1, 1); AV_DEBUG(\" vorbis version %d \\n audio_channels %d \\n audio_samplerate %d \\n bitrate_max %d \\n bitrate_nom %d \\n bitrate_min %d \\n blk_0 %d blk_1 %d \\n \", vc->version, vc->audio_channels, vc->audio_samplerate, vc->bitrate_maximum, vc->bitrate_nominal, vc->bitrate_minimum, vc->blocksize[0], vc->blocksize[1]); /* BLK=vc->blocksize[0]; for(i=0;iwin[0][i]=sin(0.5*3.14159265358*(sin(((float)i+0.5)/(float)BLK*3.14159265358))*(sin(((float)i+0.5)/(float)BLK*3.14159265358))); } */ return 0; }"} {"target": 0, "idx": 2429, "func": "void ff_avg_h264_qpel4_mc31_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_hv_qrt_and_aver_dst_4x4_msa(src - 2, src - (stride * 2) + sizeof(uint8_t), stride, dst, stride); }"} {"target": 1, "idx": 2451, "func": "static int net_vhost_user_init(NetClientState *peer, const char *device, const char *name, CharDriverState *chr, int queues) { NetClientState *nc; VhostUserState *s; int i; for (i = 0; i < queues; i++) { nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name); snprintf(nc->info_str, sizeof(nc->info_str), \"vhost-user%d to %s\", i, chr->label); nc->queue_index = i; s = DO_UPCAST(VhostUserState, nc, nc); s->chr = chr; } qemu_chr_add_handlers(chr, NULL, NULL, net_vhost_user_event, (void*)name); return 0; }"} {"target": 0, "idx": 2466, "func": "static int flv_probe(AVProbeData *p) { const uint8_t *d; if (p->buf_size < 6) return 0; d = p->buf; if (d[0] == 'F' && d[1] == 'L' && d[2] == 'V' && d[3] < 5 && d[5]==0) { return AVPROBE_SCORE_MAX; } return 0; }"} {"target": 0, "idx": 2471, "func": "static void scsi_read_data(SCSIDevice *d, uint32_t tag) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); SCSIDiskReq *r; r = scsi_find_request(s, tag); if (!r) { BADF(\"Bad read tag 0x%x\\n\", tag); /* ??? This is the wrong error. 
*/ scsi_command_complete(r, CHECK_CONDITION, HARDWARE_ERROR); return; } /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); scsi_read_request(r); }"} {"target": 0, "idx": 2502, "func": "static void opt_output_file(void *optctx, const char *filename) { OptionsContext *o = optctx; AVFormatContext *oc; int i, err; AVOutputFormat *file_oformat; OutputStream *ost; InputStream *ist; if (!strcmp(filename, \"-\")) filename = \"pipe:\"; oc = avformat_alloc_context(); if (!oc) { print_error(filename, AVERROR(ENOMEM)); exit_program(1); } if (last_asked_format) { file_oformat = av_guess_format(last_asked_format, NULL, NULL); if (!file_oformat) { fprintf(stderr, \"Requested output format '%s' is not a suitable output format\\n\", last_asked_format); exit_program(1); } last_asked_format = NULL; } else { file_oformat = av_guess_format(NULL, filename, NULL); if (!file_oformat) { fprintf(stderr, \"Unable to find a suitable output format for '%s'\\n\", filename); exit_program(1); } } oc->oformat = file_oformat; av_strlcpy(oc->filename, filename, sizeof(oc->filename)); if (!strcmp(file_oformat->name, \"ffm\") && av_strstart(filename, \"http:\", NULL)) { /* special case for files sent to avserver: we get the stream parameters from avserver */ int err = read_avserver_streams(oc, filename); if (err < 0) { print_error(filename, err); exit_program(1); } } else if (!o->nb_stream_maps) { /* pick the \"best\" stream of each type */ #define NEW_STREAM(type, index)\\ if (index >= 0) {\\ ost = new_ ## type ## _stream(oc);\\ ost->source_index = index;\\ ost->sync_ist = &input_streams[index];\\ input_streams[index].discard = 0;\\ } /* video: highest resolution */ if (!video_disable && oc->oformat->video_codec != CODEC_ID_NONE) { int area = 0, idx = -1; for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ist->st->codec->width * ist->st->codec->height > area) { area = ist->st->codec->width * ist->st->codec->height; idx = i; } } NEW_STREAM(video, idx); } /* audio: most channels */ if (!audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) { int channels = 0, idx = -1; for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && ist->st->codec->channels > channels) { channels = ist->st->codec->channels; idx = i; } } NEW_STREAM(audio, idx); } /* subtitles: pick first */ if (!subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) { for (i = 0; i < nb_input_streams; i++) if (input_streams[i].st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { NEW_STREAM(subtitle, i); break; } } /* do something with data? 
*/ } else { for (i = 0; i < o->nb_stream_maps; i++) { StreamMap *map = &o->stream_maps[i]; if (map->disabled) continue; ist = &input_streams[input_files[map->file_index].ist_index + map->stream_index]; switch (ist->st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(oc); break; case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(oc); break; case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(oc); break; case AVMEDIA_TYPE_DATA: ost = new_data_stream(oc); break; default: av_log(NULL, AV_LOG_ERROR, \"Cannot map stream #%d.%d - unsupported type.\\n\", map->file_index, map->stream_index); exit_program(1); } ost->source_index = input_files[map->file_index].ist_index + map->stream_index; ost->sync_ist = &input_streams[input_files[map->sync_file_index].ist_index + map->sync_stream_index]; ist->discard = 0; } } av_dict_copy(&oc->metadata, metadata, 0); av_dict_free(&metadata); output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1); output_files[nb_output_files - 1].ctx = oc; output_files[nb_output_files - 1].ost_index = nb_output_streams - oc->nb_streams; output_files[nb_output_files - 1].recording_time = o->recording_time; output_files[nb_output_files - 1].start_time = o->start_time; output_files[nb_output_files - 1].limit_filesize = limit_filesize; av_dict_copy(&output_files[nb_output_files - 1].opts, format_opts, 0); /* check filename in case of an image number is expected */ if (oc->oformat->flags & AVFMT_NEEDNUMBER) { if (!av_filename_number_test(oc->filename)) { print_error(oc->filename, AVERROR(EINVAL)); exit_program(1); } } if (!(oc->oformat->flags & AVFMT_NOFILE)) { /* test if it already exists to avoid loosing precious files */ if (!file_overwrite && (strchr(filename, ':') == NULL || filename[1] == ':' || av_strstart(filename, \"file:\", NULL))) { if (avio_check(filename, 0) == 0) { if (!using_stdin) { fprintf(stderr,\"File '%s' already exists. Overwrite ? [y/N] \", filename); fflush(stderr); if (!read_yesno()) { fprintf(stderr, \"Not overwriting - exiting\\n\"); exit_program(1); } } else { fprintf(stderr,\"File '%s' already exists. 
Exiting.\\n\", filename); exit_program(1); } } } /* open the file */ if ((err = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE)) < 0) { print_error(filename, err); exit_program(1); } } oc->preload= (int)(mux_preload*AV_TIME_BASE); oc->max_delay= (int)(mux_max_delay*AV_TIME_BASE); oc->flags |= AVFMT_FLAG_NONBLOCK; /* copy chapters */ if (chapters_input_file >= nb_input_files) { if (chapters_input_file == INT_MAX) { /* copy chapters from the first input file that has them*/ chapters_input_file = -1; for (i = 0; i < nb_input_files; i++) if (input_files[i].ctx->nb_chapters) { chapters_input_file = i; break; } } else { av_log(NULL, AV_LOG_ERROR, \"Invalid input file index %d in chapter mapping.\\n\", chapters_input_file); exit_program(1); } } if (chapters_input_file >= 0) copy_chapters(&input_files[chapters_input_file], &output_files[nb_output_files - 1]); /* copy metadata */ for (i = 0; i < nb_meta_data_maps; i++) { AVFormatContext *files[2]; AVDictionary **meta[2]; int j; #define METADATA_CHECK_INDEX(index, nb_elems, desc)\\ if ((index) < 0 || (index) >= (nb_elems)) {\\ av_log(NULL, AV_LOG_ERROR, \"Invalid %s index %d while processing metadata maps\\n\",\\ (desc), (index));\\ exit_program(1);\\ } int in_file_index = meta_data_maps[i][1].file; if (in_file_index < 0) continue; METADATA_CHECK_INDEX(in_file_index, nb_input_files, \"input file\") files[0] = oc; files[1] = input_files[in_file_index].ctx; for (j = 0; j < 2; j++) { MetadataMap *map = &meta_data_maps[i][j]; switch (map->type) { case 'g': meta[j] = &files[j]->metadata; break; case 's': METADATA_CHECK_INDEX(map->index, files[j]->nb_streams, \"stream\") meta[j] = &files[j]->streams[map->index]->metadata; break; case 'c': METADATA_CHECK_INDEX(map->index, files[j]->nb_chapters, \"chapter\") meta[j] = &files[j]->chapters[map->index]->metadata; break; case 'p': METADATA_CHECK_INDEX(map->index, files[j]->nb_programs, \"program\") meta[j] = &files[j]->programs[map->index]->metadata; break; } } av_dict_copy(meta[0], *meta[1], AV_DICT_DONT_OVERWRITE); } /* copy global metadata by default */ if (metadata_global_autocopy && nb_input_files) av_dict_copy(&oc->metadata, input_files[0].ctx->metadata, AV_DICT_DONT_OVERWRITE); if (metadata_streams_autocopy) for (i = output_files[nb_output_files - 1].ost_index; i < nb_output_streams; i++) { InputStream *ist = &input_streams[output_streams[i].source_index]; av_dict_copy(&output_streams[i].st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE); } frame_rate = (AVRational){0, 0}; frame_width = 0; frame_height = 0; audio_sample_rate = 0; audio_channels = 0; audio_sample_fmt = AV_SAMPLE_FMT_NONE; chapters_input_file = INT_MAX; limit_filesize = UINT64_MAX; av_freep(&meta_data_maps); nb_meta_data_maps = 0; metadata_global_autocopy = 1; metadata_streams_autocopy = 1; metadata_chapters_autocopy = 1; av_freep(&streamid_map); nb_streamid_map = 0; av_dict_free(&codec_names); av_freep(&forced_key_frames); reset_options(o); }"} {"target": 0, "idx": 2523, "func": "static int spawn_thread(void) { pthread_attr_t attr; int ret; cur_threads++; idle_threads++; pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); ret = pthread_create(&thread_id, &attr, aio_thread, NULL); pthread_attr_destroy(&attr); return ret; }"} {"target": 0, "idx": 2534, "func": "static int read_gab2_sub(AVStream *st, AVPacket *pkt) { if (pkt->size >= 7 && !strcmp(pkt->data, \"GAB2\") && AV_RL16(pkt->data + 5) == 2) { uint8_t desc[256]; int score = AVPROBE_SCORE_EXTENSION, ret; AVIStream *ast = st->priv_data; 
AVInputFormat *sub_demuxer; AVRational time_base; AVIOContext *pb = avio_alloc_context(pkt->data + 7, pkt->size - 7, 0, NULL, NULL, NULL, NULL); AVProbeData pd; unsigned int desc_len = avio_rl32(pb); if (desc_len > pb->buf_end - pb->buf_ptr) goto error; ret = avio_get_str16le(pb, desc_len, desc, sizeof(desc)); avio_skip(pb, desc_len - ret); if (*desc) av_dict_set(&st->metadata, \"title\", desc, 0); avio_rl16(pb); /* flags? */ avio_rl32(pb); /* data size */ pd = (AVProbeData) { .buf = pb->buf_ptr, .buf_size = pb->buf_end - pb->buf_ptr }; if (!(sub_demuxer = av_probe_input_format2(&pd, 1, &score))) goto error; if (!(ast->sub_ctx = avformat_alloc_context())) goto error; ast->sub_ctx->pb = pb; if (!avformat_open_input(&ast->sub_ctx, \"\", sub_demuxer, NULL)) { ff_read_packet(ast->sub_ctx, &ast->sub_pkt); *st->codec = *ast->sub_ctx->streams[0]->codec; ast->sub_ctx->streams[0]->codec->extradata = NULL; time_base = ast->sub_ctx->streams[0]->time_base; avpriv_set_pts_info(st, 64, time_base.num, time_base.den); } ast->sub_buffer = pkt->data; memset(pkt, 0, sizeof(*pkt)); return 1; error: av_freep(&pb); } return 0; }"} {"target": 0, "idx": 2535, "func": "bool machine_iommu(MachineState *machine) { return machine->iommu; }"} {"target": 0, "idx": 2538, "func": "static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn) { uint32_t rd; TCGv tmp, tmp2; /* M profile cores use memory mapped registers instead of cp15. */ if (arm_feature(env, ARM_FEATURE_M)) return 1; if ((insn & (1 << 25)) == 0) { if (insn & (1 << 20)) { /* mrrc */ return 1; } /* mcrr. Used for block cache operations, so implement as no-op. */ return 0; } if ((insn & (1 << 4)) == 0) { /* cdp */ return 1; } if (IS_USER(s) && !cp15_user_ok(insn)) { return 1; } if ((insn & 0x0fff0fff) == 0x0e070f90 || (insn & 0x0fff0fff) == 0x0e070f58) { /* Wait for interrupt. */ gen_set_pc_im(s->pc); s->is_jmp = DISAS_WFI; return 0; } rd = (insn >> 12) & 0xf; if (cp15_tls_load_store(env, s, insn, rd)) return 0; tmp2 = tcg_const_i32(insn); if (insn & ARM_CP_RW_BIT) { tmp = new_tmp(); gen_helper_get_cp15(tmp, cpu_env, tmp2); /* If the destination register is r15 then sets condition codes. */ if (rd != 15) store_reg(s, rd, tmp); else dead_tmp(tmp); } else { tmp = load_reg(s, rd); gen_helper_set_cp15(cpu_env, tmp2, tmp); dead_tmp(tmp); /* Normally we would always end the TB here, but Linux * arch/arm/mach-pxa/sleep.S expects two instructions following * an MMU enable to execute from cache. Imitate this behaviour. 
*/ if (!arm_feature(env, ARM_FEATURE_XSCALE) || (insn & 0x0fff0fff) != 0x0e010f10) gen_lookup_tb(s); } tcg_temp_free_i32(tmp2); return 0; }"} {"target": 0, "idx": 2564, "func": "static CharDriverState *vc_init(const char *id, ChardevBackend *backend, ChardevReturn *ret, Error **errp) { return vc_handler(backend->u.vc, errp); }"} {"target": 0, "idx": 2583, "func": "USBDevice *usb_host_device_open(const char *devname) { int fd = -1, ret; USBHostDevice *dev = NULL; struct usbdevfs_connectinfo ci; char buf[1024]; int bus_num, addr; char product_name[PRODUCT_NAME_SZ]; dev = qemu_mallocz(sizeof(USBHostDevice)); if (!dev) goto fail; #ifdef DEBUG_ISOCH printf(\"usb_host_device_open %s\\n\", devname); #endif if (usb_host_find_device(&bus_num, &addr, product_name, sizeof(product_name), devname) < 0) return NULL; snprintf(buf, sizeof(buf), USBDEVFS_PATH \"/%03d/%03d\", bus_num, addr); fd = open(buf, O_RDWR | O_NONBLOCK); if (fd < 0) { perror(buf); return NULL; } /* read the device description */ dev->descr_len = read(fd, dev->descr, sizeof(dev->descr)); if (dev->descr_len <= 0) { perror(\"usb_host_device_open: reading device data failed\"); goto fail; } #ifdef DEBUG { int x; printf(\"=== begin dumping device descriptor data ===\\n\"); for (x = 0; x < dev->descr_len; x++) printf(\"%02x \", dev->descr[x]); printf(\"\\n=== end dumping device descriptor data ===\\n\"); } #endif dev->fd = fd; dev->configuration = 1; /* XXX - do something about initial configuration */ if (!usb_host_update_interfaces(dev, 1)) goto fail; ret = ioctl(fd, USBDEVFS_CONNECTINFO, &ci); if (ret < 0) { perror(\"usb_host_device_open: USBDEVFS_CONNECTINFO\"); goto fail; } #ifdef DEBUG printf(\"host USB device %d.%d grabbed\\n\", bus_num, addr); #endif ret = usb_linux_update_endp_table(dev); if (ret) goto fail; if (ci.slow) dev->dev.speed = USB_SPEED_LOW; else dev->dev.speed = USB_SPEED_HIGH; dev->dev.handle_packet = usb_generic_handle_packet; dev->dev.handle_reset = usb_host_handle_reset; dev->dev.handle_control = usb_host_handle_control; dev->dev.handle_data = usb_host_handle_data; dev->dev.handle_destroy = usb_host_handle_destroy; if (product_name[0] == '\\0') snprintf(dev->dev.devname, sizeof(dev->dev.devname), \"host:%s\", devname); else pstrcpy(dev->dev.devname, sizeof(dev->dev.devname), product_name); #ifdef USE_ASYNCIO /* set up the signal handlers */ sigemptyset(&sigact.sa_mask); sigact.sa_sigaction = isoch_done; sigact.sa_flags = SA_SIGINFO; sigact.sa_restorer = 0; ret = sigaction(SIG_ISOCOMPLETE, &sigact, NULL); if (ret < 0) { perror(\"usb_host_device_open: sigaction failed\"); goto fail; } if (pipe(dev->pipe_fds) < 0) { perror(\"usb_host_device_open: pipe creation failed\"); goto fail; } fcntl(dev->pipe_fds[0], F_SETFL, O_NONBLOCK | O_ASYNC); fcntl(dev->pipe_fds[1], F_SETFL, O_NONBLOCK); qemu_set_fd_handler(dev->pipe_fds[0], urb_completion_pipe_read, NULL, dev); #endif dev->urbs_ready = 0; return (USBDevice *)dev; fail: if (dev) qemu_free(dev); close(fd); return NULL; }"} {"target": 0, "idx": 2594, "func": "uint64_t helper_frsp(CPUPPCState *env, uint64_t arg) { CPU_DoubleU farg; float32 f32; farg.ll = arg; if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN square root */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); } f32 = float64_to_float32(farg.d, &env->fp_status); farg.d = float32_to_float64(f32, &env->fp_status); return farg.ll; }"} {"target": 0, "idx": 2602, "func": "int gen_new_label(void) { TCGContext *s = &tcg_ctx; int idx; TCGLabel *l; if (s->nb_labels >= TCG_MAX_LABELS) tcg_abort(); idx = 
s->nb_labels++; l = &s->labels[idx]; l->has_value = 0; l->u.first_reloc = NULL; return idx; }"} {"target": 0, "idx": 2604, "func": "void bdrv_round_to_clusters(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int64_t *cluster_sector_num, int *cluster_nb_sectors) { BlockDriverInfo bdi; if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { *cluster_sector_num = sector_num; *cluster_nb_sectors = nb_sectors; } else { int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE; *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c); *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num + nb_sectors, c); } }"} {"target": 1, "idx": 2620, "func": "void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) { CPUArchState *env; int mmu_idx; assert_cpu_is_self(cpu); env = cpu->env_ptr; for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { unsigned int i; for (i = 0; i < CPU_TLB_SIZE; i++) { tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], start1, length); } for (i = 0; i < CPU_VTLB_SIZE; i++) { tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i], start1, length); } } }"} {"target": 1, "idx": 2622, "func": "blkdebug_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) { BDRVBlkdebugState *s = bs->opaque; BlkdebugRule *rule = NULL; QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) { uint64_t inject_offset = rule->options.inject.offset; if (inject_offset == -1 || (inject_offset >= offset && inject_offset < offset + bytes)) { break; if (rule && rule->options.inject.error) { return inject_error(bs, rule); return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);"} {"target": 1, "idx": 2624, "func": "static int idreg_init1(SysBusDevice *dev) { IDRegState *s = MACIO_ID_REGISTER(dev); memory_region_init_ram(&s->mem, OBJECT(s), \"sun4m.idreg\", sizeof(idreg_data), &error_abort); vmstate_register_ram_global(&s->mem); memory_region_set_readonly(&s->mem, true); sysbus_init_mmio(dev, &s->mem); return 0; }"} {"target": 0, "idx": 2628, "func": "static inline void tcg_out_ld_ptr(TCGContext *s, int ret, tcg_target_long arg) { #if defined(__sparc_v9__) && !defined(__sparc_v8plus__) if (arg != (arg & 0xffffffff)) fprintf(stderr, \"unimplemented %s with offset %ld\\n\", __func__, arg); if (arg != (arg & 0xfff)) tcg_out32(s, SETHI | INSN_RD(ret) | (((uint32_t)arg & 0xfffffc00) >> 10)); tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(ret) | INSN_IMM13(arg & 0x3ff)); #else tcg_out_ld_raw(s, ret, arg); #endif }"} {"target": 0, "idx": 2632, "func": "int xen_config_dev_blk(DriveInfo *disk) { char fe[256], be[256]; int vdev = 202 * 256 + 16 * disk->unit; int cdrom = disk->bdrv->type == BDRV_TYPE_CDROM; const char *devtype = cdrom ? \"cdrom\" : \"disk\"; const char *mode = cdrom ? 
\"r\" : \"w\"; snprintf(disk->bdrv->device_name, sizeof(disk->bdrv->device_name), \"xvd%c\", 'a' + disk->unit); xen_be_printf(NULL, 1, \"config disk %d [%s]: %s\\n\", disk->unit, disk->bdrv->device_name, disk->bdrv->filename); xen_config_dev_dirs(\"vbd\", \"qdisk\", vdev, fe, be, sizeof(fe)); /* frontend */ xenstore_write_int(fe, \"virtual-device\", vdev); xenstore_write_str(fe, \"device-type\", devtype); /* backend */ xenstore_write_str(be, \"dev\", disk->bdrv->device_name); xenstore_write_str(be, \"type\", \"file\"); xenstore_write_str(be, \"params\", disk->bdrv->filename); xenstore_write_str(be, \"mode\", mode); /* common stuff */ return xen_config_dev_all(fe, be); }"} {"target": 0, "idx": 2634, "func": "static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu, TranslationBlock *tb, int search_pc) { CPUState *cs = CPU(cpu); struct DisasContext ctx, *dc = &ctx; uint16_t *gen_opc_end; uint32_t pc_start; int j, k; uint32_t next_page_start; int num_insns; int max_insns; pc_start = tb->pc; dc->tb = tb; gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE; dc->is_jmp = DISAS_NEXT; dc->ppc = pc_start; dc->pc = pc_start; dc->flags = cpu->env.cpucfgr; dc->mem_idx = cpu_mmu_index(&cpu->env); dc->synced_flags = dc->tb_flags = tb->flags; dc->delayed_branch = !!(dc->tb_flags & D_FLAG); dc->singlestep_enabled = cs->singlestep_enabled; if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log(\"-----------------------------------------\\n\"); log_cpu_state(CPU(cpu), 0); } next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; k = -1; num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) { max_insns = CF_COUNT_MASK; } gen_tb_start(); do { check_breakpoint(cpu, dc); if (search_pc) { j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; if (k < j) { k++; while (k < j) { tcg_ctx.gen_opc_instr_start[k++] = 0; } } tcg_ctx.gen_opc_pc[k] = dc->pc; tcg_ctx.gen_opc_instr_start[k] = 1; tcg_ctx.gen_opc_icount[k] = num_insns; } if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { tcg_gen_debug_insn_start(dc->pc); } if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) { gen_io_start(); } dc->ppc = dc->pc - 4; dc->npc = dc->pc + 4; tcg_gen_movi_tl(cpu_ppc, dc->ppc); tcg_gen_movi_tl(cpu_npc, dc->npc); disas_openrisc_insn(dc, cpu); dc->pc = dc->npc; num_insns++; /* delay slot */ if (dc->delayed_branch) { dc->delayed_branch--; if (!dc->delayed_branch) { dc->tb_flags &= ~D_FLAG; gen_sync_flags(dc); tcg_gen_mov_tl(cpu_pc, jmp_pc); tcg_gen_mov_tl(cpu_npc, jmp_pc); tcg_gen_movi_tl(jmp_pc, 0); tcg_gen_exit_tb(0); dc->is_jmp = DISAS_JUMP; break; } } } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end && !cs->singlestep_enabled && !singlestep && (dc->pc < next_page_start) && num_insns < max_insns); if (tb->cflags & CF_LAST_IO) { gen_io_end(); } if (dc->is_jmp == DISAS_NEXT) { dc->is_jmp = DISAS_UPDATE; tcg_gen_movi_tl(cpu_pc, dc->pc); } if (unlikely(cs->singlestep_enabled)) { if (dc->is_jmp == DISAS_NEXT) { tcg_gen_movi_tl(cpu_pc, dc->pc); } gen_exception(dc, EXCP_DEBUG); } else { switch (dc->is_jmp) { case DISAS_NEXT: gen_goto_tb(dc, 0, dc->pc); break; default: case DISAS_JUMP: break; case DISAS_UPDATE: /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(0); break; case DISAS_TB_JUMP: /* nothing more to generate */ break; } } gen_tb_end(tb, num_insns); *tcg_ctx.gen_opc_ptr = INDEX_op_end; if (search_pc) { j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; k++; while (k <= j) { tcg_ctx.gen_opc_instr_start[k++] = 0; } } else { tb->size = 
dc->pc - pc_start; tb->icount = num_insns; } #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log(\"\\n\"); log_target_disas(&cpu->env, pc_start, dc->pc - pc_start, 0); qemu_log(\"\\nisize=%d osize=%td\\n\", dc->pc - pc_start, tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf); } #endif }"} {"target": 1, "idx": 2649, "func": "static void pci_ivshmem_realize(PCIDevice *dev, Error **errp) { IVShmemState *s = IVSHMEM(dev); Error *err = NULL; uint8_t *pci_conf; uint8_t attr = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_PREFETCH; if (!!s->server_chr + !!s->shmobj + !!s->hostmem != 1) { error_setg(errp, \"You must specify either 'shm', 'chardev' or 'x-memdev'\"); return; } if (s->hostmem) { MemoryRegion *mr; if (s->sizearg) { g_warning(\"size argument ignored with hostmem\"); } mr = host_memory_backend_get_memory(s->hostmem, &error_abort); s->ivshmem_size = memory_region_size(mr); } else if (s->sizearg == NULL) { s->ivshmem_size = 4 << 20; /* 4 MB default */ } else { char *end; int64_t size = qemu_strtosz(s->sizearg, &end); if (size < 0 || *end != '\\0' || !is_power_of_2(size)) { error_setg(errp, \"Invalid size %s\", s->sizearg); return; } s->ivshmem_size = size; } /* IRQFD requires MSI */ if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) && !ivshmem_has_feature(s, IVSHMEM_MSI)) { error_setg(errp, \"ioeventfd/irqfd requires MSI\"); return; } /* check that role is reasonable */ if (s->role) { if (strncmp(s->role, \"peer\", 5) == 0) { s->role_val = IVSHMEM_PEER; } else if (strncmp(s->role, \"master\", 7) == 0) { s->role_val = IVSHMEM_MASTER; } else { error_setg(errp, \"'role' must be 'peer' or 'master'\"); return; } } else { s->role_val = IVSHMEM_MASTER; /* default */ } pci_conf = dev->config; pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY; /* * Note: we don't use INTx with IVSHMEM_MSI at all, so this is a * bald-faced lie then. But it's a backwards compatible lie. 
*/ pci_config_set_interrupt_pin(pci_conf, 1); memory_region_init_io(&s->ivshmem_mmio, OBJECT(s), &ivshmem_mmio_ops, s, \"ivshmem-mmio\", IVSHMEM_REG_BAR_SIZE); /* region for registers*/ pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->ivshmem_mmio); memory_region_init(&s->bar, OBJECT(s), \"ivshmem-bar2-container\", s->ivshmem_size); if (s->ivshmem_64bit) { attr |= PCI_BASE_ADDRESS_MEM_TYPE_64; } if (s->hostmem != NULL) { MemoryRegion *mr; IVSHMEM_DPRINTF(\"using hostmem\\n\"); mr = host_memory_backend_get_memory(MEMORY_BACKEND(s->hostmem), &error_abort); vmstate_register_ram(mr, DEVICE(s)); memory_region_add_subregion(&s->bar, 0, mr); pci_register_bar(PCI_DEVICE(s), 2, attr, &s->bar); } else if (s->server_chr != NULL) { /* FIXME do not rely on what chr drivers put into filename */ if (strncmp(s->server_chr->filename, \"unix:\", 5)) { error_setg(errp, \"chardev is not a unix client socket\"); return; } /* if we get a UNIX socket as the parameter we will talk * to the ivshmem server to receive the memory region */ IVSHMEM_DPRINTF(\"using shared memory server (socket = %s)\\n\", s->server_chr->filename); if (ivshmem_setup_interrupts(s) < 0) { error_setg(errp, \"failed to initialize interrupts\"); return; } /* we allocate enough space for 16 peers and grow as needed */ resize_peers(s, 16); s->vm_id = -1; pci_register_bar(dev, 2, attr, &s->bar); qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive, ivshmem_check_version, NULL, s); } else { /* just map the file immediately, we're not using a server */ int fd; IVSHMEM_DPRINTF(\"using shm_open (shm object = %s)\\n\", s->shmobj); /* try opening with O_EXCL and if it succeeds zero the memory * by truncating to 0 */ if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR|O_EXCL, S_IRWXU|S_IRWXG|S_IRWXO)) > 0) { /* truncate file to length PCI device's memory */ if (ftruncate(fd, s->ivshmem_size) != 0) { error_report(\"could not truncate shared file\"); } } else if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO)) < 0) { error_setg(errp, \"could not open shared file\"); return; } if (check_shm_size(s, fd, errp) == -1) { return; } create_shared_memory_BAR(s, fd, attr, &err); if (err) { error_propagate(errp, err); return; } } fifo8_create(&s->incoming_fifo, sizeof(int64_t)); if (s->role_val == IVSHMEM_PEER) { error_setg(&s->migration_blocker, \"Migration is disabled when using feature 'peer mode' in device 'ivshmem'\"); migrate_add_blocker(s->migration_blocker); } }"} {"target": 1, "idx": 2656, "func": "int print_insn_lm32(bfd_vma memaddr, struct disassemble_info *info) { fprintf_function fprintf_fn = info->fprintf_func; void *stream = info->stream; int rc; uint8_t insn[4]; const Lm32OpcodeInfo *opc_info; uint32_t op; const char *args_fmt; rc = info->read_memory_func(memaddr, insn, 4, info); if (rc != 0) { info->memory_error_func(rc, memaddr, info); return -1; } fprintf_fn(stream, \"%02x %02x %02x %02x \", insn[0], insn[1], insn[2], insn[3]); op = bfd_getb32(insn); opc_info = find_opcode_info(op); if (opc_info) { fprintf_fn(stream, \"%-8s \", opc_info->name); args_fmt = opc_info->args_fmt; while (args_fmt && *args_fmt) { if (*args_fmt == '%') { switch (*(++args_fmt)) { case '0': { uint8_t r0; const char *r0_name; r0 = (op >> 21) & 0x1f; r0_name = find_reg_info(r0)->name; fprintf_fn(stream, \"%s\", r0_name); break; } case '1': { uint8_t r1; const char *r1_name; r1 = (op >> 16) & 0x1f; r1_name = find_reg_info(r1)->name; fprintf_fn(stream, \"%s\", r1_name); break; } case '2': { uint8_t r2; const char *r2_name; r2 = (op >> 11) & 0x1f; 
r2_name = find_reg_info(r2)->name; fprintf_fn(stream, \"%s\", r2_name); break; } case 'c': { uint8_t csr; const char *csr_name; csr = (op >> 21) & 0x1f; csr_name = find_csr_info(csr)->name; if (csr_name) { fprintf_fn(stream, \"%s\", csr_name); } else { fprintf_fn(stream, \"0x%x\", csr); } break; } case 'u': { uint16_t u16; u16 = op & 0xffff; fprintf_fn(stream, \"0x%x\", u16); break; } case 's': { int16_t s16; s16 = (int16_t)(op & 0xffff); fprintf_fn(stream, \"%d\", s16); break; } case 'r': { uint32_t rela; rela = memaddr + (((int16_t)(op & 0xffff)) << 2); fprintf_fn(stream, \"%x\", rela); break; } case 'R': { uint32_t rela; int32_t imm26; imm26 = (int32_t)((op & 0x3ffffff) << 6) >> 4; rela = memaddr + imm26; fprintf_fn(stream, \"%x\", rela); break; } case 'h': { uint8_t u5; u5 = (op & 0x1f); fprintf_fn(stream, \"%d\", u5); break; } default: break; } } else { fprintf_fn(stream, \"%c\", *args_fmt); } args_fmt++; } } else { fprintf_fn(stream, \".word 0x%x\", op); } return 4; }"} {"target": 0, "idx": 2665, "func": "static int get_coc(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c, uint8_t *properties) { int compno; if (s->buf_end - s->buf < 2) return AVERROR(EINVAL); compno = bytestream_get_byte(&s->buf); c += compno; c->csty = bytestream_get_byte(&s->buf); get_cox(s, c); properties[compno] |= HAD_COC; return 0; }"} {"target": 1, "idx": 2673, "func": "static void curses_setup(void) { int i, colour_default[8] = { COLOR_BLACK, COLOR_BLUE, COLOR_GREEN, COLOR_CYAN, COLOR_RED, COLOR_MAGENTA, COLOR_YELLOW, COLOR_WHITE, }; /* input as raw as possible, let everything be interpreted * by the guest system */ initscr(); noecho(); intrflush(stdscr, FALSE); nodelay(stdscr, TRUE); nonl(); keypad(stdscr, TRUE); start_color(); raw(); scrollok(stdscr, FALSE); for (i = 0; i < 64; i ++) init_pair(i, colour_default[i & 7], colour_default[i >> 3]); }"} {"target": 1, "idx": 2677, "func": "static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc) { #ifdef HAVE_MMX // use the new MMX scaler if th mmx2 cant be used (its faster than the x86asm one) if(sws_flags != SWS_FAST_BILINEAR || (!canMMX2BeUsed)) #else if(sws_flags != SWS_FAST_BILINEAR) #endif { RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize); } else // Fast Bilinear upscale / crap downscale { #ifdef ARCH_X86 #ifdef HAVE_MMX2 int i; if(canMMX2BeUsed) { asm volatile( \"pxor %%mm7, %%mm7 \\n\\t\" \"pxor %%mm2, %%mm2 \\n\\t\" // 2*xalpha \"movd %5, %%mm6 \\n\\t\" // xInc&0xFFFF \"punpcklwd %%mm6, %%mm6 \\n\\t\" \"punpcklwd %%mm6, %%mm6 \\n\\t\" \"movq %%mm6, %%mm2 \\n\\t\" \"psllq $16, %%mm2 \\n\\t\" \"paddw %%mm6, %%mm2 \\n\\t\" \"psllq $16, %%mm2 \\n\\t\" \"paddw %%mm6, %%mm2 \\n\\t\" \"psllq $16, %%mm2 \\n\\t\" //0,t,2t,3t t=xInc&0xFF \"movq %%mm2, \"MANGLE(temp0)\" \\n\\t\" \"movd %4, %%mm6 \\n\\t\" //(xInc*4)&0xFFFF \"punpcklwd %%mm6, %%mm6 \\n\\t\" \"punpcklwd %%mm6, %%mm6 \\n\\t\" \"xorl %%eax, %%eax \\n\\t\" // i \"movl %0, %%esi \\n\\t\" // src \"movl %1, %%edi \\n\\t\" // buf1 \"movl %3, %%edx \\n\\t\" // (xInc*4)>>16 \"xorl %%ecx, %%ecx \\n\\t\" \"xorl %%ebx, %%ebx \\n\\t\" \"movw %4, %%bx \\n\\t\" // (xInc*4)&0xFFFF #define FUNNY_Y_CODE \\ PREFETCH\" 1024(%%esi) \\n\\t\"\\ PREFETCH\" 1056(%%esi) \\n\\t\"\\ PREFETCH\" 1088(%%esi) \\n\\t\"\\ \"call \"MANGLE(funnyYCode)\" \\n\\t\"\\ \"movq \"MANGLE(temp0)\", %%mm2 \\n\\t\"\\ \"xorl %%ecx, %%ecx \\n\\t\" FUNNY_Y_CODE FUNNY_Y_CODE FUNNY_Y_CODE FUNNY_Y_CODE FUNNY_Y_CODE FUNNY_Y_CODE FUNNY_Y_CODE FUNNY_Y_CODE :: \"m\" (src), 
\"m\" (dst), \"m\" (dstWidth), \"m\" ((xInc*4)>>16), \"m\" ((xInc*4)&0xFFFF), \"m\" (xInc&0xFFFF) : \"%eax\", \"%ebx\", \"%ecx\", \"%edx\", \"%esi\", \"%edi\" ); for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128; } else { #endif //NO MMX just normal asm ... asm volatile( \"xorl %%eax, %%eax \\n\\t\" // i \"xorl %%ebx, %%ebx \\n\\t\" // xx \"xorl %%ecx, %%ecx \\n\\t\" // 2*xalpha \".balign 16 \\n\\t\" \"1: \\n\\t\" \"movzbl (%0, %%ebx), %%edi \\n\\t\" //src[xx] \"movzbl 1(%0, %%ebx), %%esi \\n\\t\" //src[xx+1] \"subl %%edi, %%esi \\n\\t\" //src[xx+1] - src[xx] \"imull %%ecx, %%esi \\n\\t\" //(src[xx+1] - src[xx])*2*xalpha \"shll $16, %%edi \\n\\t\" \"addl %%edi, %%esi \\n\\t\" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) \"movl %1, %%edi \\n\\t\" \"shrl $9, %%esi \\n\\t\" \"movw %%si, (%%edi, %%eax, 2) \\n\\t\" \"addw %4, %%cx \\n\\t\" //2*xalpha += xInc&0xFF \"adcl %3, %%ebx \\n\\t\" //xx+= xInc>>8 + carry \"movzbl (%0, %%ebx), %%edi \\n\\t\" //src[xx] \"movzbl 1(%0, %%ebx), %%esi \\n\\t\" //src[xx+1] \"subl %%edi, %%esi \\n\\t\" //src[xx+1] - src[xx] \"imull %%ecx, %%esi \\n\\t\" //(src[xx+1] - src[xx])*2*xalpha \"shll $16, %%edi \\n\\t\" \"addl %%edi, %%esi \\n\\t\" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) \"movl %1, %%edi \\n\\t\" \"shrl $9, %%esi \\n\\t\" \"movw %%si, 2(%%edi, %%eax, 2) \\n\\t\" \"addw %4, %%cx \\n\\t\" //2*xalpha += xInc&0xFF \"adcl %3, %%ebx \\n\\t\" //xx+= xInc>>8 + carry \"addl $2, %%eax \\n\\t\" \"cmpl %2, %%eax \\n\\t\" \" jb 1b \\n\\t\" :: \"r\" (src), \"m\" (dst), \"m\" (dstWidth), \"m\" (xInc>>16), \"m\" (xInc&0xFFFF) : \"%eax\", \"%ebx\", \"%ecx\", \"%edi\", \"%esi\" ); #ifdef HAVE_MMX2 } //if MMX2 cant be used #endif #else int i; unsigned int xpos=0; for(i=0;i>16; register unsigned int xalpha=(xpos&0xFFFF)>>9; dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha; xpos+=xInc; } #endif } }"} {"target": 1, "idx": 2680, "func": "static int get_uint16_equal(QEMUFile *f, void *pv, size_t size) { uint16_t *v = pv; uint16_t v2; qemu_get_be16s(f, &v2); if (*v == v2) { return 0; } return -EINVAL; }"} {"target": 1, "idx": 2704, "func": "static int h264_handle_packet(AVFormatContext *ctx, PayloadContext *data, AVStream *st, AVPacket *pkt, uint32_t *timestamp, const uint8_t *buf, int len, uint16_t seq, int flags) { uint8_t nal; uint8_t type; int result = 0; if (!len) { av_log(ctx, AV_LOG_ERROR, \"Empty H264 RTP packet\\n\"); return AVERROR_INVALIDDATA; } nal = buf[0]; type = nal & 0x1f; assert(data); assert(buf); /* Simplify the case (these are all the nal types used internally by * the h264 codec). 
*/ if (type >= 1 && type <= 23) type = 1; switch (type) { case 0: // undefined, but pass them through case 1: av_new_packet(pkt, len + sizeof(start_sequence)); memcpy(pkt->data, start_sequence, sizeof(start_sequence)); memcpy(pkt->data + sizeof(start_sequence), buf, len); COUNT_NAL_TYPE(data, nal); break; case 24: // STAP-A (one packet, multiple nals) // consume the STAP-A NAL buf++; len--; // first we are going to figure out the total size { int pass = 0; int total_length = 0; uint8_t *dst = NULL; for (pass = 0; pass < 2; pass++) { const uint8_t *src = buf; int src_len = len; while (src_len > 2) { uint16_t nal_size = AV_RB16(src); // consume the length of the aggregate src += 2; src_len -= 2; if (nal_size <= src_len) { if (pass == 0) { // counting total_length += sizeof(start_sequence) + nal_size; } else { // copying assert(dst); memcpy(dst, start_sequence, sizeof(start_sequence)); dst += sizeof(start_sequence); memcpy(dst, src, nal_size); COUNT_NAL_TYPE(data, *src); dst += nal_size; } } else { av_log(ctx, AV_LOG_ERROR, \"nal size exceeds length: %d %d\\n\", nal_size, src_len); } // eat what we handled src += nal_size; src_len -= nal_size; if (src_len < 0) av_log(ctx, AV_LOG_ERROR, \"Consumed more bytes than we got! (%d)\\n\", src_len); } if (pass == 0) { /* now we know the total size of the packet (with the * start sequences added) */ av_new_packet(pkt, total_length); dst = pkt->data; } else { assert(dst - pkt->data == total_length); } } } break; case 25: // STAP-B case 26: // MTAP-16 case 27: // MTAP-24 case 29: // FU-B av_log(ctx, AV_LOG_ERROR, \"Unhandled type (%d) (See RFC for implementation details\\n\", type); result = AVERROR(ENOSYS); break; case 28: // FU-A (fragmented nal) buf++; len--; // skip the fu_indicator if (len > 1) { // these are the same as above, we just redo them here for clarity uint8_t fu_indicator = nal; uint8_t fu_header = *buf; uint8_t start_bit = fu_header >> 7; uint8_t av_unused end_bit = (fu_header & 0x40) >> 6; uint8_t nal_type = fu_header & 0x1f; uint8_t reconstructed_nal; // Reconstruct this packet's true nal; only the data follows. /* The original nal forbidden bit and NRI are stored in this * packet's nal. 
*/ reconstructed_nal = fu_indicator & 0xe0; reconstructed_nal |= nal_type; // skip the fu_header buf++; len--; if (start_bit) COUNT_NAL_TYPE(data, nal_type); if (start_bit) { /* copy in the start sequence, and the reconstructed nal */ av_new_packet(pkt, sizeof(start_sequence) + sizeof(nal) + len); memcpy(pkt->data, start_sequence, sizeof(start_sequence)); pkt->data[sizeof(start_sequence)] = reconstructed_nal; memcpy(pkt->data + sizeof(start_sequence) + sizeof(nal), buf, len); } else { av_new_packet(pkt, len); memcpy(pkt->data, buf, len); } } else { av_log(ctx, AV_LOG_ERROR, \"Too short data for FU-A H264 RTP packet\\n\"); result = AVERROR_INVALIDDATA; } break; case 30: // undefined case 31: // undefined default: av_log(ctx, AV_LOG_ERROR, \"Undefined type (%d)\\n\", type); result = AVERROR_INVALIDDATA; break; } pkt->stream_index = st->index; return result; }"} {"target": 0, "idx": 2737, "func": "void bdrv_refresh_filename(BlockDriverState *bs) { BlockDriver *drv = bs->drv; QDict *opts; if (!drv) { return; } /* This BDS's file name will most probably depend on its file's name, so * refresh that first */ if (bs->file) { bdrv_refresh_filename(bs->file->bs); } if (drv->bdrv_refresh_filename) { /* Obsolete information is of no use here, so drop the old file name * information before refreshing it */ bs->exact_filename[0] = '\\0'; if (bs->full_open_options) { QDECREF(bs->full_open_options); bs->full_open_options = NULL; } drv->bdrv_refresh_filename(bs); } else if (bs->file) { /* Try to reconstruct valid information from the underlying file */ bool has_open_options; bs->exact_filename[0] = '\\0'; if (bs->full_open_options) { QDECREF(bs->full_open_options); bs->full_open_options = NULL; } opts = qdict_new(); has_open_options = append_open_options(opts, bs); /* If no specific options have been given for this BDS, the filename of * the underlying file should suffice for this one as well */ if (bs->file->bs->exact_filename[0] && !has_open_options) { strcpy(bs->exact_filename, bs->file->bs->exact_filename); } /* Reconstructing the full options QDict is simple for most format block * drivers, as long as the full options are known for the underlying * file BDS. The full options QDict of that file BDS should somehow * contain a representation of the filename, therefore the following * suffices without querying the (exact_)filename of this BDS. */ if (bs->file->bs->full_open_options) { qdict_put_obj(opts, \"driver\", QOBJECT(qstring_from_str(drv->format_name))); QINCREF(bs->file->bs->full_open_options); qdict_put_obj(opts, \"file\", QOBJECT(bs->file->bs->full_open_options)); bs->full_open_options = opts; } else { QDECREF(opts); } } else if (!bs->full_open_options && qdict_size(bs->options)) { /* There is no underlying file BDS (at least referenced by BDS.file), * so the full options QDict should be equal to the options given * specifically for this block device when it was opened (plus the * driver specification). * Because those options don't change, there is no need to update * full_open_options when it's already set. */ opts = qdict_new(); append_open_options(opts, bs); qdict_put_obj(opts, \"driver\", QOBJECT(qstring_from_str(drv->format_name))); if (bs->exact_filename[0]) { /* This may not work for all block protocol drivers (some may * require this filename to be parsed), but we have to find some * default solution here, so just include it. 
If some block driver * does not support pure options without any filename at all or * needs some special format of the options QDict, it needs to * implement the driver-specific bdrv_refresh_filename() function. */ qdict_put_obj(opts, \"filename\", QOBJECT(qstring_from_str(bs->exact_filename))); } bs->full_open_options = opts; } if (bs->exact_filename[0]) { pstrcpy(bs->filename, sizeof(bs->filename), bs->exact_filename); } else if (bs->full_open_options) { QString *json = qobject_to_json(QOBJECT(bs->full_open_options)); snprintf(bs->filename, sizeof(bs->filename), \"json:%s\", qstring_get_str(json)); QDECREF(json); } }"} {"target": 0, "idx": 2762, "func": "static inline bool cpu_handle_exception(CPUState *cpu, int *ret) { if (cpu->exception_index >= 0) { if (cpu->exception_index >= EXCP_INTERRUPT) { /* exit request from the cpu execution loop */ *ret = cpu->exception_index; if (*ret == EXCP_DEBUG) { cpu_handle_debug_exception(cpu); } cpu->exception_index = -1; return true; } else { #if defined(CONFIG_USER_ONLY) /* if user mode only, we simulate a fake exception which will be handled outside the cpu execution loop */ #if defined(TARGET_I386) CPUClass *cc = CPU_GET_CLASS(cpu); cc->do_interrupt(cpu); #endif *ret = cpu->exception_index; cpu->exception_index = -1; return true; #else if (replay_exception()) { CPUClass *cc = CPU_GET_CLASS(cpu); qemu_mutex_lock_iothread(); cc->do_interrupt(cpu); qemu_mutex_unlock_iothread(); cpu->exception_index = -1; } else if (!replay_has_interrupt()) { /* give a chance to iothread in replay mode */ *ret = EXCP_INTERRUPT; return true; } #endif } #ifndef CONFIG_USER_ONLY } else if (replay_has_exception() && cpu->icount_decr.u16.low + cpu->icount_extra == 0) { /* try to cause an exception pending in the log */ cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true); *ret = -1; return true; #endif } return false; }"} {"target": 0, "idx": 2793, "func": "void ff_dnxhdenc_init_x86(DNXHDEncContext *ctx) { #if HAVE_INLINE_ASM if (av_get_cpu_flags() & AV_CPU_FLAG_SSE2) { if (ctx->cid_table->bit_depth == 8) ctx->get_pixels_8x4_sym = get_pixels_8x4_sym_sse2; } #endif /* HAVE_INLINE_ASM */ }"} {"target": 0, "idx": 2806, "func": "uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2) { CPU_DoubleU farg1, farg2; farg1.ll = arg1; farg2.ll = arg2; if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) { /* Division of infinity by infinity */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI); } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) { /* Division of zero by zero */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d))) { /* sNaN division */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); } farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status); } return farg1.ll; }"} {"target": 0, "idx": 2817, "func": "static void test_io_channel_ipv4(bool async) { SocketAddress *listen_addr = g_new0(SocketAddress, 1); SocketAddress *connect_addr = g_new0(SocketAddress, 1); listen_addr->type = SOCKET_ADDRESS_KIND_INET; listen_addr->u.inet = g_new0(InetSocketAddress, 1); listen_addr->u.inet->host = g_strdup(\"0.0.0.0\"); listen_addr->u.inet->port = NULL; /* Auto-select */ connect_addr->type = SOCKET_ADDRESS_KIND_INET; connect_addr->u.inet = g_new0(InetSocketAddress, 1); connect_addr->u.inet->host = g_strdup(\"127.0.0.1\"); connect_addr->u.inet->port = NULL; /* Filled in later */ test_io_channel(async, listen_addr, 
connect_addr); qapi_free_SocketAddress(listen_addr); qapi_free_SocketAddress(connect_addr); }"} {"target": 0, "idx": 2818, "func": "static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) { const char *opn = \"loongson_cp2\"; uint32_t opc, shift_max; TCGv_i64 t0, t1; opc = MASK_LMI(ctx->opcode); switch (opc) { case OPC_ADD_CP2: case OPC_SUB_CP2: case OPC_DADD_CP2: case OPC_DSUB_CP2: t0 = tcg_temp_local_new_i64(); t1 = tcg_temp_local_new_i64(); break; default: t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); break; } gen_load_fpr64(ctx, t0, rs); gen_load_fpr64(ctx, t1, rt); #define LMI_HELPER(UP, LO) \\ case OPC_##UP: gen_helper_##LO(t0, t0, t1); opn = #LO; break #define LMI_HELPER_1(UP, LO) \\ case OPC_##UP: gen_helper_##LO(t0, t0); opn = #LO; break #define LMI_DIRECT(UP, LO, OP) \\ case OPC_##UP: tcg_gen_##OP##_i64(t0, t0, t1); opn = #LO; break switch (opc) { LMI_HELPER(PADDSH, paddsh); LMI_HELPER(PADDUSH, paddush); LMI_HELPER(PADDH, paddh); LMI_HELPER(PADDW, paddw); LMI_HELPER(PADDSB, paddsb); LMI_HELPER(PADDUSB, paddusb); LMI_HELPER(PADDB, paddb); LMI_HELPER(PSUBSH, psubsh); LMI_HELPER(PSUBUSH, psubush); LMI_HELPER(PSUBH, psubh); LMI_HELPER(PSUBW, psubw); LMI_HELPER(PSUBSB, psubsb); LMI_HELPER(PSUBUSB, psubusb); LMI_HELPER(PSUBB, psubb); LMI_HELPER(PSHUFH, pshufh); LMI_HELPER(PACKSSWH, packsswh); LMI_HELPER(PACKSSHB, packsshb); LMI_HELPER(PACKUSHB, packushb); LMI_HELPER(PUNPCKLHW, punpcklhw); LMI_HELPER(PUNPCKHHW, punpckhhw); LMI_HELPER(PUNPCKLBH, punpcklbh); LMI_HELPER(PUNPCKHBH, punpckhbh); LMI_HELPER(PUNPCKLWD, punpcklwd); LMI_HELPER(PUNPCKHWD, punpckhwd); LMI_HELPER(PAVGH, pavgh); LMI_HELPER(PAVGB, pavgb); LMI_HELPER(PMAXSH, pmaxsh); LMI_HELPER(PMINSH, pminsh); LMI_HELPER(PMAXUB, pmaxub); LMI_HELPER(PMINUB, pminub); LMI_HELPER(PCMPEQW, pcmpeqw); LMI_HELPER(PCMPGTW, pcmpgtw); LMI_HELPER(PCMPEQH, pcmpeqh); LMI_HELPER(PCMPGTH, pcmpgth); LMI_HELPER(PCMPEQB, pcmpeqb); LMI_HELPER(PCMPGTB, pcmpgtb); LMI_HELPER(PSLLW, psllw); LMI_HELPER(PSLLH, psllh); LMI_HELPER(PSRLW, psrlw); LMI_HELPER(PSRLH, psrlh); LMI_HELPER(PSRAW, psraw); LMI_HELPER(PSRAH, psrah); LMI_HELPER(PMULLH, pmullh); LMI_HELPER(PMULHH, pmulhh); LMI_HELPER(PMULHUH, pmulhuh); LMI_HELPER(PMADDHW, pmaddhw); LMI_HELPER(PASUBUB, pasubub); LMI_HELPER_1(BIADD, biadd); LMI_HELPER_1(PMOVMSKB, pmovmskb); LMI_DIRECT(PADDD, paddd, add); LMI_DIRECT(PSUBD, psubd, sub); LMI_DIRECT(XOR_CP2, xor, xor); LMI_DIRECT(NOR_CP2, nor, nor); LMI_DIRECT(AND_CP2, and, and); LMI_DIRECT(PANDN, pandn, andc); LMI_DIRECT(OR, or, or); case OPC_PINSRH_0: tcg_gen_deposit_i64(t0, t0, t1, 0, 16); opn = \"pinsrh_0\"; break; case OPC_PINSRH_1: tcg_gen_deposit_i64(t0, t0, t1, 16, 16); opn = \"pinsrh_1\"; break; case OPC_PINSRH_2: tcg_gen_deposit_i64(t0, t0, t1, 32, 16); opn = \"pinsrh_2\"; break; case OPC_PINSRH_3: tcg_gen_deposit_i64(t0, t0, t1, 48, 16); opn = \"pinsrh_3\"; break; case OPC_PEXTRH: tcg_gen_andi_i64(t1, t1, 3); tcg_gen_shli_i64(t1, t1, 4); tcg_gen_shr_i64(t0, t0, t1); tcg_gen_ext16u_i64(t0, t0); opn = \"pextrh\"; break; case OPC_ADDU_CP2: tcg_gen_add_i64(t0, t0, t1); tcg_gen_ext32s_i64(t0, t0); opn = \"addu\"; break; case OPC_SUBU_CP2: tcg_gen_sub_i64(t0, t0, t1); tcg_gen_ext32s_i64(t0, t0); opn = \"addu\"; break; case OPC_SLL_CP2: opn = \"sll\"; shift_max = 32; goto do_shift; case OPC_SRL_CP2: opn = \"srl\"; shift_max = 32; goto do_shift; case OPC_SRA_CP2: opn = \"sra\"; shift_max = 32; goto do_shift; case OPC_DSLL_CP2: opn = \"dsll\"; shift_max = 64; goto do_shift; case OPC_DSRL_CP2: opn = \"dsrl\"; shift_max = 64; goto 
do_shift; case OPC_DSRA_CP2: opn = \"dsra\"; shift_max = 64; goto do_shift; do_shift: /* Make sure shift count isn't TCG undefined behaviour. */ tcg_gen_andi_i64(t1, t1, shift_max - 1); switch (opc) { case OPC_SLL_CP2: case OPC_DSLL_CP2: tcg_gen_shl_i64(t0, t0, t1); break; case OPC_SRA_CP2: case OPC_DSRA_CP2: /* Since SRA is UndefinedResult without sign-extended inputs, we can treat SRA and DSRA the same. */ tcg_gen_sar_i64(t0, t0, t1); break; case OPC_SRL_CP2: /* We want to shift in zeros for SRL; zero-extend first. */ tcg_gen_ext32u_i64(t0, t0); /* FALLTHRU */ case OPC_DSRL_CP2: tcg_gen_shr_i64(t0, t0, t1); break; } if (shift_max == 32) { tcg_gen_ext32s_i64(t0, t0); } /* Shifts larger than MAX produce zero. */ tcg_gen_setcondi_i64(TCG_COND_LTU, t1, t1, shift_max); tcg_gen_neg_i64(t1, t1); tcg_gen_and_i64(t0, t0, t1); break; case OPC_ADD_CP2: case OPC_DADD_CP2: { TCGv_i64 t2 = tcg_temp_new_i64(); int lab = gen_new_label(); tcg_gen_mov_i64(t2, t0); tcg_gen_add_i64(t0, t1, t2); if (opc == OPC_ADD_CP2) { tcg_gen_ext32s_i64(t0, t0); } tcg_gen_xor_i64(t1, t1, t2); tcg_gen_xor_i64(t2, t2, t0); tcg_gen_andc_i64(t1, t2, t1); tcg_temp_free_i64(t2); tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab); generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(lab); opn = (opc == OPC_ADD_CP2 ? \"add\" : \"dadd\"); break; } case OPC_SUB_CP2: case OPC_DSUB_CP2: { TCGv_i64 t2 = tcg_temp_new_i64(); int lab = gen_new_label(); tcg_gen_mov_i64(t2, t0); tcg_gen_sub_i64(t0, t1, t2); if (opc == OPC_SUB_CP2) { tcg_gen_ext32s_i64(t0, t0); } tcg_gen_xor_i64(t1, t1, t2); tcg_gen_xor_i64(t2, t2, t0); tcg_gen_and_i64(t1, t1, t2); tcg_temp_free_i64(t2); tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab); generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(lab); opn = (opc == OPC_SUB_CP2 ? \"sub\" : \"dsub\"); break; } case OPC_PMULUW: tcg_gen_ext32u_i64(t0, t0); tcg_gen_ext32u_i64(t1, t1); tcg_gen_mul_i64(t0, t0, t1); opn = \"pmuluw\"; break; case OPC_SEQU_CP2: case OPC_SEQ_CP2: case OPC_SLTU_CP2: case OPC_SLT_CP2: case OPC_SLEU_CP2: case OPC_SLE_CP2: /* ??? Document is unclear: Set FCC[CC]. Does that mean the FD field is the CC field? 
*/ default: MIPS_INVAL(opn); generate_exception(ctx, EXCP_RI); return; } #undef LMI_HELPER #undef LMI_DIRECT gen_store_fpr64(ctx, t0, rd); (void)opn; /* avoid a compiler warning */ MIPS_DEBUG(\"%s %s, %s, %s\", opn, fregnames[rd], fregnames[rs], fregnames[rt]); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); }"} {"target": 0, "idx": 2820, "func": "static void v9fs_stat(void *opaque) { int32_t fid; V9fsStat v9stat; ssize_t err = 0; size_t offset = 7; struct stat stbuf; V9fsFidState *fidp; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; pdu_unmarshal(pdu, offset, \"d\", &fid); trace_v9fs_stat(pdu->tag, pdu->id, fid); fidp = get_fid(pdu, fid); if (fidp == NULL) { err = -ENOENT; goto out_nofid; } err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); if (err < 0) { goto out; } err = stat_to_v9stat(pdu, &fidp->path, &stbuf, &v9stat); if (err < 0) { goto out; } offset += pdu_marshal(pdu, offset, \"wS\", 0, &v9stat); err = offset; trace_v9fs_stat_return(pdu->tag, pdu->id, v9stat.mode, v9stat.atime, v9stat.mtime, v9stat.length); v9fs_stat_free(&v9stat); out: put_fid(pdu, fidp); out_nofid: complete_pdu(s, pdu, err); }"} {"target": 1, "idx": 2832, "func": "uint64_t helper_addlv (uint64_t op1, uint64_t op2) { uint64_t tmp = op1; op1 = (uint32_t)(op1 + op2); if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) { arith_excp(env, GETPC(), EXC_M_IOV, 0); } return op1; }"} {"target": 1, "idx": 2846, "func": "static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group, RockerTlv **group_tlvs) { OfDpaGroup *l2_group; RockerTlv **tlvs; int err; int i; if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT] || !group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]) { return -ROCKER_EINVAL; } group->l2_flood.group_count = rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]); tlvs = g_malloc0((group->l2_flood.group_count + 1) * sizeof(RockerTlv *)); if (!tlvs) { return -ROCKER_ENOMEM; } g_free(group->l2_flood.group_ids); group->l2_flood.group_ids = g_malloc0(group->l2_flood.group_count * sizeof(uint32_t)); if (!group->l2_flood.group_ids) { err = -ROCKER_ENOMEM; goto err_out; } rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count, group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]); for (i = 0; i < group->l2_flood.group_count; i++) { group->l2_flood.group_ids[i] = rocker_tlv_get_le32(tlvs[i + 1]); } /* All of the L2 interface groups referenced by the L2 flood * must have same VLAN */ for (i = 0; i < group->l2_flood.group_count; i++) { l2_group = of_dpa_group_find(of_dpa, group->l2_flood.group_ids[i]); if (!l2_group) { continue; } if ((ROCKER_GROUP_TYPE_GET(l2_group->id) == ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) && (ROCKER_GROUP_VLAN_GET(l2_group->id) != ROCKER_GROUP_VLAN_GET(group->id))) { DPRINTF(\"l2 interface group 0x%08x VLAN doesn't match l2 \" \"flood group 0x%08x\\n\", group->l2_flood.group_ids[i], group->id); err = -ROCKER_EINVAL; goto err_out; } } g_free(tlvs); return ROCKER_OK; err_out: group->l2_flood.group_count = 0; g_free(group->l2_flood.group_ids); g_free(tlvs); return err; }"} {"target": 1, "idx": 2857, "func": "int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { const uint64_t fuzz_tag = FUZZ_TAG; FuzzDataBuffer buffer; const uint8_t *last = data; const uint8_t *end = data + size; uint32_t it = 0; if (!c) c = AVCodecInitialize(FFMPEG_CODEC); // Done once. 
AVCodecContext* ctx = avcodec_alloc_context3(NULL); if (!ctx) error(\"Failed memory allocation\"); ctx->max_pixels = 4096 * 4096; //To reduce false positive OOM and hangs int res = avcodec_open2(ctx, c, NULL); if (res < 0) return res; FDBCreate(&buffer); int got_frame; AVFrame *frame = av_frame_alloc(); if (!frame) error(\"Failed memory allocation\"); // Read very simple container AVPacket avpkt; while (data < end && it < maxiteration) { // Search for the TAG while (data + sizeof(fuzz_tag) < end) { if (data[0] == (fuzz_tag & 0xFF) && *(const uint64_t *)(data) == fuzz_tag) break; data++; } if (data + sizeof(fuzz_tag) > end) data = end; FDBPrepare(&buffer, &avpkt, last, data - last); data += sizeof(fuzz_tag); last = data; // Iterate through all data while (avpkt.size > 0 && it++ < maxiteration) { av_frame_unref(frame); int ret = decode_handler(ctx, frame, &got_frame, &avpkt); if (it > 20) ctx->error_concealment = 0; if (ret <= 0 || ret > avpkt.size) break; avpkt.data += ret; avpkt.size -= ret; } } av_init_packet(&avpkt); avpkt.data = NULL; avpkt.size = 0; do { got_frame = 0; decode_handler(ctx, frame, &got_frame, &avpkt); } while (got_frame == 1 && it++ < maxiteration); av_frame_free(&frame); avcodec_free_context(&ctx); av_freep(&ctx); FDBDesroy(&buffer); return 0; }"} {"target": 0, "idx": 2880, "func": "void op_cp1_64bitmode(void) { if (!(env->CP0_Status & (1 << CP0St_FR))) { CALL_FROM_TB1(do_raise_exception, EXCP_RI); } RETURN(); }"} {"target": 0, "idx": 2884, "func": "void disas_a64_insn(CPUARMState *env, DisasContext *s) { uint32_t insn; insn = arm_ldl_code(env, s->pc, s->bswap_code); s->insn = insn; s->pc += 4; switch ((insn >> 24) & 0x1f) { default: unallocated_encoding(s); break; } if (unlikely(s->singlestep_enabled) && (s->is_jmp == DISAS_TB_JUMP)) { /* go through the main loop for single step */ s->is_jmp = DISAS_JUMP; } }"} {"target": 0, "idx": 2895, "func": "static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg) { switch(ot) { case OT_BYTE: if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) { goto std_case; } else { tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8); tcg_gen_ext8u_tl(t0, t0); } break; default: std_case: tcg_gen_mov_tl(t0, cpu_regs[reg]); break; } }"} {"target": 0, "idx": 2903, "func": "static int hdev_open(BlockDriverState *bs, const char *filename, int flags) { BDRVRawState *s = bs->opaque; int access_flags, create_flags; DWORD overlapped; char device_name[64]; if (strstart(filename, \"/dev/cdrom\", NULL)) { if (find_cdrom(device_name, sizeof(device_name)) < 0) return -ENOENT; filename = device_name; } else { /* transform drive letters into device name */ if (((filename[0] >= 'a' && filename[0] <= 'z') || (filename[0] >= 'A' && filename[0] <= 'Z')) && filename[1] == ':' && filename[2] == '\\0') { snprintf(device_name, sizeof(device_name), \"\\\\\\\\.\\\\%c:\", filename[0]); filename = device_name; } } s->type = find_device_type(bs, filename); if ((flags & BDRV_O_ACCESS) == O_RDWR) { access_flags = GENERIC_READ | GENERIC_WRITE; } else { access_flags = GENERIC_READ; } create_flags = OPEN_EXISTING; #ifdef QEMU_TOOL overlapped = FILE_ATTRIBUTE_NORMAL; #else overlapped = FILE_FLAG_OVERLAPPED; #endif s->hfile = CreateFile(filename, access_flags, FILE_SHARE_READ, NULL, create_flags, overlapped, NULL); if (s->hfile == INVALID_HANDLE_VALUE) return -1; return 0; }"} {"target": 0, "idx": 2911, "func": "static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg.host_ptr; if (!s390_has_feat(S390_FEAT_VECTOR)) { 
set_sigp_status(si, SIGP_STAT_INVALID_ORDER); return; } /* cpu has to be stopped */ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) { set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); return; } /* parameter must be aligned to 1024-byte boundary */ if (si->param & 0x3ff) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } cpu_synchronize_state(cs); if (kvm_s390_store_adtl_status(cpu, si->param)) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; }"} {"target": 0, "idx": 2913, "func": "static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n) { target_ulong tmp; tmp = ldtul_p(mem_buf); if (n < 32) { env->active_tc.gpr[n] = tmp; return sizeof(target_ulong); } if (env->CP0_Config1 & (1 << CP0C1_FP) && n >= 38 && n < 73) { if (n < 70) { if (env->CP0_Status & (1 << CP0St_FR)) env->active_fpu.fpr[n - 38].d = tmp; else env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp; } switch (n) { case 70: env->active_fpu.fcr31 = tmp & 0xFF83FFFF; /* set rounding mode */ RESTORE_ROUNDING_MODE; #ifndef CONFIG_SOFTFLOAT /* no floating point exception for native float */ SET_FP_ENABLE(env->active_fpu.fcr31, 0); #endif break; case 71: env->active_fpu.fcr0 = tmp; break; } return sizeof(target_ulong); } switch (n) { case 32: env->CP0_Status = tmp; break; case 33: env->active_tc.LO[0] = tmp; break; case 34: env->active_tc.HI[0] = tmp; break; case 35: env->CP0_BadVAddr = tmp; break; case 36: env->CP0_Cause = tmp; break; case 37: env->active_tc.PC = tmp; break; case 72: /* fp, ignored */ break; default: if (n > 89) return 0; /* Other registers are readonly. Ignore writes. */ break; } return sizeof(target_ulong); }"} {"target": 0, "idx": 2914, "func": "QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque) { QEMUBH *bh; bh = g_malloc0(sizeof(QEMUBH)); bh->ctx = ctx; bh->cb = cb; bh->opaque = opaque; qemu_mutex_lock(&ctx->bh_lock); bh->next = ctx->first_bh; /* Make sure that the members are ready before putting bh into list */ smp_wmb(); ctx->first_bh = bh; qemu_mutex_unlock(&ctx->bh_lock); return bh; }"} {"target": 0, "idx": 2929, "func": "static void audiogen(void *data, enum AVSampleFormat sample_fmt, int channels, int sample_rate, int nb_samples) { int i, ch, k; double v, f, a, ampa; double tabf1[SWR_CH_MAX]; double tabf2[SWR_CH_MAX]; double taba[SWR_CH_MAX]; unsigned static rnd; #define PUT_SAMPLE set(data, ch, k, channels, sample_fmt, v); #define uint_rand(x) (x = x * 1664525 + 1013904223) #define dbl_rand(x) (uint_rand(x)*2.0 / (double)UINT_MAX - 1) k = 0; /* 1 second of single freq sinus at 1000 Hz */ a = 0; for (i = 0; i < 1 * sample_rate && k < nb_samples; i++, k++) { v = sin(a) * 0.30; for (ch = 0; ch < channels; ch++) PUT_SAMPLE a += M_PI * 1000.0 * 2.0 / sample_rate; } /* 1 second of varying frequency between 100 and 10000 Hz */ a = 0; for (i = 0; i < 1 * sample_rate && k < nb_samples; i++, k++) { v = sin(a) * 0.30; for (ch = 0; ch < channels; ch++) PUT_SAMPLE f = 100.0 + (((10000.0 - 100.0) * i) / sample_rate); a += M_PI * f * 2.0 / sample_rate; } /* 0.5 second of low amplitude white noise */ for (i = 0; i < sample_rate / 2 && k < nb_samples; i++, k++) { v = dbl_rand(rnd) * 0.30; for (ch = 0; ch < channels; ch++) PUT_SAMPLE } /* 0.5 second of high amplitude white noise */ for (i = 0; i < sample_rate / 2 && k < nb_samples; i++, k++) { v = dbl_rand(rnd); for (ch = 0; ch < channels; ch++) PUT_SAMPLE } /* 1 second of unrelated ramps for each channel */ for (ch = 0; ch < channels; ch++) { taba[ch] = 0; tabf1[ch] = 100 + 
uint_rand(rnd) % 5000; tabf2[ch] = 100 + uint_rand(rnd) % 5000; } for (i = 0; i < 1 * sample_rate && k < nb_samples; i++, k++) { for (ch = 0; ch < channels; ch++) { v = sin(taba[ch]) * 0.30; PUT_SAMPLE f = tabf1[ch] + (((tabf2[ch] - tabf1[ch]) * i) / sample_rate); taba[ch] += M_PI * f * 2.0 / sample_rate; } } /* 2 seconds of 500 Hz with varying volume */ a = 0; ampa = 0; for (i = 0; i < 2 * sample_rate && k < nb_samples; i++, k++) { for (ch = 0; ch < channels; ch++) { double amp = (1.0 + sin(ampa)) * 0.15; if (ch & 1) amp = 0.30 - amp; v = sin(a) * amp; PUT_SAMPLE a += M_PI * 500.0 * 2.0 / sample_rate; ampa += M_PI * 2.0 / sample_rate; } } }"} {"target": 1, "idx": 2946, "func": "static void paint_mouse_pointer(AVFormatContext *s1, struct gdigrab *gdigrab) { CURSORINFO ci = {0}; #define CURSOR_ERROR(str) \\ if (!gdigrab->cursor_error_printed) { \\ WIN32_API_ERROR(str); \\ gdigrab->cursor_error_printed = 1; \\ } ci.cbSize = sizeof(ci); if (GetCursorInfo(&ci)) { HCURSOR icon = CopyCursor(ci.hCursor); ICONINFO info; POINT pos; RECT clip_rect = gdigrab->clip_rect; HWND hwnd = gdigrab->hwnd; info.hbmMask = NULL; info.hbmColor = NULL; if (ci.flags != CURSOR_SHOWING) return; if (!icon) { /* Use the standard arrow cursor as a fallback. * You'll probably only hit this in Wine, which can't fetch * the current system cursor. */ icon = CopyCursor(LoadCursor(NULL, IDC_ARROW)); } if (!GetIconInfo(icon, &info)) { CURSOR_ERROR(\"Could not get icon info\"); goto icon_error; } pos.x = ci.ptScreenPos.x - clip_rect.left - info.xHotspot; pos.y = ci.ptScreenPos.y - clip_rect.top - info.yHotspot; if (hwnd) { RECT rect; if (GetWindowRect(hwnd, &rect)) { pos.x -= rect.left; pos.y -= rect.top; } else { CURSOR_ERROR(\"Couldn't get window rectangle\"); goto icon_error; } } av_log(s1, AV_LOG_DEBUG, \"Cursor pos (%li,%li) -> (%li,%li)\\n\", ci.ptScreenPos.x, ci.ptScreenPos.y, pos.x, pos.y); if (pos.x >= 0 && pos.x <= clip_rect.right - clip_rect.left && pos.y >= 0 && pos.y <= clip_rect.bottom - clip_rect.top) { if (!DrawIcon(gdigrab->dest_hdc, pos.x, pos.y, icon)) CURSOR_ERROR(\"Couldn't draw icon\"); } icon_error: if (icon) DestroyCursor(icon); } else { CURSOR_ERROR(\"Couldn't get cursor info\"); } }"} {"target": 1, "idx": 2950, "func": "static void machine_initfn(Object *obj) { MachineState *ms = MACHINE(obj); ms->kernel_irqchip_allowed = true; ms->kvm_shadow_mem = -1; ms->dump_guest_core = true; object_property_add_str(obj, \"accel\", machine_get_accel, machine_set_accel, NULL); object_property_set_description(obj, \"accel\", \"Accelerator list\", NULL); object_property_add_bool(obj, \"kernel-irqchip\", NULL, machine_set_kernel_irqchip, NULL); object_property_set_description(obj, \"kernel-irqchip\", \"Use KVM in-kernel irqchip\", NULL); object_property_add(obj, \"kvm-shadow-mem\", \"int\", machine_get_kvm_shadow_mem, machine_set_kvm_shadow_mem, NULL, NULL, NULL); object_property_set_description(obj, \"kvm-shadow-mem\", \"KVM shadow MMU size\", NULL); object_property_add_str(obj, \"kernel\", machine_get_kernel, machine_set_kernel, NULL); object_property_set_description(obj, \"kernel\", \"Linux kernel image file\", NULL); object_property_add_str(obj, \"initrd\", machine_get_initrd, machine_set_initrd, NULL); object_property_set_description(obj, \"initrd\", \"Linux initial ramdisk file\", NULL); object_property_add_str(obj, \"append\", machine_get_append, machine_set_append, NULL); object_property_set_description(obj, \"append\", \"Linux kernel command line\", NULL); object_property_add_str(obj, \"dtb\", machine_get_dtb, 
machine_set_dtb, NULL); object_property_set_description(obj, \"dtb\", \"Linux kernel device tree file\", NULL); object_property_add_str(obj, \"dumpdtb\", machine_get_dumpdtb, machine_set_dumpdtb, NULL); object_property_set_description(obj, \"dumpdtb\", \"Dump current dtb to a file and quit\", NULL); object_property_add(obj, \"phandle-start\", \"int\", machine_get_phandle_start, machine_set_phandle_start, NULL, NULL, NULL); object_property_set_description(obj, \"phandle-start\", \"The first phandle ID we may generate dynamically\", NULL); object_property_add_str(obj, \"dt-compatible\", machine_get_dt_compatible, machine_set_dt_compatible, NULL); object_property_set_description(obj, \"dt-compatible\", \"Overrides the \\\"compatible\\\" property of the dt root node\", NULL); object_property_add_bool(obj, \"dump-guest-core\", machine_get_dump_guest_core, machine_set_dump_guest_core, NULL); object_property_set_description(obj, \"dump-guest-core\", \"Include guest memory in a core dump\", NULL); object_property_add_bool(obj, \"mem-merge\", machine_get_mem_merge, machine_set_mem_merge, NULL); object_property_set_description(obj, \"mem-merge\", \"Enable/disable memory merge support\", NULL); object_property_add_bool(obj, \"usb\", machine_get_usb, machine_set_usb, NULL); object_property_set_description(obj, \"usb\", \"Set on/off to enable/disable usb\", NULL); object_property_add_str(obj, \"firmware\", machine_get_firmware, machine_set_firmware, NULL); object_property_set_description(obj, \"firmware\", \"Firmware image\", NULL); object_property_add_bool(obj, \"iommu\", machine_get_iommu, machine_set_iommu, NULL); object_property_set_description(obj, \"iommu\", \"Set on/off to enable/disable Intel IOMMU (VT-d)\", NULL); /* Register notifier when init is done for sysbus sanity checks */ ms->sysbus_notifier.notify = machine_init_notify; qemu_add_machine_init_done_notifier(&ms->sysbus_notifier); }"} {"target": 1, "idx": 2958, "func": "void bdrv_delete(BlockDriverState *bs) { assert(!bs->peer); /* remove from list, if necessary */ if (bs->device_name[0] != '\\0') { QTAILQ_REMOVE(&bdrv_states, bs, list); } bdrv_close(bs); if (bs->file != NULL) { bdrv_delete(bs->file); } assert(bs != bs_snapshots); qemu_free(bs); }"} {"target": 1, "idx": 2967, "func": "static void hmp_migrate_status_cb(void *opaque) { MigrationStatus *status = opaque; MigrationInfo *info; info = qmp_query_migrate(NULL); if (!info->has_status || strcmp(info->status, \"active\") == 0) { if (info->has_disk) { int progress; if (info->disk->remaining) { progress = info->disk->transferred * 100 / info->disk->total; } else { progress = 100; } monitor_printf(status->mon, \"Completed %d %%\\r\", progress); monitor_flush(status->mon); } timer_mod(status->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000); } else { if (status->is_block_migration) { monitor_printf(status->mon, \"\\n\"); } monitor_resume(status->mon); timer_del(status->timer); g_free(status); } qapi_free_MigrationInfo(info); }"} {"target": 0, "idx": 2996, "func": "int kvm_arch_on_sigbus(int code, void *addr) { #ifdef KVM_CAP_MCE if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) { void *vaddr; ram_addr_t ram_addr; target_phys_addr_t paddr; /* Hope we are lucky for AO MCE */ vaddr = addr; if (qemu_ram_addr_from_host(vaddr, &ram_addr) || !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr, &paddr)) { fprintf(stderr, \"Hardware memory error for memory used by \" \"QEMU itself instead of guest system!: %p\\n\", addr); return 0; } 
kvm_mce_inj_srao_memscrub2(first_cpu, paddr); } else #endif /* KVM_CAP_MCE */ { if (code == BUS_MCEERR_AO) { return 0; } else if (code == BUS_MCEERR_AR) { hardware_memory_error(); } else { return 1; } } return 0; }"} {"target": 1, "idx": 3001, "func": "coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) { IscsiLun *iscsilun = bs->opaque; struct IscsiTask iTask; struct unmap_list list; int r = 0; if (!is_byte_request_lun_aligned(offset, bytes, iscsilun)) { return -ENOTSUP; } if (!iscsilun->lbp.lbpu) { /* UNMAP is not supported by the target */ return 0; } list.lba = offset / iscsilun->block_size; list.num = bytes / iscsilun->block_size; iscsi_co_init_iscsitask(iscsilun, &iTask); qemu_mutex_lock(&iscsilun->mutex); retry: if (iscsi_unmap_task(iscsilun->iscsi, iscsilun->lun, 0, 0, &list, 1, iscsi_co_generic_cb, &iTask) == NULL) { r = -ENOMEM; goto out_unlock; } while (!iTask.complete) { iscsi_set_events(iscsilun); qemu_mutex_unlock(&iscsilun->mutex); qemu_coroutine_yield(); qemu_mutex_lock(&iscsilun->mutex); } if (iTask.task != NULL) { scsi_free_scsi_task(iTask.task); iTask.task = NULL; } if (iTask.do_retry) { iTask.complete = 0; goto retry; } if (iTask.status == SCSI_STATUS_CHECK_CONDITION) { /* the target might fail with a check condition if it is not happy with the alignment of the UNMAP request we silently fail in this case */ goto out_unlock; } if (iTask.status != SCSI_STATUS_GOOD) { r = iTask.err_code; goto out_unlock; } iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS, bytes >> BDRV_SECTOR_BITS); out_unlock: qemu_mutex_unlock(&iscsilun->mutex); return r; }"} {"target": 1, "idx": 3003, "func": "int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, int mmu_idx) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; #if !defined(CONFIG_USER_ONLY) hwaddr physical; int prot; int access_type; #endif int ret = 0; #if 0 log_cpu_state(cs, 0); #endif qemu_log_mask(CPU_LOG_MMU, \"%s pc \" TARGET_FMT_lx \" ad %\" VADDR_PRIx \" rw %d mmu_idx %d\\n\", __func__, env->active_tc.PC, address, rw, mmu_idx); /* data access */ #if !defined(CONFIG_USER_ONLY) /* XXX: put correct access by using cpu_restore_state() correctly */ access_type = ACCESS_INT; ret = get_physical_address(env, &physical, &prot, address, rw, access_type); qemu_log_mask(CPU_LOG_MMU, \"%s address=%\" VADDR_PRIx \" ret %d physical \" TARGET_FMT_plx \" prot %d\\n\", __func__, address, ret, physical, prot); if (ret == TLBRET_MATCH) { tlb_set_page(cs, address & TARGET_PAGE_MASK, physical & TARGET_PAGE_MASK, prot | PAGE_EXEC, mmu_idx, TARGET_PAGE_SIZE); ret = 0; } else if (ret < 0) #endif { raise_mmu_exception(env, address, rw, ret); ret = 1; } return ret; }"} {"target": 0, "idx": 3020, "func": "static ModuleTypeList *find_type(module_init_type type) { ModuleTypeList *l; init_types(); l = &init_type_list[type]; return l; }"} {"target": 0, "idx": 3040, "func": "static int qemu_suspend_requested(void) { int r = suspend_requested; suspend_requested = 0; return r; }"} {"target": 0, "idx": 3054, "func": "int opt_cpuflags(const char *opt, const char *arg) { #define CPUFLAG_MMX2 (AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMX2) #define CPUFLAG_3DNOW (AV_CPU_FLAG_3DNOW | AV_CPU_FLAG_MMX) #define CPUFLAG_3DNOWEXT (AV_CPU_FLAG_3DNOWEXT | CPUFLAG_3DNOW) #define CPUFLAG_SSE (AV_CPU_FLAG_SSE | CPUFLAG_MMX2) #define CPUFLAG_SSE2 (AV_CPU_FLAG_SSE2 | CPUFLAG_SSE) #define CPUFLAG_SSE2SLOW (AV_CPU_FLAG_SSE2SLOW | CPUFLAG_SSE2) #define CPUFLAG_SSE3 (AV_CPU_FLAG_SSE3 | CPUFLAG_SSE2) #define CPUFLAG_SSE3SLOW 
(AV_CPU_FLAG_SSE3SLOW | CPUFLAG_SSE3) #define CPUFLAG_SSSE3 (AV_CPU_FLAG_SSSE3 | CPUFLAG_SSE3) #define CPUFLAG_SSE4 (AV_CPU_FLAG_SSE4 | CPUFLAG_SSSE3) #define CPUFLAG_SSE42 (AV_CPU_FLAG_SSE42 | CPUFLAG_SSE4) #define CPUFLAG_AVX (AV_CPU_FLAG_AVX | CPUFLAG_SSE42) #define CPUFLAG_XOP (AV_CPU_FLAG_XOP | CPUFLAG_AVX) #define CPUFLAG_FMA4 (AV_CPU_FLAG_FMA4 | CPUFLAG_AVX) static const AVOption cpuflags_opts[] = { { \"flags\" , NULL, 0, AV_OPT_TYPE_FLAGS, { 0 }, INT64_MIN, INT64_MAX, .unit = \"flags\" }, { \"altivec\" , NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_ALTIVEC }, .unit = \"flags\" }, { \"mmx\" , NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_MMX }, .unit = \"flags\" }, { \"mmx2\" , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_MMX2 }, .unit = \"flags\" }, { \"sse\" , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE }, .unit = \"flags\" }, { \"sse2\" , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE2 }, .unit = \"flags\" }, { \"sse2slow\", NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE2SLOW }, .unit = \"flags\" }, { \"sse3\" , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE3 }, .unit = \"flags\" }, { \"sse3slow\", NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE3SLOW }, .unit = \"flags\" }, { \"ssse3\" , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSSE3 }, .unit = \"flags\" }, { \"atom\" , NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_ATOM }, .unit = \"flags\" }, { \"sse4.1\" , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE4 }, .unit = \"flags\" }, { \"sse4.2\" , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_SSE42 }, .unit = \"flags\" }, { \"avx\" , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_AVX }, .unit = \"flags\" }, { \"xop\" , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_XOP }, .unit = \"flags\" }, { \"fma4\" , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_FMA4 }, .unit = \"flags\" }, { \"3dnow\" , NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_3DNOW }, .unit = \"flags\" }, { \"3dnowext\", NULL, 0, AV_OPT_TYPE_CONST, { CPUFLAG_3DNOWEXT }, .unit = \"flags\" }, { NULL }, }; static const AVClass class = { .class_name = \"cpuflags\", .item_name = av_default_item_name, .option = cpuflags_opts, .version = LIBAVUTIL_VERSION_INT, }; int flags = av_get_cpu_flags(); int ret; const AVClass *pclass = &class; if ((ret = av_opt_eval_flags(&pclass, &cpuflags_opts[0], arg, &flags)) < 0) return ret; av_force_cpu_flags(flags); return 0; }"} {"target": 0, "idx": 3065, "func": "void ff_put_h264_qpel8_mc21_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_midv_qrt_8w_msa(src - (2 * stride) - 2, stride, dst, stride, 8, 0); }"} {"target": 1, "idx": 3070, "func": "static void serial_tx_done(void *opaque) { SerialState *s = opaque; if (s->tx_burst < 0) { uint16_t divider; if (s->divider) divider = s->divider; else divider = 1; /* We assume 10 bits/char, OK for this purpose. 
*/ s->tx_burst = THROTTLE_TX_INTERVAL * 1000 / (1000000 * 10 / (s->baudbase / divider)); } s->thr_ipending = 1; s->lsr |= UART_LSR_THRE; s->lsr |= UART_LSR_TEMT; serial_update_irq(s); }"} {"target": 1, "idx": 3076, "func": "void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd) { QXLDevSurfaceCreate surface; memset(&surface, 0, sizeof(surface)); dprint(1, \"%s/%d: %dx%d\\n\", __func__, ssd->qxl.id, surface_width(ssd->ds), surface_height(ssd->ds)); surface.format = SPICE_SURFACE_FMT_32_xRGB; surface.width = surface_width(ssd->ds); surface.height = surface_height(ssd->ds); surface.stride = -surface.width * 4; surface.mouse_mode = true; surface.flags = 0; surface.type = 0; surface.mem = (uintptr_t)ssd->buf; surface.group_id = MEMSLOT_GROUP_HOST; qemu_spice_create_primary_surface(ssd, 0, &surface, QXL_SYNC); }"} {"target": 1, "idx": 3082, "func": "static void init_proc_750fx (CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_7xx(env); /* XXX : not implemented */ spr_register(env, SPR_L2CR, \"L2CR\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, NULL, 0x00000000); /* Time base */ gen_tbl(env); /* Thermal management */ gen_spr_thrm(env); /* XXX : not implemented */ spr_register(env, SPR_750_THRM4, \"THRM4\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, \"HID0\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, \"HID1\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750FX_HID2, \"HID2\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); /* PowerPC 750fx & 750gx has 8 DBATs and 8 IBATs */ gen_high_BATs(env); init_excp_7x0(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env); }"} {"target": 1, "idx": 3098, "func": "void st_print_trace_file_status(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...)) { stream_printf(stream, \"Trace file \\\"%s\\\" %s.\\n\", trace_file_name, trace_file_enabled ? 
\"on\" : \"off\"); }"} {"target": 0, "idx": 3117, "func": "static int mkv_write_header(AVFormatContext *s) { MatroskaMuxContext *mkv = s->priv_data; AVIOContext *pb = s->pb; ebml_master ebml_header; AVDictionaryEntry *tag; int ret, i, version = 2; int64_t creation_time; if (!strcmp(s->oformat->name, \"webm\")) mkv->mode = MODE_WEBM; else mkv->mode = MODE_MATROSKAv2; if (mkv->mode != MODE_WEBM || av_dict_get(s->metadata, \"stereo_mode\", NULL, 0) || av_dict_get(s->metadata, \"alpha_mode\", NULL, 0)) version = 4; if (s->nb_streams > MAX_TRACKS) { av_log(s, AV_LOG_ERROR, \"At most %d streams are supported for muxing in Matroska\\n\", MAX_TRACKS); return AVERROR(EINVAL); } for (i = 0; i < s->nb_streams; i++) { if (s->streams[i]->codecpar->codec_id == AV_CODEC_ID_ATRAC3 || s->streams[i]->codecpar->codec_id == AV_CODEC_ID_COOK || s->streams[i]->codecpar->codec_id == AV_CODEC_ID_RA_288 || s->streams[i]->codecpar->codec_id == AV_CODEC_ID_SIPR || s->streams[i]->codecpar->codec_id == AV_CODEC_ID_RV10 || s->streams[i]->codecpar->codec_id == AV_CODEC_ID_RV20) { av_log(s, AV_LOG_ERROR, \"The Matroska muxer does not yet support muxing %s\\n\", avcodec_get_name(s->streams[i]->codecpar->codec_id)); return AVERROR_PATCHWELCOME; } if (s->streams[i]->codecpar->codec_id == AV_CODEC_ID_OPUS || av_dict_get(s->streams[i]->metadata, \"stereo_mode\", NULL, 0) || av_dict_get(s->streams[i]->metadata, \"alpha_mode\", NULL, 0)) version = 4; } mkv->tracks = av_mallocz_array(s->nb_streams, sizeof(*mkv->tracks)); if (!mkv->tracks) { ret = AVERROR(ENOMEM); goto fail; } ebml_header = start_ebml_master(pb, EBML_ID_HEADER, 0); put_ebml_uint (pb, EBML_ID_EBMLVERSION , 1); put_ebml_uint (pb, EBML_ID_EBMLREADVERSION , 1); put_ebml_uint (pb, EBML_ID_EBMLMAXIDLENGTH , 4); put_ebml_uint (pb, EBML_ID_EBMLMAXSIZELENGTH , 8); put_ebml_string (pb, EBML_ID_DOCTYPE , s->oformat->name); put_ebml_uint (pb, EBML_ID_DOCTYPEVERSION , version); put_ebml_uint (pb, EBML_ID_DOCTYPEREADVERSION , 2); end_ebml_master(pb, ebml_header); mkv->segment = start_ebml_master(pb, MATROSKA_ID_SEGMENT, 0); mkv->segment_offset = avio_tell(pb); // we write 2 seek heads - one at the end of the file to point to each // cluster, and one at the beginning to point to all other level one // elements (including the seek head at the end of the file), which // isn't more than 10 elements if we only write one of each other // currently defined level 1 element mkv->main_seekhead = mkv_start_seekhead(pb, mkv->segment_offset, 10); if (!mkv->main_seekhead) { ret = AVERROR(ENOMEM); goto fail; } ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_INFO, avio_tell(pb)); if (ret < 0) goto fail; ret = start_ebml_master_crc32(pb, &mkv->info_bc, mkv, &mkv->info, MATROSKA_ID_INFO, 0); if (ret < 0) return ret; pb = mkv->info_bc; put_ebml_uint(pb, MATROSKA_ID_TIMECODESCALE, 1000000); if ((tag = av_dict_get(s->metadata, \"title\", NULL, 0))) put_ebml_string(pb, MATROSKA_ID_TITLE, tag->value); if (!(s->flags & AVFMT_FLAG_BITEXACT)) { put_ebml_string(pb, MATROSKA_ID_MUXINGAPP, LIBAVFORMAT_IDENT); if ((tag = av_dict_get(s->metadata, \"encoding_tool\", NULL, 0))) put_ebml_string(pb, MATROSKA_ID_WRITINGAPP, tag->value); else put_ebml_string(pb, MATROSKA_ID_WRITINGAPP, LIBAVFORMAT_IDENT); if (mkv->mode != MODE_WEBM) { uint32_t segment_uid[4]; AVLFG lfg; av_lfg_init(&lfg, av_get_random_seed()); for (i = 0; i < 4; i++) segment_uid[i] = av_lfg_get(&lfg); put_ebml_binary(pb, MATROSKA_ID_SEGMENTUID, segment_uid, 16); } } else { const char *ident = \"Lavf\"; put_ebml_string(pb, 
MATROSKA_ID_MUXINGAPP , ident); put_ebml_string(pb, MATROSKA_ID_WRITINGAPP, ident); } if (ff_parse_creation_time_metadata(s, &creation_time, 0) > 0) { // Adjust time so it's relative to 2001-01-01 and convert to nanoseconds. int64_t date_utc = (creation_time - 978307200000000LL) * 1000; uint8_t date_utc_buf[8]; AV_WB64(date_utc_buf, date_utc); put_ebml_binary(pb, MATROSKA_ID_DATEUTC, date_utc_buf, 8); } // reserve space for the duration mkv->duration = 0; mkv->duration_offset = avio_tell(pb); if (!mkv->is_live) { int64_t metadata_duration = get_metadata_duration(s); if (s->duration > 0) { int64_t scaledDuration = av_rescale(s->duration, 1000, AV_TIME_BASE); put_ebml_float(pb, MATROSKA_ID_DURATION, scaledDuration); av_log(s, AV_LOG_DEBUG, \"Write early duration from recording time = %\" PRIu64 \"\\n\", scaledDuration); } else if (metadata_duration > 0) { int64_t scaledDuration = av_rescale(metadata_duration, 1000, AV_TIME_BASE); put_ebml_float(pb, MATROSKA_ID_DURATION, scaledDuration); av_log(s, AV_LOG_DEBUG, \"Write early duration from metadata = %\" PRIu64 \"\\n\", scaledDuration); } else { put_ebml_void(pb, 11); // assumes double-precision float to be written } } if ((s->pb->seekable & AVIO_SEEKABLE_NORMAL) && !mkv->is_live) end_ebml_master_crc32_preliminary(s->pb, &mkv->info_bc, mkv, mkv->info); else end_ebml_master_crc32(s->pb, &mkv->info_bc, mkv, mkv->info); pb = s->pb; // initialize stream_duration fields mkv->stream_durations = av_mallocz(s->nb_streams * sizeof(int64_t)); mkv->stream_duration_offsets = av_mallocz(s->nb_streams * sizeof(int64_t)); ret = mkv_write_tracks(s); if (ret < 0) goto fail; for (i = 0; i < s->nb_chapters; i++) mkv->chapter_id_offset = FFMAX(mkv->chapter_id_offset, 1LL - s->chapters[i]->id); ret = mkv_write_chapters(s); if (ret < 0) goto fail; if (mkv->mode != MODE_WEBM) { ret = mkv_write_attachments(s); if (ret < 0) goto fail; } ret = mkv_write_tags(s); if (ret < 0) goto fail; if (!(s->pb->seekable & AVIO_SEEKABLE_NORMAL) && !mkv->is_live) mkv_write_seekhead(pb, mkv); mkv->cues = mkv_start_cues(mkv->segment_offset); if (!mkv->cues) { ret = AVERROR(ENOMEM); goto fail; } if ((pb->seekable & AVIO_SEEKABLE_NORMAL) && mkv->reserve_cues_space) { mkv->cues_pos = avio_tell(pb); put_ebml_void(pb, mkv->reserve_cues_space); } av_init_packet(&mkv->cur_audio_pkt); mkv->cur_audio_pkt.size = 0; mkv->cluster_pos = -1; avio_flush(pb); // start a new cluster every 5 MB or 5 sec, or 32k / 1 sec for streaming or // after 4k and on a keyframe if (pb->seekable & AVIO_SEEKABLE_NORMAL) { if (mkv->cluster_time_limit < 0) mkv->cluster_time_limit = 5000; if (mkv->cluster_size_limit < 0) mkv->cluster_size_limit = 5 * 1024 * 1024; } else { if (mkv->cluster_time_limit < 0) mkv->cluster_time_limit = 1000; if (mkv->cluster_size_limit < 0) mkv->cluster_size_limit = 32 * 1024; } return 0; fail: mkv_free(mkv); return ret; }"} {"target": 1, "idx": 3130, "func": "e1000_can_receive(void *opaque) { E1000State *s = opaque; return (!(s->mac_reg[RCTL] & E1000_RCTL_EN) || s->mac_reg[RDH] != s->mac_reg[RDT]); }"} {"target": 0, "idx": 3153, "func": "av_cold void ff_sws_init_swScale_mmx(SwsContext *c) { int cpu_flags = av_get_cpu_flags(); #if HAVE_INLINE_ASM if (cpu_flags & AV_CPU_FLAG_MMX) sws_init_swScale_MMX(c); #if HAVE_MMXEXT_INLINE if (cpu_flags & AV_CPU_FLAG_MMXEXT) sws_init_swScale_MMX2(c); #endif #endif /* HAVE_INLINE_ASM */ #if HAVE_YASM #define ASSIGN_SCALE_FUNC2(hscalefn, filtersize, opt1, opt2) do { \\ if (c->srcBpc == 8) { \\ hscalefn = c->dstBpc <= 10 ? 
ff_hscale8to15_ ## filtersize ## _ ## opt2 : \\ ff_hscale8to19_ ## filtersize ## _ ## opt1; \\ } else if (c->srcBpc == 9) { \\ hscalefn = c->dstBpc <= 10 ? ff_hscale9to15_ ## filtersize ## _ ## opt2 : \\ ff_hscale9to19_ ## filtersize ## _ ## opt1; \\ } else if (c->srcBpc == 10) { \\ hscalefn = c->dstBpc <= 10 ? ff_hscale10to15_ ## filtersize ## _ ## opt2 : \\ ff_hscale10to19_ ## filtersize ## _ ## opt1; \\ } else /* c->srcBpc == 16 */ { \\ hscalefn = c->dstBpc <= 10 ? ff_hscale16to15_ ## filtersize ## _ ## opt2 : \\ ff_hscale16to19_ ## filtersize ## _ ## opt1; \\ } \\ } while (0) #define ASSIGN_MMX_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \\ switch (filtersize) { \\ case 4: ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \\ case 8: ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \\ default: ASSIGN_SCALE_FUNC2(hscalefn, X, opt1, opt2); break; \\ } #define ASSIGN_VSCALEX_FUNC(vscalefn, opt, do_16_case, condition_8bit) \\ switch(c->dstBpc){ \\ case 16: do_16_case; break; \\ case 10: if (!isBE(c->dstFormat)) vscalefn = ff_yuv2planeX_10_ ## opt; break; \\ case 9: if (!isBE(c->dstFormat)) vscalefn = ff_yuv2planeX_9_ ## opt; break; \\ default: if (condition_8bit) vscalefn = ff_yuv2planeX_8_ ## opt; break; \\ } #define ASSIGN_VSCALE_FUNC(vscalefn, opt1, opt2, opt2chk) \\ switch(c->dstBpc){ \\ case 16: if (!isBE(c->dstFormat)) vscalefn = ff_yuv2plane1_16_ ## opt1; break; \\ case 10: if (!isBE(c->dstFormat) && opt2chk) vscalefn = ff_yuv2plane1_10_ ## opt2; break; \\ case 9: if (!isBE(c->dstFormat) && opt2chk) vscalefn = ff_yuv2plane1_9_ ## opt2; break; \\ default: vscalefn = ff_yuv2plane1_8_ ## opt1; break; \\ } #define case_rgb(x, X, opt) \\ case PIX_FMT_ ## X: \\ c->lumToYV12 = ff_ ## x ## ToY_ ## opt; \\ if (!c->chrSrcHSubSample) \\ c->chrToYV12 = ff_ ## x ## ToUV_ ## opt; \\ break #if ARCH_X86_32 if (cpu_flags & AV_CPU_FLAG_MMX) { ASSIGN_MMX_SCALE_FUNC(c->hyScale, c->hLumFilterSize, mmx, mmx); ASSIGN_MMX_SCALE_FUNC(c->hcScale, c->hChrFilterSize, mmx, mmx); ASSIGN_VSCALE_FUNC(c->yuv2plane1, mmx, mmx2, cpu_flags & AV_CPU_FLAG_MMXEXT); switch (c->srcFormat) { case PIX_FMT_Y400A: c->lumToYV12 = ff_yuyvToY_mmx; if (c->alpPixBuf) c->alpToYV12 = ff_uyvyToY_mmx; break; case PIX_FMT_YUYV422: c->lumToYV12 = ff_yuyvToY_mmx; c->chrToYV12 = ff_yuyvToUV_mmx; break; case PIX_FMT_UYVY422: c->lumToYV12 = ff_uyvyToY_mmx; c->chrToYV12 = ff_uyvyToUV_mmx; break; case PIX_FMT_NV12: c->chrToYV12 = ff_nv12ToUV_mmx; break; case PIX_FMT_NV21: c->chrToYV12 = ff_nv21ToUV_mmx; break; case_rgb(rgb24, RGB24, mmx); case_rgb(bgr24, BGR24, mmx); case_rgb(bgra, BGRA, mmx); case_rgb(rgba, RGBA, mmx); case_rgb(abgr, ABGR, mmx); case_rgb(argb, ARGB, mmx); default: break; } } if (cpu_flags & AV_CPU_FLAG_MMXEXT) { ASSIGN_VSCALEX_FUNC(c->yuv2planeX, mmx2, , 1); } #endif /* ARCH_X86_32 */ #define ASSIGN_SSE_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \\ switch (filtersize) { \\ case 4: ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \\ case 8: ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \\ default: if (filtersize & 4) ASSIGN_SCALE_FUNC2(hscalefn, X4, opt1, opt2); \\ else ASSIGN_SCALE_FUNC2(hscalefn, X8, opt1, opt2); \\ break; \\ } if (cpu_flags & AV_CPU_FLAG_SSE2) { ASSIGN_SSE_SCALE_FUNC(c->hyScale, c->hLumFilterSize, sse2, sse2); ASSIGN_SSE_SCALE_FUNC(c->hcScale, c->hChrFilterSize, sse2, sse2); ASSIGN_VSCALEX_FUNC(c->yuv2planeX, sse2, , HAVE_ALIGNED_STACK || ARCH_X86_64); ASSIGN_VSCALE_FUNC(c->yuv2plane1, sse2, sse2, 1); switch (c->srcFormat) { case PIX_FMT_Y400A: c->lumToYV12 = ff_yuyvToY_sse2; if 
(c->alpPixBuf) c->alpToYV12 = ff_uyvyToY_sse2; break; case PIX_FMT_YUYV422: c->lumToYV12 = ff_yuyvToY_sse2; c->chrToYV12 = ff_yuyvToUV_sse2; break; case PIX_FMT_UYVY422: c->lumToYV12 = ff_uyvyToY_sse2; c->chrToYV12 = ff_uyvyToUV_sse2; break; case PIX_FMT_NV12: c->chrToYV12 = ff_nv12ToUV_sse2; break; case PIX_FMT_NV21: c->chrToYV12 = ff_nv21ToUV_sse2; break; case_rgb(rgb24, RGB24, sse2); case_rgb(bgr24, BGR24, sse2); case_rgb(bgra, BGRA, sse2); case_rgb(rgba, RGBA, sse2); case_rgb(abgr, ABGR, sse2); case_rgb(argb, ARGB, sse2); default: break; } } if (cpu_flags & AV_CPU_FLAG_SSSE3) { ASSIGN_SSE_SCALE_FUNC(c->hyScale, c->hLumFilterSize, ssse3, ssse3); ASSIGN_SSE_SCALE_FUNC(c->hcScale, c->hChrFilterSize, ssse3, ssse3); switch (c->srcFormat) { case_rgb(rgb24, RGB24, ssse3); case_rgb(bgr24, BGR24, ssse3); default: break; } } if (cpu_flags & AV_CPU_FLAG_SSE4) { /* Xto15 don't need special sse4 functions */ ASSIGN_SSE_SCALE_FUNC(c->hyScale, c->hLumFilterSize, sse4, ssse3); ASSIGN_SSE_SCALE_FUNC(c->hcScale, c->hChrFilterSize, sse4, ssse3); ASSIGN_VSCALEX_FUNC(c->yuv2planeX, sse4, if (!isBE(c->dstFormat)) c->yuv2planeX = ff_yuv2planeX_16_sse4, HAVE_ALIGNED_STACK || ARCH_X86_64); if (c->dstBpc == 16 && !isBE(c->dstFormat)) c->yuv2plane1 = ff_yuv2plane1_16_sse4; } if (cpu_flags & AV_CPU_FLAG_AVX) { ASSIGN_VSCALEX_FUNC(c->yuv2planeX, avx, , HAVE_ALIGNED_STACK || ARCH_X86_64); ASSIGN_VSCALE_FUNC(c->yuv2plane1, avx, avx, 1); switch (c->srcFormat) { case PIX_FMT_YUYV422: c->chrToYV12 = ff_yuyvToUV_avx; break; case PIX_FMT_UYVY422: c->chrToYV12 = ff_uyvyToUV_avx; break; case PIX_FMT_NV12: c->chrToYV12 = ff_nv12ToUV_avx; break; case PIX_FMT_NV21: c->chrToYV12 = ff_nv21ToUV_avx; break; case_rgb(rgb24, RGB24, avx); case_rgb(bgr24, BGR24, avx); case_rgb(bgra, BGRA, avx); case_rgb(rgba, RGBA, avx); case_rgb(abgr, ABGR, avx); case_rgb(argb, ARGB, avx); default: break; } } #endif }"} {"target": 1, "idx": 3158, "func": "static unsigned tget(const uint8_t **p, int type, int le) { switch (type) { case TIFF_BYTE: return *(*p)++; case TIFF_SHORT: return tget_short(p, le); case TIFF_LONG: return tget_long(p, le); default: return UINT_MAX; } }"} {"target": 1, "idx": 3160, "func": "static int decode_hq_slice(AVCodecContext *avctx, void *arg) { int i, quant, level, orientation, quant_idx; uint8_t quants[MAX_DWT_LEVELS][4]; DiracContext *s = avctx->priv_data; DiracSlice *slice = arg; GetBitContext *gb = &slice->gb; skip_bits_long(gb, 8*s->highquality.prefix_bytes); quant_idx = get_bits(gb, 8); /* Slice quantization (slice_quantizers() in the specs) */ for (level = 0; level < s->wavelet_depth; level++) { for (orientation = !!level; orientation < 4; orientation++) { quant = FFMAX(quant_idx - s->lowdelay.quant[level][orientation], 0); quants[level][orientation] = quant; } } /* Luma + 2 Chroma planes */ for (i = 0; i < 3; i++) { int64_t length = s->highquality.size_scaler * get_bits(gb, 8); int64_t bits_left = 8 * length; int64_t bits_end = get_bits_count(gb) + bits_left; if (bits_end >= INT_MAX) { av_log(s->avctx, AV_LOG_ERROR, \"end too far away\\n\"); return AVERROR_INVALIDDATA; } for (level = 0; level < s->wavelet_depth; level++) { for (orientation = !!level; orientation < 4; orientation++) { decode_subband(s, gb, quants[level][orientation], slice->slice_x, slice->slice_y, bits_end, &s->plane[i].band[level][orientation], NULL); } } skip_bits_long(gb, bits_end - get_bits_count(gb)); } return 0; }"} {"target": 1, "idx": 3161, "func": "static av_cold int amr_wb_encode_init(AVCodecContext *avctx) { AMRWBContext *s = 
avctx->priv_data; if (avctx->sample_rate != 16000) { av_log(avctx, AV_LOG_ERROR, \"Only 16000Hz sample rate supported\\n\"); return AVERROR(ENOSYS); } if (avctx->channels != 1) { av_log(avctx, AV_LOG_ERROR, \"Only mono supported\\n\"); return AVERROR(ENOSYS); } s->mode = get_wb_bitrate_mode(avctx->bit_rate, avctx); s->last_bitrate = avctx->bit_rate; avctx->frame_size = 320; avctx->coded_frame = avcodec_alloc_frame(); s->state = E_IF_init(); return 0; }"} {"target": 1, "idx": 3162, "func": "uint32_t div32(uint32_t *q_ptr, uint64_t num, uint32_t den) { *q_ptr = num / den; return num % den; }"} {"target": 1, "idx": 3176, "func": "void ff_fetch_timestamp(AVCodecParserContext *s, int off, int remove) { int i; s->dts = s->pts = AV_NOPTS_VALUE; s->pos = -1; s->offset = 0; for (i = 0; i < AV_PARSER_PTS_NB; i++) { if (s->cur_offset + off >= s->cur_frame_offset[i] && (s->frame_offset < s->cur_frame_offset[i] || (!s->frame_offset && !s->next_frame_offset)) && // first field/frame // check disabled since MPEG-TS does not send complete PES packets /*s->next_frame_offset + off <*/ s->cur_frame_end[i]){ s->dts = s->cur_frame_dts[i]; s->pts = s->cur_frame_pts[i]; s->pos = s->cur_frame_pos[i]; s->offset = s->next_frame_offset - s->cur_frame_offset[i]; if (remove) s->cur_frame_offset[i] = INT64_MAX; if (s->cur_offset + off < s->cur_frame_end[i]) break; } } }"} {"target": 1, "idx": 3179, "func": "static void simple_whitespace(void) { int i; struct { const char *encoded; LiteralQObject decoded; } test_cases[] = { { .encoded = \" [ 43 , 42 ]\", .decoded = QLIT_QLIST(((LiteralQObject[]){ QLIT_QINT(43), QLIT_QINT(42), { } })), }, { .encoded = \" [ 43 , { 'h' : 'b' }, [ ], 42 ]\", .decoded = QLIT_QLIST(((LiteralQObject[]){ QLIT_QINT(43), QLIT_QDICT(((LiteralQDictEntry[]){ { \"h\", QLIT_QSTR(\"b\") }, { }})), QLIT_QLIST(((LiteralQObject[]){ { }})), QLIT_QINT(42), { } })), }, { .encoded = \" [ 43 , { 'h' : 'b' , 'a' : 32 }, [ ], 42 ]\", .decoded = QLIT_QLIST(((LiteralQObject[]){ QLIT_QINT(43), QLIT_QDICT(((LiteralQDictEntry[]){ { \"h\", QLIT_QSTR(\"b\") }, { \"a\", QLIT_QINT(32) }, { }})), QLIT_QLIST(((LiteralQObject[]){ { }})), QLIT_QINT(42), { } })), }, { } }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QString *str; obj = qobject_from_json(test_cases[i].encoded, NULL); g_assert(compare_litqobj_to_qobj(&test_cases[i].decoded, obj) == 1); str = qobject_to_json(obj); qobject_decref(obj); obj = qobject_from_json(qstring_get_str(str), NULL); g_assert(compare_litqobj_to_qobj(&test_cases[i].decoded, obj) == 1); qobject_decref(obj); QDECREF(str); } }"} {"target": 1, "idx": 3180, "func": "static void gen_check_sr(DisasContext *dc, uint32_t sr, unsigned access) { if (!xtensa_option_bits_enabled(dc->config, sregnames[sr].opt_bits)) { if (sregnames[sr].name) { qemu_log(\"SR %s is not configured\\n\", sregnames[sr].name); } else { qemu_log(\"SR %d is not implemented\\n\", sr); } gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); } else if (!(sregnames[sr].access & access)) { static const char * const access_text[] = { [SR_R] = \"rsr\", [SR_W] = \"wsr\", [SR_X] = \"xsr\", }; assert(access < ARRAY_SIZE(access_text) && access_text[access]); qemu_log(\"SR %s is not available for %s\\n\", sregnames[sr].name, access_text[access]); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); } }"} {"target": 1, "idx": 3182, "func": "static void gen_lswi(DisasContext *ctx) { TCGv t0; TCGv_i32 t1, t2; int nb = NB(ctx->opcode); int start = rD(ctx->opcode); int ra = rA(ctx->opcode); int nr; if (nb == 0) nb = 32; nr = (nb + 3) / 4; 
if (unlikely(lsw_reg_in_range(start, nr, ra))) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX); return; } gen_set_access_type(ctx, ACCESS_INT); /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); t0 = tcg_temp_new(); gen_addr_register(ctx, t0); t1 = tcg_const_i32(nb); t2 = tcg_const_i32(start); gen_helper_lsw(cpu_env, t0, t1, t2); tcg_temp_free(t0); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); }"} {"target": 0, "idx": 3183, "func": "static int decode_frame(AVCodecContext *avctx, const uint8_t *databuf, float **out_samples) { ATRAC3Context *q = avctx->priv_data; int ret, i; uint8_t *ptr1; if (q->coding_mode == JOINT_STEREO) { /* channel coupling mode */ /* decode Sound Unit 1 */ init_get_bits(&q->gb, databuf, avctx->block_align * 8); ret = decode_channel_sound_unit(q, &q->gb, q->units, out_samples[0], 0, JOINT_STEREO); if (ret != 0) return ret; /* Framedata of the su2 in the joint-stereo mode is encoded in * reverse byte order so we need to swap it first. */ if (databuf == q->decoded_bytes_buffer) { uint8_t *ptr2 = q->decoded_bytes_buffer + avctx->block_align - 1; ptr1 = q->decoded_bytes_buffer; for (i = 0; i < avctx->block_align / 2; i++, ptr1++, ptr2--) FFSWAP(uint8_t, *ptr1, *ptr2); } else { const uint8_t *ptr2 = databuf + avctx->block_align - 1; for (i = 0; i < avctx->block_align; i++) q->decoded_bytes_buffer[i] = *ptr2--; } /* Skip the sync codes (0xF8). */ ptr1 = q->decoded_bytes_buffer; for (i = 4; *ptr1 == 0xF8; i++, ptr1++) { if (i >= avctx->block_align) return AVERROR_INVALIDDATA; } /* set the bitstream reader at the start of the second Sound Unit*/ init_get_bits8(&q->gb, ptr1, q->decoded_bytes_buffer + avctx->block_align - ptr1); /* Fill the Weighting coeffs delay buffer */ memmove(q->weighting_delay, &q->weighting_delay[2], 4 * sizeof(*q->weighting_delay)); q->weighting_delay[4] = get_bits1(&q->gb); q->weighting_delay[5] = get_bits(&q->gb, 3); for (i = 0; i < 4; i++) { q->matrix_coeff_index_prev[i] = q->matrix_coeff_index_now[i]; q->matrix_coeff_index_now[i] = q->matrix_coeff_index_next[i]; q->matrix_coeff_index_next[i] = get_bits(&q->gb, 2); } /* Decode Sound Unit 2. */ ret = decode_channel_sound_unit(q, &q->gb, &q->units[1], out_samples[1], 1, JOINT_STEREO); if (ret != 0) return ret; /* Reconstruct the channel coefficients. */ reverse_matrixing(out_samples[0], out_samples[1], q->matrix_coeff_index_prev, q->matrix_coeff_index_now); channel_weighting(out_samples[0], out_samples[1], q->weighting_delay); } else { /* single channels */ /* Decode the channel sound units. */ for (i = 0; i < avctx->channels; i++) { /* Set the bitstream reader at the start of a channel sound unit. */ init_get_bits(&q->gb, databuf + i * avctx->block_align / avctx->channels, avctx->block_align * 8 / avctx->channels); ret = decode_channel_sound_unit(q, &q->gb, &q->units[i], out_samples[i], i, q->coding_mode); if (ret != 0) return ret; } } /* Apply the iQMF synthesis filter. 
*/ for (i = 0; i < avctx->channels; i++) { float *p1 = out_samples[i]; float *p2 = p1 + 256; float *p3 = p2 + 256; float *p4 = p3 + 256; ff_atrac_iqmf(p1, p2, 256, p1, q->units[i].delay_buf1, q->temp_buf); ff_atrac_iqmf(p4, p3, 256, p3, q->units[i].delay_buf2, q->temp_buf); ff_atrac_iqmf(p1, p3, 512, p1, q->units[i].delay_buf3, q->temp_buf); } return 0; }"} {"target": 1, "idx": 3184, "func": "uint32_t HELPER(shr_cc)(CPUM68KState *env, uint32_t val, uint32_t shift) { uint64_t temp; uint32_t result; shift &= 63; temp = (uint64_t)val << 32 >> shift; result = temp >> 32; env->cc_c = (temp >> 31) & 1; env->cc_n = result; env->cc_z = result; env->cc_v = 0; env->cc_x = shift ? env->cc_c : env->cc_x; return result; }"} {"target": 1, "idx": 3205, "func": "static void term_down_char(void) { if (term_hist_entry == TERM_MAX_CMDS - 1 || term_hist_entry == -1) return; if (term_history[++term_hist_entry] != NULL) { strcpy(term_cmd_buf, term_history[term_hist_entry]); } else { term_hist_entry = -1; } term_printf(\"\\n\"); term_print_cmdline(term_cmd_buf); term_cmd_buf_index = term_cmd_buf_size = strlen(term_cmd_buf); }"} {"target": 0, "idx": 3211, "func": "static int decode_cabac_mb_cbp_luma( H264Context *h) { int cbp_b, cbp_a, ctx, cbp = 0; cbp_a = h->slice_table[h->left_mb_xy[0]] == h->slice_num ? h->left_cbp : -1; cbp_b = h->slice_table[h->top_mb_xy] == h->slice_num ? h->top_cbp : -1; ctx = !(cbp_a & 0x02) + 2 * !(cbp_b & 0x04); cbp |= get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]); ctx = !(cbp & 0x01) + 2 * !(cbp_b & 0x08); cbp |= get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]) << 1; ctx = !(cbp_a & 0x08) + 2 * !(cbp & 0x01); cbp |= get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]) << 2; ctx = !(cbp & 0x04) + 2 * !(cbp & 0x02); cbp |= get_cabac_noinline(&h->cabac, &h->cabac_state[73 + ctx]) << 3; return cbp; }"} {"target": 1, "idx": 3227, "func": "void ff_qsv_decode_reset(AVCodecContext *avctx, QSVContext *q) { QSVFrame *cur; AVPacket pkt; int ret = 0; mfxVideoParam param = { { 0 } }; if (q->reinit_pending) { close_decoder(q); } else if (q->engine_ready) { ret = MFXVideoDECODE_GetVideoParam(q->session, ¶m); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"MFX decode get param error %d\\n\", ret); } ret = MFXVideoDECODE_Reset(q->session, ¶m); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"MFX decode reset error %d\\n\", ret); } /* Free all frames*/ cur = q->work_frames; while (cur) { q->work_frames = cur->next; av_frame_free(&cur->frame); av_freep(&cur); cur = q->work_frames; } } /* Reset output surfaces */ av_fifo_reset(q->async_fifo); /* Reset input packets fifo */ while (av_fifo_size(q->pkt_fifo)) { av_fifo_generic_read(q->pkt_fifo, &pkt, sizeof(pkt), NULL); av_packet_unref(&pkt); } /* Reset input bitstream fifo */ av_fifo_reset(q->input_fifo); }"} {"target": 1, "idx": 3228, "func": "target_ulong helper_sdiv(target_ulong a, target_ulong b) { int64_t x0; int32_t x1; x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32); x1 = (b & 0xffffffff); if (x1 == 0) { raise_exception(TT_DIV_ZERO); } x0 = x0 / x1; if ((int32_t) x0 != x0) { env->cc_src2 = 1; return x0 < 0? 
0x80000000: 0x7fffffff; } else { env->cc_src2 = 0; return x0; } }"} {"target": 1, "idx": 3236, "func": "static void xhci_reset(DeviceState *dev) { XHCIState *xhci = XHCI(dev); int i; trace_usb_xhci_reset(); if (!(xhci->usbsts & USBSTS_HCH)) { DPRINTF(\"xhci: reset while running!\\n\"); } xhci->usbcmd = 0; xhci->usbsts = USBSTS_HCH; xhci->dnctrl = 0; xhci->crcr_low = 0; xhci->crcr_high = 0; xhci->dcbaap_low = 0; xhci->dcbaap_high = 0; xhci->config = 0; for (i = 0; i < xhci->numslots; i++) { xhci_disable_slot(xhci, i+1); } for (i = 0; i < xhci->numports; i++) { xhci_port_update(xhci->ports + i, 0); } for (i = 0; i < xhci->numintrs; i++) { xhci->intr[i].iman = 0; xhci->intr[i].imod = 0; xhci->intr[i].erstsz = 0; xhci->intr[i].erstba_low = 0; xhci->intr[i].erstba_high = 0; xhci->intr[i].erdp_low = 0; xhci->intr[i].erdp_high = 0; xhci->intr[i].msix_used = 0; xhci->intr[i].er_ep_idx = 0; xhci->intr[i].er_pcs = 1; xhci->intr[i].er_full = 0; xhci->intr[i].ev_buffer_put = 0; xhci->intr[i].ev_buffer_get = 0; } xhci->mfindex_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); xhci_mfwrap_update(xhci); }"} {"target": 1, "idx": 3237, "func": "int ppc_find_by_pvr (uint32_t pvr, ppc_def_t **def) { int i, ret; ret = -1; *def = NULL; for (i = 0; ppc_defs[i].name != NULL; i++) { if ((pvr & ppc_defs[i].pvr_mask) == (ppc_defs[i].pvr & ppc_defs[i].pvr_mask)) { *def = &ppc_defs[i]; ret = 0; break; } } return ret; }"} {"target": 0, "idx": 3256, "func": "void helper_tlb_update(uint32_t T0) { #if !defined(CONFIG_USER_ONLY) uint32_t vaddr; uint32_t srs = env->pregs[PR_SRS]; if (srs != 1 && srs != 2) return; vaddr = cris_mmu_tlb_latest_update(env, T0); D(printf(\"flush old_vaddr=%x vaddr=%x T0=%x\\n\", vaddr, env->sregs[SFR_R_MM_CAUSE] & TARGET_PAGE_MASK, T0)); tlb_flush_page(env, vaddr); #endif }"} {"target": 0, "idx": 3266, "func": "static void patch_pcihp(int slot, uint8_t *ssdt_ptr, uint32_t eject) { ssdt_ptr[ACPI_PCIHP_OFFSET_HEX] = acpi_get_hex(slot >> 4); ssdt_ptr[ACPI_PCIHP_OFFSET_HEX + 1] = acpi_get_hex(slot); ssdt_ptr[ACPI_PCIHP_OFFSET_ID] = slot; ssdt_ptr[ACPI_PCIHP_OFFSET_ADR + 2] = slot; /* Runtime patching of ACPI_EJ0: to disable hotplug for a slot, * replace the method name: _EJ0 by ACPI_EJ0_. 
*/ /* Sanity check */ assert(!memcmp(ssdt_ptr + ACPI_PCIHP_OFFSET_EJ0, \"_EJ0\", 4)); if (!eject) { memcpy(ssdt_ptr + ACPI_PCIHP_OFFSET_EJ0, \"EJ0_\", 4); } }"} {"target": 0, "idx": 3274, "func": "static float ssim_plane(uint8_t *main, int main_stride, uint8_t *ref, int ref_stride, int width, int height, void *temp) { int z = 0; int x, y; float ssim = 0.0; int (*sum0)[4] = temp; int (*sum1)[4] = sum0 + (width >> 2) + 3; width >>= 2; height >>= 2; for (y = 1; y < height; y++) { for (; z <= y; z++) { FFSWAP(void*, sum0, sum1); for (x = 0; x < width; x+=2) ssim_4x4x2_core(&main[4 * (x + z * main_stride)], main_stride, &ref[4 * (x + z * ref_stride)], ref_stride, &sum0[x]); } ssim += ssim_endn(sum0, sum1, width - 1); } return ssim / ((height - 1) * (width - 1)); }"} {"target": 1, "idx": 3278, "func": "static void nvme_instance_init(Object *obj) { object_property_add(obj, \"bootindex\", \"int32\", nvme_get_bootindex, nvme_set_bootindex, NULL, NULL, NULL); object_property_set_int(obj, -1, \"bootindex\", NULL); }"} {"target": 1, "idx": 3279, "func": "static int blkverify_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVBlkverifyState *s = bs->opaque; QemuOpts *opts; Error *local_err = NULL; int ret; opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto fail; /* Open the raw file */ bs->file = bdrv_open_child(qemu_opt_get(opts, \"x-raw\"), options, \"raw\", bs, &child_file, false, &local_err); if (local_err) { ret = -EINVAL; error_propagate(errp, local_err); goto fail; /* Open the test file */ s->test_file = bdrv_open_child(qemu_opt_get(opts, \"x-image\"), options, \"test\", bs, &child_format, false, &local_err); if (local_err) { ret = -EINVAL; error_propagate(errp, local_err); goto fail; ret = 0; fail: qemu_opts_del(opts); return ret;"} {"target": 1, "idx": 3288, "func": "static void tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, int s_bits, int tlb_offset) { TCGReg base = TCG_AREG0; /* Should generate something like the following: * pre-v7: * shr tmp, addr_reg, #TARGET_PAGE_BITS (1) * add r2, env, #off & 0xff00 * and r0, tmp, #(CPU_TLB_SIZE - 1) (2) * add r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS (3) * ldr r0, [r2, #off & 0xff]! (4) * tst addr_reg, #s_mask * cmpeq r0, tmp, lsl #TARGET_PAGE_BITS (5) * * v7 (not implemented yet): * ubfx r2, addr_reg, #TARGET_PAGE_BITS, #CPU_TLB_BITS (1) * movw tmp, #~TARGET_PAGE_MASK & ~s_mask * movw r0, #off * add r2, env, r2, lsl #CPU_TLB_ENTRY_BITS (2) * bic tmp, addr_reg, tmp * ldr r0, [r2, r0]! (3) * cmp r0, tmp (4) */ # if CPU_TLB_BITS > 8 # error # endif tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS)); /* We assume that the offset is contained within 16 bits. */ assert((tlb_offset & ~0xffff) == 0); if (tlb_offset > 0xff) { tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R2, base, (24 << 7) | (tlb_offset >> 8)); tlb_offset &= 0xff; base = TCG_REG_R2; } tcg_out_dat_imm(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1); tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R2, base, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS)); /* Load the tlb comparator. Use ldrd if needed and available, but due to how the pointer needs setting up, ldm isn't useful. Base arm5 doesn't have ldrd, but armv5te does. 
*/ if (use_armv6_instructions && TARGET_LONG_BITS == 64) { tcg_out_memop_8(s, COND_AL, INSN_LDRD_IMM, TCG_REG_R0, TCG_REG_R2, tlb_offset, 1, 1); } else { tcg_out_memop_12(s, COND_AL, INSN_LDR_IMM, TCG_REG_R0, TCG_REG_R2, tlb_offset, 1, 1); if (TARGET_LONG_BITS == 64) { tcg_out_memop_12(s, COND_AL, INSN_LDR_IMM, TCG_REG_R1, TCG_REG_R2, 4, 1, 0); } } /* Check alignment. */ if (s_bits) { tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, (1 << s_bits) - 1); } tcg_out_dat_reg(s, (s_bits ? COND_EQ : COND_AL), ARITH_CMP, 0, TCG_REG_R0, TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS)); if (TARGET_LONG_BITS == 64) { tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R1, addrhi, SHIFT_IMM_LSL(0)); } }"} {"target": 0, "idx": 3290, "func": "static void qdm2_decode_super_block(QDM2Context *q) { GetBitContext gb; QDM2SubPacket header, *packet; int i, packet_bytes, sub_packet_size, sub_packets_D; unsigned int next_index = 0; memset(q->tone_level_idx_hi1, 0, sizeof(q->tone_level_idx_hi1)); memset(q->tone_level_idx_mid, 0, sizeof(q->tone_level_idx_mid)); memset(q->tone_level_idx_hi2, 0, sizeof(q->tone_level_idx_hi2)); q->sub_packets_B = 0; sub_packets_D = 0; average_quantized_coeffs(q); // average elements in quantized_coeffs[max_ch][10][8] init_get_bits(&gb, q->compressed_data, q->compressed_size * 8); qdm2_decode_sub_packet_header(&gb, &header); if (header.type < 2 || header.type >= 8) { q->has_errors = 1; av_log(NULL, AV_LOG_ERROR, \"bad superblock type\\n\"); return; } q->superblocktype_2_3 = (header.type == 2 || header.type == 3); packet_bytes = (q->compressed_size - get_bits_count(&gb) / 8); init_get_bits(&gb, header.data, header.size * 8); if (header.type == 2 || header.type == 4 || header.type == 5) { int csum = 257 * get_bits(&gb, 8); csum += 2 * get_bits(&gb, 8); csum = qdm2_packet_checksum(q->compressed_data, q->checksum_size, csum); if (csum != 0) { q->has_errors = 1; av_log(NULL, AV_LOG_ERROR, \"bad packet checksum\\n\"); return; } } q->sub_packet_list_B[0].packet = NULL; q->sub_packet_list_D[0].packet = NULL; for (i = 0; i < 6; i++) if (--q->fft_level_exp[i] < 0) q->fft_level_exp[i] = 0; for (i = 0; packet_bytes > 0; i++) { int j; if (i >= FF_ARRAY_ELEMS(q->sub_packet_list_A)) { SAMPLES_NEEDED_2(\"too many packet bytes\"); return; } q->sub_packet_list_A[i].next = NULL; if (i > 0) { q->sub_packet_list_A[i - 1].next = &q->sub_packet_list_A[i]; /* seek to next block */ init_get_bits(&gb, header.data, header.size * 8); skip_bits(&gb, next_index * 8); if (next_index >= header.size) break; } /* decode subpacket */ packet = &q->sub_packets[i]; qdm2_decode_sub_packet_header(&gb, packet); next_index = packet->size + get_bits_count(&gb) / 8; sub_packet_size = ((packet->size > 0xff) ? 
1 : 0) + packet->size + 2; if (packet->type == 0) break; if (sub_packet_size > packet_bytes) { if (packet->type != 10 && packet->type != 11 && packet->type != 12) break; packet->size += packet_bytes - sub_packet_size; } packet_bytes -= sub_packet_size; /* add subpacket to 'all subpackets' list */ q->sub_packet_list_A[i].packet = packet; /* add subpacket to related list */ if (packet->type == 8) { SAMPLES_NEEDED_2(\"packet type 8\"); return; } else if (packet->type >= 9 && packet->type <= 12) { /* packets for MPEG Audio like Synthesis Filter */ QDM2_LIST_ADD(q->sub_packet_list_D, sub_packets_D, packet); } else if (packet->type == 13) { for (j = 0; j < 6; j++) q->fft_level_exp[j] = get_bits(&gb, 6); } else if (packet->type == 14) { for (j = 0; j < 6; j++) q->fft_level_exp[j] = qdm2_get_vlc(&gb, &fft_level_exp_vlc, 0, 2); } else if (packet->type == 15) { SAMPLES_NEEDED_2(\"packet type 15\") return; } else if (packet->type >= 16 && packet->type < 48 && !fft_subpackets[packet->type - 16]) { /* packets for FFT */ QDM2_LIST_ADD(q->sub_packet_list_B, q->sub_packets_B, packet); } } // Packet bytes loop if (q->sub_packet_list_D[0].packet != NULL) { process_synthesis_subpackets(q, q->sub_packet_list_D); q->do_synth_filter = 1; } else if (q->do_synth_filter) { process_subpacket_10(q, NULL); process_subpacket_11(q, NULL); process_subpacket_12(q, NULL); } }"} {"target": 1, "idx": 3293, "func": "static void hScale8To15_c(SwsContext *c, int16_t *dst, int dstW, const uint8_t *src, const int16_t *filter, const int16_t *filterPos, int filterSize) { int i; for (i=0; i>7, (1<<15)-1); // the cubic equation does overflow ... //dst[i] = val>>7; } }"} {"target": 1, "idx": 3320, "func": "MigrationState *migrate_get_current(void) { static MigrationState current_migration = { .state = MIG_STATE_NONE, .bandwidth_limit = MAX_THROTTLE, .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE, .mbps = -1, }; return ¤t_migration; }"} {"target": 1, "idx": 3322, "func": "qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt, size_t offset, size_t bytes, bool do_send) { size_t done = 0; ssize_t ret; while (done < bytes) { ret = iov_send_recv(sockfd, iov, offset + done, bytes - done, do_send); if (ret > 0) { done += ret; } else if (ret < 0) { if (errno == EAGAIN) { qemu_coroutine_yield(); } else if (done == 0) { return -1; } else { break; } } else if (ret == 0 && !do_send) { /* write (send) should never return 0. * read (recv) returns 0 for end-of-file (-data). 
* In both cases there's little point retrying, * but we do for write anyway, just in case */ break; } } return done; }"} {"target": 1, "idx": 3327, "func": "static int qcow2_change_backing_file(BlockDriverState *bs, const char *backing_file, const char *backing_fmt) { return qcow2_update_ext_header(bs, backing_file, backing_fmt); }"} {"target": 0, "idx": 3330, "func": "static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h ) { const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset; const int alpha = alpha_table[index_a]; const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]+1; tc[1] = tc0_table[index_a][bS[1]]+1; tc[2] = tc0_table[index_a][bS[2]]+1; tc[3] = tc0_table[index_a][bS[3]]+1; h->h264dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta); } }"} {"target": 0, "idx": 3333, "func": "static void sun4d_hw_init(const struct sun4d_hwdef *hwdef, ram_addr_t RAM_size, const char *boot_device, DisplayState *ds, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env, *envs[MAX_CPUS]; unsigned int i; void *iounits[MAX_IOUNITS], *espdma, *ledma, *main_esp, *nvram, *sbi; qemu_irq *cpu_irqs[MAX_CPUS], *sbi_irq, *sbi_cpu_irq, *espdma_irq, *ledma_irq; qemu_irq *esp_reset, *le_reset; ram_addr_t ram_offset, prom_offset, tcx_offset; unsigned long kernel_size; int ret; char buf[1024]; int drive_index; void *fw_cfg; /* init CPUs */ if (!cpu_model) cpu_model = hwdef->default_cpu_model; for (i = 0; i < smp_cpus; i++) { env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"qemu: Unable to find Sparc CPU definition\\n\"); exit(1); } cpu_sparc_set_id(env, i); envs[i] = env; if (i == 0) { qemu_register_reset(main_cpu_reset, env); } else { qemu_register_reset(secondary_cpu_reset, env); env->halted = 1; } cpu_irqs[i] = qemu_allocate_irqs(cpu_set_irq, envs[i], MAX_PILS); env->prom_addr = hwdef->slavio_base; } for (i = smp_cpus; i < MAX_CPUS; i++) cpu_irqs[i] = qemu_allocate_irqs(dummy_cpu_set_irq, NULL, MAX_PILS); /* allocate RAM */ if ((uint64_t)RAM_size > hwdef->max_mem) { fprintf(stderr, \"qemu: Too much memory for this machine: %d, maximum %d\\n\", (unsigned int)(RAM_size / (1024 * 1024)), (unsigned int)(hwdef->max_mem / (1024 * 1024))); exit(1); } ram_offset = qemu_ram_alloc(RAM_size); cpu_register_physical_memory(0, RAM_size, ram_offset); /* load boot prom */ prom_offset = qemu_ram_alloc(PROM_SIZE_MAX); cpu_register_physical_memory(hwdef->slavio_base, (PROM_SIZE_MAX + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK, prom_offset | IO_MEM_ROM); if (bios_name == NULL) bios_name = PROM_FILENAME; snprintf(buf, sizeof(buf), \"%s/%s\", bios_dir, bios_name); ret = load_elf(buf, hwdef->slavio_base - PROM_VADDR, NULL, NULL, NULL); if (ret < 0 || ret > PROM_SIZE_MAX) ret = load_image_targphys(buf, hwdef->slavio_base, PROM_SIZE_MAX); if (ret < 0 || ret > PROM_SIZE_MAX) { fprintf(stderr, \"qemu: could not load prom '%s'\\n\", buf); exit(1); } /* set up devices */ sbi = sbi_init(hwdef->sbi_base, &sbi_irq, &sbi_cpu_irq, cpu_irqs); for (i = 0; i < MAX_IOUNITS; i++) if (hwdef->iounit_bases[i] != (target_phys_addr_t)-1) iounits[i] = iommu_init(hwdef->iounit_bases[i], hwdef->iounit_version, sbi_irq[hwdef->me_irq]); 
espdma = sparc32_dma_init(hwdef->espdma_base, sbi_irq[hwdef->esp_irq], iounits[0], &espdma_irq, &esp_reset); ledma = sparc32_dma_init(hwdef->ledma_base, sbi_irq[hwdef->le_irq], iounits[0], &ledma_irq, &le_reset); if (graphic_depth != 8 && graphic_depth != 24) { fprintf(stderr, \"qemu: Unsupported depth: %d\\n\", graphic_depth); exit (1); } tcx_offset = qemu_ram_alloc(hwdef->vram_size); tcx_init(ds, hwdef->tcx_base, phys_ram_base + tcx_offset, tcx_offset, hwdef->vram_size, graphic_width, graphic_height, graphic_depth); if (nd_table[0].model == NULL) nd_table[0].model = \"lance\"; if (strcmp(nd_table[0].model, \"lance\") == 0) { lance_init(&nd_table[0], hwdef->le_base, ledma, *ledma_irq, le_reset); } else if (strcmp(nd_table[0].model, \"?\") == 0) { fprintf(stderr, \"qemu: Supported NICs: lance\\n\"); exit (1); } else { fprintf(stderr, \"qemu: Unsupported NIC: %s\\n\", nd_table[0].model); exit (1); } nvram = m48t59_init(sbi_irq[0], hwdef->nvram_base, 0, hwdef->nvram_size, 8); slavio_timer_init_all(hwdef->counter_base, sbi_irq[hwdef->clock1_irq], sbi_cpu_irq, smp_cpus); slavio_serial_ms_kbd_init(hwdef->ms_kb_base, sbi_irq[hwdef->ms_kb_irq], nographic, ESCC_CLOCK, 1); // Slavio TTYA (base+4, Linux ttyS0) is the first Qemu serial device // Slavio TTYB (base+0, Linux ttyS1) is the second Qemu serial device escc_init(hwdef->serial_base, sbi_irq[hwdef->ser_irq], serial_hds[1], serial_hds[0], ESCC_CLOCK, 1); if (drive_get_max_bus(IF_SCSI) > 0) { fprintf(stderr, \"qemu: too many SCSI bus\\n\"); exit(1); } main_esp = esp_init(hwdef->esp_base, 2, espdma_memory_read, espdma_memory_write, espdma, *espdma_irq, esp_reset); for (i = 0; i < ESP_MAX_DEVS; i++) { drive_index = drive_get_index(IF_SCSI, 0, i); if (drive_index == -1) continue; esp_scsi_attach(main_esp, drives_table[drive_index].bdrv, i); } kernel_size = sun4m_load_kernel(kernel_filename, initrd_filename, RAM_size); nvram_init(nvram, (uint8_t *)&nd_table[0].macaddr, kernel_cmdline, boot_device, RAM_size, kernel_size, graphic_width, graphic_height, graphic_depth, hwdef->nvram_machine_id, \"Sun4d\"); fw_cfg = fw_cfg_init(0, 0, CFG_ADDR, CFG_ADDR + 2); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id); }"} {"target": 0, "idx": 3348, "func": "static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested) { int num_cq_events = 0, ret = 0; struct ibv_cq *cq; void *cq_ctx; uint64_t wr_id = RDMA_WRID_NONE, wr_id_in; if (ibv_req_notify_cq(rdma->cq, 0)) { return -1; } /* poll cq first */ while (wr_id != wrid_requested) { ret = qemu_rdma_poll(rdma, &wr_id_in); if (ret < 0) { return ret; } wr_id = wr_id_in & RDMA_WRID_TYPE_MASK; if (wr_id == RDMA_WRID_NONE) { break; } if (wr_id != wrid_requested) { DDDPRINTF(\"A Wanted wrid %s (%d) but got %s (%\" PRIu64 \")\\n\", print_wrid(wrid_requested), wrid_requested, print_wrid(wr_id), wr_id); } } if (wr_id == wrid_requested) { return 0; } while (1) { /* * Coroutine doesn't start until process_incoming_migration() * so don't yield unless we know we're running inside of a coroutine. 
*/ if (rdma->migration_started_on_destination) { yield_until_fd_readable(rdma->comp_channel->fd); } if (ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx)) { perror(\"ibv_get_cq_event\"); goto err_block_for_wrid; } num_cq_events++; if (ibv_req_notify_cq(cq, 0)) { goto err_block_for_wrid; } while (wr_id != wrid_requested) { ret = qemu_rdma_poll(rdma, &wr_id_in); if (ret < 0) { goto err_block_for_wrid; } wr_id = wr_id_in & RDMA_WRID_TYPE_MASK; if (wr_id == RDMA_WRID_NONE) { break; } if (wr_id != wrid_requested) { DDDPRINTF(\"B Wanted wrid %s (%d) but got %s (%\" PRIu64 \")\\n\", print_wrid(wrid_requested), wrid_requested, print_wrid(wr_id), wr_id); } } if (wr_id == wrid_requested) { goto success_block_for_wrid; } } success_block_for_wrid: if (num_cq_events) { ibv_ack_cq_events(cq, num_cq_events); } return 0; err_block_for_wrid: if (num_cq_events) { ibv_ack_cq_events(cq, num_cq_events); } return ret; }"} {"target": 1, "idx": 3375, "func": "static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, int src_size) { const uint16_t *end; const uint16_t *mm_end; uint8_t *d = dst; const uint16_t *s = (const uint16_t *)src; end = s + src_size/2; __asm__ volatile(PREFETCH\" %0\"::\"m\"(*s):\"memory\"); __asm__ volatile(\"pxor %%mm7,%%mm7 \\n\\t\":::\"memory\"); __asm__ volatile(\"pcmpeqd %%mm6,%%mm6 \\n\\t\":::\"memory\"); mm_end = end - 3; while (s < mm_end) { __asm__ volatile( PREFETCH\" 32%1 \\n\\t\" \"movq %1, %%mm0 \\n\\t\" \"movq %1, %%mm1 \\n\\t\" \"movq %1, %%mm2 \\n\\t\" \"pand %2, %%mm0 \\n\\t\" \"pand %3, %%mm1 \\n\\t\" \"pand %4, %%mm2 \\n\\t\" \"psllq $3, %%mm0 \\n\\t\" \"psrlq $2, %%mm1 \\n\\t\" \"psrlq $7, %%mm2 \\n\\t\" PACK_RGB32 :\"=m\"(*d) :\"m\"(*s),\"m\"(mask15b),\"m\"(mask15g),\"m\"(mask15r) :\"memory\"); d += 16; s += 4; } __asm__ volatile(SFENCE:::\"memory\"); __asm__ volatile(EMMS:::\"memory\"); while (s < end) { register uint16_t bgr; bgr = *s++; *d++ = (bgr&0x1F)<<3; *d++ = (bgr&0x3E0)>>2; *d++ = (bgr&0x7C00)>>7; *d++ = 255; } }"} {"target": 1, "idx": 3392, "func": "static int mux_chr_can_read(void *opaque) { CharDriverState *chr = opaque; MuxDriver *d = chr->opaque; if ((d->prod - d->cons) < MUX_BUFFER_SIZE) return 1; if (d->chr_can_read[chr->focus]) return d->chr_can_read[chr->focus](d->ext_opaque[chr->focus]); return 0; }"} {"target": 1, "idx": 3394, "func": "static inline bool vtd_queued_inv_enable_check(IntelIOMMUState *s) { return s->iq_tail == 0; }"} {"target": 1, "idx": 3397, "func": "static av_always_inline void hcscale(SwsContext *c, int16_t *dst1, int16_t *dst2, int dstWidth, const uint8_t *src_in[4], int srcW, int xInc, const int16_t *hChrFilter, const int16_t *hChrFilterPos, int hChrFilterSize, uint8_t *formatConvBuffer, uint32_t *pal) { const uint8_t *src1 = src_in[1], *src2 = src_in[2]; if (c->chrToYV12) { uint8_t *buf2 = formatConvBuffer + FFALIGN(srcW * FFALIGN(c->srcBpc, 8) >> 3, 16); c->chrToYV12(formatConvBuffer, buf2, src1, src2, srcW, pal); src1= formatConvBuffer; src2= buf2; } else if (c->readChrPlanar) { uint8_t *buf2 = formatConvBuffer + FFALIGN(srcW * FFALIGN(c->srcBpc, 8) >> 3, 16); c->readChrPlanar(formatConvBuffer, buf2, src_in, srcW); src1= formatConvBuffer; src2= buf2; } if (!c->hcscale_fast) { c->hcScale(c, dst1, dstWidth, src1, hChrFilter, hChrFilterPos, hChrFilterSize); c->hcScale(c, dst2, dstWidth, src2, hChrFilter, hChrFilterPos, hChrFilterSize); } else { // fast bilinear upscale / crap downscale c->hcscale_fast(c, dst1, dst2, dstWidth, src1, src2, srcW, xInc); } if (c->chrConvertRange) c->chrConvertRange(dst1, dst2, 
dstWidth); }"} {"target": 1, "idx": 3398, "func": "static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, unsigned int bytes, QEMUIOVector *qiov) { BlockDriverState *bs = child->bs; /* Perform I/O through a temporary buffer so that users who scribble over * their read buffer while the operation is in progress do not end up * modifying the image file. This is critical for zero-copy guest I/O * where anything might happen inside guest memory. */ void *bounce_buffer; BlockDriver *drv = bs->drv; struct iovec iov; QEMUIOVector local_qiov; int64_t cluster_offset; int64_t cluster_bytes; size_t skip_bytes; int ret; int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, BDRV_REQUEST_MAX_BYTES); unsigned int progress = 0; /* FIXME We cannot require callers to have write permissions when all they * are doing is a read request. If we did things right, write permissions * would be obtained anyway, but internally by the copy-on-read code. As * long as it is implemented here rather than in a separate filter driver, * the copy-on-read code doesn't have its own BdrvChild, however, for which * it could request permissions. Therefore we have to bypass the permission * system for the moment. */ // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); /* Cover entire cluster so no additional backing file I/O is required when * allocating cluster in the image file. Note that this value may exceed * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which * is one reason we loop rather than doing it all at once. */ bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes); skip_bytes = offset - cluster_offset; trace_bdrv_co_do_copy_on_readv(bs, offset, bytes, cluster_offset, cluster_bytes); bounce_buffer = qemu_try_blockalign(bs, MIN(MIN(max_transfer, cluster_bytes), MAX_BOUNCE_BUFFER)); if (bounce_buffer == NULL) { ret = -ENOMEM; goto err; while (cluster_bytes) { int64_t pnum; ret = bdrv_is_allocated(bs, cluster_offset, MIN(cluster_bytes, max_transfer), &pnum); if (ret < 0) { /* Safe to treat errors in querying allocation as if * unallocated; we'll probably fail again soon on the * read, but at least that will set a decent errno. */ pnum = MIN(cluster_bytes, max_transfer); assert(skip_bytes < pnum); if (ret <= 0) { /* Must copy-on-read; use the bounce buffer */ iov.iov_base = bounce_buffer; iov.iov_len = pnum = MIN(pnum, MAX_BOUNCE_BUFFER); qemu_iovec_init_external(&local_qiov, &iov, 1); ret = bdrv_driver_preadv(bs, cluster_offset, pnum, &local_qiov, 0); if (ret < 0) { goto err; bdrv_debug_event(bs, BLKDBG_COR_WRITE); if (drv->bdrv_co_pwrite_zeroes && buffer_is_zero(bounce_buffer, pnum)) { /* FIXME: Should we (perhaps conditionally) be setting * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy * that still correctly reads as zero? */ ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum, 0); } else { /* This does not change the data on the disk, it is not * necessary to flush even in cache=writethrough mode. */ ret = bdrv_driver_pwritev(bs, cluster_offset, pnum, &local_qiov, 0); if (ret < 0) { /* It might be okay to ignore write errors for guest * requests. If this is a deliberate copy-on-read * then we don't want to ignore the error. Simply * report it in all cases. 
*/ goto err; qemu_iovec_from_buf(qiov, progress, bounce_buffer + skip_bytes, pnum - skip_bytes); } else { /* Read directly into the destination */ qemu_iovec_init(&local_qiov, qiov->niov); qemu_iovec_concat(&local_qiov, qiov, progress, pnum - skip_bytes); ret = bdrv_driver_preadv(bs, offset + progress, local_qiov.size, &local_qiov, 0); qemu_iovec_destroy(&local_qiov); if (ret < 0) { goto err; cluster_offset += pnum; cluster_bytes -= pnum; progress += pnum - skip_bytes; skip_bytes = 0; ret = 0; err: qemu_vfree(bounce_buffer); return ret;"} {"target": 1, "idx": 3399, "func": "static void virtio_scsi_device_unrealize(DeviceState *dev, Error **errp) { virtio_scsi_common_unrealize(dev, errp); }"} {"target": 1, "idx": 3401, "func": "static int protocol_client_init(VncState *vs, uint8_t *data, size_t len) { char buf[1024]; VncShareMode mode; int size; mode = data[0] ? VNC_SHARE_MODE_SHARED : VNC_SHARE_MODE_EXCLUSIVE; switch (vs->vd->share_policy) { case VNC_SHARE_POLICY_IGNORE: /* * Ignore the shared flag. Nothing to do here. * * Doesn't conform to the rfb spec but is traditional qemu * behavior, thus left here as option for compatibility * reasons. */ break; case VNC_SHARE_POLICY_ALLOW_EXCLUSIVE: /* * Policy: Allow clients ask for exclusive access. * * Implementation: When a client asks for exclusive access, * disconnect all others. Shared connects are allowed as long * as no exclusive connection exists. * * This is how the rfb spec suggests to handle the shared flag. */ if (mode == VNC_SHARE_MODE_EXCLUSIVE) { VncState *client; QTAILQ_FOREACH(client, &vs->vd->clients, next) { if (vs == client) { continue; } if (client->share_mode != VNC_SHARE_MODE_EXCLUSIVE && client->share_mode != VNC_SHARE_MODE_SHARED) { continue; } vnc_disconnect_start(client); } } if (mode == VNC_SHARE_MODE_SHARED) { if (vs->vd->num_exclusive > 0) { vnc_disconnect_start(vs); return 0; } } break; case VNC_SHARE_POLICY_FORCE_SHARED: /* * Policy: Shared connects only. * Implementation: Disallow clients asking for exclusive access. * * Useful for shared desktop sessions where you don't want * someone forgetting to say -shared when running the vnc * client disconnect everybody else. 
*/ if (mode == VNC_SHARE_MODE_EXCLUSIVE) { vnc_disconnect_start(vs); return 0; } break; } vnc_set_share_mode(vs, mode); vs->client_width = surface_width(vs->vd->ds); vs->client_height = surface_height(vs->vd->ds); vnc_write_u16(vs, vs->client_width); vnc_write_u16(vs, vs->client_height); pixel_format_message(vs); if (qemu_name) size = snprintf(buf, sizeof(buf), \"QEMU (%s)\", qemu_name); else size = snprintf(buf, sizeof(buf), \"QEMU\"); vnc_write_u32(vs, size); vnc_write(vs, buf, size); vnc_flush(vs); vnc_client_cache_auth(vs); vnc_qmp_event(vs, QAPI_EVENT_VNC_INITIALIZED); vnc_read_when(vs, protocol_client_msg, 1); return 0; }"} {"target": 0, "idx": 3416, "func": "static void avc_luma_hv_qrt_and_aver_dst_4x4_msa(const uint8_t *src_x, const uint8_t *src_y, int32_t src_stride, uint8_t *dst, int32_t dst_stride) { v16i8 src_hz0, src_hz1, src_hz2, src_hz3; v16u8 dst0, dst1, dst2, dst3; v16i8 src_vt0, src_vt1, src_vt2, src_vt3, src_vt4; v16i8 src_vt5, src_vt6, src_vt7, src_vt8; v16i8 mask0, mask1, mask2; v8i16 hz_out0, hz_out1, vert_out0, vert_out1; v8i16 res0, res1; v16u8 res; LD_SB3(&luma_mask_arr[48], 16, mask0, mask1, mask2); LD_SB5(src_y, src_stride, src_vt0, src_vt1, src_vt2, src_vt3, src_vt4); src_y += (5 * src_stride); src_vt0 = (v16i8) __msa_insve_w((v4i32) src_vt0, 1, (v4i32) src_vt1); src_vt1 = (v16i8) __msa_insve_w((v4i32) src_vt1, 1, (v4i32) src_vt2); src_vt2 = (v16i8) __msa_insve_w((v4i32) src_vt2, 1, (v4i32) src_vt3); src_vt3 = (v16i8) __msa_insve_w((v4i32) src_vt3, 1, (v4i32) src_vt4); XORI_B4_128_SB(src_vt0, src_vt1, src_vt2, src_vt3); LD_SB4(src_x, src_stride, src_hz0, src_hz1, src_hz2, src_hz3); LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); XORI_B4_128_SB(src_hz0, src_hz1, src_hz2, src_hz3); hz_out0 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src_hz0, src_hz1, mask0, mask1, mask2); hz_out1 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src_hz2, src_hz3, mask0, mask1, mask2); SRARI_H2_SH(hz_out0, hz_out1, 5); SAT_SH2_SH(hz_out0, hz_out1, 7); LD_SB4(src_y, src_stride, src_vt5, src_vt6, src_vt7, src_vt8); src_vt4 = (v16i8) __msa_insve_w((v4i32) src_vt4, 1, (v4i32) src_vt5); src_vt5 = (v16i8) __msa_insve_w((v4i32) src_vt5, 1, (v4i32) src_vt6); src_vt6 = (v16i8) __msa_insve_w((v4i32) src_vt6, 1, (v4i32) src_vt7); src_vt7 = (v16i8) __msa_insve_w((v4i32) src_vt7, 1, (v4i32) src_vt8); XORI_B4_128_SB(src_vt4, src_vt5, src_vt6, src_vt7); /* filter calc */ vert_out0 = AVC_CALC_DPADD_B_6PIX_2COEFF_R_SH(src_vt0, src_vt1, src_vt2, src_vt3, src_vt4, src_vt5); vert_out1 = AVC_CALC_DPADD_B_6PIX_2COEFF_R_SH(src_vt2, src_vt3, src_vt4, src_vt5, src_vt6, src_vt7); SRARI_H2_SH(vert_out0, vert_out1, 5); SAT_SH2_SH(vert_out0, vert_out1, 7); res1 = __msa_srari_h((hz_out1 + vert_out1), 1); res0 = __msa_srari_h((hz_out0 + vert_out0), 1); SAT_SH2_SH(res0, res1, 7); res = PCKEV_XORI128_UB(res0, res1); dst0 = (v16u8) __msa_insve_w((v4i32) dst0, 1, (v4i32) dst1); dst1 = (v16u8) __msa_insve_w((v4i32) dst2, 1, (v4i32) dst3); dst0 = (v16u8) __msa_insve_d((v2i64) dst0, 1, (v2i64) dst1); dst0 = __msa_aver_u_b(res, dst0); ST4x4_UB(dst0, dst0, 0, 1, 2, 3, dst, dst_stride); }"} {"target": 0, "idx": 3440, "func": "static BlockDriverAIOCB *raw_aio_readv(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { BDRVRawState *s = bs->opaque; return paio_submit(bs, s->hfile, sector_num, qiov, nb_sectors, cb, opaque, QEMU_AIO_READ); }"} {"target": 0, "idx": 3449, "func": "static void tcx_blit_writel(void *opaque, hwaddr addr, uint64_t val, unsigned 
size) { TCXState *s = opaque; uint32_t adsr, len; int i; if (!(addr & 4)) { s->tmpblit = val; } else { addr = (addr >> 3) & 0xfffff; adsr = val & 0xffffff; len = ((val >> 24) & 0x1f) + 1; if (adsr == 0xffffff) { memset(&s->vram[addr], s->tmpblit, len); if (s->depth == 24) { val = s->tmpblit & 0xffffff; val = cpu_to_be32(val); for (i = 0; i < len; i++) { s->vram24[addr + i] = val; } } } else { memcpy(&s->vram[addr], &s->vram[adsr], len); if (s->depth == 24) { memcpy(&s->vram24[addr], &s->vram24[adsr], len * 4); } } memory_region_set_dirty(&s->vram_mem, addr, len); } }"} {"target": 0, "idx": 3452, "func": "static void avc_loopfilter_cb_or_cr_intra_edge_ver_msa(uint8_t *data_cb_or_cr, uint8_t alpha_in, uint8_t beta_in, uint32_t img_width) { uint16_t out0, out1, out2, out3; v8i16 tmp1; v16u8 alpha, beta, is_less_than; v8i16 p0_or_q0, q0_or_p0; v16u8 p1_or_q1_org, p0_or_q0_org, q0_or_p0_org, q1_or_p1_org; v16i8 zero = { 0 }; v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0; v16u8 is_less_than_alpha, is_less_than_beta; v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r; { v16u8 row0, row1, row2, row3, row4, row5, row6, row7; LOAD_8VECS_UB((data_cb_or_cr - 2), img_width, row0, row1, row2, row3, row4, row5, row6, row7); TRANSPOSE8x4_B_UB(row0, row1, row2, row3, row4, row5, row6, row7, p1_or_q1_org, p0_or_q0_org, q0_or_p0_org, q1_or_p1_org); } alpha = (v16u8) __msa_fill_b(alpha_in); beta = (v16u8) __msa_fill_b(beta_in); p0_asub_q0 = __msa_asub_u_b(p0_or_q0_org, q0_or_p0_org); p1_asub_p0 = __msa_asub_u_b(p1_or_q1_org, p0_or_q0_org); q1_asub_q0 = __msa_asub_u_b(q1_or_p1_org, q0_or_p0_org); is_less_than_alpha = (p0_asub_q0 < alpha); is_less_than_beta = (p1_asub_p0 < beta); is_less_than = is_less_than_beta & is_less_than_alpha; is_less_than_beta = (q1_asub_q0 < beta); is_less_than = is_less_than_beta & is_less_than; is_less_than = (v16u8) __msa_ilvr_d((v2i64) zero, (v2i64) is_less_than); if (!__msa_test_bz_v(is_less_than)) { p1_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) p1_or_q1_org); p0_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) p0_or_q0_org); q0_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q0_or_p0_org); q1_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q1_or_p1_org); AVC_LOOP_FILTER_P0_OR_Q0(p0_org_r, q1_org_r, p1_org_r, p0_or_q0); AVC_LOOP_FILTER_P0_OR_Q0(q0_org_r, p1_org_r, q1_org_r, q0_or_p0); /* convert 16 bit output into 8 bit output */ p0_or_q0 = (v8i16) __msa_pckev_b(zero, (v16i8) p0_or_q0); q0_or_p0 = (v8i16) __msa_pckev_b(zero, (v16i8) q0_or_p0); p0_or_q0_org = __msa_bmnz_v(p0_or_q0_org, (v16u8) p0_or_q0, is_less_than); q0_or_p0_org = __msa_bmnz_v(q0_or_p0_org, (v16u8) q0_or_p0, is_less_than); tmp1 = (v8i16) __msa_ilvr_b((v16i8) q0_or_p0_org, (v16i8) p0_or_q0_org); data_cb_or_cr -= 1; out0 = __msa_copy_u_h(tmp1, 0); out1 = __msa_copy_u_h(tmp1, 1); out2 = __msa_copy_u_h(tmp1, 2); out3 = __msa_copy_u_h(tmp1, 3); STORE_HWORD(data_cb_or_cr, out0); data_cb_or_cr += img_width; STORE_HWORD(data_cb_or_cr, out1); data_cb_or_cr += img_width; STORE_HWORD(data_cb_or_cr, out2); data_cb_or_cr += img_width; STORE_HWORD(data_cb_or_cr, out3); data_cb_or_cr += img_width; out0 = __msa_copy_u_h(tmp1, 4); out1 = __msa_copy_u_h(tmp1, 5); out2 = __msa_copy_u_h(tmp1, 6); out3 = __msa_copy_u_h(tmp1, 7); STORE_HWORD(data_cb_or_cr, out0); data_cb_or_cr += img_width; STORE_HWORD(data_cb_or_cr, out1); data_cb_or_cr += img_width; STORE_HWORD(data_cb_or_cr, out2); data_cb_or_cr += img_width; STORE_HWORD(data_cb_or_cr, out3); } }"} {"target": 1, "idx": 3456, "func": "int ff_h261_handle_packet(AVFormatContext *ctx, PayloadContext *data, 
AVStream *st, AVPacket *pkt, uint32_t *timestamp, const uint8_t *buf, int len, uint16_t seq, int flags) { int sbit, ebit, gobn, mbap, quant; int res; //av_log(ctx, AV_LOG_DEBUG, \"got h261 RTP packet with time: %u\\n\", timestamp); /* drop data of previous packets in case of non-continuous (loss) packet stream */ if (data->buf && data->timestamp != *timestamp) { h261_free_dyn_buffer(&data->buf); } /* sanity check for size of input packet */ if (len < 5 /* 4 bytes header and 1 byte payload at least */) { av_log(ctx, AV_LOG_ERROR, \"Too short H.261 RTP packet\\n\"); return AVERROR_INVALIDDATA; } /* decode the H.261 payload header according to section 4.1 of RFC 4587: (uses 4 bytes between RTP header and H.261 stream per packet) 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |SBIT |EBIT |I|V| GOBN | MBAP | QUANT | HMVD | VMVD | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Start bit position (SBIT): 3 bits End bit position (EBIT): 3 bits INTRA-frame encoded data (I): 1 bit Motion Vector flag (V): 1 bit GOB number (GOBN): 4 bits Macroblock address predictor (MBAP): 5 bits Quantizer (QUANT): 5 bits Horizontal motion vector data (HMVD): 5 bits Vertical motion vector data (VMVD): 5 bits */ sbit = (buf[0] >> 5) & 0x07; ebit = (buf[0] >> 2) & 0x07; gobn = (buf[1] >> 4) & 0x0f; mbap = ((buf[1] << 1) & 0x1e) | ((buf[1] >> 7) & 0x01); quant = (buf[1] >> 4) & 0x0f; /* pass the H.261 payload header and continue with the actual payload */ buf += RTP_H261_PAYLOAD_HEADER_SIZE; len -= RTP_H261_PAYLOAD_HEADER_SIZE; /* start frame buffering with new dynamic buffer */ if (!data->buf) { /* sanity check: a new frame starts with gobn=0, sbit=0, mbap=0, uqnat=0 */ if (!gobn && !sbit && !mbap && !quant){ res = avio_open_dyn_buf(&data->buf); if (res < 0) return res; /* update the timestamp in the frame packet with the one from the RTP packet */ data->timestamp = *timestamp; } else { /* frame not started yet, need more packets */ return AVERROR(EAGAIN); } } /* do the \"byte merging\" at the boundaries of two consecutive frame fragments */ if (data->endbyte_bits || sbit) { if (data->endbyte_bits == sbit) { data->endbyte |= buf[0] & (0xff >> sbit); data->endbyte_bits = 0; buf++; len--; avio_w8(data->buf, data->endbyte); } else { /* ebit/sbit values inconsistent, assuming packet loss */ GetBitContext gb; init_get_bits(&gb, buf, len*8 - ebit); skip_bits(&gb, sbit); if (data->endbyte_bits) { data->endbyte |= get_bits(&gb, 8 - data->endbyte_bits); avio_w8(data->buf, data->endbyte); } while (get_bits_left(&gb) >= 8) avio_w8(data->buf, get_bits(&gb, 8)); data->endbyte_bits = get_bits_left(&gb); if (data->endbyte_bits) data->endbyte = get_bits(&gb, data->endbyte_bits) << (8 - data->endbyte_bits); ebit = 0; len = 0; } } if (ebit) { if (len > 0) avio_write(data->buf, buf, len - 1); data->endbyte_bits = 8 - ebit; data->endbyte = buf[len - 1] & (0xff << ebit); } else { avio_write(data->buf, buf, len); } /* RTP marker bit means: last fragment of current frame was received; otherwise, an additional fragment is needed for the current frame */ if (!(flags & RTP_FLAG_MARKER)) return AVERROR(EAGAIN); /* write the completed last byte from the \"byte merging\" */ if (data->endbyte_bits) avio_w8(data->buf, data->endbyte); data->endbyte_bits = 0; /* close frame buffering and create resulting A/V packet */ res = ff_rtp_finalize_packet(pkt, &data->buf, st->index); if (res < 0) return res; return 0; }"} {"target": 1, "idx": 3460, 
"func": "static int vdpau_mpeg_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) { MpegEncContext * const s = avctx->priv_data; Picture *pic = s->current_picture_ptr; struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private; VdpPictureInfoMPEG1Or2 *info = &pic_ctx->info.mpeg; VdpVideoSurface ref; int i; /* fill VdpPictureInfoMPEG1Or2 struct */ info->forward_reference = VDP_INVALID_HANDLE; info->backward_reference = VDP_INVALID_HANDLE; switch (s->pict_type) { case AV_PICTURE_TYPE_B: ref = ff_vdpau_get_surface_id(&s->next_picture.f); assert(ref != VDP_INVALID_HANDLE); info->backward_reference = ref; /* fall through to forward prediction */ case AV_PICTURE_TYPE_P: ref = ff_vdpau_get_surface_id(&s->last_picture.f); info->forward_reference = ref; } info->slice_count = 0; info->picture_structure = s->picture_structure; info->picture_coding_type = s->pict_type; info->intra_dc_precision = s->intra_dc_precision; info->frame_pred_frame_dct = s->frame_pred_frame_dct; info->concealment_motion_vectors = s->concealment_motion_vectors; info->intra_vlc_format = s->intra_vlc_format; info->alternate_scan = s->alternate_scan; info->q_scale_type = s->q_scale_type; info->top_field_first = s->top_field_first; // Both for MPEG-1 only, zero for MPEG-2: info->full_pel_forward_vector = s->full_pel[0]; info->full_pel_backward_vector = s->full_pel[1]; // For MPEG-1 fill both horizontal & vertical: info->f_code[0][0] = s->mpeg_f_code[0][0]; info->f_code[0][1] = s->mpeg_f_code[0][1]; info->f_code[1][0] = s->mpeg_f_code[1][0]; info->f_code[1][1] = s->mpeg_f_code[1][1]; for (i = 0; i < 64; ++i) { info->intra_quantizer_matrix[i] = s->intra_matrix[i]; info->non_intra_quantizer_matrix[i] = s->inter_matrix[i]; } return ff_vdpau_common_start_frame(pic_ctx, buffer, size); }"} {"target": 1, "idx": 3476, "func": "void h263_decode_init_vlc(MpegEncContext *s) { static int done = 0; if (!done) { done = 1; init_vlc(&intra_MCBPC_vlc, INTRA_MCBPC_VLC_BITS, 9, intra_MCBPC_bits, 1, 1, intra_MCBPC_code, 1, 1); init_vlc(&inter_MCBPC_vlc, INTER_MCBPC_VLC_BITS, 28, inter_MCBPC_bits, 1, 1, inter_MCBPC_code, 1, 1); init_vlc(&cbpy_vlc, CBPY_VLC_BITS, 16, &cbpy_tab[0][1], 2, 1, &cbpy_tab[0][0], 2, 1); init_vlc(&mv_vlc, MV_VLC_BITS, 33, &mvtab[0][1], 2, 1, &mvtab[0][0], 2, 1); init_rl(&rl_inter); init_rl(&rl_intra); init_rl(&rvlc_rl_inter); init_rl(&rvlc_rl_intra); init_rl(&rl_intra_aic); init_vlc_rl(&rl_inter); init_vlc_rl(&rl_intra); init_vlc_rl(&rvlc_rl_inter); init_vlc_rl(&rvlc_rl_intra); init_vlc_rl(&rl_intra_aic); init_vlc(&dc_lum, DC_VLC_BITS, 10 /* 13 */, &DCtab_lum[0][1], 2, 1, &DCtab_lum[0][0], 2, 1); init_vlc(&dc_chrom, DC_VLC_BITS, 10 /* 13 */, &DCtab_chrom[0][1], 2, 1, &DCtab_chrom[0][0], 2, 1); init_vlc(&sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 15, &sprite_trajectory_tab[0][1], 4, 2, &sprite_trajectory_tab[0][0], 4, 2); init_vlc(&mb_type_b_vlc, MB_TYPE_B_VLC_BITS, 4, &mb_type_b_tab[0][1], 2, 1, &mb_type_b_tab[0][0], 2, 1); init_vlc(&h263_mbtype_b_vlc, H263_MBTYPE_B_VLC_BITS, 15, &h263_mbtype_b_tab[0][1], 2, 1, &h263_mbtype_b_tab[0][0], 2, 1); init_vlc(&cbpc_b_vlc, CBPC_B_VLC_BITS, 4, &cbpc_b_tab[0][1], 2, 1, &cbpc_b_tab[0][0], 2, 1); } }"} {"target": 1, "idx": 3484, "func": "static uint64_t icp_pit_read(void *opaque, target_phys_addr_t offset, unsigned size) { icp_pit_state *s = (icp_pit_state *)opaque; int n; /* ??? Don't know the PrimeCell ID for this device. 
*/ n = offset >> 8; if (n > 3) { hw_error(\"sp804_read: Bad timer %d\\n\", n); } return arm_timer_read(s->timer[n], offset & 0xff); }"} {"target": 1, "idx": 3499, "func": "static int mpeg1_decode_sequence(AVCodecContext *avctx, UINT8 *buf, int buf_size) { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; int width, height, i, v, j; init_get_bits(&s->gb, buf, buf_size); width = get_bits(&s->gb, 12); height = get_bits(&s->gb, 12); skip_bits(&s->gb, 4); s->frame_rate_index = get_bits(&s->gb, 4); if (s->frame_rate_index == 0) return -1; s->bit_rate = get_bits(&s->gb, 18) * 400; if (get_bits1(&s->gb) == 0) /* marker */ return -1; if (width <= 0 || height <= 0 || (width % 2) != 0 || (height % 2) != 0) return -1; if (width != s->width || height != s->height) { /* start new mpeg1 context decoding */ s->out_format = FMT_MPEG1; if (s1->mpeg_enc_ctx_allocated) { MPV_common_end(s); } s->width = width; s->height = height; s->has_b_frames = 1; s->avctx = avctx; avctx->width = width; avctx->height = height; avctx->frame_rate = frame_rate_tab[s->frame_rate_index]; s->frame_rate = avctx->frame_rate; avctx->bit_rate = s->bit_rate; if (MPV_common_init(s) < 0) return -1; mpeg1_init_vlc(s); s1->mpeg_enc_ctx_allocated = 1; } skip_bits(&s->gb, 10); /* vbv_buffer_size */ skip_bits(&s->gb, 1); /* get matrix */ if (get_bits1(&s->gb)) { for(i=0;i<64;i++) { v = get_bits(&s->gb, 8); j = zigzag_direct[i]; s->intra_matrix[j] = v; s->chroma_intra_matrix[j] = v; } #ifdef DEBUG dprintf(\"intra matrix present\\n\"); for(i=0;i<64;i++) dprintf(\" %d\", s->intra_matrix[zigzag_direct[i]]); printf(\"\\n\"); #endif } else { for(i=0;i<64;i++) { v = default_intra_matrix[i]; s->intra_matrix[i] = v; s->chroma_intra_matrix[i] = v; } } if (get_bits1(&s->gb)) { for(i=0;i<64;i++) { v = get_bits(&s->gb, 8); j = zigzag_direct[i]; s->non_intra_matrix[j] = v; s->chroma_non_intra_matrix[j] = v; } #ifdef DEBUG dprintf(\"non intra matrix present\\n\"); for(i=0;i<64;i++) dprintf(\" %d\", s->non_intra_matrix[zigzag_direct[i]]); printf(\"\\n\"); #endif } else { for(i=0;i<64;i++) { v = default_non_intra_matrix[i]; s->non_intra_matrix[i] = v; s->chroma_non_intra_matrix[i] = v; } } /* we set mpeg2 parameters so that it emulates mpeg1 */ s->progressive_sequence = 1; s->progressive_frame = 1; s->picture_structure = PICT_FRAME; s->frame_pred_frame_dct = 1; s->mpeg2 = 0; return 0; }"} {"target": 1, "idx": 3502, "func": "void ff_rfps_calculate(AVFormatContext *ic) { int i, j; for (i = 0; inb_streams; i++) { AVStream *st = ic->streams[i]; if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO) // the check for tb_unreliable() is not completely correct, since this is not about handling // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g. // ipmovie.c produces. 
if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num) av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX); if (st->info->duration_count>1 && !st->r_frame_rate.num && tb_unreliable(st->codec)) { int num = 0; double best_error= 0.01; for (j=0; jinfo->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j)) if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j)) for(k=0; k<2; k++){ int n= st->info->duration_count; double a= st->info->duration_error[k][0][j] / n; double error= st->info->duration_error[k][1][j]/n - a*a; if(error < best_error && best_error> 0.000000001){ best_error= error; num = get_std_framerate(j); } if(error < 0.02) av_log(NULL, AV_LOG_DEBUG, \"rfps: %f %f\\n\", get_std_framerate(j) / 12.0/1001, error); } } // do not increase frame rate by more than 1 % in order to match a standard rate. if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate))) av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX); } av_freep(&st->info->duration_error); st->info->last_dts = AV_NOPTS_VALUE; st->info->duration_count = 0; st->info->rfps_duration_sum = 0; } }"} {"target": 0, "idx": 3513, "func": "static void gen_compute_eflags_o(DisasContext *s, TCGv reg) { gen_compute_eflags(s); tcg_gen_shri_tl(reg, cpu_cc_src, 11); tcg_gen_andi_tl(reg, reg, 1); }"} {"target": 0, "idx": 3517, "func": "static void pred_spatial_direct_motion(const H264Context *const h, H264SliceContext *sl, int *mb_type) { int b8_stride = 2; int b4_stride = h->b_stride; int mb_xy = sl->mb_xy, mb_y = sl->mb_y; int mb_type_col[2]; const int16_t (*l1mv0)[2], (*l1mv1)[2]; const int8_t *l1ref0, *l1ref1; const int is_b8x8 = IS_8X8(*mb_type); unsigned int sub_mb_type = MB_TYPE_L0L1; int i8, i4; int ref[2]; int mv[2]; int list; assert(sl->ref_list[1][0].reference & 3); await_reference_mb_row(h, sl->ref_list[1][0].parent, sl->mb_y + !!IS_INTERLACED(*mb_type)); #define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16 | MB_TYPE_INTRA4x4 | \\ MB_TYPE_INTRA16x16 | MB_TYPE_INTRA_PCM) /* ref = min(neighbors) */ for (list = 0; list < 2; list++) { int left_ref = sl->ref_cache[list][scan8[0] - 1]; int top_ref = sl->ref_cache[list][scan8[0] - 8]; int refc = sl->ref_cache[list][scan8[0] - 8 + 4]; const int16_t *C = sl->mv_cache[list][scan8[0] - 8 + 4]; if (refc == PART_NOT_AVAILABLE) { refc = sl->ref_cache[list][scan8[0] - 8 - 1]; C = sl->mv_cache[list][scan8[0] - 8 - 1]; } ref[list] = FFMIN3((unsigned)left_ref, (unsigned)top_ref, (unsigned)refc); if (ref[list] >= 0) { /* This is just pred_motion() but with the cases removed that * cannot happen for direct blocks. 
*/ const int16_t *const A = sl->mv_cache[list][scan8[0] - 1]; const int16_t *const B = sl->mv_cache[list][scan8[0] - 8]; int match_count = (left_ref == ref[list]) + (top_ref == ref[list]) + (refc == ref[list]); if (match_count > 1) { // most common mv[list] = pack16to32(mid_pred(A[0], B[0], C[0]), mid_pred(A[1], B[1], C[1])); } else { assert(match_count == 1); if (left_ref == ref[list]) mv[list] = AV_RN32A(A); else if (top_ref == ref[list]) mv[list] = AV_RN32A(B); else mv[list] = AV_RN32A(C); } } else { int mask = ~(MB_TYPE_L0 << (2 * list)); mv[list] = 0; ref[list] = -1; if (!is_b8x8) *mb_type &= mask; sub_mb_type &= mask; } } if (ref[0] < 0 && ref[1] < 0) { ref[0] = ref[1] = 0; if (!is_b8x8) *mb_type |= MB_TYPE_L0L1; sub_mb_type |= MB_TYPE_L0L1; } if (!(is_b8x8 | mv[0] | mv[1])) { fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1); fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1); fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4); fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4); *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_P1L0 | MB_TYPE_P1L1)) | MB_TYPE_16x16 | MB_TYPE_DIRECT2; return; } if (IS_INTERLACED(sl->ref_list[1][0].parent->mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL mb_y = (sl->mb_y & ~1) + sl->col_parity; mb_xy = sl->mb_x + ((sl->mb_y & ~1) + sl->col_parity) * h->mb_stride; b8_stride = 0; } else { mb_y += sl->col_fieldoff; mb_xy += h->mb_stride * sl->col_fieldoff; // non-zero for FL -> FL & differ parity } goto single_col; } else { // AFL/AFR/FR/FL -> AFR/FR if (IS_INTERLACED(*mb_type)) { // AFL /FL -> AFR/FR mb_y = sl->mb_y & ~1; mb_xy = (sl->mb_y & ~1) * h->mb_stride + sl->mb_x; mb_type_col[0] = sl->ref_list[1][0].parent->mb_type[mb_xy]; mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy + h->mb_stride]; b8_stride = 2 + 4 * h->mb_stride; b4_stride *= 6; if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) { mb_type_col[0] &= ~MB_TYPE_INTERLACED; mb_type_col[1] &= ~MB_TYPE_INTERLACED; } sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */ if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) && !is_b8x8) { *mb_type |= MB_TYPE_16x8 | MB_TYPE_DIRECT2; /* B_16x8 */ } else { *mb_type |= MB_TYPE_8x8; } } else { // AFR/FR -> AFR/FR single_col: mb_type_col[0] = mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy]; sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */ if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) { *mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_16x16 */ } else if (!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) { *mb_type |= MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16)); } else { if (!h->sps.direct_8x8_inference_flag) { /* FIXME: Save sub mb types from previous frames (or derive * from MVs) so we know exactly what block size to use. 
*/ sub_mb_type += (MB_TYPE_8x8 - MB_TYPE_16x16); /* B_SUB_4x4 */ } *mb_type |= MB_TYPE_8x8; } } } await_reference_mb_row(h, sl->ref_list[1][0].parent, mb_y); l1mv0 = &sl->ref_list[1][0].parent->motion_val[0][h->mb2b_xy[mb_xy]]; l1mv1 = &sl->ref_list[1][0].parent->motion_val[1][h->mb2b_xy[mb_xy]]; l1ref0 = &sl->ref_list[1][0].parent->ref_index[0][4 * mb_xy]; l1ref1 = &sl->ref_list[1][0].parent->ref_index[1][4 * mb_xy]; if (!b8_stride) { if (sl->mb_y & 1) { l1ref0 += 2; l1ref1 += 2; l1mv0 += 2 * b4_stride; l1mv1 += 2 * b4_stride; } } if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) { int n = 0; for (i8 = 0; i8 < 4; i8++) { int x8 = i8 & 1; int y8 = i8 >> 1; int xy8 = x8 + y8 * b8_stride; int xy4 = x8 * 3 + y8 * b4_stride; int a, b; if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8])) continue; sl->sub_mb_type[i8] = sub_mb_type; fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, (uint8_t)ref[0], 1); fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, (uint8_t)ref[1], 1); if (!IS_INTRA(mb_type_col[y8]) && !sl->ref_list[1][0].parent->long_ref && ((l1ref0[xy8] == 0 && FFABS(l1mv0[xy4][0]) <= 1 && FFABS(l1mv0[xy4][1]) <= 1) || (l1ref0[xy8] < 0 && l1ref1[xy8] == 0 && FFABS(l1mv1[xy4][0]) <= 1 && FFABS(l1mv1[xy4][1]) <= 1))) { a = b = 0; if (ref[0] > 0) a = mv[0]; if (ref[1] > 0) b = mv[1]; n++; } else { a = mv[0]; b = mv[1]; } fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, a, 4); fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, b, 4); } if (!is_b8x8 && !(n & 3)) *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_P1L0 | MB_TYPE_P1L1)) | MB_TYPE_16x16 | MB_TYPE_DIRECT2; } else if (IS_16X16(*mb_type)) { int a, b; fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1); fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1); if (!IS_INTRA(mb_type_col[0]) && !sl->ref_list[1][0].parent->long_ref && ((l1ref0[0] == 0 && FFABS(l1mv0[0][0]) <= 1 && FFABS(l1mv0[0][1]) <= 1) || (l1ref0[0] < 0 && !l1ref1[0] && FFABS(l1mv1[0][0]) <= 1 && FFABS(l1mv1[0][1]) <= 1 && h->x264_build > 33U))) { a = b = 0; if (ref[0] > 0) a = mv[0]; if (ref[1] > 0) b = mv[1]; } else { a = mv[0]; b = mv[1]; } fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, a, 4); fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, b, 4); } else { int n = 0; for (i8 = 0; i8 < 4; i8++) { const int x8 = i8 & 1; const int y8 = i8 >> 1; if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8])) continue; sl->sub_mb_type[i8] = sub_mb_type; fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, mv[0], 4); fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, mv[1], 4); fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, (uint8_t)ref[0], 1); fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, (uint8_t)ref[1], 1); assert(b8_stride == 2); /* col_zero_flag */ if (!IS_INTRA(mb_type_col[0]) && !sl->ref_list[1][0].parent->long_ref && (l1ref0[i8] == 0 || (l1ref0[i8] < 0 && l1ref1[i8] == 0 && h->x264_build > 33U))) { const int16_t (*l1mv)[2] = l1ref0[i8] == 0 ? 
l1mv0 : l1mv1; if (IS_SUB_8X8(sub_mb_type)) { const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride]; if (FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1) { if (ref[0] == 0) fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4); if (ref[1] == 0) fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4); n += 4; } } else { int m = 0; for (i4 = 0; i4 < 4; i4++) { const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) + (y8 * 2 + (i4 >> 1)) * b4_stride]; if (FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1) { if (ref[0] == 0) AV_ZERO32(sl->mv_cache[0][scan8[i8 * 4 + i4]]); if (ref[1] == 0) AV_ZERO32(sl->mv_cache[1][scan8[i8 * 4 + i4]]); m++; } } if (!(m & 3)) sl->sub_mb_type[i8] += MB_TYPE_16x16 - MB_TYPE_8x8; n += m; } } } if (!is_b8x8 && !(n & 15)) *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_P1L0 | MB_TYPE_P1L1)) | MB_TYPE_16x16 | MB_TYPE_DIRECT2; } }"} {"target": 0, "idx": 3521, "func": "static void gem_write(void *opaque, target_phys_addr_t offset, uint64_t val, unsigned size) { GemState *s = (GemState *)opaque; uint32_t readonly; DB_PRINT(\"offset: 0x%04x write: 0x%08x \", offset, (unsigned)val); offset >>= 2; /* Squash bits which are read only in write value */ val &= ~(s->regs_ro[offset]); /* Preserve (only) bits which are read only in register */ readonly = s->regs[offset]; readonly &= s->regs_ro[offset]; /* Squash bits which are write 1 to clear */ val &= ~(s->regs_w1c[offset] & val); /* Copy register write to backing store */ s->regs[offset] = val | readonly; /* Handle register write side effects */ switch (offset) { case GEM_NWCTRL: if (val & GEM_NWCTRL_TXSTART) { gem_transmit(s); } if (!(val & GEM_NWCTRL_TXENA)) { /* Reset to start of Q when transmit disabled. */ s->tx_desc_addr = s->regs[GEM_TXQBASE]; } if (!(val & GEM_NWCTRL_RXENA)) { /* Reset to start of Q when receive disabled. */ s->rx_desc_addr = s->regs[GEM_RXQBASE]; } break; case GEM_TXSTATUS: gem_update_int_status(s); break; case GEM_RXQBASE: s->rx_desc_addr = val; break; case GEM_TXQBASE: s->tx_desc_addr = val; break; case GEM_RXSTATUS: gem_update_int_status(s); break; case GEM_IER: s->regs[GEM_IMR] &= ~val; gem_update_int_status(s); break; case GEM_IDR: s->regs[GEM_IMR] |= val; gem_update_int_status(s); break; case GEM_PHYMNTNC: if (val & GEM_PHYMNTNC_OP_W) { uint32_t phy_addr, reg_num; phy_addr = (val & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT; if (phy_addr == BOARD_PHY_ADDRESS) { reg_num = (val & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT; gem_phy_write(s, reg_num, val); } } break; } DB_PRINT(\"newval: 0x%08x\\n\", s->regs[offset]); }"} {"target": 1, "idx": 3540, "func": "void cpu_loop(CPUMIPSState *env) { target_siginfo_t info; int trapnr, ret; unsigned int syscall_num; for(;;) { trapnr = cpu_mips_exec(env); switch(trapnr) { case EXCP_SYSCALL: syscall_num = env->active_tc.gpr[2] - 4000; env->active_tc.PC += 4; if (syscall_num >= sizeof(mips_syscall_args)) { ret = -ENOSYS; } else { int nb_args; abi_ulong sp_reg; abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0; nb_args = mips_syscall_args[syscall_num]; sp_reg = env->active_tc.gpr[29]; switch (nb_args) { /* these arguments are taken from the stack */ /* FIXME - what to do if get_user() fails? 
*/ case 8: get_user_ual(arg8, sp_reg + 28); case 7: get_user_ual(arg7, sp_reg + 24); case 6: get_user_ual(arg6, sp_reg + 20); case 5: get_user_ual(arg5, sp_reg + 16); default: ret = do_syscall(env, env->active_tc.gpr[2], env->active_tc.gpr[4], env->active_tc.gpr[5], env->active_tc.gpr[6], env->active_tc.gpr[7], arg5, arg6/*, arg7, arg8*/); if ((unsigned int)ret >= (unsigned int)(-1133)) { env->active_tc.gpr[7] = 1; /* error flag */ ret = -ret; } else { env->active_tc.gpr[7] = 0; /* error flag */ env->active_tc.gpr[2] = ret; case EXCP_TLBL: case EXCP_TLBS: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; /* XXX: check env->error_code */ info.si_code = TARGET_SEGV_MAPERR; info._sifields._sigfault._addr = env->CP0_BadVAddr; queue_signal(env, info.si_signo, &info); case EXCP_CpU: case EXCP_RI: info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = 0; queue_signal(env, info.si_signo, &info); case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ case EXCP_DEBUG: { int sig; sig = gdb_handlesig (env, TARGET_SIGTRAP); if (sig) { info.si_signo = sig; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, &info); default: // error: fprintf(stderr, \"qemu: unhandled CPU exception 0x%x - aborting\\n\", trapnr); cpu_dump_state(env, stderr, fprintf, 0); abort(); process_pending_signals(env);"} {"target": 1, "idx": 3551, "func": "static int tmv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { TMVContext *tmv = s->priv_data; int64_t pos; if (stream_index) return -1; pos = timestamp * (tmv->audio_chunk_size + tmv->video_chunk_size + tmv->padding); avio_seek(s->pb, pos + TMV_HEADER_SIZE, SEEK_SET); tmv->stream_index = 0; return 0; }"} {"target": 1, "idx": 3555, "func": "static void bonito_ldma_writel(void *opaque, hwaddr addr, uint64_t val, unsigned size) { PCIBonitoState *s = opaque; ((uint32_t *)(&s->bonldma))[addr/sizeof(uint32_t)] = val & 0xffffffff;"} {"target": 1, "idx": 3561, "func": "static int jpeg2000_decode_packet(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile, int *tp_index, Jpeg2000CodingStyle *codsty, Jpeg2000ResLevel *rlevel, int precno, int layno, uint8_t *expn, int numgbits) { int bandno, cblkno, ret, nb_code_blocks; int cwsno; if (layno < rlevel->band[0].prec[precno].decoded_layers) return 0; rlevel->band[0].prec[precno].decoded_layers = layno + 1; if (bytestream2_get_bytes_left(&s->g) == 0 && s->bit_index == 8) { if (*tp_index < FF_ARRAY_ELEMS(tile->tile_part) - 1) { s->g = tile->tile_part[++(*tp_index)].tpg; } } if (bytestream2_peek_be32(&s->g) == JPEG2000_SOP_FIXED_BYTES) bytestream2_skip(&s->g, JPEG2000_SOP_BYTE_LENGTH); if (!(ret = get_bits(s, 1))) { jpeg2000_flush(s); return 0; } else if (ret < 0) return ret; for (bandno = 0; bandno < rlevel->nbands; bandno++) { Jpeg2000Band *band = rlevel->band + bandno; Jpeg2000Prec *prec = band->prec + precno; if (band->coord[0][0] == band->coord[0][1] || band->coord[1][0] == band->coord[1][1]) continue; nb_code_blocks = prec->nb_codeblocks_height * prec->nb_codeblocks_width; for (cblkno = 0; cblkno < nb_code_blocks; cblkno++) { Jpeg2000Cblk *cblk = prec->cblk + cblkno; int incl, newpasses, llen; if (cblk->npasses) incl = get_bits(s, 1); else incl = tag_tree_decode(s, prec->cblkincl + cblkno, layno + 1) == layno; if (!incl) continue; else if (incl < 0) return incl; if (!cblk->npasses) { int v = expn[bandno] + numgbits - 1 - tag_tree_decode(s, prec->zerobits + cblkno, 100); if (v < 0 || v > 30) { av_log(s->avctx, AV_LOG_ERROR, \"nonzerobits %d invalid or 
unsupported\\n\", v); return AVERROR_INVALIDDATA; } cblk->nonzerobits = v; } if ((newpasses = getnpasses(s)) < 0) return newpasses; av_assert2(newpasses > 0); if (cblk->npasses + newpasses >= JPEG2000_MAX_PASSES) { avpriv_request_sample(s->avctx, \"Too many passes\"); return AVERROR_PATCHWELCOME; } if ((llen = getlblockinc(s)) < 0) return llen; if (cblk->lblock + llen + av_log2(newpasses) > 16) { avpriv_request_sample(s->avctx, \"Block with length beyond 16 bits\"); return AVERROR_PATCHWELCOME; } cblk->lblock += llen; cblk->nb_lengthinc = 0; cblk->nb_terminationsinc = 0; do { int newpasses1 = 0; while (newpasses1 < newpasses) { newpasses1 ++; if (needs_termination(codsty->cblk_style, cblk->npasses + newpasses1 - 1)) { cblk->nb_terminationsinc ++; break; } } if ((ret = get_bits(s, av_log2(newpasses1) + cblk->lblock)) < 0) return ret; if (ret > sizeof(cblk->data)) { avpriv_request_sample(s->avctx, \"Block with lengthinc greater than %\"SIZE_SPECIFIER\"\", sizeof(cblk->data)); return AVERROR_PATCHWELCOME; } cblk->lengthinc[cblk->nb_lengthinc++] = ret; cblk->npasses += newpasses1; newpasses -= newpasses1; } while(newpasses); } } jpeg2000_flush(s); if (codsty->csty & JPEG2000_CSTY_EPH) { if (bytestream2_peek_be16(&s->g) == JPEG2000_EPH) bytestream2_skip(&s->g, 2); else av_log(s->avctx, AV_LOG_ERROR, \"EPH marker not found. instead %X\\n\", bytestream2_peek_be32(&s->g)); } for (bandno = 0; bandno < rlevel->nbands; bandno++) { Jpeg2000Band *band = rlevel->band + bandno; Jpeg2000Prec *prec = band->prec + precno; nb_code_blocks = prec->nb_codeblocks_height * prec->nb_codeblocks_width; for (cblkno = 0; cblkno < nb_code_blocks; cblkno++) { Jpeg2000Cblk *cblk = prec->cblk + cblkno; for (cwsno = 0; cwsno < cblk->nb_lengthinc; cwsno ++) { if ( bytestream2_get_bytes_left(&s->g) < cblk->lengthinc[cwsno] || sizeof(cblk->data) < cblk->length + cblk->lengthinc[cwsno] + 4 ) { av_log(s->avctx, AV_LOG_ERROR, \"Block length %\"PRIu16\" or lengthinc %d is too large, left %d\\n\", cblk->length, cblk->lengthinc[cwsno], bytestream2_get_bytes_left(&s->g)); return AVERROR_INVALIDDATA; } bytestream2_get_bufferu(&s->g, cblk->data + cblk->length, cblk->lengthinc[cwsno]); cblk->length += cblk->lengthinc[cwsno]; cblk->lengthinc[cwsno] = 0; if (cblk->nb_terminationsinc) { cblk->nb_terminationsinc--; cblk->nb_terminations++; cblk->data[cblk->length++] = 0xFF; cblk->data[cblk->length++] = 0xFF; cblk->data_start[cblk->nb_terminations] = cblk->length; } } } } return 0; }"} {"target": 1, "idx": 3582, "func": "static void curses_refresh(DisplayChangeListener *dcl) { int chr, nextchr, keysym, keycode, keycode_alt; curses_winch_check(); if (invalidate) { clear(); refresh(); curses_calc_pad(); graphic_hw_invalidate(NULL); invalidate = 0; } graphic_hw_text_update(NULL, screen); nextchr = ERR; while (1) { /* while there are any pending key strokes to process */ if (nextchr == ERR) chr = getch(); else { chr = nextchr; nextchr = ERR; } if (chr == ERR) break; #ifdef KEY_RESIZE /* this shouldn't occur when we use a custom SIGWINCH handler */ if (chr == KEY_RESIZE) { clear(); refresh(); curses_calc_pad(); curses_update(dcl, 0, 0, width, height); continue; } #endif keycode = curses2keycode[chr]; keycode_alt = 0; /* alt key */ if (keycode == 1) { nextchr = getch(); if (nextchr != ERR) { chr = nextchr; keycode_alt = ALT; keycode = curses2keycode[nextchr]; nextchr = ERR; if (keycode != -1) { keycode |= ALT; /* process keys reserved for qemu */ if (keycode >= QEMU_KEY_CONSOLE0 && keycode < QEMU_KEY_CONSOLE0 + 9) { erase(); wnoutrefresh(stdscr); 
console_select(keycode - QEMU_KEY_CONSOLE0); invalidate = 1; continue; } } } } if (kbd_layout) { keysym = -1; if (chr < CURSES_KEYS) keysym = curses2keysym[chr]; if (keysym == -1) { if (chr < ' ') { keysym = chr + '@'; if (keysym >= 'A' && keysym <= 'Z') keysym += 'a' - 'A'; keysym |= KEYSYM_CNTRL; } else keysym = chr; } keycode = keysym2scancode(kbd_layout, keysym & KEYSYM_MASK); if (keycode == 0) continue; keycode |= (keysym & ~KEYSYM_MASK) >> 16; keycode |= keycode_alt; } if (keycode == -1) continue; if (qemu_console_is_graphic(NULL)) { /* since terminals don't know about key press and release * events, we need to emit both for each key received */ if (keycode & SHIFT) { qemu_input_event_send_key_number(NULL, SHIFT_CODE, true); qemu_input_event_send_key_delay(0); } if (keycode & CNTRL) { qemu_input_event_send_key_number(NULL, CNTRL_CODE, true); qemu_input_event_send_key_delay(0); } if (keycode & ALT) { qemu_input_event_send_key_number(NULL, ALT_CODE, true); qemu_input_event_send_key_delay(0); } if (keycode & ALTGR) { qemu_input_event_send_key_number(NULL, GREY | ALT_CODE, true); qemu_input_event_send_key_delay(0); } qemu_input_event_send_key_number(NULL, keycode & KEY_MASK, true); qemu_input_event_send_key_delay(0); qemu_input_event_send_key_number(NULL, keycode & KEY_MASK, false); qemu_input_event_send_key_delay(0); if (keycode & ALTGR) { qemu_input_event_send_key_number(NULL, GREY | ALT_CODE, false); qemu_input_event_send_key_delay(0); } if (keycode & ALT) { qemu_input_event_send_key_number(NULL, ALT_CODE, false); qemu_input_event_send_key_delay(0); } if (keycode & CNTRL) { qemu_input_event_send_key_number(NULL, CNTRL_CODE, false); qemu_input_event_send_key_delay(0); } if (keycode & SHIFT) { qemu_input_event_send_key_number(NULL, SHIFT_CODE, false); qemu_input_event_send_key_delay(0); } } else { keysym = curses2qemu[chr]; if (keysym == -1) keysym = chr; kbd_put_keysym(keysym); } } }"} {"target": 1, "idx": 3596, "func": "static int64_t read_ts(char **line, int *duration) { int64_t start, end; if (sscanf(*line, \"%\"SCNd64\",%\"SCNd64, &start, &end) == 2) { *line += strcspn(*line, \"\\\"\") + 1; *duration = end - start; return start; } return AV_NOPTS_VALUE; }"} {"target": 1, "idx": 3598, "func": "static void raven_class_init(ObjectClass *klass, void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); k->init = raven_init; k->vendor_id = PCI_VENDOR_ID_MOTOROLA; k->device_id = PCI_DEVICE_ID_MOTOROLA_RAVEN; k->revision = 0x00; k->class_id = PCI_CLASS_BRIDGE_HOST; dc->desc = \"PReP Host Bridge - Motorola Raven\"; dc->vmsd = &vmstate_raven; dc->no_user = 1; }"} {"target": 1, "idx": 3602, "func": "static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **errp) { int size; char *path; void *ptr; char name[32]; const VMStateDescription *vmsd; if (!pdev->romfile) return; if (strlen(pdev->romfile) == 0) return; if (!pdev->rom_bar) { /* * Load rom via fw_cfg instead of creating a rom bar, * for 0.11 compatibility. */ int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE); /* * Hot-plugged devices can't use the option ROM * if the rom bar is disabled. 
*/ if (DEVICE(pdev)->hotplugged) { error_setg(errp, \"Hot-plugged device without ROM bar\" \" can't have an option ROM\"); return; } if (class == 0x0300) { rom_add_vga(pdev->romfile); } else { rom_add_option(pdev->romfile, -1); } return; } path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile); if (path == NULL) { path = g_strdup(pdev->romfile); } size = get_image_size(path); if (size < 0) { error_setg(errp, \"failed to find romfile \\\"%s\\\"\", pdev->romfile); g_free(path); return; } else if (size == 0) { error_setg(errp, \"romfile \\\"%s\\\" is empty\", pdev->romfile); g_free(path); return; } size = pow2ceil(size); vmsd = qdev_get_vmsd(DEVICE(pdev)); if (vmsd) { snprintf(name, sizeof(name), \"%s.rom\", vmsd->name); } else { snprintf(name, sizeof(name), \"%s.rom\", object_get_typename(OBJECT(pdev))); } pdev->has_rom = true; memory_region_init_ram(&pdev->rom, OBJECT(pdev), name, size, &error_abort); vmstate_register_ram(&pdev->rom, &pdev->qdev); ptr = memory_region_get_ram_ptr(&pdev->rom); load_image(path, ptr); g_free(path); if (is_default_rom) { /* Only the default rom images will be patched (if needed). */ pci_patch_ids(pdev, ptr, size); } pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom); }"} {"target": 1, "idx": 3610, "func": "static void mov_write_uuidprof_tag(AVIOContext *pb, AVFormatContext *s) { AVStream *video_st = s->streams[0]; AVCodecParameters *video_par = s->streams[0]->codecpar; AVCodecParameters *audio_par = s->streams[1]->codecpar; int audio_rate = audio_par->sample_rate; // TODO: should be avg_frame_rate int frame_rate = ((video_st->time_base.den) * (0x10000)) / (video_st->time_base.num); int audio_kbitrate = audio_par->bit_rate / 1000; int video_kbitrate = FFMIN(video_par->bit_rate / 1000, 800 - audio_kbitrate); avio_wb32(pb, 0x94); /* size */ ffio_wfourcc(pb, \"uuid\"); ffio_wfourcc(pb, \"PROF\"); avio_wb32(pb, 0x21d24fce); /* 96 bit UUID */ avio_wb32(pb, 0xbb88695c); avio_wb32(pb, 0xfac9c740); avio_wb32(pb, 0x0); /* ? */ avio_wb32(pb, 0x3); /* 3 sections ? */ avio_wb32(pb, 0x14); /* size */ ffio_wfourcc(pb, \"FPRF\"); avio_wb32(pb, 0x0); /* ? */ avio_wb32(pb, 0x0); /* ? */ avio_wb32(pb, 0x0); /* ? */ avio_wb32(pb, 0x2c); /* size */ ffio_wfourcc(pb, \"APRF\"); /* audio */ avio_wb32(pb, 0x0); avio_wb32(pb, 0x2); /* TrackID */ ffio_wfourcc(pb, \"mp4a\"); avio_wb32(pb, 0x20f); avio_wb32(pb, 0x0); avio_wb32(pb, audio_kbitrate); avio_wb32(pb, audio_kbitrate); avio_wb32(pb, audio_rate); avio_wb32(pb, audio_par->channels); avio_wb32(pb, 0x34); /* size */ ffio_wfourcc(pb, \"VPRF\"); /* video */ avio_wb32(pb, 0x0); avio_wb32(pb, 0x1); /* TrackID */ if (video_par->codec_id == AV_CODEC_ID_H264) { ffio_wfourcc(pb, \"avc1\"); avio_wb16(pb, 0x014D); avio_wb16(pb, 0x0015); } else { ffio_wfourcc(pb, \"mp4v\"); avio_wb16(pb, 0x0000); avio_wb16(pb, 0x0103); } avio_wb32(pb, 0x0); avio_wb32(pb, video_kbitrate); avio_wb32(pb, video_kbitrate); avio_wb32(pb, frame_rate); avio_wb32(pb, frame_rate); avio_wb16(pb, video_par->width); avio_wb16(pb, video_par->height); avio_wb32(pb, 0x010001); /* ? 
*/ }"} {"target": 1, "idx": 3611, "func": "static char *spapr_phb_vfio_get_loc_code(sPAPRPHBState *sphb, PCIDevice *pdev) { char *path = NULL, *buf = NULL, *host = NULL; /* Get the PCI VFIO host id */ host = object_property_get_str(OBJECT(pdev), \"host\", NULL); if (!host) { goto err_out; } /* Construct the path of the file that will give us the DT location */ path = g_strdup_printf(\"/sys/bus/pci/devices/%s/devspec\", host); g_free(host); if (!path || !g_file_get_contents(path, &buf, NULL, NULL)) { goto err_out; } g_free(path); /* Construct and read from host device tree the loc-code */ path = g_strdup_printf(\"/proc/device-tree%s/ibm,loc-code\", buf); g_free(buf); if (!path || !g_file_get_contents(path, &buf, NULL, NULL)) { goto err_out; } return buf; err_out: g_free(path); return NULL; }"} {"target": 1, "idx": 3621, "func": "static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat) { //FIXME Optimize (just quickly writen not opti..) int i; for (i=0; i>19); } if (!uDest) return; if (dstFormat == PIX_FMT_NV12) for (i=0; i>19); uDest[2*i+1]= av_clip_uint8(v>>19); } else for (i=0; i>19); uDest[2*i+1]= av_clip_uint8(u>>19); } }"} {"target": 1, "idx": 3634, "func": "static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val) { uint32_t qid; if (addr & ((1 << 2) - 1)) { return; } if (((addr - 0x1000) >> 2) & 1) { uint16_t new_head = val & 0xffff; int start_sqs; NvmeCQueue *cq; qid = (addr - (0x1000 + (1 << 2))) >> 3; if (nvme_check_cqid(n, qid)) { return; } cq = n->cq[qid]; if (new_head >= cq->size) { return; } start_sqs = nvme_cq_full(cq) ? 1 : 0; cq->head = new_head; if (start_sqs) { NvmeSQueue *sq; QTAILQ_FOREACH(sq, &cq->sq_list, entry) { timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); } timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); } if (cq->tail != cq->head) { nvme_isr_notify(n, cq); } } else { uint16_t new_tail = val & 0xffff; NvmeSQueue *sq; qid = (addr - 0x1000) >> 3; if (nvme_check_sqid(n, qid)) { return; } sq = n->sq[qid]; if (new_tail >= sq->size) { return; } sq->tail = new_tail; timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); } }"} {"target": 0, "idx": 3658, "func": "static av_cold void build_modpred(Indeo3DecodeContext *s) { int i, j; s->ModPred = av_malloc(8 * 128); for (i=0; i < 128; ++i) { s->ModPred[i+0*128] = i > 126 ? 254 : 2*(i + 1 - ((i + 1) % 2)); s->ModPred[i+1*128] = i == 7 ? 20 : i == 119 || i == 120 ? 236 : 2*(i + 2 - ((i + 1) % 3)); s->ModPred[i+2*128] = i > 125 ? 248 : 2*(i + 2 - ((i + 2) % 4)); s->ModPred[i+3*128] = 2*(i + 1 - ((i - 3) % 5)); s->ModPred[i+4*128] = i == 8 ? 20 : 2*(i + 1 - ((i - 3) % 6)); s->ModPred[i+5*128] = 2*(i + 4 - ((i + 3) % 7)); s->ModPred[i+6*128] = i > 123 ? 240 : 2*(i + 4 - ((i + 4) % 8)); s->ModPred[i+7*128] = 2*(i + 5 - ((i + 4) % 9)); } s->corrector_type = av_malloc(24 * 256); for (i=0; i < 24; ++i) { for (j=0; j < 256; ++j) { s->corrector_type[i*256+j] = j < corrector_type_0[i] ? 1 : j < 248 || (i == 16 && j == 248) ? 
0 : corrector_type_2[j - 248]; } } }"} {"target": 1, "idx": 3681, "func": "static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val) { TCGv tmp; tmp = new_tmp(); tcg_gen_trunc_i64_i32(tmp, val); store_reg(s, rlow, tmp); tmp = new_tmp(); tcg_gen_shri_i64(val, val, 32); tcg_gen_trunc_i64_i32(tmp, val); store_reg(s, rhigh, tmp); }"} {"target": 1, "idx": 3691, "func": "static int ccid_bulk_in_copy_to_guest(USBCCIDState *s, uint8_t *data, int len) { int ret = 0; assert(len > 0); ccid_bulk_in_get(s); if (s->current_bulk_in != NULL) { ret = MIN(s->current_bulk_in->len - s->current_bulk_in->pos, len); memcpy(data, s->current_bulk_in->data + s->current_bulk_in->pos, ret); s->current_bulk_in->pos += ret; if (s->current_bulk_in->pos == s->current_bulk_in->len) { ccid_bulk_in_release(s); } } else { /* return when device has no data - usb 2.0 spec Table 8-4 */ ret = USB_RET_NAK; } if (ret > 0) { DPRINTF(s, D_MORE_INFO, \"%s: %d/%d req/act to guest (BULK_IN)\\n\", __func__, len, ret); } if (ret != USB_RET_NAK && ret < len) { DPRINTF(s, 1, \"%s: returning short (EREMOTEIO) %d < %d\\n\", __func__, ret, len); } return ret; }"} {"target": 1, "idx": 3714, "func": "static void lms_update(WmallDecodeCtx *s, int ich, int ilms, int16_t input, int16_t pred) { int16_t icoef; int recent = s->cdlms[ich][ilms].recent; int16_t range = 1 << (s->bits_per_sample - 1); int bps = s->bits_per_sample > 16 ? 4 : 2; // bytes per sample if (input > pred) { for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++) s->cdlms[ich][ilms].coefs[icoef] += s->cdlms[ich][ilms].lms_updates[icoef + recent]; } else { for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++) s->cdlms[ich][ilms].coefs[icoef] -= s->cdlms[ich][ilms].lms_updates[icoef]; // XXX: [icoef + recent] ? } s->cdlms[ich][ilms].recent--; s->cdlms[ich][ilms].lms_prevvalues[recent] = av_clip(input, -range, range - 1); if (input > pred) s->cdlms[ich][ilms].lms_updates[recent] = s->update_speed[ich]; else if (input < pred) s->cdlms[ich][ilms].lms_updates[recent] = -s->update_speed[ich]; /* XXX: spec says: cdlms[iCh][ilms].updates[iRecent + cdlms[iCh][ilms].order >> 4] >>= 2; lms_updates[iCh][ilms][iRecent + cdlms[iCh][ilms].order >> 3] >>= 1; Questions is - are cdlms[iCh][ilms].updates[] and lms_updates[][][] two seperate buffers? Here I've assumed that the two are same which makes more sense to me. */ s->cdlms[ich][ilms].lms_updates[recent + s->cdlms[ich][ilms].order >> 4] >>= 2; s->cdlms[ich][ilms].lms_updates[recent + s->cdlms[ich][ilms].order >> 3] >>= 1; /* XXX: recent + (s->cdlms[ich][ilms].order >> 4) ? */ if (s->cdlms[ich][ilms].recent == 0) { /* XXX: This memcpy()s will probably fail if a fixed 32-bit buffer is used. follow kshishkov's suggestion of using a union. 
*/ memcpy(s->cdlms[ich][ilms].lms_prevvalues + s->cdlms[ich][ilms].order, s->cdlms[ich][ilms].lms_prevvalues, bps * s->cdlms[ich][ilms].order); memcpy(s->cdlms[ich][ilms].lms_updates + s->cdlms[ich][ilms].order, s->cdlms[ich][ilms].lms_updates, bps * s->cdlms[ich][ilms].order); s->cdlms[ich][ilms].recent = s->cdlms[ich][ilms].order; } }"} {"target": 1, "idx": 3721, "func": "static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int index, const char *filt_name, const char *args, AVClass *log_ctx) { AVFilter *filt; char inst_name[30]; char tmp_args[256]; int ret; snprintf(inst_name, sizeof(inst_name), \"Parsed filter %d %s\", index, filt_name); filt = avfilter_get_by_name(filt_name); if (!filt) { av_log(log_ctx, AV_LOG_ERROR, \"No such filter: '%s'\\n\", filt_name); return AVERROR(EINVAL); } ret = avfilter_open(filt_ctx, filt, inst_name); if (!*filt_ctx) { av_log(log_ctx, AV_LOG_ERROR, \"Error creating filter '%s'\\n\", filt_name); return ret; } if ((ret = avfilter_graph_add_filter(ctx, *filt_ctx)) < 0) { avfilter_free(*filt_ctx); return ret; } if (!strcmp(filt_name, \"scale\") && !strstr(args, \"flags\")) { snprintf(tmp_args, sizeof(tmp_args), \"%s:%s\", args, ctx->scale_sws_opts); args = tmp_args; } if ((ret = avfilter_init_filter(*filt_ctx, args, NULL)) < 0) { av_log(log_ctx, AV_LOG_ERROR, \"Error initializing filter '%s' with args '%s'\\n\", filt_name, args); return ret; } return 0; }"} {"target": 1, "idx": 3722, "func": "static void decode_mclms(WmallDecodeCtx *s) { s->mclms_order = (get_bits(&s->gb, 4) + 1) * 2; s->mclms_scaling = get_bits(&s->gb, 4); if(get_bits1(&s->gb)) { // mclms_send_coef int i; int send_coef_bits; int cbits = av_log2(s->mclms_scaling + 1); assert(cbits == my_log2(s->mclms_scaling + 1)); if(1 << cbits < s->mclms_scaling + 1) cbits++; send_coef_bits = (cbits ? get_bits(&s->gb, cbits) : 0) + 2; for(i = 0; i < s->mclms_order * s->num_channels * s->num_channels; i++) { s->mclms_coeffs[i] = get_bits(&s->gb, send_coef_bits); } for(i = 0; i < s->num_channels; i++) { int c; for(c = 0; c < i; c++) { s->mclms_coeffs_cur[i * s->num_channels + c] = get_bits(&s->gb, send_coef_bits); } } } }"} {"target": 1, "idx": 3731, "func": "static void ppc_spapr_init(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { PowerPCCPU *cpu; CPUPPCState *env; int i; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); target_phys_addr_t rma_alloc_size, rma_size; uint32_t initrd_base = 0; long kernel_size = 0, initrd_size = 0; long load_limit, rtas_limit, fw_size; long pteg_shift = 17; char *filename; spapr = g_malloc0(sizeof(*spapr)); QLIST_INIT(&spapr->phbs); cpu_ppc_hypercall = emulate_spapr_hypercall; /* Allocate RMA if necessary */ rma_alloc_size = kvmppc_alloc_rma(\"ppc_spapr.rma\", sysmem); if (rma_alloc_size == -1) { hw_error(\"qemu: Unable to create RMA\\n\"); exit(1); } if (rma_alloc_size && (rma_alloc_size < ram_size)) { rma_size = rma_alloc_size; } else { rma_size = ram_size; } /* We place the device tree and RTAS just below either the top of the RMA, * or just below 2GB, whichever is lowere, so that it can be * processed with 32-bit real mode code if necessary */ rtas_limit = MIN(rma_size, 0x80000000); spapr->rtas_addr = rtas_limit - RTAS_MAX_SIZE; spapr->fdt_addr = spapr->rtas_addr - FDT_MAX_SIZE; load_limit = spapr->fdt_addr - FW_OVERHEAD; /* init CPUs */ if (cpu_model == NULL) { cpu_model = kvm_enabled() ? 
\"host\" : \"POWER7\"; } for (i = 0; i < smp_cpus; i++) { cpu = cpu_ppc_init(cpu_model); if (cpu == NULL) { fprintf(stderr, \"Unable to find PowerPC CPU definition\\n\"); exit(1); } env = &cpu->env; /* Set time-base frequency to 512 MHz */ cpu_ppc_tb_init(env, TIMEBASE_FREQ); qemu_register_reset(spapr_cpu_reset, cpu); env->hreset_vector = 0x60; env->hreset_excp_prefix = 0; env->gpr[3] = env->cpu_index; } /* allocate RAM */ spapr->ram_limit = ram_size; if (spapr->ram_limit > rma_alloc_size) { ram_addr_t nonrma_base = rma_alloc_size; ram_addr_t nonrma_size = spapr->ram_limit - rma_alloc_size; memory_region_init_ram(ram, \"ppc_spapr.ram\", nonrma_size); vmstate_register_ram_global(ram); memory_region_add_subregion(sysmem, nonrma_base, ram); } /* allocate hash page table. For now we always make this 16mb, * later we should probably make it scale to the size of guest * RAM */ spapr->htab_size = 1ULL << (pteg_shift + 7); spapr->htab = qemu_memalign(spapr->htab_size, spapr->htab_size); for (env = first_cpu; env != NULL; env = env->next_cpu) { env->external_htab = spapr->htab; env->htab_base = -1; env->htab_mask = spapr->htab_size - 1; /* Tell KVM that we're in PAPR mode */ env->spr[SPR_SDR1] = (unsigned long)spapr->htab | ((pteg_shift + 7) - 18); env->spr[SPR_HIOR] = 0; if (kvm_enabled()) { kvmppc_set_papr(env); } } filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, \"spapr-rtas.bin\"); spapr->rtas_size = load_image_targphys(filename, spapr->rtas_addr, rtas_limit - spapr->rtas_addr); if (spapr->rtas_size < 0) { hw_error(\"qemu: could not load LPAR rtas '%s'\\n\", filename); exit(1); } if (spapr->rtas_size > RTAS_MAX_SIZE) { hw_error(\"RTAS too big ! 0x%lx bytes (max is 0x%x)\\n\", spapr->rtas_size, RTAS_MAX_SIZE); exit(1); } g_free(filename); /* Set up Interrupt Controller */ spapr->icp = xics_system_init(XICS_IRQS); spapr->next_irq = 16; /* Set up VIO bus */ spapr->vio_bus = spapr_vio_bus_init(); for (i = 0; i < MAX_SERIAL_PORTS; i++) { if (serial_hds[i]) { spapr_vty_create(spapr->vio_bus, serial_hds[i]); } } /* Set up PCI */ spapr_create_phb(spapr, \"pci\", SPAPR_PCI_BUID, SPAPR_PCI_MEM_WIN_ADDR, SPAPR_PCI_MEM_WIN_SIZE, SPAPR_PCI_IO_WIN_ADDR); for (i = 0; i < nb_nics; i++) { NICInfo *nd = &nd_table[i]; if (!nd->model) { nd->model = g_strdup(\"ibmveth\"); } if (strcmp(nd->model, \"ibmveth\") == 0) { spapr_vlan_create(spapr->vio_bus, nd); } else { pci_nic_init_nofail(&nd_table[i], nd->model, NULL); } } for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) { spapr_vscsi_create(spapr->vio_bus); } if (rma_size < (MIN_RMA_SLOF << 20)) { fprintf(stderr, \"qemu: pSeries SLOF firmware requires >= \" \"%ldM guest RMA (Real Mode Area memory)\\n\", MIN_RMA_SLOF); exit(1); } fprintf(stderr, \"sPAPR memory map:\\n\"); fprintf(stderr, \"RTAS : 0x%08lx..%08lx\\n\", (unsigned long)spapr->rtas_addr, (unsigned long)(spapr->rtas_addr + spapr->rtas_size - 1)); fprintf(stderr, \"FDT : 0x%08lx..%08lx\\n\", (unsigned long)spapr->fdt_addr, (unsigned long)(spapr->fdt_addr + FDT_MAX_SIZE - 1)); if (kernel_filename) { uint64_t lowaddr = 0; kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, NULL, &lowaddr, NULL, 1, ELF_MACHINE, 0); if (kernel_size < 0) { kernel_size = load_image_targphys(kernel_filename, KERNEL_LOAD_ADDR, load_limit - KERNEL_LOAD_ADDR); } if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } fprintf(stderr, \"Kernel : 0x%08x..%08lx\\n\", KERNEL_LOAD_ADDR, KERNEL_LOAD_ADDR + kernel_size - 1); /* load initrd */ if (initrd_filename) { /* Try 
to locate the initrd in the gap between the kernel * and the firmware. Add a bit of space just in case */ initrd_base = (KERNEL_LOAD_ADDR + kernel_size + 0x1ffff) & ~0xffff; initrd_size = load_image_targphys(initrd_filename, initrd_base, load_limit - initrd_base); if (initrd_size < 0) { fprintf(stderr, \"qemu: could not load initial ram disk '%s'\\n\", initrd_filename); exit(1); } fprintf(stderr, \"Ramdisk : 0x%08lx..%08lx\\n\", (long)initrd_base, (long)(initrd_base + initrd_size - 1)); } else { initrd_base = 0; initrd_size = 0; } } filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, FW_FILE_NAME); fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE); if (fw_size < 0) { hw_error(\"qemu: could not load LPAR rtas '%s'\\n\", filename); exit(1); } g_free(filename); fprintf(stderr, \"Firmware load : 0x%08x..%08lx\\n\", 0, fw_size); fprintf(stderr, \"Firmware runtime : 0x%08lx..%08lx\\n\", load_limit, (unsigned long)spapr->fdt_addr); spapr->entry_point = 0x100; /* SLOF will startup the secondary CPUs using RTAS */ for (env = first_cpu; env != NULL; env = env->next_cpu) { env->halted = 1; } /* Prepare the device tree */ spapr->fdt_skel = spapr_create_fdt_skel(cpu_model, rma_size, initrd_base, initrd_size, kernel_size, boot_device, kernel_cmdline, pteg_shift + 7); assert(spapr->fdt_skel != NULL); qemu_register_reset(spapr_reset, spapr); }"} {"target": 0, "idx": 3743, "func": "static av_cold int sonic_encode_init(AVCodecContext *avctx) { SonicContext *s = avctx->priv_data; PutBitContext pb; int i, version = 0; if (avctx->channels > MAX_CHANNELS) { av_log(avctx, AV_LOG_ERROR, \"Only mono and stereo streams are supported by now\\n\"); return AVERROR(EINVAL); /* only stereo or mono for now */ } if (avctx->channels == 2) s->decorrelation = MID_SIDE; else s->decorrelation = 3; if (avctx->codec->id == AV_CODEC_ID_SONIC_LS) { s->lossless = 1; s->num_taps = 32; s->downsampling = 1; s->quantization = 0.0; } else { s->num_taps = 128; s->downsampling = 2; s->quantization = 1.0; } // max tap 2048 if ((s->num_taps < 32) || (s->num_taps > 1024) || ((s->num_taps>>5)<<5 != s->num_taps)) { av_log(avctx, AV_LOG_ERROR, \"Invalid number of taps\\n\"); return AVERROR_INVALIDDATA; } // generate taps s->tap_quant = av_calloc(s->num_taps, sizeof(*s->tap_quant)); for (i = 0; i < s->num_taps; i++) s->tap_quant[i] = ff_sqrt(i+1); s->channels = avctx->channels; s->samplerate = avctx->sample_rate; s->block_align = 2048LL*s->samplerate/(44100*s->downsampling); s->frame_size = s->channels*s->block_align*s->downsampling; s->tail_size = s->num_taps*s->channels; s->tail = av_calloc(s->tail_size, sizeof(*s->tail)); if (!s->tail) return AVERROR(ENOMEM); s->predictor_k = av_calloc(s->num_taps, sizeof(*s->predictor_k) ); if (!s->predictor_k) return AVERROR(ENOMEM); for (i = 0; i < s->channels; i++) { s->coded_samples[i] = av_calloc(s->block_align, sizeof(**s->coded_samples)); if (!s->coded_samples[i]) return AVERROR(ENOMEM); } s->int_samples = av_calloc(s->frame_size, sizeof(*s->int_samples)); s->window_size = ((2*s->tail_size)+s->frame_size); s->window = av_calloc(s->window_size, sizeof(*s->window)); if (!s->window) return AVERROR(ENOMEM); avctx->extradata = av_mallocz(16); if (!avctx->extradata) return AVERROR(ENOMEM); init_put_bits(&pb, avctx->extradata, 16*8); put_bits(&pb, 2, version); // version if (version == 1) { put_bits(&pb, 2, s->channels); put_bits(&pb, 4, code_samplerate(s->samplerate)); } put_bits(&pb, 1, s->lossless); if (!s->lossless) put_bits(&pb, 3, SAMPLE_SHIFT); // XXX FIXME: sample precision put_bits(&pb, 2, 
s->decorrelation); put_bits(&pb, 2, s->downsampling); put_bits(&pb, 5, (s->num_taps >> 5)-1); // 32..1024 put_bits(&pb, 1, 0); // XXX FIXME: no custom tap quant table flush_put_bits(&pb); avctx->extradata_size = put_bits_count(&pb)/8; av_log(avctx, AV_LOG_INFO, \"Sonic: ver: %d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\\n\", version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling); avctx->frame_size = s->block_align*s->downsampling; return 0; }"} {"target": 0, "idx": 3748, "func": "int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma) { static const int8_t top[7] = { LEFT_DC_PRED8x8, 1, -1, -1 }; static const int8_t left[7] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 }; if (mode > 6U) { av_log(h->avctx, AV_LOG_ERROR, \"out of range intra chroma pred mode at %d %d\\n\", h->mb_x, h->mb_y); return -1; } if (!(h->top_samples_available & 0x8000)) { mode = top[mode]; if (mode < 0) { av_log(h->avctx, AV_LOG_ERROR, \"top block unavailable for requested intra mode at %d %d\\n\", h->mb_x, h->mb_y); return -1; } } if ((h->left_samples_available & 0x8080) != 0x8080) { mode = left[mode]; if (is_chroma && (h->left_samples_available & 0x8080)) { // mad cow disease mode, aka MBAFF + constrained_intra_pred mode = ALZHEIMER_DC_L0T_PRED8x8 + (!(h->left_samples_available & 0x8000)) + 2 * (mode == DC_128_PRED8x8); } if (mode < 0) { av_log(h->avctx, AV_LOG_ERROR, \"left block unavailable for requested intra mode at %d %d\\n\", h->mb_x, h->mb_y); return -1; } } return mode; }"} {"target": 0, "idx": 3755, "func": "void audio_pcm_init_info (struct audio_pcm_info *info, audsettings_t *as) { int bits = 8, sign = 0, shift = 0; switch (as->fmt) { case AUD_FMT_S8: sign = 1; case AUD_FMT_U8: break; case AUD_FMT_S16: sign = 1; case AUD_FMT_U16: bits = 16; shift = 1; break; case AUD_FMT_S32: sign = 1; case AUD_FMT_U32: bits = 32; shift = 2; break; } info->freq = as->freq; info->bits = bits; info->sign = sign; info->nchannels = as->nchannels; info->shift = (as->nchannels == 2) + shift; info->align = (1 << info->shift) - 1; info->bytes_per_second = info->freq << info->shift; info->swap_endianness = (as->endianness != AUDIO_HOST_ENDIANNESS); }"} {"target": 0, "idx": 3763, "func": "static void block_job_completed_txn_abort(BlockJob *job) { AioContext *ctx; BlockJobTxn *txn = job->txn; BlockJob *other_job, *next; if (txn->aborting) { /* * We are cancelled by another job, which will handle everything. */ return; } txn->aborting = true; /* We are the first failed job. Cancel other jobs. */ QLIST_FOREACH(other_job, &txn->jobs, txn_list) { ctx = blk_get_aio_context(other_job->blk); aio_context_acquire(ctx); } QLIST_FOREACH(other_job, &txn->jobs, txn_list) { if (other_job == job || other_job->completed) { /* Other jobs are \"effectively\" cancelled by us, set the status for * them; this job, however, may or may not be cancelled, depending * on the caller, so leave it. 
*/ if (other_job != job) { block_job_cancel_async(other_job); } continue; } block_job_cancel_sync(other_job); assert(other_job->completed); } QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) { ctx = blk_get_aio_context(other_job->blk); block_job_completed_single(other_job); aio_context_release(ctx); } }"} {"target": 0, "idx": 3766, "func": "void isa_mmio_setup(MemoryRegion *mr, target_phys_addr_t size) { memory_region_init_io(mr, &isa_mmio_ops, NULL, \"isa-mmio\", size); }"} {"target": 0, "idx": 3770, "func": "int qio_channel_socket_dgram_sync(QIOChannelSocket *ioc, SocketAddressLegacy *localAddr, SocketAddressLegacy *remoteAddr, Error **errp) { int fd; trace_qio_channel_socket_dgram_sync(ioc, localAddr, remoteAddr); fd = socket_dgram(remoteAddr, localAddr, errp); if (fd < 0) { trace_qio_channel_socket_dgram_fail(ioc); return -1; } trace_qio_channel_socket_dgram_complete(ioc, fd); if (qio_channel_socket_set_fd(ioc, fd, errp) < 0) { close(fd); return -1; } return 0; }"} {"target": 0, "idx": 3778, "func": "static void dec_bit(DisasContext *dc) { TCGv t0, t1; unsigned int op; int mem_index = cpu_mmu_index(dc->env); op = dc->ir & ((1 << 9) - 1); switch (op) { case 0x21: /* src. */ t0 = tcg_temp_new(); LOG_DIS(\"src r%d r%d\\n\", dc->rd, dc->ra); tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1); if (dc->rd) { t1 = tcg_temp_new(); read_carry(dc, t1); tcg_gen_shli_tl(t1, t1, 31); tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1); tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t1); tcg_temp_free(t1); } /* Update carry. */ write_carry(dc, t0); tcg_temp_free(t0); break; case 0x1: case 0x41: /* srl. */ t0 = tcg_temp_new(); LOG_DIS(\"srl r%d r%d\\n\", dc->rd, dc->ra); /* Update carry. */ tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1); write_carry(dc, t0); tcg_temp_free(t0); if (dc->rd) { if (op == 0x41) tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1); else tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1); } break; case 0x60: LOG_DIS(\"ext8s r%d r%d\\n\", dc->rd, dc->ra); tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]); break; case 0x61: LOG_DIS(\"ext16s r%d r%d\\n\", dc->rd, dc->ra); tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]); break; case 0x64: case 0x66: case 0x74: case 0x76: /* wdc. */ LOG_DIS(\"wdc r%d\\n\", dc->ra); if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) { tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN); t_gen_raise_exception(dc, EXCP_HW_EXCP); return; } break; case 0x68: /* wic. 
*/ LOG_DIS(\"wic r%d\\n\", dc->ra); if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) { tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN); t_gen_raise_exception(dc, EXCP_HW_EXCP); return; } break; case 0xe0: if ((dc->tb_flags & MSR_EE_FLAG) && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) { tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); t_gen_raise_exception(dc, EXCP_HW_EXCP); } if (dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR) { gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]); } break; case 0x1e0: /* swapb */ LOG_DIS(\"swapb r%d r%d\\n\", dc->rd, dc->ra); tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]); break; case 0x1e2: /*swaph */ LOG_DIS(\"swaph r%d r%d\\n\", dc->rd, dc->ra); tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16); break; default: cpu_abort(dc->env, \"unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\\n\", dc->pc, op, dc->rd, dc->ra, dc->rb); break; } }"} {"target": 0, "idx": 3795, "func": "int memory_region_get_fd(MemoryRegion *mr) { if (mr->alias) { return memory_region_get_fd(mr->alias); } assert(mr->terminates); return qemu_get_ram_fd(mr->ram_addr & TARGET_PAGE_MASK); }"} {"target": 0, "idx": 3811, "func": "static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn, TCGReg r1, TCGReg r2, TCGReg rn, tcg_target_long ofs, bool pre, bool w) { insn |= 1u << 31; /* ext */ insn |= pre << 24; insn |= w << 23; assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0); insn |= (ofs & (0x7f << 3)) << (15 - 3); tcg_out32(s, insn | r2 << 10 | rn << 5 | r1); }"} {"target": 0, "idx": 3815, "func": "static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { X86CPU *cpu = X86_CPU(obj); const int64_t min = 0; const int64_t max = INT_MAX; int64_t value; visit_type_int(v, &value, name, errp); if (error_is_set(errp)) { return; } if (value < min || value > max) { error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, \"\", name ? 
name : \"null\", value, min, max); return; } cpu->env.tsc_khz = value / 1000; }"} {"target": 1, "idx": 3824, "func": "static int cow_create(const char *filename, QemuOpts *opts, Error **errp) { struct cow_header_v2 cow_header; struct stat st; int64_t image_sectors = 0; char *image_filename = NULL; Error *local_err = NULL; int ret; BlockDriverState *cow_bs; /* Read out options */ image_sectors = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0) / 512; image_filename = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); ret = bdrv_create_file(filename, opts, &local_err); if (ret < 0) { error_propagate(errp, local_err); goto exit; } cow_bs = NULL; ret = bdrv_open(&cow_bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, NULL, &local_err); if (ret < 0) { error_propagate(errp, local_err); goto exit; } memset(&cow_header, 0, sizeof(cow_header)); cow_header.magic = cpu_to_be32(COW_MAGIC); cow_header.version = cpu_to_be32(COW_VERSION); if (image_filename) { /* Note: if no file, we put a dummy mtime */ cow_header.mtime = cpu_to_be32(0); if (stat(image_filename, &st) != 0) { goto mtime_fail; } cow_header.mtime = cpu_to_be32(st.st_mtime); mtime_fail: pstrcpy(cow_header.backing_file, sizeof(cow_header.backing_file), image_filename); } cow_header.sectorsize = cpu_to_be32(512); cow_header.size = cpu_to_be64(image_sectors * 512); ret = bdrv_pwrite(cow_bs, 0, &cow_header, sizeof(cow_header)); if (ret < 0) { goto exit; } /* resize to include at least all the bitmap */ ret = bdrv_truncate(cow_bs, sizeof(cow_header) + ((image_sectors + 7) >> 3)); if (ret < 0) { goto exit; } exit: g_free(image_filename); bdrv_unref(cow_bs); return ret; }"} {"target": 0, "idx": 3832, "func": "static av_cold int check_format(AVCodecContext *avctx) { AVCodecParserContext *parser; uint8_t *pout; int psize; int index; H264Context *h; int ret = -1; /* init parser & parse file */ parser = av_parser_init(avctx->codec->id); if (!parser) { av_log(avctx, AV_LOG_ERROR, \"Failed to open H.264 parser.\\n\"); goto final; } parser->flags = PARSER_FLAG_COMPLETE_FRAMES; index = av_parser_parse2(parser, avctx, &pout, &psize, NULL, 0, 0, 0, 0); if (index < 0) { av_log(avctx, AV_LOG_ERROR, \"Failed to parse this file.\\n\"); goto release_parser; } /* check if support */ h = parser->priv_data; switch (h->sps.bit_depth_luma) { case 8: if (!CHROMA444(h) && !CHROMA422(h)) { // only this will H.264 decoder switch to hwaccel ret = 0; break; } default: av_log(avctx, AV_LOG_ERROR, \"Unsupported file.\\n\"); } release_parser: av_parser_close(parser); final: return ret; }"} {"target": 0, "idx": 3834, "func": "static unsigned int dec_move_mr(DisasContext *dc) { int memsize = memsize_zz(dc); int insn_len; DIS(fprintf (logfile, \"move.%c [$r%u%s, $r%u\\n\", memsize_char(memsize), dc->op1, dc->postinc ? 
\"+]\" : \"]\", dc->op2)); if (memsize == 4) { insn_len = dec_prep_move_m(dc, 0, 4, cpu_R[dc->op2]); cris_cc_mask(dc, CC_MASK_NZ); cris_update_cc_op(dc, CC_OP_MOVE, 4); cris_update_cc_x(dc); cris_update_result(dc, cpu_R[dc->op2]); } else { TCGv t0; t0 = tcg_temp_new(TCG_TYPE_TL); insn_len = dec_prep_move_m(dc, 0, memsize, t0); cris_cc_mask(dc, CC_MASK_NZ); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize); tcg_temp_free(t0); } do_postinc(dc, memsize); return insn_len; }"} {"target": 0, "idx": 3838, "func": "static int rtc_initfn(ISADevice *dev) { RTCState *s = DO_UPCAST(RTCState, dev, dev); int base = 0x70; int isairq = 8; isa_init_irq(dev, &s->irq, isairq); s->cmos_data[RTC_REG_A] = 0x26; s->cmos_data[RTC_REG_B] = 0x02; s->cmos_data[RTC_REG_C] = 0x00; s->cmos_data[RTC_REG_D] = 0x80; rtc_set_date_from_host(s); s->periodic_timer = qemu_new_timer(rtc_clock, rtc_periodic_timer, s); #ifdef TARGET_I386 if (rtc_td_hack) s->coalesced_timer = qemu_new_timer(rtc_clock, rtc_coalesced_timer, s); #endif s->second_timer = qemu_new_timer(rtc_clock, rtc_update_second, s); s->second_timer2 = qemu_new_timer(rtc_clock, rtc_update_second2, s); s->next_second_time = qemu_get_clock(rtc_clock) + (get_ticks_per_sec() * 99) / 100; qemu_mod_timer(s->second_timer2, s->next_second_time); register_ioport_write(base, 2, 1, cmos_ioport_write, s); register_ioport_read(base, 2, 1, cmos_ioport_read, s); register_savevm(\"mc146818rtc\", base, 1, rtc_save, rtc_load, s); #ifdef TARGET_I386 if (rtc_td_hack) register_savevm(\"mc146818rtc-td\", base, 1, rtc_save_td, rtc_load_td, s); #endif qemu_register_reset(rtc_reset, s); return 0; }"} {"target": 0, "idx": 3845, "func": "static int virtio_net_load(QEMUFile *f, void *opaque, int version_id) { VirtIONet *n = opaque; VirtIODevice *vdev = VIRTIO_DEVICE(n); if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION) return -EINVAL; return virtio_load(vdev, f, version_id); }"} {"target": 0, "idx": 3901, "func": "static int read_packet(AVFormatContext *s1, AVPacket *pkt) { VideoDemuxData *s = s1->priv_data; char filename_bytes[1024]; char *filename = filename_bytes; int i; int size[3]={0}, ret[3]={0}; AVIOContext *f[3] = {NULL}; AVCodecContext *codec= s1->streams[0]->codec; if (!s->is_pipe) { /* loop over input */ if (s->loop && s->img_number > s->img_last) { s->img_number = s->img_first; } if (s->img_number > s->img_last) return AVERROR_EOF; if (s->use_glob) { #if HAVE_GLOB filename = s->globstate.gl_pathv[s->img_number]; #endif } else { if (av_get_frame_filename(filename_bytes, sizeof(filename_bytes), s->path, s->img_number)<0 && s->img_number > 1) return AVERROR(EIO); } for(i=0; i<3; i++){ if (avio_open2(&f[i], filename, AVIO_FLAG_READ, &s1->interrupt_callback, NULL) < 0) { if(i>=1) break; av_log(s1, AV_LOG_ERROR, \"Could not open file : %s\\n\",filename); return AVERROR(EIO); } size[i]= avio_size(f[i]); if(!s->split_planes) break; filename[ strlen(filename) - 1 ]= 'U' + i; } if(codec->codec_id == AV_CODEC_ID_RAWVIDEO && !codec->width) infer_size(&codec->width, &codec->height, size[0]); } else { f[0] = s1->pb; if (url_feof(f[0])) return AVERROR(EIO); size[0]= 4096; } av_new_packet(pkt, size[0] + size[1] + size[2]); pkt->stream_index = 0; pkt->flags |= AV_PKT_FLAG_KEY; pkt->size= 0; for(i=0; i<3; i++){ if(f[i]){ ret[i]= avio_read(f[i], pkt->data + pkt->size, size[i]); if (!s->is_pipe) avio_close(f[i]); if(ret[i]>0) pkt->size += ret[i]; } } if (ret[0] <= 0 || ret[1]<0 || ret[2]<0) { av_free_packet(pkt); return AVERROR(EIO); /* signal EOF */ } else { 
s->img_count++; s->img_number++; return 0; } }"} {"target": 1, "idx": 3913, "func": "static int smacker_read_header(AVFormatContext *s) { AVIOContext *pb = s->pb; SmackerContext *smk = s->priv_data; AVStream *st, *ast[7]; int i, ret; int tbase; /* read and check header */ smk->magic = avio_rl32(pb); if (smk->magic != MKTAG('S', 'M', 'K', '2') && smk->magic != MKTAG('S', 'M', 'K', '4')) smk->width = avio_rl32(pb); smk->height = avio_rl32(pb); smk->frames = avio_rl32(pb); smk->pts_inc = (int32_t)avio_rl32(pb); smk->flags = avio_rl32(pb); if(smk->flags & SMACKER_FLAG_RING_FRAME) smk->frames++; for(i = 0; i < 7; i++) smk->audio[i] = avio_rl32(pb); smk->treesize = avio_rl32(pb); if(smk->treesize >= UINT_MAX/4){ // smk->treesize + 16 must not overflow (this check is probably redundant) av_log(s, AV_LOG_ERROR, \"treesize too large\\n\"); //FIXME remove extradata \"rebuilding\" smk->mmap_size = avio_rl32(pb); smk->mclr_size = avio_rl32(pb); smk->full_size = avio_rl32(pb); smk->type_size = avio_rl32(pb); for(i = 0; i < 7; i++) { smk->rates[i] = avio_rl24(pb); smk->aflags[i] = avio_r8(pb); smk->pad = avio_rl32(pb); /* setup data */ if(smk->frames > 0xFFFFFF) { av_log(s, AV_LOG_ERROR, \"Too many frames: %\"PRIu32\"\\n\", smk->frames); smk->frm_size = av_malloc_array(smk->frames, sizeof(*smk->frm_size)); smk->frm_flags = av_malloc(smk->frames); if (!smk->frm_size || !smk->frm_flags) { av_freep(&smk->frm_size); av_freep(&smk->frm_flags); return AVERROR(ENOMEM); smk->is_ver4 = (smk->magic != MKTAG('S', 'M', 'K', '2')); /* read frame info */ for(i = 0; i < smk->frames; i++) { smk->frm_size[i] = avio_rl32(pb); for(i = 0; i < smk->frames; i++) { smk->frm_flags[i] = avio_r8(pb); /* init video codec */ st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); smk->videoindex = st->index; st->codec->width = smk->width; st->codec->height = smk->height; st->codec->pix_fmt = AV_PIX_FMT_PAL8; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = AV_CODEC_ID_SMACKVIDEO; st->codec->codec_tag = smk->magic; /* Smacker uses 100000 as internal timebase */ if(smk->pts_inc < 0) smk->pts_inc = -smk->pts_inc; else smk->pts_inc *= 100; tbase = 100000; av_reduce(&tbase, &smk->pts_inc, tbase, smk->pts_inc, (1UL<<31)-1); avpriv_set_pts_info(st, 33, smk->pts_inc, tbase); st->duration = smk->frames; /* handle possible audio streams */ for(i = 0; i < 7; i++) { smk->indexes[i] = -1; if (smk->rates[i]) { ast[i] = avformat_new_stream(s, NULL); if (!ast[i]) return AVERROR(ENOMEM); smk->indexes[i] = ast[i]->index; ast[i]->codec->codec_type = AVMEDIA_TYPE_AUDIO; if (smk->aflags[i] & SMK_AUD_BINKAUD) { ast[i]->codec->codec_id = AV_CODEC_ID_BINKAUDIO_RDFT; } else if (smk->aflags[i] & SMK_AUD_USEDCT) { ast[i]->codec->codec_id = AV_CODEC_ID_BINKAUDIO_DCT; } else if (smk->aflags[i] & SMK_AUD_PACKED){ ast[i]->codec->codec_id = AV_CODEC_ID_SMACKAUDIO; ast[i]->codec->codec_tag = MKTAG('S', 'M', 'K', 'A'); } else { ast[i]->codec->codec_id = AV_CODEC_ID_PCM_U8; if (smk->aflags[i] & SMK_AUD_STEREO) { ast[i]->codec->channels = 2; ast[i]->codec->channel_layout = AV_CH_LAYOUT_STEREO; } else { ast[i]->codec->channels = 1; ast[i]->codec->channel_layout = AV_CH_LAYOUT_MONO; ast[i]->codec->sample_rate = smk->rates[i]; ast[i]->codec->bits_per_coded_sample = (smk->aflags[i] & SMK_AUD_16BITS) ? 
16 : 8; if(ast[i]->codec->bits_per_coded_sample == 16 && ast[i]->codec->codec_id == AV_CODEC_ID_PCM_U8) ast[i]->codec->codec_id = AV_CODEC_ID_PCM_S16LE; avpriv_set_pts_info(ast[i], 64, 1, ast[i]->codec->sample_rate * ast[i]->codec->channels * ast[i]->codec->bits_per_coded_sample / 8); /* load trees to extradata, they will be unpacked by decoder */ if(ff_alloc_extradata(st->codec, smk->treesize + 16)){ av_log(s, AV_LOG_ERROR, \"Cannot allocate %\"PRIu32\" bytes of extradata\\n\", smk->treesize + 16); av_freep(&smk->frm_size); av_freep(&smk->frm_flags); return AVERROR(ENOMEM); ret = avio_read(pb, st->codec->extradata + 16, st->codec->extradata_size - 16); if(ret != st->codec->extradata_size - 16){ av_freep(&smk->frm_size); av_freep(&smk->frm_flags); return AVERROR(EIO); ((int32_t*)st->codec->extradata)[0] = av_le2ne32(smk->mmap_size); ((int32_t*)st->codec->extradata)[1] = av_le2ne32(smk->mclr_size); ((int32_t*)st->codec->extradata)[2] = av_le2ne32(smk->full_size); ((int32_t*)st->codec->extradata)[3] = av_le2ne32(smk->type_size); smk->curstream = -1; smk->nextpos = avio_tell(pb); return 0;"} {"target": 0, "idx": 3920, "func": "static int coroutine_fn raw_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) { return bdrv_co_discard(bs->file->bs, sector_num, nb_sectors); }"} {"target": 0, "idx": 3924, "func": "BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, int64_t sector_num, int nb_sectors, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque); return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors, BDRV_REQ_ZERO_WRITE | flags, cb, opaque, true); }"} {"target": 0, "idx": 3926, "func": "static void virtio_pci_reset(DeviceState *d) { VirtIOPCIProxy *proxy = container_of(d, VirtIOPCIProxy, pci_dev.qdev); virtio_reset(proxy->vdev); msix_reset(&proxy->pci_dev); proxy->flags = 0; }"} {"target": 0, "idx": 3933, "func": "static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv, TCGReg dst, TCGReg lhs, TCGArg rhs, bool rhs_is_const) { /* Emit either the reg,imm or reg,reg form of a data-processing insn. * rhs must satisfy the \"rIK\" constraint. */ if (rhs_is_const) { int rot = encode_imm(rhs); if (rot < 0) { rhs = ~rhs; rot = encode_imm(rhs); assert(rot >= 0); opc = opinv; } tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7)); } else { tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); } }"} {"target": 0, "idx": 3946, "func": "static int twl92230_init(i2c_slave *i2c) { MenelausState *s = FROM_I2C_SLAVE(MenelausState, i2c); s->rtc.hz_tm = qemu_new_timer(rt_clock, menelaus_rtc_hz, s); /* Three output pins plus one interrupt pin. 
*/ qdev_init_gpio_out(&i2c->qdev, s->out, 4); qdev_init_gpio_in(&i2c->qdev, menelaus_gpio_set, 3); s->pwrbtn = qemu_allocate_irqs(menelaus_pwrbtn_set, s, 1)[0]; menelaus_reset(&s->i2c); return 0; }"} {"target": 1, "idx": 3956, "func": "static int mov_write_packet(AVFormatContext *s, AVPacket *pkt) { MOVContext *mov = s->priv_data; ByteIOContext *pb = s->pb; MOVTrack *trk = &mov->tracks[pkt->stream_index]; AVCodecContext *enc = trk->enc; unsigned int samplesInChunk = 0; int size= pkt->size; if (url_is_streamed(s->pb)) return 0; /* Can't handle that */ if (!size) return 0; /* Discard 0 sized packets */ if (enc->codec_id == CODEC_ID_AMR_NB) { /* We must find out how many AMR blocks there are in one packet */ static uint16_t packed_size[16] = {13, 14, 16, 18, 20, 21, 27, 32, 6, 0, 0, 0, 0, 0, 0, 0}; int len = 0; while (len < size && samplesInChunk < 100) { len += packed_size[(pkt->data[len] >> 3) & 0x0F]; samplesInChunk++; } if(samplesInChunk > 1){ av_log(s, AV_LOG_ERROR, \"fatal error, input is not a single packet, implement a AVParser for it\\n\"); return -1; } } else if (trk->sampleSize) samplesInChunk = size/trk->sampleSize; else samplesInChunk = 1; /* copy extradata if it exists */ if (trk->vosLen == 0 && enc->extradata_size > 0) { trk->vosLen = enc->extradata_size; trk->vosData = av_malloc(trk->vosLen); memcpy(trk->vosData, enc->extradata, trk->vosLen); } if (enc->codec_id == CODEC_ID_H264 && trk->vosLen > 0 && *(uint8_t *)trk->vosData != 1) { /* from x264 or from bytestream h264 */ /* nal reformating needed */ int ret = ff_avc_parse_nal_units(pkt->data, &pkt->data, &pkt->size); if (ret < 0) return ret; assert(pkt->size); size = pkt->size; } else if (enc->codec_id == CODEC_ID_DNXHD && !trk->vosLen) { /* copy frame to create needed atoms */ trk->vosLen = size; trk->vosData = av_malloc(size); memcpy(trk->vosData, pkt->data, size); } if (!(trk->entry % MOV_INDEX_CLUSTER_SIZE)) { trk->cluster = av_realloc(trk->cluster, (trk->entry + MOV_INDEX_CLUSTER_SIZE) * sizeof(*trk->cluster)); if (!trk->cluster) return -1; } trk->cluster[trk->entry].pos = url_ftell(pb); trk->cluster[trk->entry].samplesInChunk = samplesInChunk; trk->cluster[trk->entry].size = size; trk->cluster[trk->entry].entries = samplesInChunk; trk->cluster[trk->entry].dts = pkt->dts; trk->trackDuration = pkt->dts - trk->cluster[0].dts + pkt->duration; if (pkt->pts == AV_NOPTS_VALUE) { av_log(s, AV_LOG_WARNING, \"pts has no value\\n\"); pkt->pts = pkt->dts; } if (pkt->dts != pkt->pts) trk->hasBframes = 1; trk->cluster[trk->entry].cts = pkt->pts - pkt->dts; trk->cluster[trk->entry].key_frame = !!(pkt->flags & PKT_FLAG_KEY); if(trk->cluster[trk->entry].key_frame) trk->hasKeyframes++; trk->entry++; trk->sampleCount += samplesInChunk; mov->mdat_size += size; put_buffer(pb, pkt->data, size); put_flush_packet(pb); return 0; }"} {"target": 1, "idx": 3960, "func": "static void gen_st_cond (DisasContext *ctx, uint32_t opc, int rt, int base, int16_t offset) { const char *opn = \"st_cond\"; TCGv t0, t1; t0 = tcg_temp_local_new(); gen_base_offset_addr(ctx, t0, base, offset); /* Don't do NOP if destination is zero: we must perform the actual memory access. 
*/ t1 = tcg_temp_local_new(); gen_load_gpr(t1, rt); switch (opc) { #if defined(TARGET_MIPS64) case OPC_SCD: save_cpu_state(ctx, 0); op_st_scd(t1, t0, rt, ctx); opn = \"scd\"; break; #endif case OPC_SC: save_cpu_state(ctx, 1); op_st_sc(t1, t0, rt, ctx); opn = \"sc\"; break; } (void)opn; /* avoid a compiler warning */ MIPS_DEBUG(\"%s %s, %d(%s)\", opn, regnames[rt], offset, regnames[base]); tcg_temp_free(t1); tcg_temp_free(t0); }"} {"target": 1, "idx": 3970, "func": "static int32_t bmdma_prepare_buf(IDEDMA *dma, int is_write) { BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); IDEState *s = bmdma_active_if(bm); PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev); struct { uint32_t addr; uint32_t size; } prd; int l, len; pci_dma_sglist_init(&s->sg, pci_dev, s->nsector / (BMDMA_PAGE_SIZE / 512) + 1); s->io_buffer_size = 0; for(;;) { if (bm->cur_prd_len == 0) { /* end of table (with a fail safe of one page) */ if (bm->cur_prd_last || (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) { return s->io_buffer_size; } pci_dma_read(pci_dev, bm->cur_addr, &prd, 8); bm->cur_addr += 8; prd.addr = le32_to_cpu(prd.addr); prd.size = le32_to_cpu(prd.size); len = prd.size & 0xfffe; if (len == 0) len = 0x10000; bm->cur_prd_len = len; bm->cur_prd_addr = prd.addr; bm->cur_prd_last = (prd.size & 0x80000000); } l = bm->cur_prd_len; if (l > 0) { qemu_sglist_add(&s->sg, bm->cur_prd_addr, l); /* Note: We limit the max transfer to be 2GiB. * This should accommodate the largest ATA transaction * for LBA48 (65,536 sectors) and 32K sector sizes. */ if (s->sg.size > INT32_MAX) { error_report(\"IDE: sglist describes more than 2GiB.\"); break; } bm->cur_prd_addr += l; bm->cur_prd_len -= l; s->io_buffer_size += l; } } qemu_sglist_destroy(&s->sg); s->io_buffer_size = 0; return -1; }"} {"target": 0, "idx": 3971, "func": "static int apply_window_and_mdct(vorbis_enc_context *venc, float *audio, int samples) { int channel; const float * win = venc->win[0]; int window_len = 1 << (venc->log2_blocksize[0] - 1); float n = (float)(1 << venc->log2_blocksize[0]) / 4.0; AVFloatDSPContext *fdsp = venc->fdsp; if (!venc->have_saved && !samples) return 0; if (venc->have_saved) { for (channel = 0; channel < venc->channels; channel++) memcpy(venc->samples + channel * window_len * 2, venc->saved + channel * window_len, sizeof(float) * window_len); } else { for (channel = 0; channel < venc->channels; channel++) memset(venc->samples + channel * window_len * 2, 0, sizeof(float) * window_len); } if (samples) { for (channel = 0; channel < venc->channels; channel++) { float *offset = venc->samples + channel * window_len * 2 + window_len; fdsp->vector_fmul_reverse(offset, audio + channel * window_len, win, samples); fdsp->vector_fmul_scalar(offset, offset, 1/n, samples); } } else { for (channel = 0; channel < venc->channels; channel++) memset(venc->samples + channel * window_len * 2 + window_len, 0, sizeof(float) * window_len); } for (channel = 0; channel < venc->channels; channel++) venc->mdct[0].mdct_calc(&venc->mdct[0], venc->coeffs + channel * window_len, venc->samples + channel * window_len * 2); if (samples) { for (channel = 0; channel < venc->channels; channel++) { float *offset = venc->saved + channel * window_len; fdsp->vector_fmul(offset, audio + channel * window_len, win, samples); fdsp->vector_fmul_scalar(offset, offset, 1/n, samples); } venc->have_saved = 1; } else { venc->have_saved = 0; } return 1; }"} {"target": 1, "idx": 3977, "func": "static inline int popcountl(unsigned long l) { return BITS_PER_LONG == 32 ? 
ctpop32(l) : ctpop64(l); }"} {"target": 1, "idx": 3984, "func": "static int32_t parse_gain(const char *gain) { char *fraction; int scale = 10000; int32_t mb = 0; int sign = 1; int db; if (!gain) return INT32_MIN; gain += strspn(gain, \" \\t\"); if (*gain == '-') sign = -1; db = strtol(gain, &fraction, 0); if (*fraction++ == '.') { while (av_isdigit(*fraction) && scale) { mb += scale * (*fraction - '0'); scale /= 10; fraction++; } } if (abs(db) > (INT32_MAX - mb) / 100000) return INT32_MIN; return db * 100000 + sign * mb; }"} {"target": 1, "idx": 3988, "func": "static inline void ide_dma_submit_check(IDEState *s, BlockDriverCompletionFunc *dma_cb) { if (s->bus->dma->aiocb) return; dma_cb(s, -1); }"} {"target": 1, "idx": 4006, "func": "static BlockStats *bdrv_query_bds_stats(const BlockDriverState *bs, bool query_backing) { BlockStats *s = NULL; s = g_malloc0(sizeof(*s)); s->stats = g_malloc0(sizeof(*s->stats)); if (!bs) { return s; } if (bdrv_get_node_name(bs)[0]) { s->has_node_name = true; s->node_name = g_strdup(bdrv_get_node_name(bs)); } s->stats->wr_highest_offset = stat64_get(&bs->wr_highest_offset); if (bs->file) { s->has_parent = true; s->parent = bdrv_query_bds_stats(bs->file->bs, query_backing); } if (query_backing && bs->backing) { s->has_backing = true; s->backing = bdrv_query_bds_stats(bs->backing->bs, query_backing); } return s; }"} {"target": 1, "idx": 4009, "func": "static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst, const uint8_t *src, const uint8_t *src_end, int width, int esc_count) { int i = 0; int count; uint8_t zero_run = 0; const uint8_t *src_start = src; uint8_t mask1 = -(esc_count < 2); uint8_t mask2 = -(esc_count < 3); uint8_t *end = dst + (width - 2); avpriv_request_sample(l->avctx, \"zero_run_line\"); return AVERROR_PATCHWELCOME; output_zeros: if (l->zeros_rem) { count = FFMIN(l->zeros_rem, width - i); if (end - dst < count) { av_log(l->avctx, AV_LOG_ERROR, \"Too many zeros remaining.\\n\"); return AVERROR_INVALIDDATA; } memset(dst, 0, count); l->zeros_rem -= count; dst += count; } while (dst < end) { i = 0; while (!zero_run && dst + i < end) { i++; if (i+2 >= src_end - src) return AVERROR_INVALIDDATA; zero_run = !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2)); } if (zero_run) { zero_run = 0; i += esc_count; memcpy(dst, src, i); dst += i; l->zeros_rem = lag_calc_zero_run(src[i]); src += i + 1; goto output_zeros; } else { memcpy(dst, src, i); src += i; dst += i; } } return src - src_start; }"} {"target": 1, "idx": 4017, "func": "static int ram_init_all(RAMState **rsp) { Error *local_err = NULL; if (ram_state_init(rsp)) { return -1; } if (migrate_use_xbzrle()) { XBZRLE_cache_lock(); XBZRLE.zero_target_page = g_malloc0(TARGET_PAGE_SIZE); XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(), TARGET_PAGE_SIZE, &local_err); if (!XBZRLE.cache) { XBZRLE_cache_unlock(); error_report_err(local_err); g_free(*rsp); *rsp = NULL; return -1; } XBZRLE_cache_unlock(); /* We prefer not to abort if there is no memory */ XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE); if (!XBZRLE.encoded_buf) { error_report(\"Error allocating encoded_buf\"); g_free(*rsp); *rsp = NULL; return -1; } XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE); if (!XBZRLE.current_buf) { error_report(\"Error allocating current_buf\"); g_free(XBZRLE.encoded_buf); XBZRLE.encoded_buf = NULL; g_free(*rsp); *rsp = NULL; return -1; } } /* For memory_global_dirty_log_start below. 
*/ qemu_mutex_lock_iothread(); qemu_mutex_lock_ramlist(); rcu_read_lock(); /* Skip setting bitmap if there is no RAM */ if (ram_bytes_total()) { RAMBlock *block; QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { unsigned long pages = block->max_length >> TARGET_PAGE_BITS; block->bmap = bitmap_new(pages); bitmap_set(block->bmap, 0, pages); if (migrate_postcopy_ram()) { block->unsentmap = bitmap_new(pages); bitmap_set(block->unsentmap, 0, pages); } } } memory_global_dirty_log_start(); migration_bitmap_sync(*rsp); qemu_mutex_unlock_ramlist(); qemu_mutex_unlock_iothread(); rcu_read_unlock(); return 0; }"} {"target": 0, "idx": 4025, "func": "static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) { const unsigned int index_a = qp + h->slice_alpha_c0_offset; const int alpha = alpha_table[index_a]; const int beta = beta_table[qp + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]+1; tc[1] = tc0_table[index_a][bS[1]]+1; tc[2] = tc0_table[index_a][bS[2]]+1; tc[3] = tc0_table[index_a][bS[3]]+1; h->h264dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_h_loop_filter_chroma_intra(pix, stride, alpha, beta); } }"} {"target": 1, "idx": 4029, "func": "static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role, int w, int h) { ImageContext *img; HuffReader *hg; int i, j, ret, x, y, width; img = &s->image[role]; img->role = role; if (!img->frame) { img->frame = av_frame_alloc(); if (!img->frame) return AVERROR(ENOMEM); } img->frame->format = AV_PIX_FMT_ARGB; img->frame->width = w; img->frame->height = h; if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) { ThreadFrame pt = { .f = img->frame }; ret = ff_thread_get_buffer(s->avctx, &pt, 0); } else ret = av_frame_get_buffer(img->frame, 1); if (ret < 0) return ret; if (get_bits1(&s->gb)) { img->color_cache_bits = get_bits(&s->gb, 4); if (img->color_cache_bits < 1 || img->color_cache_bits > 11) { av_log(s->avctx, AV_LOG_ERROR, \"invalid color cache bits: %d\\n\", img->color_cache_bits); return AVERROR_INVALIDDATA; } img->color_cache = av_mallocz_array(1 << img->color_cache_bits, sizeof(*img->color_cache)); if (!img->color_cache) return AVERROR(ENOMEM); } else { img->color_cache_bits = 0; } img->nb_huffman_groups = 1; if (role == IMAGE_ROLE_ARGB && get_bits1(&s->gb)) { ret = decode_entropy_image(s); if (ret < 0) return ret; img->nb_huffman_groups = s->nb_huffman_groups; } img->huffman_groups = av_mallocz_array(img->nb_huffman_groups * HUFFMAN_CODES_PER_META_CODE, sizeof(*img->huffman_groups)); if (!img->huffman_groups) return AVERROR(ENOMEM); for (i = 0; i < img->nb_huffman_groups; i++) { hg = &img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE]; for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++) { int alphabet_size = alphabet_sizes[j]; if (!j && img->color_cache_bits > 0) alphabet_size += 1 << img->color_cache_bits; if (get_bits1(&s->gb)) { read_huffman_code_simple(s, &hg[j]); } else { ret = read_huffman_code_normal(s, &hg[j], alphabet_size); if (ret < 0) return ret; } } } width = img->frame->width; if (role == IMAGE_ROLE_ARGB && s->reduced_width > 0) width = s->reduced_width; x = 0; y = 0; while (y < img->frame->height) { int v; hg = get_huffman_group(s, img, x, y); v = huff_reader_get_symbol(&hg[HUFF_IDX_GREEN], &s->gb); if (v < NUM_LITERAL_CODES) { /* literal pixel values */ uint8_t *p = GET_PIXEL(img->frame, x, y); p[2] = v; p[1] = 
huff_reader_get_symbol(&hg[HUFF_IDX_RED], &s->gb); p[3] = huff_reader_get_symbol(&hg[HUFF_IDX_BLUE], &s->gb); p[0] = huff_reader_get_symbol(&hg[HUFF_IDX_ALPHA], &s->gb); if (img->color_cache_bits) color_cache_put(img, AV_RB32(p)); x++; if (x == width) { x = 0; y++; } } else if (v < NUM_LITERAL_CODES + NUM_LENGTH_CODES) { /* LZ77 backwards mapping */ int prefix_code, length, distance, ref_x, ref_y; /* parse length and distance */ prefix_code = v - NUM_LITERAL_CODES; if (prefix_code < 4) { length = prefix_code + 1; } else { int extra_bits = (prefix_code - 2) >> 1; int offset = 2 + (prefix_code & 1) << extra_bits; length = offset + get_bits(&s->gb, extra_bits) + 1; } prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb); if (prefix_code > 39) { av_log(s->avctx, AV_LOG_ERROR, \"distance prefix code too large: %d\\n\", prefix_code); return AVERROR_INVALIDDATA; } if (prefix_code < 4) { distance = prefix_code + 1; } else { int extra_bits = prefix_code - 2 >> 1; int offset = 2 + (prefix_code & 1) << extra_bits; distance = offset + get_bits(&s->gb, extra_bits) + 1; } /* find reference location */ if (distance <= NUM_SHORT_DISTANCES) { int xi = lz77_distance_offsets[distance - 1][0]; int yi = lz77_distance_offsets[distance - 1][1]; distance = FFMAX(1, xi + yi * width); } else { distance -= NUM_SHORT_DISTANCES; } ref_x = x; ref_y = y; if (distance <= x) { ref_x -= distance; distance = 0; } else { ref_x = 0; distance -= x; } while (distance >= width) { ref_y--; distance -= width; } if (distance > 0) { ref_x = width - distance; ref_y--; } ref_x = FFMAX(0, ref_x); ref_y = FFMAX(0, ref_y); /* copy pixels * source and dest regions can overlap and wrap lines, so just * copy per-pixel */ for (i = 0; i < length; i++) { uint8_t *p_ref = GET_PIXEL(img->frame, ref_x, ref_y); uint8_t *p = GET_PIXEL(img->frame, x, y); AV_COPY32(p, p_ref); if (img->color_cache_bits) color_cache_put(img, AV_RB32(p)); x++; ref_x++; if (x == width) { x = 0; y++; } if (ref_x == width) { ref_x = 0; ref_y++; } if (y == img->frame->height || ref_y == img->frame->height) break; } } else { /* read from color cache */ uint8_t *p = GET_PIXEL(img->frame, x, y); int cache_idx = v - (NUM_LITERAL_CODES + NUM_LENGTH_CODES); if (!img->color_cache_bits) { av_log(s->avctx, AV_LOG_ERROR, \"color cache not found\\n\"); return AVERROR_INVALIDDATA; } if (cache_idx >= 1 << img->color_cache_bits) { av_log(s->avctx, AV_LOG_ERROR, \"color cache index out-of-bounds\\n\"); return AVERROR_INVALIDDATA; } AV_WB32(p, img->color_cache[cache_idx]); x++; if (x == width) { x = 0; y++; } } } return 0; }"} {"target": 1, "idx": 4041, "func": "void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, op_pixels_func (*pix_op)[4], int motion_x, int motion_y, int h) { Wmv2Context * const w= (Wmv2Context*)s; uint8_t *ptr; int dxy, offset, mx, my, src_x, src_y, v_edge_pos, linesize, uvlinesize; int emu=0; dxy = ((motion_y & 1) << 1) | (motion_x & 1); dxy = 2*dxy + w->hshift; src_x = s->mb_x * 16 + (motion_x >> 1); src_y = s->mb_y * 16 + (motion_y >> 1); /* WARNING: do no forget half pels */ v_edge_pos = s->v_edge_pos; src_x = av_clip(src_x, -16, s->width); src_y = av_clip(src_y, -16, s->height); if(src_x<=-16 || src_x >= s->width) dxy &= ~3; if(src_y<=-16 || src_y >= s->height) dxy &= ~4; linesize = s->linesize; uvlinesize = s->uvlinesize; ptr = ref_picture[0] + (src_y * linesize) + src_x; if(src_x<1 || src_y<1 || src_x + 17 >= s->h_edge_pos || src_y + h+1 >= v_edge_pos){ 
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr - 1 - s->linesize, s->linesize, 19, 19, src_x-1, src_y-1, s->h_edge_pos, s->v_edge_pos); ptr= s->edge_emu_buffer + 1 + s->linesize; emu=1; } s->dsp.put_mspel_pixels_tab[dxy](dest_y , ptr , linesize); s->dsp.put_mspel_pixels_tab[dxy](dest_y+8 , ptr+8 , linesize); s->dsp.put_mspel_pixels_tab[dxy](dest_y +8*linesize, ptr +8*linesize, linesize); s->dsp.put_mspel_pixels_tab[dxy](dest_y+8+8*linesize, ptr+8+8*linesize, linesize); if(s->flags&CODEC_FLAG_GRAY) return; if (s->out_format == FMT_H263) { dxy = 0; if ((motion_x & 3) != 0) dxy |= 1; if ((motion_y & 3) != 0) dxy |= 2; mx = motion_x >> 2; my = motion_y >> 2; } else { mx = motion_x / 2; my = motion_y / 2; dxy = ((my & 1) << 1) | (mx & 1); mx >>= 1; my >>= 1; } src_x = s->mb_x * 8 + mx; src_y = s->mb_y * 8 + my; src_x = av_clip(src_x, -8, s->width >> 1); if (src_x == (s->width >> 1)) dxy &= ~1; src_y = av_clip(src_y, -8, s->height >> 1); if (src_y == (s->height >> 1)) dxy &= ~2; offset = (src_y * uvlinesize) + src_x; ptr = ref_picture[1] + offset; if(emu){ s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer; } pix_op[1][dxy](dest_cb, ptr, uvlinesize, h >> 1); ptr = ref_picture[2] + offset; if(emu){ s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer; } pix_op[1][dxy](dest_cr, ptr, uvlinesize, h >> 1); }"} {"target": 1, "idx": 4062, "func": "static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane, int width, int height, int src_stride, int stride) { const AVFrame *f = s->avctx->coded_frame; int x, y; int i; int block_width, block_height; int level; int threshold[6]; uint8_t *src = s->scratchbuf + stride * 16; const int lambda = (f->quality * f->quality) >> (2 * FF_LAMBDA_SHIFT); /* figure out the acceptable level thresholds in advance */ threshold[5] = QUALITY_THRESHOLD; for (level = 4; level >= 0; level--) threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER; block_width = (width + 15) / 16; block_height = (height + 15) / 16; if (f->pict_type == AV_PICTURE_TYPE_P) { s->m.avctx = s->avctx; s->m.current_picture_ptr = &s->m.current_picture; s->m.last_picture_ptr = &s->m.last_picture; s->m.last_picture.f.data[0] = ref_plane; s->m.linesize = s->m.last_picture.f.linesize[0] = s->m.new_picture.f.linesize[0] = s->m.current_picture.f.linesize[0] = stride; s->m.width = width; s->m.height = height; s->m.mb_width = block_width; s->m.mb_height = block_height; s->m.mb_stride = s->m.mb_width + 1; s->m.b8_stride = 2 * s->m.mb_width + 1; s->m.f_code = 1; s->m.pict_type = f->pict_type; s->m.me_method = s->avctx->me_method; s->m.me.scene_change_score = 0; s->m.flags = s->avctx->flags; // s->m.out_format = FMT_H263; // s->m.unrestricted_mv = 1; s->m.lambda = f->quality; s->m.qscale = s->m.lambda * 139 + FF_LAMBDA_SCALE * 64 >> FF_LAMBDA_SHIFT + 7; s->m.lambda2 = s->m.lambda * s->m.lambda + FF_LAMBDA_SCALE / 2 >> FF_LAMBDA_SHIFT; if (!s->motion_val8[plane]) { s->motion_val8[plane] = av_mallocz((s->m.b8_stride * block_height * 2 + 2) * 2 * sizeof(int16_t)); s->motion_val16[plane] = av_mallocz((s->m.mb_stride * (block_height + 2) + 1) * 2 * sizeof(int16_t)); } s->m.mb_type = s->mb_type; // dummies, to avoid segfaults s->m.current_picture.mb_mean = (uint8_t *)s->dummy; s->m.current_picture.mb_var = (uint16_t *)s->dummy; 
s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy; s->m.current_picture.mb_type = s->dummy; s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2; s->m.p_mv_table = s->motion_val16[plane] + s->m.mb_stride + 1; s->m.dsp = s->dsp; // move ff_init_me(&s->m); s->m.me.dia_size = s->avctx->dia_size; s->m.first_slice_line = 1; for (y = 0; y < block_height; y++) { s->m.new_picture.f.data[0] = src - y * 16 * stride; // ugly s->m.mb_y = y; for (i = 0; i < 16 && i + 16 * y < height; i++) { memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride], width); for (x = width; x < 16 * block_width; x++) src[i * stride + x] = src[i * stride + x - 1]; } for (; i < 16 && i + 16 * y < 16 * block_height; i++) memcpy(&src[i * stride], &src[(i - 1) * stride], 16 * block_width); for (x = 0; x < block_width; x++) { s->m.mb_x = x; ff_init_block_index(&s->m); ff_update_block_index(&s->m); ff_estimate_p_frame_motion(&s->m, x, y); } s->m.first_slice_line = 0; } ff_fix_long_p_mvs(&s->m); ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code, CANDIDATE_MB_TYPE_INTER, 0); } s->m.first_slice_line = 1; for (y = 0; y < block_height; y++) { for (i = 0; i < 16 && i + 16 * y < height; i++) { memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride], width); for (x = width; x < 16 * block_width; x++) src[i * stride + x] = src[i * stride + x - 1]; } for (; i < 16 && i + 16 * y < 16 * block_height; i++) memcpy(&src[i * stride], &src[(i - 1) * stride], 16 * block_width); s->m.mb_y = y; for (x = 0; x < block_width; x++) { uint8_t reorder_buffer[3][6][7 * 32]; int count[3][6]; int offset = y * 16 * stride + x * 16; uint8_t *decoded = decoded_plane + offset; uint8_t *ref = ref_plane + offset; int score[4] = { 0, 0, 0, 0 }, best; uint8_t *temp = s->scratchbuf; if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 3000) { // FIXME: check size av_log(s->avctx, AV_LOG_ERROR, \"encoded frame too large\\n\"); return -1; } s->m.mb_x = x; ff_init_block_index(&s->m); ff_update_block_index(&s->m); if (f->pict_type == AV_PICTURE_TYPE_I || (s->m.mb_type[x + y * s->m.mb_stride] & CANDIDATE_MB_TYPE_INTRA)) { for (i = 0; i < 6; i++) init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i], 7 * 32); if (f->pict_type == AV_PICTURE_TYPE_P) { const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA]; put_bits(&s->reorder_pb[5], vlc[1], vlc[0]); score[0] = vlc[1] * lambda; } score[0] += encode_block(s, src + 16 * x, NULL, temp, stride, 5, 64, lambda, 1); for (i = 0; i < 6; i++) { count[0][i] = put_bits_count(&s->reorder_pb[i]); flush_put_bits(&s->reorder_pb[i]); } } else score[0] = INT_MAX; best = 0; if (f->pict_type == AV_PICTURE_TYPE_P) { const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER]; int mx, my, pred_x, pred_y, dxy; int16_t *motion_ptr; motion_ptr = ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y); if (s->m.mb_type[x + y * s->m.mb_stride] & CANDIDATE_MB_TYPE_INTER) { for (i = 0; i < 6; i++) init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], 7 * 32); put_bits(&s->reorder_pb[5], vlc[1], vlc[0]); s->m.pb = s->reorder_pb[5]; mx = motion_ptr[0]; my = motion_ptr[1]; assert(mx >= -32 && mx <= 31); assert(my >= -32 && my <= 31); assert(pred_x >= -32 && pred_x <= 31); assert(pred_y >= -32 && pred_y <= 31); ff_h263_encode_motion(&s->m, mx - pred_x, 1); ff_h263_encode_motion(&s->m, my - pred_y, 1); s->reorder_pb[5] = s->m.pb; score[1] += lambda * put_bits_count(&s->reorder_pb[5]); dxy = (mx & 1) + 2 * (my & 1); s->hdsp.put_pixels_tab[0][dxy](temp + 16, ref + (mx >> 1) + stride * (my >> 1), stride, 
16); score[1] += encode_block(s, src + 16 * x, temp + 16, decoded, stride, 5, 64, lambda, 0); best = score[1] <= score[0]; vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_SKIP]; score[2] = s->dsp.sse[0](NULL, src + 16 * x, ref, stride, 16); score[2] += vlc[1] * lambda; if (score[2] < score[best] && mx == 0 && my == 0) { best = 2; s->hdsp.put_pixels_tab[0][0](decoded, ref, stride, 16); for (i = 0; i < 6; i++) count[2][i] = 0; put_bits(&s->pb, vlc[1], vlc[0]); } } if (best == 1) { for (i = 0; i < 6; i++) { count[1][i] = put_bits_count(&s->reorder_pb[i]); flush_put_bits(&s->reorder_pb[i]); } } else { motion_ptr[0] = motion_ptr[1] = motion_ptr[2] = motion_ptr[3] = motion_ptr[0 + 2 * s->m.b8_stride] = motion_ptr[1 + 2 * s->m.b8_stride] = motion_ptr[2 + 2 * s->m.b8_stride] = motion_ptr[3 + 2 * s->m.b8_stride] = 0; } } s->rd_total += score[best]; for (i = 5; i >= 0; i--) avpriv_copy_bits(&s->pb, reorder_buffer[best][i], count[best][i]); if (best == 0) s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16); } s->m.first_slice_line = 0; } return 0; }"} {"target": 1, "idx": 4064, "func": "static int ffmmal_add_packet(AVCodecContext *avctx, AVPacket *avpkt, int is_extradata) { MMALDecodeContext *ctx = avctx->priv_data; AVBufferRef *buf = NULL; int size = 0; uint8_t *data = (uint8_t *)\"\"; uint8_t *start; int ret = 0; if (avpkt->size) { if (avpkt->buf) { buf = av_buffer_ref(avpkt->buf); size = avpkt->size; data = avpkt->data; } else { buf = av_buffer_alloc(avpkt->size); if (buf) { memcpy(buf->data, avpkt->data, avpkt->size); size = buf->size; data = buf->data; } } if (!buf) { ret = AVERROR(ENOMEM); goto done; } if (!is_extradata) ctx->packets_sent++; } else { if (!ctx->packets_sent) { // Short-cut the flush logic to avoid upsetting MMAL. ctx->eos_sent = 1; ctx->eos_received = 1; goto done; } } start = data; do { FFBufferEntry *buffer = av_mallocz(sizeof(*buffer)); if (!buffer) { ret = AVERROR(ENOMEM); goto done; } buffer->data = data; buffer->length = FFMIN(size, ctx->decoder->input[0]->buffer_size); if (is_extradata) buffer->flags |= MMAL_BUFFER_HEADER_FLAG_CONFIG; if (data == start) buffer->flags |= MMAL_BUFFER_HEADER_FLAG_FRAME_START; data += buffer->length; size -= buffer->length; buffer->pts = avpkt->pts == AV_NOPTS_VALUE ? MMAL_TIME_UNKNOWN : avpkt->pts; buffer->dts = avpkt->dts == AV_NOPTS_VALUE ? 
MMAL_TIME_UNKNOWN : avpkt->dts; if (!size) buffer->flags |= MMAL_BUFFER_HEADER_FLAG_FRAME_END; if (!buffer->length) { buffer->flags |= MMAL_BUFFER_HEADER_FLAG_EOS; ctx->eos_sent = 1; } if (buf) { buffer->ref = av_buffer_ref(buf); if (!buffer->ref) { av_free(buffer); ret = AVERROR(ENOMEM); goto done; } } // Insert at end of the list if (!ctx->waiting_buffers) ctx->waiting_buffers = buffer; if (ctx->waiting_buffers_tail) ctx->waiting_buffers_tail->next = buffer; ctx->waiting_buffers_tail = buffer; } while (size); done: av_buffer_unref(&buf); return ret; }"} {"target": 1, "idx": 4070, "func": "static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost) { AVBitStreamFilterContext *bsfc = ost->bitstream_filters; AVCodecContext *avctx = ost->st->codec; int ret; if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) || (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0)) pkt->pts = pkt->dts = AV_NOPTS_VALUE; if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) { int64_t max = ost->st->cur_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT); if (ost->st->cur_dts && ost->st->cur_dts != AV_NOPTS_VALUE && max > pkt->dts) { av_log(s, max - pkt->dts > 2 ? AV_LOG_WARNING : AV_LOG_DEBUG, \"Audio timestamp %\"PRId64\" < %\"PRId64\" invalid, cliping\\n\", pkt->dts, max); pkt->pts = pkt->dts = max; /* * Audio encoders may split the packets -- #frames in != #packets out. * But there is no reordering, so we can limit the number of output packets * by simply dropping them here. * Counting encoded video frames needs to be done separately because of * reordering, see do_video_out() */ if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) { if (ost->frame_number >= ost->max_frames) { av_free_packet(pkt); return; ost->frame_number++; while (bsfc) { AVPacket new_pkt = *pkt; int a = av_bitstream_filter_filter(bsfc, avctx, NULL, &new_pkt.data, &new_pkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY); if (a > 0) { av_free_packet(pkt); new_pkt.destruct = av_destruct_packet; } else if (a < 0) { av_log(NULL, AV_LOG_ERROR, \"Failed to open bitstream filter %s for stream %d with codec %s\", bsfc->filter->name, pkt->stream_index, avctx->codec ? 
avctx->codec->name : \"copy\"); print_error(\"\", a); if (exit_on_error) exit_program(1); *pkt = new_pkt; bsfc = bsfc->next; pkt->stream_index = ost->index; ret = av_interleaved_write_frame(s, pkt); if (ret < 0) { print_error(\"av_interleaved_write_frame()\", ret); exit_program(1);"} {"target": 1, "idx": 4080, "func": "void qed_commit_l2_cache_entry(L2TableCache *l2_cache, CachedL2Table *l2_table) { CachedL2Table *entry; entry = qed_find_l2_cache_entry(l2_cache, l2_table->offset); if (entry) { qed_unref_l2_cache_entry(entry); qed_unref_l2_cache_entry(l2_table); return; } if (l2_cache->n_entries >= MAX_L2_CACHE_SIZE) { entry = QTAILQ_FIRST(&l2_cache->entries); QTAILQ_REMOVE(&l2_cache->entries, entry, node); l2_cache->n_entries--; qed_unref_l2_cache_entry(entry); } l2_cache->n_entries++; QTAILQ_INSERT_TAIL(&l2_cache->entries, l2_table, node); }"} {"target": 1, "idx": 4091, "func": "av_cold void ff_pixblockdsp_init_x86(PixblockDSPContext *c, AVCodecContext *avctx, unsigned high_bit_depth) { int cpu_flags = av_get_cpu_flags(); if (EXTERNAL_MMX(cpu_flags)) { if (!high_bit_depth) c->get_pixels = ff_get_pixels_mmx; c->diff_pixels = ff_diff_pixels_mmx; } if (EXTERNAL_SSE2(cpu_flags)) { if (!high_bit_depth) c->get_pixels = ff_get_pixels_sse2; c->diff_pixels = ff_diff_pixels_sse2; } }"} {"target": 1, "idx": 4093, "func": "static USBDevice *usb_net_init(const char *cmdline) { USBDevice *dev; QemuOpts *opts; int idx; opts = qemu_opts_parse(&qemu_net_opts, cmdline, NULL); if (!opts) { qemu_opt_set(opts, \"type\", \"nic\"); qemu_opt_set(opts, \"model\", \"usb\"); idx = net_client_init(NULL, opts, 0); if (idx == -1) { dev = usb_create(NULL /* FIXME */, \"usb-net\"); qdev_set_nic_properties(&dev->qdev, &nd_table[idx]); qdev_init_nofail(&dev->qdev); return dev;"} {"target": 1, "idx": 4095, "func": "static void pkt_dump_internal(void *avcl, FILE *f, int level, const AVPacket *pkt, int dump_payload, AVRational time_base) { HEXDUMP_PRINT(\"stream #%d:\\n\", pkt->stream_index); HEXDUMP_PRINT(\" keyframe=%d\\n\", (pkt->flags & AV_PKT_FLAG_KEY) != 0); HEXDUMP_PRINT(\" duration=%0.3f\\n\", pkt->duration * av_q2d(time_base)); /* DTS is _always_ valid after av_read_frame() */ HEXDUMP_PRINT(\" dts=\"); if (pkt->dts == AV_NOPTS_VALUE) HEXDUMP_PRINT(\"N/A\"); else HEXDUMP_PRINT(\"%0.3f\", pkt->dts * av_q2d(time_base)); /* PTS may not be known if B-frames are present. 
*/ HEXDUMP_PRINT(\" pts=\"); if (pkt->pts == AV_NOPTS_VALUE) HEXDUMP_PRINT(\"N/A\"); else HEXDUMP_PRINT(\"%0.3f\", pkt->pts * av_q2d(time_base)); HEXDUMP_PRINT(\"\\n\"); HEXDUMP_PRINT(\" size=%d\\n\", pkt->size); if (dump_payload) av_hex_dump(f, pkt->data, pkt->size); }"} {"target": 0, "idx": 4104, "func": "static void handle_mousewheel(SDL_Event *ev) { struct sdl2_console *scon = get_scon_from_window(ev->key.windowID); SDL_MouseWheelEvent *wev = &ev->wheel; InputButton btn; if (wev->y > 0) { btn = INPUT_BUTTON_WHEEL_UP; } else if (wev->y < 0) { btn = INPUT_BUTTON_WHEEL_DOWN; } else { return; } qemu_input_queue_btn(scon->dcl.con, btn, true); qemu_input_event_sync(); qemu_input_queue_btn(scon->dcl.con, btn, false); qemu_input_event_sync(); }"} {"target": 1, "idx": 4141, "func": "static void init_virtio_dev(TestServer *s) { QPCIBus *bus; QVirtioPCIDevice *dev; uint32_t features; bus = qpci_init_pc(NULL); g_assert_nonnull(bus); dev = qvirtio_pci_device_find(bus, VIRTIO_ID_NET); g_assert_nonnull(dev); qvirtio_pci_device_enable(dev); qvirtio_reset(&dev->vdev); qvirtio_set_acknowledge(&dev->vdev); qvirtio_set_driver(&dev->vdev); features = qvirtio_get_features(&dev->vdev); features = features & VIRTIO_NET_F_MAC; qvirtio_set_features(&dev->vdev, features); qvirtio_set_driver_ok(&dev->vdev); }"} {"target": 1, "idx": 4159, "func": "int av_packet_split_side_data(AVPacket *pkt){ if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){ int i; unsigned int size; uint8_t *p; p = pkt->data + pkt->size - 8 - 5; for (i=1; ; i++){ size = AV_RB32(p); if (size>INT_MAX || p - pkt->data < size) return 0; if (p[4]&128) break; p-= size+5; } pkt->side_data = av_malloc_array(i, sizeof(*pkt->side_data)); if (!pkt->side_data) return AVERROR(ENOMEM); p= pkt->data + pkt->size - 8 - 5; for (i=0; ; i++){ size= AV_RB32(p); av_assert0(size<=INT_MAX && p - pkt->data >= size); pkt->side_data[i].data = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE); pkt->side_data[i].size = size; pkt->side_data[i].type = p[4]&127; if (!pkt->side_data[i].data) return AVERROR(ENOMEM); memcpy(pkt->side_data[i].data, p-size, size); pkt->size -= size + 5; if(p[4]&128) break; p-= size+5; } pkt->size -= 8; pkt->side_data_elems = i+1; return 1; } return 0; }"} {"target": 1, "idx": 4165, "func": "static void usb_msd_cancel_io(USBDevice *dev, USBPacket *p) { MSDState *s = DO_UPCAST(MSDState, dev, dev); scsi_req_cancel(s->req); }"} {"target": 1, "idx": 4166, "func": "ram_addr_t ppc405_set_bootinfo (CPUState *env, ppc4xx_bd_info_t *bd, uint32_t flags) { ram_addr_t bdloc; int i, n; /* We put the bd structure at the top of memory */ if (bd->bi_memsize >= 0x01000000UL) bdloc = 0x01000000UL - sizeof(struct ppc4xx_bd_info_t); else bdloc = bd->bi_memsize - sizeof(struct ppc4xx_bd_info_t); stl_phys(bdloc + 0x00, bd->bi_memstart); stl_phys(bdloc + 0x04, bd->bi_memsize); stl_phys(bdloc + 0x08, bd->bi_flashstart); stl_phys(bdloc + 0x0C, bd->bi_flashsize); stl_phys(bdloc + 0x10, bd->bi_flashoffset); stl_phys(bdloc + 0x14, bd->bi_sramstart); stl_phys(bdloc + 0x18, bd->bi_sramsize); stl_phys(bdloc + 0x1C, bd->bi_bootflags); stl_phys(bdloc + 0x20, bd->bi_ipaddr); for (i = 0; i < 6; i++) stb_phys(bdloc + 0x24 + i, bd->bi_enetaddr[i]); stw_phys(bdloc + 0x2A, bd->bi_ethspeed); stl_phys(bdloc + 0x2C, bd->bi_intfreq); stl_phys(bdloc + 0x30, bd->bi_busfreq); stl_phys(bdloc + 0x34, bd->bi_baudrate); for (i = 0; i < 4; i++) stb_phys(bdloc + 0x38 + i, bd->bi_s_version[i]); for (i = 0; i < 32; i++) stb_phys(bdloc + 0x3C + i, 
bd->bi_s_version[i]); stl_phys(bdloc + 0x5C, bd->bi_plb_busfreq); stl_phys(bdloc + 0x60, bd->bi_pci_busfreq); for (i = 0; i < 6; i++) stb_phys(bdloc + 0x64 + i, bd->bi_pci_enetaddr[i]); n = 0x6A; if (flags & 0x00000001) { for (i = 0; i < 6; i++) stb_phys(bdloc + n++, bd->bi_pci_enetaddr2[i]); } stl_phys(bdloc + n, bd->bi_opbfreq); n += 4; for (i = 0; i < 2; i++) { stl_phys(bdloc + n, bd->bi_iic_fast[i]); n += 4; } return bdloc; }"} {"target": 0, "idx": 4173, "func": "bool virtio_ipl_disk_is_valid(void) { return blk_cfg.blk_size && (virtio_disk_is_scsi() || virtio_disk_is_eckd()); }"} {"target": 0, "idx": 4178, "func": "static void bdrv_password_cb(Monitor *mon, const char *password, void *opaque) { BlockDriverState *bs = opaque; int ret = 0; if (bdrv_set_key(bs, password) != 0) { monitor_printf(mon, \"invalid password\\n\"); ret = -EPERM; } if (mon->password_completion_cb) mon->password_completion_cb(mon->password_opaque, ret); monitor_read_command(mon, 1); }"} {"target": 0, "idx": 4196, "func": "dshow_cycle_devices(AVFormatContext *avctx, ICreateDevEnum *devenum, enum dshowDeviceType devtype, IBaseFilter **pfilter) { struct dshow_ctx *ctx = avctx->priv_data; IBaseFilter *device_filter = NULL; IEnumMoniker *classenum = NULL; IMoniker *m = NULL; const char *device_name = ctx->device_name[devtype]; int skip = (devtype == VideoDevice) ? ctx->video_device_number : ctx->audio_device_number; int r; const GUID *device_guid[2] = { &CLSID_VideoInputDeviceCategory, &CLSID_AudioInputDeviceCategory }; const char *devtypename = (devtype == VideoDevice) ? \"video\" : \"audio\"; r = ICreateDevEnum_CreateClassEnumerator(devenum, device_guid[devtype], (IEnumMoniker **) &classenum, 0); if (r != S_OK) { av_log(avctx, AV_LOG_ERROR, \"Could not enumerate %s devices.\\n\", devtypename); return AVERROR(EIO); } while (!device_filter && IEnumMoniker_Next(classenum, 1, &m, NULL) == S_OK) { IPropertyBag *bag = NULL; char *buf = NULL; VARIANT var; r = IMoniker_BindToStorage(m, 0, 0, &IID_IPropertyBag, (void *) &bag); if (r != S_OK) goto fail1; var.vt = VT_BSTR; r = IPropertyBag_Read(bag, L\"FriendlyName\", &var, NULL); if (r != S_OK) goto fail1; buf = dup_wchar_to_utf8(var.bstrVal); if (pfilter) { if (strcmp(device_name, buf)) goto fail1; if (!skip--) IMoniker_BindToObject(m, 0, 0, &IID_IBaseFilter, (void *) &device_filter); } else { av_log(avctx, AV_LOG_INFO, \" \\\"%s\\\"\\n\", buf); } fail1: if (buf) av_free(buf); if (bag) IPropertyBag_Release(bag); IMoniker_Release(m); } IEnumMoniker_Release(classenum); if (pfilter) { if (!device_filter) { av_log(avctx, AV_LOG_ERROR, \"Could not find %s device.\\n\", devtypename); return AVERROR(EIO); } *pfilter = device_filter; } return 0; }"} {"target": 0, "idx": 4198, "func": "static int do_decode(AVCodecContext *avctx, AVPacket *pkt) { int got_frame; int ret; av_assert0(!avctx->internal->buffer_frame->buf[0]); if (!pkt) pkt = avctx->internal->buffer_pkt; // This is the lesser evil. The field is for compatibility with legacy users // of the legacy API, and users using the new API should not be forced to // even know about this field. avctx->refcounted_frames = 1; // Some codecs (at least wma lossless) will crash when feeding drain packets // after EOF was signaled. 
if (avctx->internal->draining_done) return AVERROR_EOF; if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { ret = avcodec_decode_video2(avctx, avctx->internal->buffer_frame, &got_frame, pkt); if (ret >= 0 && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED)) ret = pkt->size; } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { ret = avcodec_decode_audio4(avctx, avctx->internal->buffer_frame, &got_frame, pkt); } else { ret = AVERROR(EINVAL); } if (ret == AVERROR(EAGAIN)) ret = pkt->size; if (ret < 0) return ret; if (avctx->internal->draining && !got_frame) avctx->internal->draining_done = 1; if (ret >= pkt->size) { av_packet_unref(avctx->internal->buffer_pkt); } else { int consumed = ret; if (pkt != avctx->internal->buffer_pkt) { av_packet_unref(avctx->internal->buffer_pkt); if ((ret = av_packet_ref(avctx->internal->buffer_pkt, pkt)) < 0) return ret; } avctx->internal->buffer_pkt->data += consumed; avctx->internal->buffer_pkt->size -= consumed; avctx->internal->buffer_pkt->pts = AV_NOPTS_VALUE; avctx->internal->buffer_pkt->dts = AV_NOPTS_VALUE; } if (got_frame) av_assert0(avctx->internal->buffer_frame->buf[0]); return 0; }"} {"target": 0, "idx": 4209, "func": "void net_host_device_remove(Monitor *mon, int vlan_id, const char *device) { VLANState *vlan; VLANClientState *vc; vlan = qemu_find_vlan(vlan_id); for(vc = vlan->first_client; vc != NULL; vc = vc->next) if (!strcmp(vc->name, device)) break; if (!vc) { monitor_printf(mon, \"can't find device %s\\n\", device); return; } qemu_del_vlan_client(vc); }"} {"target": 0, "idx": 4211, "func": "static void vfio_bar_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) { VFIOBAR *bar = opaque; union { uint8_t byte; uint16_t word; uint32_t dword; uint64_t qword; } buf; switch (size) { case 1: buf.byte = data; break; case 2: buf.word = cpu_to_le16(data); break; case 4: buf.dword = cpu_to_le32(data); break; default: hw_error(\"vfio: unsupported write size, %d bytes\\n\", size); break; } if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) { error_report(\"%s(,0x%\"HWADDR_PRIx\", 0x%\"PRIx64\", %d) failed: %m\", __func__, addr, data, size); } DPRINTF(\"%s(BAR%d+0x%\"HWADDR_PRIx\", 0x%\"PRIx64\", %d)\\n\", __func__, bar->nr, addr, data, size); /* * A read or write to a BAR always signals an INTx EOI. This will * do nothing if not pending (including not in INTx mode). We assume * that a BAR access is in response to an interrupt and that BAR * accesses will service the interrupt. Unfortunately, we don't know * which access will service the interrupt, so we're potentially * getting quite a few host interrupts per guest interrupt. */ vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr])); }"} {"target": 0, "idx": 4213, "func": "static void mcf5208evb_init(QEMUMachineInitArgs *args) { ram_addr_t ram_size = args->ram_size; const char *cpu_model = args->cpu_model; const char *kernel_filename = args->kernel_filename; CPUM68KState *env; int kernel_size; uint64_t elf_entry; target_phys_addr_t entry; qemu_irq *pic; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *sram = g_new(MemoryRegion, 1); if (!cpu_model) cpu_model = \"m5208\"; env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"Unable to find m68k CPU definition\\n\"); exit(1); } /* Initialize CPU registers. */ env->vbr = 0; /* TODO: Configure BARs. 
*/ /* DRAM at 0x40000000 */ memory_region_init_ram(ram, \"mcf5208.ram\", ram_size); vmstate_register_ram_global(ram); memory_region_add_subregion(address_space_mem, 0x40000000, ram); /* Internal SRAM. */ memory_region_init_ram(sram, \"mcf5208.sram\", 16384); vmstate_register_ram_global(sram); memory_region_add_subregion(address_space_mem, 0x80000000, sram); /* Internal peripherals. */ pic = mcf_intc_init(address_space_mem, 0xfc048000, env); mcf_uart_mm_init(address_space_mem, 0xfc060000, pic[26], serial_hds[0]); mcf_uart_mm_init(address_space_mem, 0xfc064000, pic[27], serial_hds[1]); mcf_uart_mm_init(address_space_mem, 0xfc068000, pic[28], serial_hds[2]); mcf5208_sys_init(address_space_mem, pic); if (nb_nics > 1) { fprintf(stderr, \"Too many NICs\\n\"); exit(1); } if (nd_table[0].used) mcf_fec_init(address_space_mem, &nd_table[0], 0xfc030000, pic + 36); /* 0xfc000000 SCM. */ /* 0xfc004000 XBS. */ /* 0xfc008000 FlexBus CS. */ /* 0xfc030000 FEC. */ /* 0xfc040000 SCM + Power management. */ /* 0xfc044000 eDMA. */ /* 0xfc048000 INTC. */ /* 0xfc058000 I2C. */ /* 0xfc05c000 QSPI. */ /* 0xfc060000 UART0. */ /* 0xfc064000 UART0. */ /* 0xfc068000 UART0. */ /* 0xfc070000 DMA timers. */ /* 0xfc080000 PIT0. */ /* 0xfc084000 PIT1. */ /* 0xfc088000 EPORT. */ /* 0xfc08c000 Watchdog. */ /* 0xfc090000 clock module. */ /* 0xfc0a0000 CCM + reset. */ /* 0xfc0a4000 GPIO. */ /* 0xfc0a8000 SDRAM controller. */ /* Load kernel. */ if (!kernel_filename) { fprintf(stderr, \"Kernel image must be specified\\n\"); exit(1); } kernel_size = load_elf(kernel_filename, NULL, NULL, &elf_entry, NULL, NULL, 1, ELF_MACHINE, 0); entry = elf_entry; if (kernel_size < 0) { kernel_size = load_uimage(kernel_filename, &entry, NULL, NULL); } if (kernel_size < 0) { kernel_size = load_image_targphys(kernel_filename, 0x40000000, ram_size); entry = 0x40000000; } if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } env->pc = entry; }"} {"target": 0, "idx": 4217, "func": "static QObject *parse_escape(JSONParserContext *ctxt, va_list *ap) { QObject *token; const char *val; if (ap == NULL) { return NULL; } token = parser_context_pop_token(ctxt); assert(token && token_get_type(token) == JSON_ESCAPE); val = token_get_value(token); if (!strcmp(val, \"%p\")) { return va_arg(*ap, QObject *); } else if (!strcmp(val, \"%i\")) { return QOBJECT(qbool_from_bool(va_arg(*ap, int))); } else if (!strcmp(val, \"%d\")) { return QOBJECT(qint_from_int(va_arg(*ap, int))); } else if (!strcmp(val, \"%ld\")) { return QOBJECT(qint_from_int(va_arg(*ap, long))); } else if (!strcmp(val, \"%lld\") || !strcmp(val, \"%I64d\")) { return QOBJECT(qint_from_int(va_arg(*ap, long long))); } else if (!strcmp(val, \"%s\")) { return QOBJECT(qstring_from_str(va_arg(*ap, const char *))); } else if (!strcmp(val, \"%f\")) { return QOBJECT(qfloat_from_double(va_arg(*ap, double))); } return NULL; }"} {"target": 0, "idx": 4218, "func": "static inline uint64_t ldq_phys_internal(hwaddr addr, enum device_endian endian) { uint8_t *ptr; uint64_t val; MemoryRegionSection *section; section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS); if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { /* I/O case */ addr = memory_region_section_addr(section, addr); /* XXX This is broken when device endian != cpu endian. 
Fix and add \"endian\" variable check */ #ifdef TARGET_WORDS_BIGENDIAN val = io_mem_read(section->mr, addr, 4) << 32; val |= io_mem_read(section->mr, addr + 4, 4); #else val = io_mem_read(section->mr, addr, 4); val |= io_mem_read(section->mr, addr + 4, 4) << 32; #endif } else { /* RAM case */ ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) + memory_region_section_addr(section, addr)); switch (endian) { case DEVICE_LITTLE_ENDIAN: val = ldq_le_p(ptr); break; case DEVICE_BIG_ENDIAN: val = ldq_be_p(ptr); break; default: val = ldq_p(ptr); break; } } return val; }"} {"target": 0, "idx": 4221, "func": "void main_loop_wait(int nonblocking) { fd_set rfds, wfds, xfds; int ret, nfds; struct timeval tv; int timeout; if (nonblocking) timeout = 0; else { timeout = qemu_calculate_timeout(); qemu_bh_update_timeout(&timeout); } os_host_main_loop_wait(&timeout); tv.tv_sec = timeout / 1000; tv.tv_usec = (timeout % 1000) * 1000; /* poll any events */ /* XXX: separate device handlers from system ones */ nfds = -1; FD_ZERO(&rfds); FD_ZERO(&wfds); FD_ZERO(&xfds); qemu_iohandler_fill(&nfds, &rfds, &wfds, &xfds); slirp_select_fill(&nfds, &rfds, &wfds, &xfds); qemu_mutex_unlock_iothread(); ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv); qemu_mutex_lock_iothread(); qemu_iohandler_poll(&rfds, &wfds, &xfds, ret); slirp_select_poll(&rfds, &wfds, &xfds, (ret < 0)); qemu_run_all_timers(); /* Check bottom-halves last in case any of the earlier events triggered them. */ qemu_bh_poll(); }"} {"target": 1, "idx": 4253, "func": "void bdrv_invalidate_cache_all(Error **errp) { BlockDriverState *bs; Error *local_err = NULL; BdrvNextIterator *it = NULL; while ((it = bdrv_next(it, &bs)) != NULL) { AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); bdrv_invalidate_cache(bs, &local_err); aio_context_release(aio_context); if (local_err) { error_propagate(errp, local_err); return; } } }"} {"target": 1, "idx": 4255, "func": "static int parse_adaptation_sets(AVFormatContext *s) { WebMDashMuxContext *w = s->priv_data; char *p = w->adaptation_sets; char *q; enum { new_set, parsed_id, parsing_streams } state; if (!w->adaptation_sets) { av_log(s, AV_LOG_ERROR, \"The 'adaptation_sets' option must be set.\\n\"); return AVERROR(EINVAL); } // syntax id=0,streams=0,1,2 id=1,streams=3,4 and so on state = new_set; while (p < w->adaptation_sets + strlen(w->adaptation_sets)) { if (*p == ' ') continue; else if (state == new_set && !strncmp(p, \"id=\", 3)) { void *mem = av_realloc(w->as, sizeof(*w->as) * (w->nb_as + 1)); if (mem == NULL) return AVERROR(ENOMEM); w->as = mem; ++w->nb_as; w->as[w->nb_as - 1].nb_streams = 0; w->as[w->nb_as - 1].streams = NULL; p += 3; // consume \"id=\" q = w->as[w->nb_as - 1].id; while (*p != ',') *q++ = *p++; *q = 0; p++; state = parsed_id; } else if (state == parsed_id && !strncmp(p, \"streams=\", 8)) { p += 8; // consume \"streams=\" state = parsing_streams; } else if (state == parsing_streams) { struct AdaptationSet *as = &w->as[w->nb_as - 1]; q = p; while (*q != '\\0' && *q != ',' && *q != ' ') q++; as->streams = av_realloc(as->streams, sizeof(*as->streams) * ++as->nb_streams); if (as->streams == NULL) return AVERROR(ENOMEM); as->streams[as->nb_streams - 1] = to_integer(p, q - p + 1); if (as->streams[as->nb_streams - 1] < 0) return -1; if (*q == '\\0') break; if (*q == ' ') state = new_set; p = ++q; } else { return -1; } } return 0; }"} {"target": 1, "idx": 4256, "func": "static QObject *parse_keyword(JSONParserContext *ctxt) { QObject *token, 
*ret; JSONParserContext saved_ctxt = parser_context_save(ctxt); token = parser_context_pop_token(ctxt); if (token == NULL) { goto out; } if (token_get_type(token) != JSON_KEYWORD) { goto out; } if (token_is_keyword(token, \"true\")) { ret = QOBJECT(qbool_from_int(true)); } else if (token_is_keyword(token, \"false\")) { ret = QOBJECT(qbool_from_int(false)); } else { parse_error(ctxt, token, \"invalid keyword `%s'\", token_get_value(token)); goto out; } return ret; out: parser_context_restore(ctxt, saved_ctxt); return NULL; }"} {"target": 0, "idx": 4260, "func": "static int aac_encode_frame(AVCodecContext *avctx, uint8_t *frame, int buf_size, void *data) { AACEncContext *s = avctx->priv_data; int16_t *samples = s->samples, *samples2, *la; ChannelElement *cpe; int i, j, chans, tag, start_ch; const uint8_t *chan_map = aac_chan_configs[avctx->channels-1]; int chan_el_counter[4]; FFPsyWindowInfo windows[AAC_MAX_CHANNELS]; if (s->last_frame) return 0; if (data) { if (!s->psypp) { memcpy(s->samples + 1024 * avctx->channels, data, 1024 * avctx->channels * sizeof(s->samples[0])); } else { start_ch = 0; samples2 = s->samples + 1024 * avctx->channels; for (i = 0; i < chan_map[0]; i++) { tag = chan_map[i+1]; chans = tag == TYPE_CPE ? 2 : 1; ff_psy_preprocess(s->psypp, (uint16_t*)data + start_ch, samples2 + start_ch, start_ch, chans); start_ch += chans; } } } if (!avctx->frame_number) { memcpy(s->samples, s->samples + 1024 * avctx->channels, 1024 * avctx->channels * sizeof(s->samples[0])); return 0; } start_ch = 0; for (i = 0; i < chan_map[0]; i++) { FFPsyWindowInfo* wi = windows + start_ch; tag = chan_map[i+1]; chans = tag == TYPE_CPE ? 2 : 1; cpe = &s->cpe[i]; for (j = 0; j < chans; j++) { IndividualChannelStream *ics = &cpe->ch[j].ics; int k; int cur_channel = start_ch + j; samples2 = samples + cur_channel; la = samples2 + (448+64) * avctx->channels; if (!data) la = NULL; if (tag == TYPE_LFE) { wi[j].window_type[0] = ONLY_LONG_SEQUENCE; wi[j].window_shape = 0; wi[j].num_windows = 1; wi[j].grouping[0] = 1; } else { wi[j] = ff_psy_suggest_window(&s->psy, samples2, la, cur_channel, ics->window_sequence[0]); } ics->window_sequence[1] = ics->window_sequence[0]; ics->window_sequence[0] = wi[j].window_type[0]; ics->use_kb_window[1] = ics->use_kb_window[0]; ics->use_kb_window[0] = wi[j].window_shape; ics->num_windows = wi[j].num_windows; ics->swb_sizes = s->psy.bands [ics->num_windows == 8]; ics->num_swb = tag == TYPE_LFE ? 12 : s->psy.num_bands[ics->num_windows == 8]; for (k = 0; k < ics->num_windows; k++) ics->group_len[k] = wi[j].grouping[k]; apply_window_and_mdct(avctx, s, &cpe->ch[j], samples2); } start_ch += chans; } do { int frame_bits; init_put_bits(&s->pb, frame, buf_size*8); if ((avctx->frame_number & 0xFF)==1 && !(avctx->flags & CODEC_FLAG_BITEXACT)) put_bitstream_info(avctx, s, LIBAVCODEC_IDENT); start_ch = 0; memset(chan_el_counter, 0, sizeof(chan_el_counter)); for (i = 0; i < chan_map[0]; i++) { FFPsyWindowInfo* wi = windows + start_ch; tag = chan_map[i+1]; chans = tag == TYPE_CPE ? 
2 : 1; cpe = &s->cpe[i]; put_bits(&s->pb, 3, tag); put_bits(&s->pb, 4, chan_el_counter[tag]++); for (j = 0; j < chans; j++) { s->cur_channel = start_ch + j; ff_psy_set_band_info(&s->psy, s->cur_channel, cpe->ch[j].coeffs, &wi[j]); s->coder->search_for_quantizers(avctx, s, &cpe->ch[j], s->lambda); } cpe->common_window = 0; if (chans > 1 && wi[0].window_type[0] == wi[1].window_type[0] && wi[0].window_shape == wi[1].window_shape) { cpe->common_window = 1; for (j = 0; j < wi[0].num_windows; j++) { if (wi[0].grouping[j] != wi[1].grouping[j]) { cpe->common_window = 0; break; } } } s->cur_channel = start_ch; if (cpe->common_window && s->coder->search_for_ms) s->coder->search_for_ms(s, cpe, s->lambda); adjust_frame_information(s, cpe, chans); if (chans == 2) { put_bits(&s->pb, 1, cpe->common_window); if (cpe->common_window) { put_ics_info(s, &cpe->ch[0].ics); encode_ms_info(&s->pb, cpe); } } for (j = 0; j < chans; j++) { s->cur_channel = start_ch + j; encode_individual_channel(avctx, s, &cpe->ch[j], cpe->common_window); } start_ch += chans; } frame_bits = put_bits_count(&s->pb); if (frame_bits <= 6144 * avctx->channels - 3) { s->psy.bitres.bits = frame_bits / avctx->channels; break; } s->lambda *= avctx->bit_rate * 1024.0f / avctx->sample_rate / frame_bits; } while (1); put_bits(&s->pb, 3, TYPE_END); flush_put_bits(&s->pb); avctx->frame_bits = put_bits_count(&s->pb); // rate control stuff if (!(avctx->flags & CODEC_FLAG_QSCALE)) { float ratio = avctx->bit_rate * 1024.0f / avctx->sample_rate / avctx->frame_bits; s->lambda *= ratio; s->lambda = FFMIN(s->lambda, 65536.f); } if (!data) s->last_frame = 1; memcpy(s->samples, s->samples + 1024 * avctx->channels, 1024 * avctx->channels * sizeof(s->samples[0])); return put_bits_count(&s->pb)>>3; }"} {"target": 0, "idx": 4306, "func": "static int dxtory_decode_v2_420(AVCodecContext *avctx, AVFrame *pic, const uint8_t *src, int src_size) { GetByteContext gb; GetBitContext gb2; int nslices, slice, slice_height, ref_slice_height; int cur_y, next_y; uint32_t off, slice_size; uint8_t *Y, *U, *V; int ret; bytestream2_init(&gb, src, src_size); nslices = bytestream2_get_le16(&gb); off = FFALIGN(nslices * 4 + 2, 16); if (src_size < off) { av_log(avctx, AV_LOG_ERROR, \"no slice data\\n\"); return AVERROR_INVALIDDATA; } if (!nslices || avctx->height % nslices) { avpriv_request_sample(avctx, \"%d slices for %dx%d\", nslices, avctx->width, avctx->height); return AVERROR_PATCHWELCOME; } ref_slice_height = avctx->height / nslices; if ((avctx->width & 1) || (avctx->height & 1)) { avpriv_request_sample(avctx, \"Frame dimensions %dx%d\", avctx->width, avctx->height); } avctx->pix_fmt = AV_PIX_FMT_YUV420P; if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) return ret; Y = pic->data[0]; U = pic->data[1]; V = pic->data[2]; cur_y = 0; next_y = ref_slice_height; for (slice = 0; slice < nslices; slice++) { slice_size = bytestream2_get_le32(&gb); slice_height = (next_y & ~1) - (cur_y & ~1); if (slice_size > src_size - off) { av_log(avctx, AV_LOG_ERROR, \"invalid slice size %\"PRIu32\" (only %\"PRIu32\" bytes left)\\n\", slice_size, src_size - off); return AVERROR_INVALIDDATA; } if (slice_size <= 16) { av_log(avctx, AV_LOG_ERROR, \"invalid slice size %\"PRIu32\"\\n\", slice_size); return AVERROR_INVALIDDATA; } if (AV_RL32(src + off) != slice_size - 16) { av_log(avctx, AV_LOG_ERROR, \"Slice sizes mismatch: got %\"PRIu32\" instead of %\"PRIu32\"\\n\", AV_RL32(src + off), slice_size - 16); } init_get_bits(&gb2, src + off + 16, (slice_size - 16) * 8); dx2_decode_slice_420(&gb2, avctx->width, 
slice_height, Y, U, V, pic->linesize[0], pic->linesize[1], pic->linesize[2]); Y += pic->linesize[0] * slice_height; U += pic->linesize[1] * (slice_height >> 1); V += pic->linesize[2] * (slice_height >> 1); off += slice_size; cur_y = next_y; next_y += ref_slice_height; } return 0; }"} {"target": 0, "idx": 4307, "func": "static int read_sbr_grid(AACContext *ac, SpectralBandReplication *sbr, GetBitContext *gb, SBRData *ch_data) { int i; unsigned bs_pointer = 0; // frameLengthFlag ? 15 : 16; 960 sample length frames unsupported; this value is numTimeSlots int abs_bord_trail = 16; int num_rel_lead, num_rel_trail; unsigned bs_num_env_old = ch_data->bs_num_env; ch_data->bs_freq_res[0] = ch_data->bs_freq_res[ch_data->bs_num_env]; ch_data->bs_amp_res = sbr->bs_amp_res_header; ch_data->t_env_num_env_old = ch_data->t_env[bs_num_env_old]; switch (ch_data->bs_frame_class = get_bits(gb, 2)) { case FIXFIX: ch_data->bs_num_env = 1 << get_bits(gb, 2); num_rel_lead = ch_data->bs_num_env - 1; if (ch_data->bs_num_env == 1) ch_data->bs_amp_res = 0; if (ch_data->bs_num_env > 4) { av_log(ac->avccontext, AV_LOG_ERROR, \"Invalid bitstream, too many SBR envelopes in FIXFIX type SBR frame: %d\\n\", ch_data->bs_num_env); return -1; } ch_data->t_env[0] = 0; ch_data->t_env[ch_data->bs_num_env] = abs_bord_trail; abs_bord_trail = (abs_bord_trail + (ch_data->bs_num_env >> 1)) / ch_data->bs_num_env; for (i = 0; i < num_rel_lead; i++) ch_data->t_env[i + 1] = ch_data->t_env[i] + abs_bord_trail; ch_data->bs_freq_res[1] = get_bits1(gb); for (i = 1; i < ch_data->bs_num_env; i++) ch_data->bs_freq_res[i + 1] = ch_data->bs_freq_res[1]; break; case FIXVAR: abs_bord_trail += get_bits(gb, 2); num_rel_trail = get_bits(gb, 2); ch_data->bs_num_env = num_rel_trail + 1; ch_data->t_env[0] = 0; ch_data->t_env[ch_data->bs_num_env] = abs_bord_trail; for (i = 0; i < num_rel_trail; i++) ch_data->t_env[ch_data->bs_num_env - 1 - i] = ch_data->t_env[ch_data->bs_num_env - i] - 2 * get_bits(gb, 2) - 2; bs_pointer = get_bits(gb, ceil_log2[ch_data->bs_num_env]); for (i = 0; i < ch_data->bs_num_env; i++) ch_data->bs_freq_res[ch_data->bs_num_env - i] = get_bits1(gb); break; case VARFIX: ch_data->t_env[0] = get_bits(gb, 2); num_rel_lead = get_bits(gb, 2); ch_data->bs_num_env = num_rel_lead + 1; ch_data->t_env[ch_data->bs_num_env] = abs_bord_trail; for (i = 0; i < num_rel_lead; i++) ch_data->t_env[i + 1] = ch_data->t_env[i] + 2 * get_bits(gb, 2) + 2; bs_pointer = get_bits(gb, ceil_log2[ch_data->bs_num_env]); get_bits1_vector(gb, ch_data->bs_freq_res + 1, ch_data->bs_num_env); break; case VARVAR: ch_data->t_env[0] = get_bits(gb, 2); abs_bord_trail += get_bits(gb, 2); num_rel_lead = get_bits(gb, 2); num_rel_trail = get_bits(gb, 2); ch_data->bs_num_env = num_rel_lead + num_rel_trail + 1; ch_data->t_env[ch_data->bs_num_env] = abs_bord_trail; if (ch_data->bs_num_env > 5) { av_log(ac->avccontext, AV_LOG_ERROR, \"Invalid bitstream, too many SBR envelopes in VARVAR type SBR frame: %d\\n\", ch_data->bs_num_env); return -1; } for (i = 0; i < num_rel_lead; i++) ch_data->t_env[i + 1] = ch_data->t_env[i] + 2 * get_bits(gb, 2) + 2; for (i = 0; i < num_rel_trail; i++) ch_data->t_env[ch_data->bs_num_env - 1 - i] = ch_data->t_env[ch_data->bs_num_env - i] - 2 * get_bits(gb, 2) - 2; bs_pointer = get_bits(gb, ceil_log2[ch_data->bs_num_env]); get_bits1_vector(gb, ch_data->bs_freq_res + 1, ch_data->bs_num_env); break; } if (bs_pointer > ch_data->bs_num_env + 1) { av_log(ac->avccontext, AV_LOG_ERROR, \"Invalid bitstream, bs_pointer points to a middle noise border outside the 
time borders table: %d\\n\", bs_pointer); return -1; } ch_data->bs_num_noise = (ch_data->bs_num_env > 1) + 1; ch_data->t_q[0] = ch_data->t_env[0]; ch_data->t_q[ch_data->bs_num_noise] = ch_data->t_env[ch_data->bs_num_env]; if (ch_data->bs_num_noise > 1) { unsigned int idx; if (ch_data->bs_frame_class == FIXFIX) { idx = ch_data->bs_num_env >> 1; } else if (ch_data->bs_frame_class & 1) { // FIXVAR or VARVAR idx = ch_data->bs_num_env - FFMAX(bs_pointer - 1, 1); } else { // VARFIX if (!bs_pointer) idx = 1; else if (bs_pointer == 1) idx = ch_data->bs_num_env - 1; else // bs_pointer > 1 idx = bs_pointer - 1; } ch_data->t_q[1] = ch_data->t_env[idx]; } ch_data->e_a[0] = -(ch_data->e_a[1] != bs_num_env_old); // l_APrev ch_data->e_a[1] = -1; if ((ch_data->bs_frame_class & 1) && bs_pointer) { // FIXVAR or VARVAR and bs_pointer != 0 ch_data->e_a[1] = ch_data->bs_num_env + 1 - bs_pointer; } else if ((ch_data->bs_frame_class == 2) && (bs_pointer > 1)) // VARFIX and bs_pointer > 1 ch_data->e_a[1] = bs_pointer - 1; return 0; }"} {"target": 0, "idx": 4315, "func": "static int uhci_complete_td(UHCIState *s, UHCI_TD *td, UHCIAsync *async, uint32_t *int_mask) { int len = 0, max_len, err, ret; uint8_t pid; max_len = ((td->token >> 21) + 1) & 0x7ff; pid = td->token & 0xff; ret = async->packet.result; if (td->ctrl & TD_CTRL_IOS) td->ctrl &= ~TD_CTRL_ACTIVE; if (ret < 0) goto out; len = async->packet.result; td->ctrl = (td->ctrl & ~0x7ff) | ((len - 1) & 0x7ff); /* The NAK bit may have been set by a previous frame, so clear it here. The docs are somewhat unclear, but win2k relies on this behavior. */ td->ctrl &= ~(TD_CTRL_ACTIVE | TD_CTRL_NAK); if (td->ctrl & TD_CTRL_IOC) *int_mask |= 0x01; if (pid == USB_TOKEN_IN) { if (len > max_len) { ret = USB_RET_BABBLE; goto out; } if ((td->ctrl & TD_CTRL_SPD) && len < max_len) { *int_mask |= 0x02; /* short packet: do not update QH */ trace_usb_uhci_packet_complete_shortxfer(async->queue->token, async->td); return TD_RESULT_NEXT_QH; } } /* success */ trace_usb_uhci_packet_complete_success(async->queue->token, async->td); return TD_RESULT_COMPLETE; out: /* * We should not do any further processing on a queue with errors! * This is esp. important for bulk endpoints with pipelining enabled * (redirection to a real USB device), where we must cancel all the * transfers after this one so that: * 1) If they've completed already, they are not processed further * causing more stalls, originating from the same failed transfer * 2) If still in flight, they are cancelled before the guest does * a clear stall, otherwise the guest and device can loose sync! 
*/ while (!QTAILQ_EMPTY(&async->queue->asyncs)) { UHCIAsync *as = QTAILQ_FIRST(&async->queue->asyncs); uhci_async_unlink(as); uhci_async_cancel(as); } switch(ret) { case USB_RET_STALL: td->ctrl |= TD_CTRL_STALL; td->ctrl &= ~TD_CTRL_ACTIVE; s->status |= UHCI_STS_USBERR; if (td->ctrl & TD_CTRL_IOC) { *int_mask |= 0x01; } uhci_update_irq(s); trace_usb_uhci_packet_complete_stall(async->queue->token, async->td); return TD_RESULT_NEXT_QH; case USB_RET_BABBLE: td->ctrl |= TD_CTRL_BABBLE | TD_CTRL_STALL; td->ctrl &= ~TD_CTRL_ACTIVE; s->status |= UHCI_STS_USBERR; if (td->ctrl & TD_CTRL_IOC) { *int_mask |= 0x01; } uhci_update_irq(s); /* frame interrupted */ trace_usb_uhci_packet_complete_babble(async->queue->token, async->td); return TD_RESULT_STOP_FRAME; case USB_RET_NAK: td->ctrl |= TD_CTRL_NAK; if (pid == USB_TOKEN_SETUP) break; return TD_RESULT_NEXT_QH; case USB_RET_IOERROR: case USB_RET_NODEV: default: break; } /* Retry the TD if error count is not zero */ td->ctrl |= TD_CTRL_TIMEOUT; err = (td->ctrl >> TD_CTRL_ERROR_SHIFT) & 3; if (err != 0) { err--; if (err == 0) { td->ctrl &= ~TD_CTRL_ACTIVE; s->status |= UHCI_STS_USBERR; if (td->ctrl & TD_CTRL_IOC) *int_mask |= 0x01; uhci_update_irq(s); trace_usb_uhci_packet_complete_error(async->queue->token, async->td); } } td->ctrl = (td->ctrl & ~(3 << TD_CTRL_ERROR_SHIFT)) | (err << TD_CTRL_ERROR_SHIFT); return TD_RESULT_NEXT_QH; }"} {"target": 0, "idx": 4337, "func": "static void cpu_x86_dump_state(FILE *f) { int eflags; char cc_op_name[32]; eflags = cc_table[CC_OP].compute_all(); eflags |= (DF & DF_MASK); if ((unsigned)env->cc_op < CC_OP_NB) strcpy(cc_op_name, cc_op_str[env->cc_op]); else snprintf(cc_op_name, sizeof(cc_op_name), \"[%d]\", env->cc_op); fprintf(f, \"EAX=%08x EBX=%08X ECX=%08x EDX=%08x\\n\" \"ESI=%08x EDI=%08X EBP=%08x ESP=%08x\\n\" \"CCS=%08x CCD=%08x CCO=%-8s EFL=%c%c%c%c%c%c%c\\n\" \"EIP=%08x\\n\", env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX], env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP], env->cc_src, env->cc_dst, cc_op_name, eflags & DF_MASK ? 'D' : '-', eflags & CC_O ? 'O' : '-', eflags & CC_S ? 'S' : '-', eflags & CC_Z ? 'Z' : '-', eflags & CC_A ? 'A' : '-', eflags & CC_P ? 'P' : '-', eflags & CC_C ? 
'C' : '-', env->eip); #if 1 fprintf(f, \"ST0=%f ST1=%f ST2=%f ST3=%f\\n\", (double)ST0, (double)ST1, (double)ST(2), (double)ST(3)); #endif }"} {"target": 0, "idx": 4341, "func": "static int gif_read_image(GifState *s) { int left, top, width, height, bits_per_pixel, code_size, flags; int is_interleaved, has_local_palette, y, pass, y1, linesize, n, i; uint8_t *ptr, *spal, *palette, *ptr1; left = bytestream_get_le16(&s->bytestream); top = bytestream_get_le16(&s->bytestream); width = bytestream_get_le16(&s->bytestream); height = bytestream_get_le16(&s->bytestream); flags = bytestream_get_byte(&s->bytestream); is_interleaved = flags & 0x40; has_local_palette = flags & 0x80; bits_per_pixel = (flags & 0x07) + 1; av_dlog(s->avctx, \"image x=%d y=%d w=%d h=%d\\n\", left, top, width, height); if (has_local_palette) { bytestream_get_buffer(&s->bytestream, s->local_palette, 3 * (1 << bits_per_pixel)); palette = s->local_palette; } else { palette = s->global_palette; bits_per_pixel = s->bits_per_pixel; } /* verify that all the image is inside the screen dimensions */ if (left + width > s->screen_width || top + height > s->screen_height) return AVERROR(EINVAL); /* build the palette */ n = (1 << bits_per_pixel); spal = palette; for(i = 0; i < n; i++) { s->image_palette[i] = (0xffu << 24) | AV_RB24(spal); spal += 3; } for(; i < 256; i++) s->image_palette[i] = (0xffu << 24); /* handle transparency */ if (s->transparent_color_index >= 0) s->image_palette[s->transparent_color_index] = 0; /* now get the image data */ code_size = bytestream_get_byte(&s->bytestream); ff_lzw_decode_init(s->lzw, code_size, s->bytestream, s->bytestream_end - s->bytestream, FF_LZW_GIF); /* read all the image */ linesize = s->picture.linesize[0]; ptr1 = s->picture.data[0] + top * linesize + left; ptr = ptr1; pass = 0; y1 = 0; for (y = 0; y < height; y++) { ff_lzw_decode(s->lzw, ptr, width); if (is_interleaved) { switch(pass) { default: case 0: case 1: y1 += 8; ptr += linesize * 8; if (y1 >= height) { y1 = pass ? 2 : 4; ptr = ptr1 + linesize * y1; pass++; } break; case 2: y1 += 4; ptr += linesize * 4; if (y1 >= height) { y1 = 1; ptr = ptr1 + linesize; pass++; } break; case 3: y1 += 2; ptr += linesize * 2; break; } } else { ptr += linesize; } } /* read the garbage data until end marker is found */ ff_lzw_decode_tail(s->lzw); s->bytestream = ff_lzw_cur_ptr(s->lzw); return 0; }"} {"target": 0, "idx": 4344, "func": "static int alac_encode_frame(AVCodecContext *avctx, uint8_t *frame, int buf_size, void *data) { AlacEncodeContext *s = avctx->priv_data; PutBitContext *pb = &s->pbctx; int i, out_bytes, verbatim_flag = 0; if (avctx->frame_size > DEFAULT_FRAME_SIZE) { av_log(avctx, AV_LOG_ERROR, \"input frame size exceeded\\n\"); return -1; } if (buf_size < 2 * s->max_coded_frame_size) { av_log(avctx, AV_LOG_ERROR, \"buffer size is too small\\n\"); return -1; } verbatim: init_put_bits(pb, frame, buf_size); if (s->compression_level == 0 || verbatim_flag) { // Verbatim mode const int16_t *samples = data; write_frame_header(s, 1); for (i = 0; i < avctx->frame_size * avctx->channels; i++) { put_sbits(pb, 16, *samples++); } } else { init_sample_buffers(s, data); write_frame_header(s, 0); write_compressed_frame(s); } put_bits(pb, 3, 7); flush_put_bits(pb); out_bytes = put_bits_count(pb) >> 3; if (out_bytes > s->max_coded_frame_size) { /* frame too large. use verbatim mode */ if (verbatim_flag || s->compression_level == 0) { /* still too large. must be an error. 
*/ av_log(avctx, AV_LOG_ERROR, \"error encoding frame\\n\"); return -1; } verbatim_flag = 1; goto verbatim; } return out_bytes; }"} {"target": 1, "idx": 4360, "func": "static void i440fx_update_memory_mappings(PCII440FXState *d) { int i, r; uint32_t smram; bool smram_enabled; memory_region_transaction_begin(); update_pam(d, 0xf0000, 0x100000, (d->dev.config[I440FX_PAM] >> 4) & 3, &d->pam_regions[0]); for(i = 0; i < 12; i++) { r = (d->dev.config[(i >> 1) + (I440FX_PAM + 1)] >> ((i & 1) * 4)) & 3; update_pam(d, 0xc0000 + 0x4000 * i, 0xc0000 + 0x4000 * (i + 1), r, &d->pam_regions[i+1]); } smram = d->dev.config[I440FX_SMRAM]; smram_enabled = (d->smm_enabled && (smram & 0x08)) || (smram & 0x40); memory_region_set_enabled(&d->smram_region, !smram_enabled); memory_region_transaction_commit(); }"} {"target": 1, "idx": 4362, "func": "static int vmdk_open_vmdk4(BlockDriverState *bs, BlockDriverState *file, int flags) { int ret; uint32_t magic; uint32_t l1_size, l1_entry_sectors; VMDK4Header header; VmdkExtent *extent; int64_t l1_backup_offset = 0; ret = bdrv_pread(file, sizeof(magic), &header, sizeof(header)); if (ret < 0) { return ret; } if (header.capacity == 0) { int64_t desc_offset = le64_to_cpu(header.desc_offset); if (desc_offset) { return vmdk_open_desc_file(bs, flags, desc_offset << 9); } } if (le64_to_cpu(header.gd_offset) == VMDK4_GD_AT_END) { /* * The footer takes precedence over the header, so read it in. The * footer starts at offset -1024 from the end: One sector for the * footer, and another one for the end-of-stream marker. */ struct { struct { uint64_t val; uint32_t size; uint32_t type; uint8_t pad[512 - 16]; } QEMU_PACKED footer_marker; uint32_t magic; VMDK4Header header; uint8_t pad[512 - 4 - sizeof(VMDK4Header)]; struct { uint64_t val; uint32_t size; uint32_t type; uint8_t pad[512 - 16]; } QEMU_PACKED eos_marker; } QEMU_PACKED footer; ret = bdrv_pread(file, bs->file->total_sectors * 512 - 1536, &footer, sizeof(footer)); if (ret < 0) { return ret; } /* Some sanity checks for the footer */ if (be32_to_cpu(footer.magic) != VMDK4_MAGIC || le32_to_cpu(footer.footer_marker.size) != 0 || le32_to_cpu(footer.footer_marker.type) != MARKER_FOOTER || le64_to_cpu(footer.eos_marker.val) != 0 || le32_to_cpu(footer.eos_marker.size) != 0 || le32_to_cpu(footer.eos_marker.type) != MARKER_END_OF_STREAM) { return -EINVAL; } header = footer.header; } if (le32_to_cpu(header.version) >= 3) { char buf[64]; snprintf(buf, sizeof(buf), \"VMDK version %d\", le32_to_cpu(header.version)); qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE, bs->device_name, \"vmdk\", buf); return -ENOTSUP; } l1_entry_sectors = le32_to_cpu(header.num_gtes_per_gte) * le64_to_cpu(header.granularity); if (l1_entry_sectors == 0) { return -EINVAL; } l1_size = (le64_to_cpu(header.capacity) + l1_entry_sectors - 1) / l1_entry_sectors; if (le32_to_cpu(header.flags) & VMDK4_FLAG_RGD) { l1_backup_offset = le64_to_cpu(header.rgd_offset) << 9; } extent = vmdk_add_extent(bs, file, false, le64_to_cpu(header.capacity), le64_to_cpu(header.gd_offset) << 9, l1_backup_offset, l1_size, le32_to_cpu(header.num_gtes_per_gte), le64_to_cpu(header.granularity)); extent->compressed = le16_to_cpu(header.compressAlgorithm) == VMDK4_COMPRESSION_DEFLATE; extent->has_marker = le32_to_cpu(header.flags) & VMDK4_FLAG_MARKER; extent->version = le32_to_cpu(header.version); extent->has_zero_grain = le32_to_cpu(header.flags) & VMDK4_FLAG_ZERO_GRAIN; ret = vmdk_init_tables(bs, extent); if (ret) { /* free extent allocated by vmdk_add_extent */ vmdk_free_last_extent(bs); } 
return ret; }"} {"target": 1, "idx": 4385, "func": "static void control_to_network(RDMAControlHeader *control) { control->type = htonl(control->type); control->len = htonl(control->len); control->repeat = htonl(control->repeat); }"} {"target": 0, "idx": 4389, "func": "static bool use_multiport(VirtIOSerial *vser) { VirtIODevice *vdev = VIRTIO_DEVICE(vser); return virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT); }"} {"target": 0, "idx": 4411, "func": "static int poll_filter(OutputStream *ost) { OutputFile *of = output_files[ost->file_index]; AVFrame *filtered_frame = NULL; int frame_size, ret; if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) { return AVERROR(ENOMEM); } else avcodec_get_frame_defaults(ost->filtered_frame); filtered_frame = ost->filtered_frame; if (ost->enc->type == AVMEDIA_TYPE_AUDIO && !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame, ost->st->codec->frame_size); else ret = av_buffersink_get_frame(ost->filter->filter, filtered_frame); if (ret < 0) return ret; if (filtered_frame->pts != AV_NOPTS_VALUE) { filtered_frame->pts = av_rescale_q(filtered_frame->pts, ost->filter->filter->inputs[0]->time_base, ost->st->codec->time_base) - av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->codec->time_base); } switch (ost->filter->filter->inputs[0]->type) { case AVMEDIA_TYPE_VIDEO: if (!ost->frame_aspect_ratio) ost->st->codec->sample_aspect_ratio = filtered_frame->sample_aspect_ratio; do_video_out(of->ctx, ost, filtered_frame, &frame_size); if (vstats_filename && frame_size) do_video_stats(ost, frame_size); break; case AVMEDIA_TYPE_AUDIO: do_audio_out(of->ctx, ost, filtered_frame); break; default: // TODO support subtitle filters av_assert0(0); } av_frame_unref(filtered_frame); return 0; }"} {"target": 0, "idx": 4440, "func": "void *memory_region_get_ram_ptr(MemoryRegion *mr) { if (mr->alias) { return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset; } assert(mr->terminates); return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK); }"} {"target": 0, "idx": 4479, "func": "void ff_af_queue_init(AVCodecContext *avctx, AudioFrameQueue *afq) { afq->avctx = avctx; afq->next_pts = AV_NOPTS_VALUE; afq->remaining_delay = avctx->delay; afq->remaining_samples = avctx->delay; afq->frame_queue = NULL; }"} {"target": 0, "idx": 4488, "func": "void kvm_s390_service_interrupt(S390CPU *cpu, uint32_t parm) { kvm_s390_interrupt_internal(cpu, KVM_S390_INT_SERVICE, parm, 0 , 1); }"} {"target": 1, "idx": 4519, "func": "static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, int src_size) { const uint8_t *s = src; const uint8_t *end; const uint8_t *mm_end; uint16_t *d = (uint16_t *)dst; end = s + src_size; __asm__ volatile(PREFETCH\" %0\"::\"m\"(*src):\"memory\"); __asm__ volatile( \"movq %0, %%mm7 \\n\\t\" \"movq %1, %%mm6 \\n\\t\" ::\"m\"(red_15mask),\"m\"(green_15mask)); mm_end = end - 11; while (s < mm_end) { __asm__ volatile( PREFETCH\" 32%1 \\n\\t\" \"movd %1, %%mm0 \\n\\t\" \"movd 3%1, %%mm3 \\n\\t\" \"punpckldq 6%1, %%mm0 \\n\\t\" \"punpckldq 9%1, %%mm3 \\n\\t\" \"movq %%mm0, %%mm1 \\n\\t\" \"movq %%mm0, %%mm2 \\n\\t\" \"movq %%mm3, %%mm4 \\n\\t\" \"movq %%mm3, %%mm5 \\n\\t\" \"psrlq $3, %%mm0 \\n\\t\" \"psrlq $3, %%mm3 \\n\\t\" \"pand %2, %%mm0 \\n\\t\" \"pand %2, %%mm3 \\n\\t\" \"psrlq $6, %%mm1 \\n\\t\" \"psrlq $6, %%mm4 \\n\\t\" \"pand %%mm6, %%mm1 \\n\\t\" \"pand %%mm6, %%mm4 \\n\\t\" \"psrlq $9, %%mm2 \\n\\t\" \"psrlq $9, %%mm5 \\n\\t\" \"pand %%mm7, %%mm2 
\\n\\t\" \"pand %%mm7, %%mm5 \\n\\t\" \"por %%mm1, %%mm0 \\n\\t\" \"por %%mm4, %%mm3 \\n\\t\" \"por %%mm2, %%mm0 \\n\\t\" \"por %%mm5, %%mm3 \\n\\t\" \"psllq $16, %%mm3 \\n\\t\" \"por %%mm3, %%mm0 \\n\\t\" MOVNTQ\" %%mm0, %0 \\n\\t\" :\"=m\"(*d):\"m\"(*s),\"m\"(blue_15mask):\"memory\"); d += 4; s += 12; } __asm__ volatile(SFENCE:::\"memory\"); __asm__ volatile(EMMS:::\"memory\"); while (s < end) { const int b = *s++; const int g = *s++; const int r = *s++; *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); } }"} {"target": 1, "idx": 4521, "func": "static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion, void *opaque) { GtkDisplayState *s = opaque; int x, y; int mx, my; int fbh, fbw; int ww, wh; fbw = surface_width(s->ds) * s->scale_x; fbh = surface_height(s->ds) * s->scale_y; gdk_drawable_get_size(gtk_widget_get_window(s->drawing_area), &ww, &wh); mx = my = 0; if (ww > fbw) { mx = (ww - fbw) / 2; } if (wh > fbh) { my = (wh - fbh) / 2; } x = (motion->x - mx) / s->scale_x; y = (motion->y - my) / s->scale_y; if (x < 0 || y < 0 || x >= surface_width(s->ds) || y >= surface_height(s->ds)) { return TRUE; } if (qemu_input_is_absolute()) { qemu_input_queue_abs(s->dcl.con, INPUT_AXIS_X, x, surface_width(s->ds)); qemu_input_queue_abs(s->dcl.con, INPUT_AXIS_Y, y, surface_height(s->ds)); qemu_input_event_sync(); } else if (s->last_x != -1 && s->last_y != -1 && gd_is_grab_active(s)) { qemu_input_queue_rel(s->dcl.con, INPUT_AXIS_X, x - s->last_x); qemu_input_queue_rel(s->dcl.con, INPUT_AXIS_Y, y - s->last_y); qemu_input_event_sync(); } s->last_x = x; s->last_y = y; if (!qemu_input_is_absolute() && gd_is_grab_active(s)) { GdkScreen *screen = gtk_widget_get_screen(s->drawing_area); int x = (int)motion->x_root; int y = (int)motion->y_root; /* In relative mode check to see if client pointer hit * one of the screen edges, and if so move it back by * 200 pixels. This is important because the pointer * in the server doesn't correspond 1-for-1, and so * may still be only half way across the screen. 
Without * this warp, the server pointer would thus appear to hit * an invisible wall */ if (x == 0) { x += 200; } if (y == 0) { y += 200; } if (x == (gdk_screen_get_width(screen) - 1)) { x -= 200; } if (y == (gdk_screen_get_height(screen) - 1)) { y -= 200; } if (x != (int)motion->x_root || y != (int)motion->y_root) { #if GTK_CHECK_VERSION(3, 0, 0) GdkDevice *dev = gdk_event_get_device((GdkEvent *)motion); gdk_device_warp(dev, screen, x, y); #else GdkDisplay *display = gtk_widget_get_display(widget); gdk_display_warp_pointer(display, screen, x, y); #endif s->last_x = -1; s->last_y = -1; return FALSE; } } return TRUE; }"} {"target": 0, "idx": 4524, "func": "int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, int level) { VDPAUHWContext *hwctx = avctx->hwaccel_context; VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data; VdpVideoSurfaceQueryCapabilities *surface_query_caps; VdpDecoderQueryCapabilities *decoder_query_caps; VdpDecoderCreate *create; void *func; VdpStatus status; VdpBool supported; uint32_t max_level, max_mb, max_width, max_height; VdpChromaType type; uint32_t width; uint32_t height; vdctx->width = UINT32_MAX; vdctx->height = UINT32_MAX; if (av_vdpau_get_surface_parameters(avctx, &type, &width, &height)) return AVERROR(ENOSYS); if (hwctx) { hwctx->reset = 0; if (hwctx->context.decoder != VDP_INVALID_HANDLE) { vdctx->decoder = hwctx->context.decoder; vdctx->render = hwctx->context.render; vdctx->device = VDP_INVALID_HANDLE; return 0; /* Decoder created by user */ } vdctx->device = hwctx->device; vdctx->get_proc_address = hwctx->get_proc_address; if (hwctx->flags & AV_HWACCEL_FLAG_IGNORE_LEVEL) level = 0; if (!(hwctx->flags & AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH) && type != VDP_CHROMA_TYPE_420) return AVERROR(ENOSYS); } else { AVHWFramesContext *frames_ctx = NULL; AVVDPAUDeviceContext *dev_ctx; // We assume the hw_frames_ctx always survives until ff_vdpau_common_uninit // is called. This holds true as the user is not allowed to touch // hw_device_ctx, or hw_frames_ctx after get_format (and ff_get_format // itself also uninits before unreffing hw_frames_ctx). 
if (avctx->hw_frames_ctx) { frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; } else if (avctx->hw_device_ctx) { int ret; avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx); if (!avctx->hw_frames_ctx) return AVERROR(ENOMEM); frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; frames_ctx->format = AV_PIX_FMT_VDPAU; frames_ctx->sw_format = avctx->sw_pix_fmt; frames_ctx->width = avctx->coded_width; frames_ctx->height = avctx->coded_height; ret = av_hwframe_ctx_init(avctx->hw_frames_ctx); if (ret < 0) { av_buffer_unref(&avctx->hw_frames_ctx); return ret; } } if (!frames_ctx) { av_log(avctx, AV_LOG_ERROR, \"A hardware frames context is \" \"required for VDPAU decoding.\\n\"); return AVERROR(EINVAL); } dev_ctx = frames_ctx->device_ctx->hwctx; vdctx->device = dev_ctx->device; vdctx->get_proc_address = dev_ctx->get_proc_address; if (avctx->hwaccel_flags & AV_HWACCEL_FLAG_IGNORE_LEVEL) level = 0; } if (level < 0) return AVERROR(ENOTSUP); status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES, &func); if (status != VDP_STATUS_OK) return vdpau_error(status); else surface_query_caps = func; status = surface_query_caps(vdctx->device, type, &supported, &max_width, &max_height); if (status != VDP_STATUS_OK) return vdpau_error(status); if (supported != VDP_TRUE || max_width < width || max_height < height) return AVERROR(ENOTSUP); status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES, &func); if (status != VDP_STATUS_OK) return vdpau_error(status); else decoder_query_caps = func; status = decoder_query_caps(vdctx->device, profile, &supported, &max_level, &max_mb, &max_width, &max_height); #ifdef VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE if ((status != VDP_STATUS_OK || supported != VDP_TRUE) && profile == VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE) { profile = VDP_DECODER_PROFILE_H264_MAIN; status = decoder_query_caps(vdctx->device, profile, &supported, &max_level, &max_mb, &max_width, &max_height); } #endif if (status != VDP_STATUS_OK) return vdpau_error(status); if (supported != VDP_TRUE || max_level < level || max_width < width || max_height < height) return AVERROR(ENOTSUP); status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_CREATE, &func); if (status != VDP_STATUS_OK) return vdpau_error(status); else create = func; status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_RENDER, &func); if (status != VDP_STATUS_OK) return vdpau_error(status); else vdctx->render = func; status = create(vdctx->device, profile, width, height, avctx->refs, &vdctx->decoder); if (status == VDP_STATUS_OK) { vdctx->width = avctx->coded_width; vdctx->height = avctx->coded_height; } return vdpau_error(status); }"} {"target": 1, "idx": 4539, "func": "static int estimate_qp(MpegEncContext *s, int dry_run){ if (s->next_lambda){ s->current_picture_ptr->f.quality = s->current_picture.f.quality = s->next_lambda; if(!dry_run) s->next_lambda= 0; } else if (!s->fixed_qscale) { s->current_picture_ptr->f.quality = s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run); if (s->current_picture.f.quality < 0) return -1; } if(s->adaptive_quant){ switch(s->codec_id){ case AV_CODEC_ID_MPEG4: if (CONFIG_MPEG4_ENCODER) ff_clean_mpeg4_qscales(s); break; case AV_CODEC_ID_H263: case AV_CODEC_ID_H263P: case AV_CODEC_ID_FLV1: if (CONFIG_H263_ENCODER) ff_clean_h263_qscales(s); break; default: ff_init_qscale_tab(s); } s->lambda= s->lambda_table[0]; //FIXME broken }else s->lambda = 
s->current_picture.f.quality; update_qscale(s); return 0; }"} {"target": 0, "idx": 4540, "func": "static void spr_write_tbu (DisasContext *ctx, int sprn, int gprn) { if (use_icount) { gen_io_start(); } gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]); if (use_icount) { gen_io_end(); gen_stop_exception(ctx); } }"} {"target": 0, "idx": 4551, "func": "static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev) { #ifdef CONFIG_KVM struct kvm_irqfd irqfd = { .fd = event_notifier_get_fd(&vdev->intx.interrupt), .gsi = vdev->intx.route.irq, .flags = KVM_IRQFD_FLAG_RESAMPLE, }; struct vfio_irq_set *irq_set; int ret, argsz; int32_t *pfd; if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() || vdev->intx.route.mode != PCI_INTX_ENABLED || !kvm_resamplefds_enabled()) { return; } /* Get to a known interrupt state */ qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev); vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); vdev->intx.pending = false; pci_irq_deassert(&vdev->pdev); /* Get an eventfd for resample/unmask */ if (event_notifier_init(&vdev->intx.unmask, 0)) { error_report(\"vfio: Error: event_notifier_init failed eoi\"); goto fail; } /* KVM triggers it, VFIO listens for it */ irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask); if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) { error_report(\"vfio: Error: Failed to setup resample irqfd: %m\"); goto fail_irqfd; } argsz = sizeof(*irq_set) + sizeof(*pfd); irq_set = g_malloc0(argsz); irq_set->argsz = argsz; irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK; irq_set->index = VFIO_PCI_INTX_IRQ_INDEX; irq_set->start = 0; irq_set->count = 1; pfd = (int32_t *)&irq_set->data; *pfd = irqfd.resamplefd; ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); g_free(irq_set); if (ret) { error_report(\"vfio: Error: Failed to setup INTx unmask fd: %m\"); goto fail_vfio; } /* Let'em rip */ vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); vdev->intx.kvm_accel = true; trace_vfio_intx_enable_kvm(vdev->vbasedev.name); return; fail_vfio: irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN; kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd); fail_irqfd: event_notifier_cleanup(&vdev->intx.unmask); fail: qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev); vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); #endif }"} {"target": 0, "idx": 4555, "func": "static inline void gen_evfsnabs(DisasContext *ctx) { if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_APU); return; } #if defined(TARGET_PPC64) tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x8000000080000000LL); #else tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000); tcg_gen_ori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000); #endif }"} {"target": 0, "idx": 4556, "func": "static uint64_t imx_timerg_read(void *opaque, target_phys_addr_t offset, unsigned size) { IMXTimerGState *s = (IMXTimerGState *)opaque; DPRINTF(\"g-read(offset=%x)\", offset >> 2); switch (offset >> 2) { case 0: /* Control Register */ DPRINTF(\" cr = %x\\n\", s->cr); return s->cr; case 1: /* prescaler */ DPRINTF(\" pr = %x\\n\", s->pr); return s->pr; case 2: /* Status Register */ DPRINTF(\" sr = %x\\n\", s->sr); return s->sr; case 3: /* Interrupt Register */ DPRINTF(\" ir = %x\\n\", s->ir); return s->ir; case 4: /* Output Compare Register 1 */ DPRINTF(\" ocr1 = %x\\n\", s->ocr1); return s->ocr1; case 9: /* cnt */ imx_timerg_update_counts(s); DPRINTF(\" cnt = %x\\n\", s->cnt); return s->cnt; } 
IPRINTF(\"imx_timerg_read: Bad offset %x\\n\", (int)offset >> 2); return 0; }"} {"target": 0, "idx": 4563, "func": "static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVBlkdebugState *s = bs->opaque; QemuOpts *opts; Error *local_err = NULL; int ret; opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto out; } /* Read rules from config file or command line options */ s->config_file = g_strdup(qemu_opt_get(opts, \"config\")); ret = read_config(s, s->config_file, options, errp); if (ret) { goto out; } /* Set initial state */ s->state = 1; /* Open the image file */ bs->file = bdrv_open_child(qemu_opt_get(opts, \"x-image\"), options, \"image\", bs, &child_file, false, &local_err); if (local_err) { ret = -EINVAL; error_propagate(errp, local_err); goto out; } bs->supported_write_flags = BDRV_REQ_FUA & bs->file->bs->supported_write_flags; bs->supported_zero_flags = (BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP) & bs->file->bs->supported_zero_flags; ret = -EINVAL; /* Set request alignment */ s->align = qemu_opt_get_size(opts, \"align\", 0); if (s->align && (s->align >= INT_MAX || !is_power_of_2(s->align))) { error_setg(errp, \"Cannot meet constraints with align %\" PRIu64, s->align); goto out; } ret = 0; out: if (ret < 0) { g_free(s->config_file); } qemu_opts_del(opts); return ret; }"} {"target": 0, "idx": 4569, "func": "static void test_qemu_strtoull_full_negative(void) { const char *str = \" \\t -321\"; uint64_t res = 999; int err; err = qemu_strtoull(str, NULL, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 18446744073709551295LLU); }"} {"target": 0, "idx": 4570, "func": "static uint64_t exynos4210_fimd_read(void *opaque, target_phys_addr_t offset, unsigned size) { Exynos4210fimdState *s = (Exynos4210fimdState *)opaque; int w, i; uint32_t ret = 0; DPRINT_L2(\"read offset 0x%08x\\n\", offset); switch (offset) { case FIMD_VIDCON0 ... FIMD_VIDCON3: return s->vidcon[(offset - FIMD_VIDCON0) >> 2]; case FIMD_VIDTCON_START ... FIMD_VIDTCON_END: return s->vidtcon[(offset - FIMD_VIDTCON_START) >> 2]; case FIMD_WINCON_START ... FIMD_WINCON_END: return s->window[(offset - FIMD_WINCON_START) >> 2].wincon; case FIMD_SHADOWCON: return s->shadowcon; case FIMD_WINCHMAP: return s->winchmap; case FIMD_VIDOSD_START ... FIMD_VIDOSD_END: w = (offset - FIMD_VIDOSD_START) >> 4; i = ((offset - FIMD_VIDOSD_START) & 0xF) >> 2; switch (i) { case 0: ret = ((s->window[w].lefttop_x & FIMD_VIDOSD_COORD_MASK) << FIMD_VIDOSD_HOR_SHIFT) | (s->window[w].lefttop_y & FIMD_VIDOSD_COORD_MASK); break; case 1: ret = ((s->window[w].rightbot_x & FIMD_VIDOSD_COORD_MASK) << FIMD_VIDOSD_HOR_SHIFT) | (s->window[w].rightbot_y & FIMD_VIDOSD_COORD_MASK); break; case 2: if (w == 0) { ret = s->window[w].osdsize; } else { ret = (pack_upper_4(s->window[w].alpha_val[0]) << FIMD_VIDOSD_AEN0_SHIFT) | pack_upper_4(s->window[w].alpha_val[1]); } break; case 3: if (w != 1 && w != 2) { DPRINT_ERROR(\"bad read offset 0x%08x\\n\", offset); return 0xBAADBAAD; } ret = s->window[w].osdsize; break; } return ret; case FIMD_VIDWADD0_START ... FIMD_VIDWADD0_END: w = (offset - FIMD_VIDWADD0_START) >> 3; i = ((offset - FIMD_VIDWADD0_START) >> 2) & 1; return s->window[w].buf_start[i]; case FIMD_VIDWADD1_START ... FIMD_VIDWADD1_END: w = (offset - FIMD_VIDWADD1_START) >> 3; i = ((offset - FIMD_VIDWADD1_START) >> 2) & 1; return s->window[w].buf_end[i]; case FIMD_VIDWADD2_START ... 
FIMD_VIDWADD2_END: w = (offset - FIMD_VIDWADD2_START) >> 2; return s->window[w].virtpage_width | (s->window[w].virtpage_offsize << FIMD_VIDWADD2_OFFSIZE_SHIFT); case FIMD_VIDINTCON0 ... FIMD_VIDINTCON1: return s->vidintcon[(offset - FIMD_VIDINTCON0) >> 2]; case FIMD_WKEYCON_START ... FIMD_WKEYCON_END: w = ((offset - FIMD_WKEYCON_START) >> 3) + 1; i = ((offset - FIMD_WKEYCON_START) >> 2) & 1; return s->window[w].keycon[i]; case FIMD_WKEYALPHA_START ... FIMD_WKEYALPHA_END: w = ((offset - FIMD_WKEYALPHA_START) >> 2) + 1; return s->window[w].keyalpha; case FIMD_DITHMODE: return s->dithmode; case FIMD_WINMAP_START ... FIMD_WINMAP_END: return s->window[(offset - FIMD_WINMAP_START) >> 2].winmap; case FIMD_WPALCON_HIGH ... FIMD_WPALCON_LOW: return s->wpalcon[(offset - FIMD_WPALCON_HIGH) >> 2]; case FIMD_TRIGCON: return s->trigcon; case FIMD_I80IFCON_START ... FIMD_I80IFCON_END: return s->i80ifcon[(offset - FIMD_I80IFCON_START) >> 2]; case FIMD_COLORGAINCON: return s->colorgaincon; case FIMD_LDI_CMDCON0 ... FIMD_LDI_CMDCON1: return s->ldi_cmdcon[(offset - FIMD_LDI_CMDCON0) >> 2]; case FIMD_SIFCCON0 ... FIMD_SIFCCON2: i = (offset - FIMD_SIFCCON0) >> 2; return s->sifccon[i]; case FIMD_HUECOEFCR_START ... FIMD_HUECOEFCR_END: i = (offset - FIMD_HUECOEFCR_START) >> 2; return s->huecoef_cr[i]; case FIMD_HUECOEFCB_START ... FIMD_HUECOEFCB_END: i = (offset - FIMD_HUECOEFCB_START) >> 2; return s->huecoef_cb[i]; case FIMD_HUEOFFSET: return s->hueoffset; case FIMD_VIDWALPHA_START ... FIMD_VIDWALPHA_END: w = ((offset - FIMD_VIDWALPHA_START) >> 3); i = ((offset - FIMD_VIDWALPHA_START) >> 2) & 1; return s->window[w].alpha_val[i] & (w == 0 ? 0xFFFFFF : FIMD_VIDALPHA_ALPHA_LOWER); case FIMD_BLENDEQ_START ... FIMD_BLENDEQ_END: return s->window[(offset - FIMD_BLENDEQ_START) >> 2].blendeq; case FIMD_BLENDCON: return s->blendcon; case FIMD_WRTQOSCON_START ... FIMD_WRTQOSCON_END: return s->window[(offset - FIMD_WRTQOSCON_START) >> 2].rtqoscon; case FIMD_I80IFCMD_START ... FIMD_I80IFCMD_END: return s->i80ifcmd[(offset - FIMD_I80IFCMD_START) >> 2]; case FIMD_VIDW0ADD0_B2 ... FIMD_VIDW4ADD0_B2: if (offset & 0x0004) { break; } return s->window[(offset - FIMD_VIDW0ADD0_B2) >> 3].buf_start[2]; case FIMD_SHD_ADD0_START ... FIMD_SHD_ADD0_END: if (offset & 0x0004) { break; } return s->window[(offset - FIMD_SHD_ADD0_START) >> 3].shadow_buf_start; case FIMD_SHD_ADD1_START ... FIMD_SHD_ADD1_END: if (offset & 0x0004) { break; } return s->window[(offset - FIMD_SHD_ADD1_START) >> 3].shadow_buf_end; case FIMD_SHD_ADD2_START ... FIMD_SHD_ADD2_END: return s->window[(offset - FIMD_SHD_ADD2_START) >> 2].shadow_buf_size; case FIMD_PAL_MEM_START ... FIMD_PAL_MEM_END: w = (offset - FIMD_PAL_MEM_START) >> 10; i = ((offset - FIMD_PAL_MEM_START) >> 2) & 0xFF; return s->window[w].palette[i]; case FIMD_PALMEM_AL_START ... 
FIMD_PALMEM_AL_END: /* Palette aliases for win 0,1 */ w = (offset - FIMD_PALMEM_AL_START) >> 10; i = ((offset - FIMD_PALMEM_AL_START) >> 2) & 0xFF; return s->window[w].palette[i]; } DPRINT_ERROR(\"bad read offset 0x%08x\\n\", offset); return 0xBAADBAAD; }"} {"target": 0, "idx": 4571, "func": "float64 HELPER(ucf64_absd)(float64 a) { return float64_abs(a); }"} {"target": 0, "idx": 4607, "func": "static CharDriverState *qemu_chr_open_null(const char *id, ChardevBackend *backend, ChardevReturn *ret, Error **errp) { CharDriverState *chr; ChardevCommon *common = backend->u.null; chr = qemu_chr_alloc(common, errp); if (!chr) { return NULL; } chr->chr_write = null_chr_write; chr->explicit_be_open = true; return chr; }"} {"target": 0, "idx": 4609, "func": "static int packet_enqueue(CompareState *s, int mode) { ConnectionKey key; Packet *pkt = NULL; Connection *conn; if (mode == PRIMARY_IN) { pkt = packet_new(s->pri_rs.buf, s->pri_rs.packet_len, s->pri_rs.vnet_hdr_len); } else { pkt = packet_new(s->sec_rs.buf, s->sec_rs.packet_len, s->sec_rs.vnet_hdr_len); } if (parse_packet_early(pkt)) { packet_destroy(pkt, NULL); pkt = NULL; return -1; } fill_connection_key(pkt, &key); conn = connection_get(s->connection_track_table, &key, &s->conn_list); if (!conn->processing) { g_queue_push_tail(&s->conn_list, conn); conn->processing = true; } if (mode == PRIMARY_IN) { if (!colo_insert_packet(&conn->primary_list, pkt)) { error_report(\"colo compare primary queue size too big,\" \"drop packet\"); } } else { if (!colo_insert_packet(&conn->secondary_list, pkt)) { error_report(\"colo compare secondary queue size too big,\" \"drop packet\"); } } return 0; }"} {"target": 0, "idx": 4610, "func": "enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag) { int i; for(i=0; i<4 && tags[i]; i++){ enum CodecID id= codec_get_id(tags[i], tag); if(id!=CODEC_ID_NONE) return id; } return CODEC_ID_NONE; }"} {"target": 0, "idx": 4611, "func": "static void ide_atapi_identify(IDEState *s) { uint16_t *p; if (s->identify_set) { memcpy(s->io_buffer, s->identify_data, sizeof(s->identify_data)); return; } memset(s->io_buffer, 0, 512); p = (uint16_t *)s->io_buffer; /* Removable CDROM, 50us response, 12 byte packets */ put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0)); padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */ put_le16(p + 20, 3); /* buffer type */ put_le16(p + 21, 512); /* cache size in sectors */ put_le16(p + 22, 4); /* ecc bytes */ padstr((char *)(p + 23), s->version, 8); /* firmware version */ padstr((char *)(p + 27), \"QEMU DVD-ROM\", 40); /* model */ put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */ #ifdef USE_DMA_CDROM put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */ put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */ put_le16(p + 62, 7); /* single word dma0-2 supported */ put_le16(p + 63, 7); /* mdma0-2 supported */ #else put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */ put_le16(p + 53, 3); /* words 64-70, 54-58 valid */ put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */ #endif put_le16(p + 64, 3); /* pio3-4 supported */ put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */ put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */ put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */ put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */ put_le16(p + 71, 30); /* in ns */ put_le16(p + 72, 30); /* in ns */ if (s->ncq_queues) { put_le16(p + 75, 
s->ncq_queues - 1); /* NCQ supported */ put_le16(p + 76, (1 << 8)); } put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */ #ifdef USE_DMA_CDROM put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */ #endif memcpy(s->identify_data, p, sizeof(s->identify_data)); s->identify_set = 1; }"} {"target": 0, "idx": 4615, "func": "static int kvm_put_xsave(CPUState *env) { #ifdef KVM_CAP_XSAVE int i, r; struct kvm_xsave* xsave; uint16_t cwd, swd, twd, fop; if (!kvm_has_xsave()) return kvm_put_fpu(env); xsave = qemu_memalign(4096, sizeof(struct kvm_xsave)); memset(xsave, 0, sizeof(struct kvm_xsave)); cwd = swd = twd = fop = 0; swd = env->fpus & ~(7 << 11); swd |= (env->fpstt & 7) << 11; cwd = env->fpuc; for (i = 0; i < 8; ++i) twd |= (!env->fptags[i]) << i; xsave->region[0] = (uint32_t)(swd << 16) + cwd; xsave->region[1] = (uint32_t)(fop << 16) + twd; memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs, sizeof env->fpregs); memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs, sizeof env->xmm_regs); xsave->region[XSAVE_MXCSR] = env->mxcsr; *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv; memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs, sizeof env->ymmh_regs); r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave); qemu_free(xsave); return r; #else return kvm_put_fpu(env); #endif }"} {"target": 1, "idx": 4639, "func": "static inline void blockCopy(uint8_t dst[], int dstStride, uint8_t src[], int srcStride, int numLines, int levelFix) { int i; if(levelFix) { #ifdef HAVE_MMX asm volatile( \"movl %4, %%eax \\n\\t\" \"movl %%eax, temp0\\n\\t\" \"pushl %0 \\n\\t\" \"pushl %1 \\n\\t\" \"leal (%2,%2), %%eax \\n\\t\" \"leal (%3,%3), %%ebx \\n\\t\" \"movq packedYOffset, %%mm2 \\n\\t\" \"movq packedYScale, %%mm3 \\n\\t\" \"pxor %%mm4, %%mm4 \\n\\t\" #define SCALED_CPY \\ \"movq (%0), %%mm0 \\n\\t\"\\ \"movq (%0,%2), %%mm1 \\n\\t\"\\ \"psubusb %%mm2, %%mm0 \\n\\t\"\\ \"psubusb %%mm2, %%mm1 \\n\\t\"\\ \"movq %%mm0, %%mm5 \\n\\t\"\\ \"punpcklbw %%mm4, %%mm0 \\n\\t\"\\ \"punpckhbw %%mm4, %%mm5 \\n\\t\"\\ \"psllw $7, %%mm0 \\n\\t\"\\ \"psllw $7, %%mm5 \\n\\t\"\\ \"pmulhw %%mm3, %%mm0 \\n\\t\"\\ \"pmulhw %%mm3, %%mm5 \\n\\t\"\\ \"packuswb %%mm5, %%mm0 \\n\\t\"\\ \"movq %%mm0, (%1) \\n\\t\"\\ \"movq %%mm1, %%mm5 \\n\\t\"\\ \"punpcklbw %%mm4, %%mm1 \\n\\t\"\\ \"punpckhbw %%mm4, %%mm5 \\n\\t\"\\ \"psllw $7, %%mm1 \\n\\t\"\\ \"psllw $7, %%mm5 \\n\\t\"\\ \"pmulhw %%mm3, %%mm1 \\n\\t\"\\ \"pmulhw %%mm3, %%mm5 \\n\\t\"\\ \"packuswb %%mm5, %%mm1 \\n\\t\"\\ \"movq %%mm1, (%1, %3) \\n\\t\"\\ \"1: \\n\\t\" SCALED_CPY \"addl %%eax, %0 \\n\\t\" \"addl %%ebx, %1 \\n\\t\" SCALED_CPY \"addl %%eax, %0 \\n\\t\" \"addl %%ebx, %1 \\n\\t\" \"decl temp0 \\n\\t\" \"jnz 1b \\n\\t\" \"popl %1 \\n\\t\" \"popl %0 \\n\\t\" : : \"r\" (src), \"r\" (dst), \"r\" (srcStride), \"r\" (dstStride), \"m\" (numLines>>2) : \"%eax\", \"%ebx\" ); #else for(i=0; i>2) : \"%eax\", \"%ebx\" ); #else for(i=0; iplatform_class = cpu_to_le16(TPM2_ACPI_CLASS_CLIENT); tpm2_ptr->control_area_address = cpu_to_le64(0); tpm2_ptr->start_method = cpu_to_le32(TPM2_START_METHOD_MMIO); build_header(linker, table_data, (void *)tpm2_ptr, \"TPM2\", sizeof(*tpm2_ptr), 4, NULL, NULL); }"} {"target": 1, "idx": 4668, "func": "int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush) { do { int nb_output = 0; int min_poc = INT_MAX; int i, min_idx, ret; if (s->sh.no_output_of_prior_pics_flag == 1) { for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) { HEVCFrame *frame = &s->DPB[i]; if (!(frame->flags & HEVC_FRAME_FLAG_BUMPING) && frame->poc != s->poc && 
frame->sequence == s->seq_output) { ff_hevc_unref_frame(s, frame, HEVC_FRAME_FLAG_OUTPUT); } } } for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) { HEVCFrame *frame = &s->DPB[i]; if ((frame->flags & HEVC_FRAME_FLAG_OUTPUT) && frame->sequence == s->seq_output) { nb_output++; if (frame->poc < min_poc) { min_poc = frame->poc; min_idx = i; } } } /* wait for more frames before output */ if (!flush && s->seq_output == s->seq_decode && s->sps && nb_output <= s->sps->temporal_layer[s->sps->max_sub_layers - 1].num_reorder_pics) return 0; if (nb_output) { HEVCFrame *frame = &s->DPB[min_idx]; AVFrame *dst = out; AVFrame *src = frame->frame; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format); int pixel_shift = !!(desc->comp[0].depth_minus1 > 7); ret = av_frame_ref(out, src); if (frame->flags & HEVC_FRAME_FLAG_BUMPING) ff_hevc_unref_frame(s, frame, HEVC_FRAME_FLAG_OUTPUT | HEVC_FRAME_FLAG_BUMPING); else ff_hevc_unref_frame(s, frame, HEVC_FRAME_FLAG_OUTPUT); if (ret < 0) return ret; for (i = 0; i < 3; i++) { int hshift = (i > 0) ? desc->log2_chroma_w : 0; int vshift = (i > 0) ? desc->log2_chroma_h : 0; int off = ((frame->window.left_offset >> hshift) << pixel_shift) + (frame->window.top_offset >> vshift) * dst->linesize[i]; dst->data[i] += off; } av_log(s->avctx, AV_LOG_DEBUG, \"Output frame with POC %d.\\n\", frame->poc); return 1; } if (s->seq_output != s->seq_decode) s->seq_output = (s->seq_output + 1) & 0xff; else break; } while (1); return 0; }"} {"target": 0, "idx": 4681, "func": "static int mpegaudio_parse(AVCodecParserContext *s1, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size) { MpegAudioParseContext *s = s1->priv_data; int len, ret; uint32_t header; const uint8_t *buf_ptr; *poutbuf = NULL; *poutbuf_size = 0; buf_ptr = buf; while (buf_size > 0) { len = s->inbuf_ptr - s->inbuf; if (s->frame_size == 0) { /* special case for next header for first frame in free format case (XXX: find a simpler method) */ if (s->free_format_next_header != 0) { s->inbuf[0] = s->free_format_next_header >> 24; s->inbuf[1] = s->free_format_next_header >> 16; s->inbuf[2] = s->free_format_next_header >> 8; s->inbuf[3] = s->free_format_next_header; s->inbuf_ptr = s->inbuf + 4; s->free_format_next_header = 0; goto got_header; } /* no header seen : find one. We need at least MPA_HEADER_SIZE bytes to parse it */ len = MPA_HEADER_SIZE - len; if (len > buf_size) len = buf_size; if (len > 0) { memcpy(s->inbuf_ptr, buf_ptr, len); buf_ptr += len; buf_size -= len; s->inbuf_ptr += len; } if ((s->inbuf_ptr - s->inbuf) >= MPA_HEADER_SIZE) { got_header: header = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) | (s->inbuf[2] << 8) | s->inbuf[3]; ret = mpa_decode_header(avctx, header); if (ret < 0) { /* no sync found : move by one byte (inefficient, but simple!) 
*/ memmove(s->inbuf, s->inbuf + 1, s->inbuf_ptr - s->inbuf - 1); s->inbuf_ptr--; dprintf(\"skip %x\\n\", header); /* reset free format frame size to give a chance to get a new bitrate */ s->free_format_frame_size = 0; } else { s->frame_size = ret; #if 0 /* free format: prepare to compute frame size */ if (decode_header(s, header) == 1) { s->frame_size = -1; } #endif } } } else #if 0 if (s->frame_size == -1) { /* free format : find next sync to compute frame size */ len = MPA_MAX_CODED_FRAME_SIZE - len; if (len > buf_size) len = buf_size; if (len == 0) { /* frame too long: resync */ s->frame_size = 0; memmove(s->inbuf, s->inbuf + 1, s->inbuf_ptr - s->inbuf - 1); s->inbuf_ptr--; } else { uint8_t *p, *pend; uint32_t header1; int padding; memcpy(s->inbuf_ptr, buf_ptr, len); /* check for header */ p = s->inbuf_ptr - 3; pend = s->inbuf_ptr + len - 4; while (p <= pend) { header = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; header1 = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) | (s->inbuf[2] << 8) | s->inbuf[3]; /* check with high probability that we have a valid header */ if ((header & SAME_HEADER_MASK) == (header1 & SAME_HEADER_MASK)) { /* header found: update pointers */ len = (p + 4) - s->inbuf_ptr; buf_ptr += len; buf_size -= len; s->inbuf_ptr = p; /* compute frame size */ s->free_format_next_header = header; s->free_format_frame_size = s->inbuf_ptr - s->inbuf; padding = (header1 >> 9) & 1; if (s->layer == 1) s->free_format_frame_size -= padding * 4; else s->free_format_frame_size -= padding; dprintf(\"free frame size=%d padding=%d\\n\", s->free_format_frame_size, padding); decode_header(s, header1); goto next_data; } p++; } /* not found: simply increase pointers */ buf_ptr += len; s->inbuf_ptr += len; buf_size -= len; } } else #endif if (len < s->frame_size) { if (s->frame_size > MPA_MAX_CODED_FRAME_SIZE) s->frame_size = MPA_MAX_CODED_FRAME_SIZE; len = s->frame_size - len; if (len > buf_size) len = buf_size; memcpy(s->inbuf_ptr, buf_ptr, len); buf_ptr += len; s->inbuf_ptr += len; buf_size -= len; } // next_data: if (s->frame_size > 0 && (s->inbuf_ptr - s->inbuf) >= s->frame_size) { *poutbuf = s->inbuf; *poutbuf_size = s->inbuf_ptr - s->inbuf; s->inbuf_ptr = s->inbuf; s->frame_size = 0; break; } } return buf_ptr - buf; }"} {"target": 0, "idx": 4685, "func": "JNIEnv *ff_jni_attach_env(int *attached, void *log_ctx) { int ret = 0; JNIEnv *env = NULL; *attached = 0; pthread_mutex_lock(&lock); if (java_vm == NULL && (java_vm = av_jni_get_java_vm(log_ctx)) == NULL) { av_log(log_ctx, AV_LOG_INFO, \"Retrieving current Java virtual machine using Android JniInvocation wrapper\\n\"); if (check_jni_invocation(log_ctx) == 0) { if ((java_vm = get_java_vm(NULL, log_ctx)) != NULL || (java_vm = get_java_vm(\"libdvm.so\", log_ctx)) != NULL || (java_vm = get_java_vm(\"libart.so\", log_ctx)) != NULL) { av_log(log_ctx, AV_LOG_INFO, \"Found Java virtual machine using Android JniInvocation wrapper\\n\"); } } } pthread_mutex_unlock(&lock); if (!java_vm) { av_log(log_ctx, AV_LOG_ERROR, \"Could not retrieve a Java virtual machine\\n\"); return NULL; } ret = (*java_vm)->GetEnv(java_vm, (void **)&env, JNI_VERSION_1_6); switch(ret) { case JNI_EDETACHED: if ((*java_vm)->AttachCurrentThread(java_vm, &env, NULL) != 0) { av_log(log_ctx, AV_LOG_ERROR, \"Failed to attach the JNI environment to the current thread\\n\"); env = NULL; } else { *attached = 1; } break; case JNI_OK: break; case JNI_EVERSION: av_log(log_ctx, AV_LOG_ERROR, \"The specified JNI version is not supported\\n\"); break; default: av_log(log_ctx, 
AV_LOG_ERROR, \"Failed to get the JNI environment attached to this thread\"); break; } return env; }"} {"target": 1, "idx": 4697, "func": "static int perform_cow(BlockDriverState *bs, QCowL2Meta *m) { BDRVQcow2State *s = bs->opaque; Qcow2COWRegion *start = &m->cow_start; Qcow2COWRegion *end = &m->cow_end; unsigned buffer_size; unsigned data_bytes = end->offset - (start->offset + start->nb_bytes); bool merge_reads; uint8_t *start_buffer, *end_buffer; QEMUIOVector qiov; int ret; assert(start->nb_bytes <= UINT_MAX - end->nb_bytes); assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes); assert(start->offset + start->nb_bytes <= end->offset); assert(!m->data_qiov || m->data_qiov->size == data_bytes); if (start->nb_bytes == 0 && end->nb_bytes == 0) { return 0; } /* If we have to read both the start and end COW regions and the * middle region is not too large then perform just one read * operation */ merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384; if (merge_reads) { buffer_size = start->nb_bytes + data_bytes + end->nb_bytes; } else { /* If we have to do two reads, add some padding in the middle * if necessary to make sure that the end region is optimally * aligned. */ size_t align = bdrv_opt_mem_align(bs); assert(align > 0 && align <= UINT_MAX); assert(QEMU_ALIGN_UP(start->nb_bytes, align) <= UINT_MAX - end->nb_bytes); buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes; } /* Reserve a buffer large enough to store all the data that we're * going to read */ start_buffer = qemu_try_blockalign(bs, buffer_size); if (start_buffer == NULL) { return -ENOMEM; } /* The part of the buffer where the end region is located */ end_buffer = start_buffer + buffer_size - end->nb_bytes; qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0)); qemu_co_mutex_unlock(&s->lock); /* First we read the existing data from both COW regions. We * either read the whole region in one go, or the start and end * regions separately. */ if (merge_reads) { qemu_iovec_add(&qiov, start_buffer, buffer_size); ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov); } else { qemu_iovec_add(&qiov, start_buffer, start->nb_bytes); ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov); if (ret < 0) { goto fail; } qemu_iovec_reset(&qiov); qemu_iovec_add(&qiov, end_buffer, end->nb_bytes); ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov); } if (ret < 0) { goto fail; } /* Encrypt the data if necessary before writing it */ if (bs->encrypted) { if (!do_perform_cow_encrypt(bs, m->offset, start->offset, start_buffer, start->nb_bytes) || !do_perform_cow_encrypt(bs, m->offset, end->offset, end_buffer, end->nb_bytes)) { ret = -EIO; goto fail; } } /* And now we can write everything. 
If we have the guest data we * can write everything in one single operation */ if (m->data_qiov) { qemu_iovec_reset(&qiov); if (start->nb_bytes) { qemu_iovec_add(&qiov, start_buffer, start->nb_bytes); } qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes); if (end->nb_bytes) { qemu_iovec_add(&qiov, end_buffer, end->nb_bytes); } /* NOTE: we have a write_aio blkdebug event here followed by * a cow_write one in do_perform_cow_write(), but there's only * one single I/O operation */ BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov); } else { /* If there's no guest data then write both COW regions separately */ qemu_iovec_reset(&qiov); qemu_iovec_add(&qiov, start_buffer, start->nb_bytes); ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov); if (ret < 0) { goto fail; } qemu_iovec_reset(&qiov); qemu_iovec_add(&qiov, end_buffer, end->nb_bytes); ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov); } fail: qemu_co_mutex_lock(&s->lock); /* * Before we update the L2 table to actually point to the new cluster, we * need to be sure that the refcounts have been increased and COW was * handled. */ if (ret == 0) { qcow2_cache_depends_on_flush(s->l2_table_cache); } qemu_vfree(start_buffer); qemu_iovec_destroy(&qiov); return ret; }"} {"target": 0, "idx": 4698, "func": "void sws_rgb2rgb_init(int flags) { #if HAVE_SSE2 || HAVE_MMX2 || HAVE_AMD3DNOW || HAVE_MMX if (flags & SWS_CPU_CAPS_SSE2) rgb2rgb_init_SSE2(); else if (flags & SWS_CPU_CAPS_MMX2) rgb2rgb_init_MMX2(); else if (flags & SWS_CPU_CAPS_3DNOW) rgb2rgb_init_3DNOW(); else if (flags & SWS_CPU_CAPS_MMX) rgb2rgb_init_MMX(); else #endif /* HAVE_MMX2 || HAVE_AMD3DNOW || HAVE_MMX */ rgb2rgb_init_C(); }"} {"target": 0, "idx": 4718, "func": "static double tget_double(GetByteContext *gb, int le) { av_alias64 i = { .u64 = le ? 
bytestream2_get_le64(gb) : bytestream2_get_be64(gb)}; return i.f64; }"} {"target": 1, "idx": 4760, "func": "static void init_proc_power5plus(CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, \"HID0\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_clear, 0x60000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, \"HID1\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750FX_HID2, \"HID2\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_970_HID5, \"HID5\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, POWERPC970_HID5_INIT); /* XXX : not implemented */ spr_register(env, SPR_L2CR, \"L2CR\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, NULL, 0x00000000); /* Memory management */ /* XXX: not correct */ gen_low_BATs(env); /* XXX : not implemented */ spr_register(env, SPR_MMUCFG, \"MMUCFG\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); /* TOFIX */ /* XXX : not implemented */ spr_register(env, SPR_MMUCSR0, \"MMUCSR0\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* TOFIX */ spr_register(env, SPR_HIOR, \"SPR_HIOR\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_hior, &spr_write_hior, 0x00000000); spr_register(env, SPR_CTRL, \"SPR_CTRL\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_UCTRL, \"SPR_UCTRL\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_VRSAVE, \"SPR_VRSAVE\", &spr_read_generic, &spr_write_generic, &spr_read_generic, &spr_write_generic, 0x00000000); #if !defined(CONFIG_USER_ONLY) env->slb_nr = 64; #endif init_excp_970(env); env->dcache_line_size = 128; env->icache_line_size = 128; /* Allocate hardware IRQ controller */ ppc970_irq_init(env); /* Can't find information on what this should be on reset. This * value is the one used by 74xx processors. */ vscr_init(env, 0x00010000); }"} {"target": 1, "idx": 4761, "func": "static int64_t get_bit_rate(AVCodecContext *ctx) { int64_t bit_rate; int bits_per_sample; switch (ctx->codec_type) { case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_DATA: case AVMEDIA_TYPE_SUBTITLE: case AVMEDIA_TYPE_ATTACHMENT: bit_rate = ctx->bit_rate; break; case AVMEDIA_TYPE_AUDIO: bits_per_sample = av_get_bits_per_sample(ctx->codec_id); bit_rate = bits_per_sample ? 
ctx->sample_rate * ctx->channels * bits_per_sample : ctx->bit_rate; break; default: bit_rate = 0; break; } return bit_rate; }"} {"target": 0, "idx": 4762, "func": "static int mkv_write_codecprivate(AVFormatContext *s, AVIOContext *pb, AVCodecParameters *par, int native_id, int qt_id) { AVIOContext *dyn_cp; uint8_t *codecpriv; int ret, codecpriv_size; ret = avio_open_dyn_buf(&dyn_cp); if (ret < 0) return ret; if (native_id) { ret = mkv_write_native_codecprivate(s, par, dyn_cp); } else if (par->codec_type == AVMEDIA_TYPE_VIDEO) { if (qt_id) { if (!par->codec_tag) par->codec_tag = ff_codec_get_tag(ff_codec_movvideo_tags, par->codec_id); if ( ff_codec_get_id(ff_codec_movvideo_tags, par->codec_tag) == par->codec_id && (!par->extradata_size || ff_codec_get_id(ff_codec_movvideo_tags, AV_RL32(par->extradata + 4)) != par->codec_id) ) { int i; avio_wb32(dyn_cp, 0x5a + par->extradata_size); avio_wl32(dyn_cp, par->codec_tag); for(i = 0; i < 0x5a - 8; i++) avio_w8(dyn_cp, 0); } avio_write(dyn_cp, par->extradata, par->extradata_size); } else { if (!ff_codec_get_tag(ff_codec_bmp_tags, par->codec_id)) av_log(s, AV_LOG_WARNING, \"codec %s is not supported by this format\\n\", avcodec_get_name(par->codec_id)); if (!par->codec_tag) par->codec_tag = ff_codec_get_tag(ff_codec_bmp_tags, par->codec_id); if (!par->codec_tag && par->codec_id != AV_CODEC_ID_RAWVIDEO) { av_log(s, AV_LOG_ERROR, \"No bmp codec tag found for codec %s\\n\", avcodec_get_name(par->codec_id)); ret = AVERROR(EINVAL); } ff_put_bmp_header(dyn_cp, par, ff_codec_bmp_tags, 0, 0); } } else if (par->codec_type == AVMEDIA_TYPE_AUDIO) { unsigned int tag; tag = ff_codec_get_tag(ff_codec_wav_tags, par->codec_id); if (!tag) { av_log(s, AV_LOG_ERROR, \"No wav codec tag found for codec %s\\n\", avcodec_get_name(par->codec_id)); ret = AVERROR(EINVAL); } if (!par->codec_tag) par->codec_tag = tag; ff_put_wav_header(s, dyn_cp, par, FF_PUT_WAV_HEADER_FORCE_WAVEFORMATEX); } codecpriv_size = avio_close_dyn_buf(dyn_cp, &codecpriv); if (codecpriv_size) put_ebml_binary(pb, MATROSKA_ID_CODECPRIVATE, codecpriv, codecpriv_size); av_free(codecpriv); return ret; }"} {"target": 1, "idx": 4766, "func": "void ff_vp3_idct_dc_add_c(uint8_t *dest/*align 8*/, int line_size, const DCTELEM *block/*align 16*/){ int i, dc = (block[0] + 15) >> 5; const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP + dc; for(i = 0; i < 8; i++){ dest[0] = cm[dest[0]]; dest[1] = cm[dest[1]]; dest[2] = cm[dest[2]]; dest[3] = cm[dest[3]]; dest[4] = cm[dest[4]]; dest[5] = cm[dest[5]]; dest[6] = cm[dest[6]]; dest[7] = cm[dest[7]]; dest += line_size; } }"} {"target": 1, "idx": 4768, "func": "void object_property_add_bool(Object *obj, const char *name, bool (*get)(Object *, Error **), void (*set)(Object *, bool, Error **), Error **errp) { BoolProperty *prop = g_malloc0(sizeof(*prop)); prop->get = get; prop->set = set; object_property_add(obj, name, \"bool\", get ? property_get_bool : NULL, set ? 
property_set_bool : NULL, property_release_bool, prop, errp); }"} {"target": 1, "idx": 4769, "func": "static inline int mirror(int v, int m){ if (v<0) return -v; else if(v>m) return 2*m-v; else return v; }"} {"target": 1, "idx": 4770, "func": "void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s) { if (!s->started) { return; } s->started = false; trace_virtio_blk_data_plane_stop(s); /* Stop thread or cancel pending thread creation BH */ if (s->start_bh) { qemu_bh_delete(s->start_bh); s->start_bh = NULL; } else { event_poll_notify(&s->event_poll); qemu_thread_join(&s->thread); } ioq_cleanup(&s->ioqueue); s->vdev->binding->set_host_notifier(s->vdev->binding_opaque, 0, false); event_poll_cleanup(&s->event_poll); /* Clean up guest notifier (irq) */ s->vdev->binding->set_guest_notifiers(s->vdev->binding_opaque, 1, false); vring_teardown(&s->vring); }"} {"target": 1, "idx": 4781, "func": "static int mov_write_tkhd_tag(AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track, AVStream *st) { int64_t duration = av_rescale_rnd(track->track_duration, MOV_TIMESCALE, track->timescale, AV_ROUND_UP); int version = duration < INT32_MAX ? 0 : 1; int flags = MOV_TKHD_FLAG_IN_MOVIE; int rotation = 0; int group = 0; uint32_t *display_matrix = NULL; int display_matrix_size, i; if (st) { if (mov->per_stream_grouping) group = st->index; else group = st->codecpar->codec_type; display_matrix = (uint32_t*)av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, &display_matrix_size); if (display_matrix && display_matrix_size < 9 * sizeof(*display_matrix)) display_matrix = NULL; } if (track->flags & MOV_TRACK_ENABLED) flags |= MOV_TKHD_FLAG_ENABLED; if (track->mode == MODE_ISM) version = 1; (version == 1) ? avio_wb32(pb, 104) : avio_wb32(pb, 92); /* size */ ffio_wfourcc(pb, \"tkhd\"); avio_w8(pb, version); avio_wb24(pb, flags); if (version == 1) { avio_wb64(pb, track->time); avio_wb64(pb, track->time); } else { avio_wb32(pb, track->time); /* creation time */ avio_wb32(pb, track->time); /* modification time */ } avio_wb32(pb, track->track_id); /* track-id */ avio_wb32(pb, 0); /* reserved */ if (!track->entry && mov->mode == MODE_ISM) (version == 1) ? avio_wb64(pb, UINT64_C(0xffffffffffffffff)) : avio_wb32(pb, 0xffffffff); else if (!track->entry) (version == 1) ? avio_wb64(pb, 0) : avio_wb32(pb, 0); else (version == 1) ? avio_wb64(pb, duration) : avio_wb32(pb, duration); avio_wb32(pb, 0); /* reserved */ avio_wb32(pb, 0); /* reserved */ avio_wb16(pb, 0); /* layer */ avio_wb16(pb, group); /* alternate group) */ /* Volume, only for audio */ if (track->par->codec_type == AVMEDIA_TYPE_AUDIO) avio_wb16(pb, 0x0100); else avio_wb16(pb, 0); avio_wb16(pb, 0); /* reserved */ /* Matrix structure */ if (st && st->metadata) { AVDictionaryEntry *rot = av_dict_get(st->metadata, \"rotate\", NULL, 0); rotation = (rot && rot->value) ? 
atoi(rot->value) : 0; } if (display_matrix) { for (i = 0; i < 9; i++) avio_wb32(pb, display_matrix[i]); } else if (rotation == 90) { write_matrix(pb, 0, 1, -1, 0, track->par->height, 0); } else if (rotation == 180) { write_matrix(pb, -1, 0, 0, -1, track->par->width, track->par->height); } else if (rotation == 270) { write_matrix(pb, 0, -1, 1, 0, 0, track->par->width); } else { write_matrix(pb, 1, 0, 0, 1, 0, 0); } /* Track width and height, for visual only */ if (st && (track->par->codec_type == AVMEDIA_TYPE_VIDEO || track->par->codec_type == AVMEDIA_TYPE_SUBTITLE)) { int64_t track_width_1616; if (track->mode == MODE_MOV) { track_width_1616 = track->par->width * 0x10000ULL; } else { track_width_1616 = av_rescale(st->sample_aspect_ratio.num, track->par->width * 0x10000LL, st->sample_aspect_ratio.den); if (!track_width_1616 || track->height != track->par->height || track_width_1616 > UINT32_MAX) track_width_1616 = track->par->width * 0x10000ULL; } if (track_width_1616 > UINT32_MAX) { av_log(mov->fc, AV_LOG_WARNING, \"track width is too large\\n\"); track_width_1616 = 0; } avio_wb32(pb, track_width_1616); if (track->height > 0xFFFF) { av_log(mov->fc, AV_LOG_WARNING, \"track height is too large\\n\"); avio_wb32(pb, 0); } else avio_wb32(pb, track->height * 0x10000U); } else { avio_wb32(pb, 0); avio_wb32(pb, 0); } return 0x5c; }"} {"target": 1, "idx": 4798, "func": "static int vobsub_read_seek(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags) { MpegDemuxContext *vobsub = s->priv_data; /* Rescale requested timestamps based on the first stream (timebase is the * same for all subtitles stream within a .idx/.sub). Rescaling is done just * like in avformat_seek_file(). */ if (stream_index == -1 && s->nb_streams != 1) { int i, ret = 0; AVRational time_base = s->streams[0]->time_base; ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base); min_ts = av_rescale_rnd(min_ts, time_base.den, time_base.num * (int64_t)AV_TIME_BASE, AV_ROUND_UP | AV_ROUND_PASS_MINMAX); max_ts = av_rescale_rnd(max_ts, time_base.den, time_base.num * (int64_t)AV_TIME_BASE, AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX); for (i = 0; i < s->nb_streams; i++) { int r = ff_subtitles_queue_seek(&vobsub->q[i], s, stream_index, min_ts, ts, max_ts, flags); if (r < 0) ret = r; } return ret; } return ff_subtitles_queue_seek(&vobsub->q[stream_index], s, stream_index, min_ts, ts, max_ts, flags); }"} {"target": 0, "idx": 4800, "func": "static av_cold int aac_encode_init(AVCodecContext *avctx) { AACContext *s = avctx->priv_data; int ret = AVERROR(EINVAL); AACENC_InfoStruct info = { 0 }; CHANNEL_MODE mode; AACENC_ERROR err; int aot = FF_PROFILE_AAC_LOW + 1; int sce = 0, cpe = 0; if ((err = aacEncOpen(&s->handle, 0, avctx->channels)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to open the encoder: %s\\n\", aac_get_error(err)); goto error; } if (avctx->profile != FF_PROFILE_UNKNOWN) aot = avctx->profile + 1; if ((err = aacEncoder_SetParam(s->handle, AACENC_AOT, aot)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to set the AOT %d: %s\\n\", aot, aac_get_error(err)); goto error; } if (aot == FF_PROFILE_AAC_ELD + 1 && s->eld_sbr) { if ((err = aacEncoder_SetParam(s->handle, AACENC_SBR_MODE, 1)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to enable SBR for ELD: %s\\n\", aac_get_error(err)); goto error; } } if ((err = aacEncoder_SetParam(s->handle, AACENC_SAMPLERATE, avctx->sample_rate)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to set the sample rate %d: %s\\n\", avctx->sample_rate, 
aac_get_error(err)); goto error; } switch (avctx->channels) { case 1: mode = MODE_1; sce = 1; cpe = 0; break; case 2: mode = MODE_2; sce = 0; cpe = 1; break; case 3: mode = MODE_1_2; sce = 1; cpe = 1; break; case 4: mode = MODE_1_2_1; sce = 2; cpe = 1; break; case 5: mode = MODE_1_2_2; sce = 1; cpe = 2; break; case 6: mode = MODE_1_2_2_1; sce = 2; cpe = 2; break; default: av_log(avctx, AV_LOG_ERROR, \"Unsupported number of channels %d\\n\", avctx->channels); goto error; } if ((err = aacEncoder_SetParam(s->handle, AACENC_CHANNELMODE, mode)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to set channel mode %d: %s\\n\", mode, aac_get_error(err)); goto error; } if ((err = aacEncoder_SetParam(s->handle, AACENC_CHANNELORDER, 1)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to set wav channel order %d: %s\\n\", mode, aac_get_error(err)); goto error; } if (avctx->flags & CODEC_FLAG_QSCALE || s->vbr) { int mode = s->vbr ? s->vbr : avctx->global_quality; if (mode < 1 || mode > 5) { av_log(avctx, AV_LOG_WARNING, \"VBR quality %d out of range, should be 1-5\\n\", mode); mode = av_clip(mode, 1, 5); } av_log(avctx, AV_LOG_WARNING, \"Note, the VBR setting is unsupported and only works with \" \"some parameter combinations\\n\"); if ((err = aacEncoder_SetParam(s->handle, AACENC_BITRATEMODE, mode)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to set the VBR bitrate mode %d: %s\\n\", mode, aac_get_error(err)); goto error; } } else { if (avctx->bit_rate <= 0) { if (avctx->profile == FF_PROFILE_AAC_HE_V2) { sce = 1; cpe = 0; } avctx->bit_rate = (96*sce + 128*cpe) * avctx->sample_rate / 44; if (avctx->profile == FF_PROFILE_AAC_HE || avctx->profile == FF_PROFILE_AAC_HE_V2 || s->eld_sbr) avctx->bit_rate /= 2; } if ((err = aacEncoder_SetParam(s->handle, AACENC_BITRATE, avctx->bit_rate)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to set the bitrate %d: %s\\n\", avctx->bit_rate, aac_get_error(err)); goto error; } } /* Choose bitstream format - if global header is requested, use * raw access units, otherwise use ADTS. */ if ((err = aacEncoder_SetParam(s->handle, AACENC_TRANSMUX, avctx->flags & CODEC_FLAG_GLOBAL_HEADER ? 0 : s->latm ? 10 : 2)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to set the transmux format: %s\\n\", aac_get_error(err)); goto error; } if (s->latm && s->header_period) { if ((err = aacEncoder_SetParam(s->handle, AACENC_HEADER_PERIOD, s->header_period)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to set header period: %s\\n\", aac_get_error(err)); goto error; } } /* If no signaling mode is chosen, use explicit hierarchical signaling * if using mp4 mode (raw access units, with global header) and * implicit signaling if using ADTS. */ if (s->signaling < 0) s->signaling = avctx->flags & CODEC_FLAG_GLOBAL_HEADER ? 
2 : 0; if ((err = aacEncoder_SetParam(s->handle, AACENC_SIGNALING_MODE, s->signaling)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to set signaling mode %d: %s\\n\", s->signaling, aac_get_error(err)); goto error; } if ((err = aacEncoder_SetParam(s->handle, AACENC_AFTERBURNER, s->afterburner)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to set afterburner to %d: %s\\n\", s->afterburner, aac_get_error(err)); goto error; } if (avctx->cutoff > 0) { if (avctx->cutoff < (avctx->sample_rate + 255) >> 8) { av_log(avctx, AV_LOG_ERROR, \"cutoff valid range is %d-20000\\n\", (avctx->sample_rate + 255) >> 8); goto error; } if ((err = aacEncoder_SetParam(s->handle, AACENC_BANDWIDTH, avctx->cutoff)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to set the encoder bandwidth to %d: %s\\n\", avctx->cutoff, aac_get_error(err)); goto error; } } if ((err = aacEncEncode(s->handle, NULL, NULL, NULL, NULL)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to initialize the encoder: %s\\n\", aac_get_error(err)); return AVERROR(EINVAL); } if ((err = aacEncInfo(s->handle, &info)) != AACENC_OK) { av_log(avctx, AV_LOG_ERROR, \"Unable to get encoder info: %s\\n\", aac_get_error(err)); goto error; } #if FF_API_OLD_ENCODE_AUDIO avctx->coded_frame = avcodec_alloc_frame(); if (!avctx->coded_frame) { ret = AVERROR(ENOMEM); goto error; } #endif avctx->frame_size = info.frameLength; avctx->delay = info.encoderDelay; ff_af_queue_init(avctx, &s->afq); if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) { avctx->extradata_size = info.confSize; avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!avctx->extradata) { ret = AVERROR(ENOMEM); goto error; } memcpy(avctx->extradata, info.confBuf, info.confSize); } return 0; error: aac_encode_close(avctx); return ret; }"} {"target": 1, "idx": 4801, "func": "static int vorbis_floor1_decode(vorbis_context *vc, vorbis_floor_data *vfu, float *vec) { vorbis_floor1 *vf = &vfu->t1; GetBitContext *gb = &vc->gb; uint16_t range_v[4] = { 256, 128, 86, 64 }; unsigned range = range_v[vf->multiplier - 1]; uint16_t floor1_Y[258]; uint16_t floor1_Y_final[258]; int floor1_flag[258]; unsigned class, cdim, cbits, csub, cval, offset, i, j; int book, adx, ady, dy, off, predicted, err; if (!get_bits1(gb)) // silence return 1; // Read values (or differences) for the floor's points floor1_Y[0] = get_bits(gb, ilog(range - 1)); floor1_Y[1] = get_bits(gb, ilog(range - 1)); av_dlog(NULL, \"floor 0 Y %d floor 1 Y %d \\n\", floor1_Y[0], floor1_Y[1]); offset = 2; for (i = 0; i < vf->partitions; ++i) { class = vf->partition_class[i]; cdim = vf->class_dimensions[class]; cbits = vf->class_subclasses[class]; csub = (1 << cbits) - 1; cval = 0; av_dlog(NULL, \"Cbits %u\\n\", cbits); if (cbits) // this reads all subclasses for this partition's class cval = get_vlc2(gb, vc->codebooks[vf->class_masterbook[class]].vlc.table, vc->codebooks[vf->class_masterbook[class]].nb_bits, 3); for (j = 0; j < cdim; ++j) { book = vf->subclass_books[class][cval & csub]; av_dlog(NULL, \"book %d Cbits %u cval %u bits:%d\\n\", book, cbits, cval, get_bits_count(gb)); cval = cval >> cbits; if (book > -1) { floor1_Y[offset+j] = get_vlc2(gb, vc->codebooks[book].vlc.table, vc->codebooks[book].nb_bits, 3); } else { floor1_Y[offset+j] = 0; } av_dlog(NULL, \" floor(%d) = %d \\n\", vf->list[offset+j].x, floor1_Y[offset+j]); } offset+=cdim; } // Amplitude calculation from the differences floor1_flag[0] = 1; floor1_flag[1] = 1; floor1_Y_final[0] = floor1_Y[0]; floor1_Y_final[1] = floor1_Y[1]; for 
(i = 2; i < vf->x_list_dim; ++i) { unsigned val, highroom, lowroom, room, high_neigh_offs, low_neigh_offs; low_neigh_offs = vf->list[i].low; high_neigh_offs = vf->list[i].high; dy = floor1_Y_final[high_neigh_offs] - floor1_Y_final[low_neigh_offs]; // render_point begin adx = vf->list[high_neigh_offs].x - vf->list[low_neigh_offs].x; ady = FFABS(dy); err = ady * (vf->list[i].x - vf->list[low_neigh_offs].x); off = err / adx; if (dy < 0) { predicted = floor1_Y_final[low_neigh_offs] - off; } else { predicted = floor1_Y_final[low_neigh_offs] + off; } // render_point end val = floor1_Y[i]; highroom = range-predicted; lowroom = predicted; if (highroom < lowroom) { room = highroom * 2; } else { room = lowroom * 2; // SPEC mispelling } if (val) { floor1_flag[low_neigh_offs] = 1; floor1_flag[high_neigh_offs] = 1; floor1_flag[i] = 1; if (val >= room) { if (highroom > lowroom) { floor1_Y_final[i] = val - lowroom + predicted; } else { floor1_Y_final[i] = predicted - val + highroom - 1; } } else { if (val & 1) { floor1_Y_final[i] = predicted - (val + 1) / 2; } else { floor1_Y_final[i] = predicted + val / 2; } } } else { floor1_flag[i] = 0; floor1_Y_final[i] = predicted; } av_dlog(NULL, \" Decoded floor(%d) = %u / val %u\\n\", vf->list[i].x, floor1_Y_final[i], val); } // Curve synth - connect the calculated dots and convert from dB scale FIXME optimize ? ff_vorbis_floor1_render_list(vf->list, vf->x_list_dim, floor1_Y_final, floor1_flag, vf->multiplier, vec, vf->list[1].x); av_dlog(NULL, \" Floor decoded\\n\"); return 0; }"} {"target": 1, "idx": 4815, "func": "static void colo_process_checkpoint(MigrationState *s) { QIOChannelBuffer *bioc; QEMUFile *fb = NULL; int64_t current_time, checkpoint_time = qemu_clock_get_ms(QEMU_CLOCK_HOST); Error *local_err = NULL; int ret; failover_init_state(); s->rp_state.from_dst_file = qemu_file_get_return_path(s->to_dst_file); if (!s->rp_state.from_dst_file) { error_report(\"Open QEMUFile from_dst_file failed\"); /* * Wait for Secondary finish loading VM states and enter COLO * restore. 
*/ colo_receive_check_message(s->rp_state.from_dst_file, COLO_MESSAGE_CHECKPOINT_READY, &local_err); if (local_err) { bioc = qio_channel_buffer_new(COLO_BUFFER_BASE_SIZE); fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc)); object_unref(OBJECT(bioc)); qemu_mutex_lock_iothread(); vm_start(); qemu_mutex_unlock_iothread(); trace_colo_vm_state_change(\"stop\", \"run\"); while (s->state == MIGRATION_STATUS_COLO) { current_time = qemu_clock_get_ms(QEMU_CLOCK_HOST); if (current_time - checkpoint_time < s->parameters.x_checkpoint_delay) { int64_t delay_ms; delay_ms = s->parameters.x_checkpoint_delay - (current_time - checkpoint_time); g_usleep(delay_ms * 1000); ret = colo_do_checkpoint_transaction(s, bioc, fb); if (ret < 0) { checkpoint_time = qemu_clock_get_ms(QEMU_CLOCK_HOST); out: /* Throw the unreported error message after exited from loop */ if (local_err) { error_report_err(local_err); if (fb) { qemu_fclose(fb); if (s->rp_state.from_dst_file) { qemu_fclose(s->rp_state.from_dst_file);"} {"target": 0, "idx": 4835, "func": "static void pc_init1(QEMUMachineInitArgs *args, int pci_enabled, int kvmclock_enabled) { MemoryRegion *system_memory = get_system_memory(); MemoryRegion *system_io = get_system_io(); int i; ram_addr_t below_4g_mem_size, above_4g_mem_size; PCIBus *pci_bus; ISABus *isa_bus; PCII440FXState *i440fx_state; int piix3_devfn = -1; qemu_irq *cpu_irq; qemu_irq *gsi; qemu_irq *i8259; qemu_irq *smi_irq; GSIState *gsi_state; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; BusState *idebus[MAX_IDE_BUS]; ISADevice *rtc_state; ISADevice *floppy; MemoryRegion *ram_memory; MemoryRegion *pci_memory; MemoryRegion *rom_memory; DeviceState *icc_bridge; FWCfgState *fw_cfg = NULL; PcGuestInfo *guest_info; if (xen_enabled() && xen_hvm_init(&ram_memory) != 0) { fprintf(stderr, \"xen hardware virtual machine initialisation failed\\n\"); exit(1); } icc_bridge = qdev_create(NULL, TYPE_ICC_BRIDGE); object_property_add_child(qdev_get_machine(), \"icc-bridge\", OBJECT(icc_bridge), NULL); pc_cpus_init(args->cpu_model, icc_bridge); if (kvm_enabled() && kvmclock_enabled) { kvmclock_create(); } /* Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory). * If it doesn't, we need to split it in chunks below and above 4G. * In any case, try to make sure that guest addresses aligned at * 1G boundaries get mapped to host addresses aligned at 1G boundaries. * For old machine types, use whatever split we used historically to avoid * breaking migration. */ if (args->ram_size >= 0xe0000000) { ram_addr_t lowmem = gigabyte_align ? 
0xc0000000 : 0xe0000000; above_4g_mem_size = args->ram_size - lowmem; below_4g_mem_size = lowmem; } else { above_4g_mem_size = 0; below_4g_mem_size = args->ram_size; } if (pci_enabled) { pci_memory = g_new(MemoryRegion, 1); memory_region_init(pci_memory, NULL, \"pci\", UINT64_MAX); rom_memory = pci_memory; } else { pci_memory = NULL; rom_memory = system_memory; } guest_info = pc_guest_info_init(below_4g_mem_size, above_4g_mem_size); guest_info->has_acpi_build = has_acpi_build; guest_info->has_pci_info = has_pci_info; guest_info->isapc_ram_fw = !pci_enabled; if (smbios_defaults) { /* These values are guest ABI, do not change */ smbios_set_defaults(\"QEMU\", \"Standard PC (i440FX + PIIX, 1996)\", args->machine->name); } /* allocate ram and load rom/bios */ if (!xen_enabled()) { fw_cfg = pc_memory_init(system_memory, args->kernel_filename, args->kernel_cmdline, args->initrd_filename, below_4g_mem_size, above_4g_mem_size, rom_memory, &ram_memory, guest_info); } gsi_state = g_malloc0(sizeof(*gsi_state)); if (kvm_irqchip_in_kernel()) { kvm_pc_setup_irq_routing(pci_enabled); gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state, GSI_NUM_PINS); } else { gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS); } if (pci_enabled) { pci_bus = i440fx_init(&i440fx_state, &piix3_devfn, &isa_bus, gsi, system_memory, system_io, args->ram_size, below_4g_mem_size, above_4g_mem_size, pci_memory, ram_memory); } else { pci_bus = NULL; i440fx_state = NULL; isa_bus = isa_bus_new(NULL, system_io); no_hpet = 1; } isa_bus_irqs(isa_bus, gsi); if (kvm_irqchip_in_kernel()) { i8259 = kvm_i8259_init(isa_bus); } else if (xen_enabled()) { i8259 = xen_interrupt_controller_init(); } else { cpu_irq = pc_allocate_cpu_irq(); i8259 = i8259_init(isa_bus, cpu_irq[0]); } for (i = 0; i < ISA_NUM_IRQS; i++) { gsi_state->i8259_irq[i] = i8259[i]; } if (pci_enabled) { ioapic_init_gsi(gsi_state, \"i440fx\"); } qdev_init_nofail(icc_bridge); pc_register_ferr_irq(gsi[13]); pc_vga_init(isa_bus, pci_enabled ? pci_bus : NULL); /* init basic PC hardware */ pc_basic_device_init(isa_bus, gsi, &rtc_state, &floppy, xen_enabled(), 0x4); pc_nic_init(isa_bus, pci_bus); ide_drive_get(hd, MAX_IDE_BUS); if (pci_enabled) { PCIDevice *dev; if (xen_enabled()) { dev = pci_piix3_xen_ide_init(pci_bus, hd, piix3_devfn + 1); } else { dev = pci_piix3_ide_init(pci_bus, hd, piix3_devfn + 1); } idebus[0] = qdev_get_child_bus(&dev->qdev, \"ide.0\"); idebus[1] = qdev_get_child_bus(&dev->qdev, \"ide.1\"); } else { for(i = 0; i < MAX_IDE_BUS; i++) { ISADevice *dev; char busname[] = \"ide.0\"; dev = isa_ide_init(isa_bus, ide_iobase[i], ide_iobase2[i], ide_irq[i], hd[MAX_IDE_DEVS * i], hd[MAX_IDE_DEVS * i + 1]); /* * The ide bus name is ide.0 for the first bus and ide.1 for the * second one. */ busname[4] = '0' + i; idebus[i] = qdev_get_child_bus(DEVICE(dev), busname); } } pc_cmos_init(below_4g_mem_size, above_4g_mem_size, args->boot_order, floppy, idebus[0], idebus[1], rtc_state); if (pci_enabled && usb_enabled(false)) { pci_create_simple(pci_bus, piix3_devfn + 2, \"piix3-usb-uhci\"); } if (pci_enabled && acpi_enabled) { I2CBus *smbus; smi_irq = qemu_allocate_irqs(pc_acpi_smi_interrupt, first_cpu, 1); /* TODO: Populate SPD eeprom data. 
*/ smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100, gsi[9], *smi_irq, kvm_enabled(), fw_cfg); smbus_eeprom_init(smbus, 8, NULL, 0); } if (pci_enabled) { pc_pci_device_init(pci_bus); } }"} {"target": 0, "idx": 4842, "func": "uint32_t wm8750_adc_dat(void *opaque) { WM8750State *s = (WM8750State *) opaque; uint32_t *data; if (s->idx_in >= sizeof(s->data_in)) wm8750_in_load(s); data = (uint32_t *) &s->data_in[s->idx_in]; s->req_in -= 4; s->idx_in += 4; return *data; }"} {"target": 0, "idx": 4849, "func": "static QmpInputVisitor *to_qiv(Visitor *v) { return container_of(v, QmpInputVisitor, visitor); }"} {"target": 0, "idx": 4873, "func": "static int v4l2_read_packet(AVFormatContext *s1, AVPacket *pkt) { #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS struct video_data *s = s1->priv_data; AVFrame *frame = s1->streams[0]->codec->coded_frame; FF_ENABLE_DEPRECATION_WARNINGS #endif int res; av_init_packet(pkt); if ((res = mmap_read_frame(s1, pkt)) < 0) { return res; } #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS if (frame && s->interlaced) { frame->interlaced_frame = 1; frame->top_field_first = s->top_field_first; } FF_ENABLE_DEPRECATION_WARNINGS #endif return pkt->size; }"} {"target": 1, "idx": 4874, "func": "static void bonito_pcihost_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); k->init = bonito_pcihost_initfn; dc->no_user = 1; }"} {"target": 0, "idx": 4879, "func": "static int mp3_parse_vbr_tags(AVFormatContext *s, AVStream *st, int64_t base) { uint32_t v, spf; MPADecodeHeader c; int vbrtag_size = 0; MP3DecContext *mp3 = s->priv_data; ffio_init_checksum(s->pb, ff_crcA001_update, 0); v = avio_rb32(s->pb); if(ff_mpa_check_header(v) < 0) return -1; if (avpriv_mpegaudio_decode_header(&c, v) == 0) vbrtag_size = c.frame_size; if(c.layer != 3) return -1; spf = c.lsf ? 
576 : 1152; /* Samples per frame, layer 3 */ mp3->frames = 0; mp3->size = 0; mp3_parse_info_tag(s, st, &c, spf); mp3_parse_vbri_tag(s, st, base); if (!mp3->frames && !mp3->size) return -1; /* Skip the vbr tag frame */ avio_seek(s->pb, base + vbrtag_size, SEEK_SET); if (mp3->frames) st->duration = av_rescale_q(mp3->frames, (AVRational){spf, c.sample_rate}, st->time_base); if (mp3->size && mp3->frames && !mp3->is_cbr) st->codec->bit_rate = av_rescale(mp3->size, 8 * c.sample_rate, mp3->frames * (int64_t)spf); return 0; }"} {"target": 0, "idx": 4892, "func": "void memory_region_iommu_replay_all(MemoryRegion *mr) { IOMMUNotifier *notifier; IOMMU_NOTIFIER_FOREACH(notifier, mr) { memory_region_iommu_replay(mr, notifier, false); } }"} {"target": 0, "idx": 4893, "func": "static int usb_uhci_piix3_initfn(PCIDevice *dev) { UHCIState *s = DO_UPCAST(UHCIState, dev, dev); uint8_t *pci_conf = s->dev.config; pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL); pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_INTEL_82371SB_2); return usb_uhci_common_initfn(s); }"} {"target": 0, "idx": 4896, "func": "static void mpegts_write_pmt(AVFormatContext *s, MpegTSService *service) { MpegTSWrite *ts = s->priv_data; uint8_t data[1012], *q, *desc_length_ptr, *program_info_length_ptr; int val, stream_type, i; q = data; put16(&q, 0xe000 | service->pcr_pid); program_info_length_ptr = q; q += 2; /* patched after */ /* put program info here */ val = 0xf000 | (q - program_info_length_ptr - 2); program_info_length_ptr[0] = val >> 8; program_info_length_ptr[1] = val; for(i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; MpegTSWriteStream *ts_st = st->priv_data; AVDictionaryEntry *lang = av_dict_get(st->metadata, \"language\", NULL,0); switch(st->codec->codec_id) { case AV_CODEC_ID_MPEG1VIDEO: case AV_CODEC_ID_MPEG2VIDEO: stream_type = STREAM_TYPE_VIDEO_MPEG2; break; case AV_CODEC_ID_MPEG4: stream_type = STREAM_TYPE_VIDEO_MPEG4; break; case AV_CODEC_ID_H264: stream_type = STREAM_TYPE_VIDEO_H264; break; case AV_CODEC_ID_CAVS: stream_type = STREAM_TYPE_VIDEO_CAVS; break; case AV_CODEC_ID_DIRAC: stream_type = STREAM_TYPE_VIDEO_DIRAC; break; case AV_CODEC_ID_MP2: case AV_CODEC_ID_MP3: stream_type = STREAM_TYPE_AUDIO_MPEG1; break; case AV_CODEC_ID_AAC: stream_type = (ts->flags & MPEGTS_FLAG_AAC_LATM) ? STREAM_TYPE_AUDIO_AAC_LATM : STREAM_TYPE_AUDIO_AAC; break; case AV_CODEC_ID_AAC_LATM: stream_type = STREAM_TYPE_AUDIO_AAC_LATM; break; case AV_CODEC_ID_AC3: stream_type = STREAM_TYPE_AUDIO_AC3; break; default: stream_type = STREAM_TYPE_PRIVATE_DATA; break; } *q++ = stream_type; put16(&q, 0xe000 | ts_st->pid); desc_length_ptr = q; q += 2; /* patched after */ /* write optional descriptors here */ switch(st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if(st->codec->codec_id==AV_CODEC_ID_EAC3){ *q++=0x7a; // EAC3 descriptor see A038 DVB SI *q++=1; // 1 byte, all flags sets to 0 *q++=0; // omit all fields... 
} if(st->codec->codec_id==AV_CODEC_ID_S302M){ *q++ = 0x05; /* MPEG-2 registration descriptor*/ *q++ = 4; *q++ = 'B'; *q++ = 'S'; *q++ = 'S'; *q++ = 'D'; } if (lang) { char *p; char *next = lang->value; uint8_t *len_ptr; *q++ = 0x0a; /* ISO 639 language descriptor */ len_ptr = q++; *len_ptr = 0; for (p = lang->value; next && *len_ptr < 255 / 4 * 4; p = next + 1) { next = strchr(p, ','); if (strlen(p) != 3 && (!next || next != p + 3)) continue; /* not a 3-letter code */ *q++ = *p++; *q++ = *p++; *q++ = *p++; if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS) *q++ = 0x01; else if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED) *q++ = 0x02; else if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED) *q++ = 0x03; else *q++ = 0; /* undefined type */ *len_ptr += 4; } if (*len_ptr == 0) q -= 2; /* no language codes were written */ } break; case AVMEDIA_TYPE_SUBTITLE: { const char default_language[] = \"und\"; const char *language = lang && strlen(lang->value) >= 3 ? lang->value : default_language; if (st->codec->codec_id == AV_CODEC_ID_DVB_SUBTITLE) { uint8_t *len_ptr; int extradata_copied = 0; *q++ = 0x59; /* subtitling_descriptor */ len_ptr = q++; while (strlen(language) >= 3 && (sizeof(data) - (q - data)) >= 8) { /* 8 bytes per DVB subtitle substream data */ *q++ = *language++; *q++ = *language++; *q++ = *language++; /* Skip comma */ if (*language != '\\0') language++; if (st->codec->extradata_size - extradata_copied >= 5) { *q++ = st->codec->extradata[extradata_copied + 4]; /* subtitling_type */ memcpy(q, st->codec->extradata + extradata_copied, 4); /* composition_page_id and ancillary_page_id */ extradata_copied += 5; q += 4; } else { /* subtitling_type: * 0x10 - normal with no monitor aspect ratio criticality * 0x20 - for the hard of hearing with no monitor aspect ratio criticality */ *q++ = (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED) ? 0x20 : 0x10; if ((st->codec->extradata_size == 4) && (extradata_copied == 0)) { /* support of old 4-byte extradata format */ memcpy(q, st->codec->extradata, 4); /* composition_page_id and ancillary_page_id */ extradata_copied += 4; q += 4; } else { put16(&q, 1); /* composition_page_id */ put16(&q, 1); /* ancillary_page_id */ } } } *len_ptr = q - len_ptr - 1; } else if (st->codec->codec_id == AV_CODEC_ID_DVB_TELETEXT) { uint8_t *len_ptr = NULL; int extradata_copied = 0; /* The descriptor tag. teletext_descriptor */ *q++ = 0x56; len_ptr = q++; while (strlen(language) >= 3) { *q++ = *language++; *q++ = *language++; *q++ = *language++; /* Skip comma */ if (*language != '\\0') language++; if (st->codec->extradata_size - 1 > extradata_copied) { memcpy(q, st->codec->extradata + extradata_copied, 2); extradata_copied += 2; q += 2; } else { /* The Teletext descriptor: * teletext_type: This 5-bit field indicates the type of Teletext page indicated. (0x01 Initial Teletext page) * teletext_magazine_number: This is a 3-bit field which identifies the magazine number. * teletext_page_number: This is an 8-bit field giving two 4-bit hex digits identifying the page number. 
*/ *q++ = 0x08; *q++ = 0x00; } } *len_ptr = q - len_ptr - 1; } } break; case AVMEDIA_TYPE_VIDEO: if (stream_type == STREAM_TYPE_VIDEO_DIRAC) { *q++ = 0x05; /*MPEG-2 registration descriptor*/ *q++ = 4; *q++ = 'd'; *q++ = 'r'; *q++ = 'a'; *q++ = 'c'; } break; case AVMEDIA_TYPE_DATA: if (st->codec->codec_id == AV_CODEC_ID_SMPTE_KLV) { *q++ = 0x05; /* MPEG-2 registration descriptor */ *q++ = 4; *q++ = 'K'; *q++ = 'L'; *q++ = 'V'; *q++ = 'A'; } break; } val = 0xf000 | (q - desc_length_ptr - 2); desc_length_ptr[0] = val >> 8; desc_length_ptr[1] = val; } mpegts_write_section1(&service->pmt, PMT_TID, service->sid, ts->tables_version, 0, 0, data, q - data); }"} {"target": 0, "idx": 4899, "func": "static unsigned int dec_addi_r(DisasContext *dc) { TCGv t0; DIS(fprintf (logfile, \"addi.%c $r%u, $r%u\\n\", memsize_char(memsize_zz(dc)), dc->op2, dc->op1)); cris_cc_mask(dc, 0); t0 = tcg_temp_new(TCG_TYPE_TL); tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize)); tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0); tcg_temp_free(t0); return 2; }"} {"target": 0, "idx": 4924, "func": "static void gen_trap (DisasContext *ctx, uint32_t opc, int rs, int rt, int16_t imm) { int cond; TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); cond = 0; /* Load needed operands */ switch (opc) { case OPC_TEQ: case OPC_TGE: case OPC_TGEU: case OPC_TLT: case OPC_TLTU: case OPC_TNE: /* Compare two registers */ if (rs != rt) { gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); cond = 1; } break; case OPC_TEQI: case OPC_TGEI: case OPC_TGEIU: case OPC_TLTI: case OPC_TLTIU: case OPC_TNEI: /* Compare register to immediate */ if (rs != 0 || imm != 0) { gen_load_gpr(t0, rs); tcg_gen_movi_tl(t1, (int32_t)imm); cond = 1; } break; } if (cond == 0) { switch (opc) { case OPC_TEQ: /* rs == rs */ case OPC_TEQI: /* r0 == 0 */ case OPC_TGE: /* rs >= rs */ case OPC_TGEI: /* r0 >= 0 */ case OPC_TGEU: /* rs >= rs unsigned */ case OPC_TGEIU: /* r0 >= 0 unsigned */ /* Always trap */ generate_exception(ctx, EXCP_TRAP); break; case OPC_TLT: /* rs < rs */ case OPC_TLTI: /* r0 < 0 */ case OPC_TLTU: /* rs < rs unsigned */ case OPC_TLTIU: /* r0 < 0 unsigned */ case OPC_TNE: /* rs != rs */ case OPC_TNEI: /* r0 != 0 */ /* Never trap: treat as NOP. 
*/ break; } } else { int l1 = gen_new_label(); switch (opc) { case OPC_TEQ: case OPC_TEQI: tcg_gen_brcond_tl(TCG_COND_NE, t0, t1, l1); break; case OPC_TGE: case OPC_TGEI: tcg_gen_brcond_tl(TCG_COND_LT, t0, t1, l1); break; case OPC_TGEU: case OPC_TGEIU: tcg_gen_brcond_tl(TCG_COND_LTU, t0, t1, l1); break; case OPC_TLT: case OPC_TLTI: tcg_gen_brcond_tl(TCG_COND_GE, t0, t1, l1); break; case OPC_TLTU: case OPC_TLTIU: tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1); break; case OPC_TNE: case OPC_TNEI: tcg_gen_brcond_tl(TCG_COND_EQ, t0, t1, l1); break; } generate_exception(ctx, EXCP_TRAP); gen_set_label(l1); } tcg_temp_free(t0); tcg_temp_free(t1); }"} {"target": 0, "idx": 4961, "func": "static uint64_t pxa2xx_gpio_read(void *opaque, hwaddr offset, unsigned size) { PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque; uint32_t ret; int bank; if (offset >= 0x200) return 0; bank = pxa2xx_gpio_regs[offset].bank; switch (pxa2xx_gpio_regs[offset].reg) { case GPDR: /* GPIO Pin-Direction registers */ return s->dir[bank]; case GPSR: /* GPIO Pin-Output Set registers */ qemu_log_mask(LOG_GUEST_ERROR, \"pxa2xx GPIO: read from write only register GPSR\\n\"); return 0; case GPCR: /* GPIO Pin-Output Clear registers */ qemu_log_mask(LOG_GUEST_ERROR, \"pxa2xx GPIO: read from write only register GPCR\\n\"); return 0; case GRER: /* GPIO Rising-Edge Detect Enable registers */ return s->rising[bank]; case GFER: /* GPIO Falling-Edge Detect Enable registers */ return s->falling[bank]; case GAFR_L: /* GPIO Alternate Function registers */ return s->gafr[bank * 2]; case GAFR_U: /* GPIO Alternate Function registers */ return s->gafr[bank * 2 + 1]; case GPLR: /* GPIO Pin-Level registers */ ret = (s->olevel[bank] & s->dir[bank]) | (s->ilevel[bank] & ~s->dir[bank]); qemu_irq_raise(s->read_notify); return ret; case GEDR: /* GPIO Edge Detect Status registers */ return s->status[bank]; default: hw_error(\"%s: Bad offset \" REG_FMT \"\\n\", __FUNCTION__, offset); } return 0; }"} {"target": 1, "idx": 4995, "func": "static int ipvideo_decode_block_opcode_0xD(IpvideoContext *s, AVFrame *frame) { int y; unsigned char P[2]; /* 4-color block encoding: each 4x4 block is a different color */ for (y = 0; y < 8; y++) { if (!(y & 3)) { P[0] = bytestream2_get_byte(&s->stream_ptr); P[1] = bytestream2_get_byte(&s->stream_ptr); memset(s->pixel_ptr, P[0], 4); memset(s->pixel_ptr + 4, P[1], 4); s->pixel_ptr += s->stride; /* report success */ return 0;"} {"target": 0, "idx": 5035, "func": "static int64_t migration_set_rate_limit(void *opaque, int64_t new_rate) { MigrationState *s = opaque; if (qemu_file_get_error(s->file)) { goto out; } s->xfer_limit = new_rate; out: return s->xfer_limit; }"} {"target": 0, "idx": 5044, "func": "static void reverse_matrixing(float *su1, float *su2, int *prev_code, int *curr_code) { int i, nsample, band; float mc1_l, mc1_r, mc2_l, mc2_r; for (i = 0, band = 0; band < 4 * 256; band += 256, i++) { int s1 = prev_code[i]; int s2 = curr_code[i]; nsample = 0; if (s1 != s2) { /* Selector value changed, interpolation needed. */ mc1_l = matrix_coeffs[s1 * 2 ]; mc1_r = matrix_coeffs[s1 * 2 + 1]; mc2_l = matrix_coeffs[s2 * 2 ]; mc2_r = matrix_coeffs[s2 * 2 + 1]; /* Interpolation is done over the first eight samples. */ for (; nsample < 8; nsample++) { float c1 = su1[band + nsample]; float c2 = su2[band + nsample]; c2 = c1 * INTERPOLATE(mc1_l, mc2_l, nsample) + c2 * INTERPOLATE(mc1_r, mc2_r, nsample); su1[band + nsample] = c2; su2[band + nsample] = c1 * 2.0 - c2; } } /* Apply the matrix without interpolation. 
*/ switch (s2) { case 0: /* M/S decoding */ for (; nsample < 256; nsample++) { float c1 = su1[band + nsample]; float c2 = su2[band + nsample]; su1[band + nsample] = c2 * 2.0; su2[band + nsample] = (c1 - c2) * 2.0; } break; case 1: for (; nsample < 256; nsample++) { float c1 = su1[band + nsample]; float c2 = su2[band + nsample]; su1[band + nsample] = (c1 + c2) * 2.0; su2[band + nsample] = c2 * -2.0; } break; case 2: case 3: for (; nsample < 256; nsample++) { float c1 = su1[band + nsample]; float c2 = su2[band + nsample]; su1[band + nsample] = c1 + c2; su2[band + nsample] = c1 - c2; } break; default: assert(0); } } }"} {"target": 0, "idx": 5054, "func": "static void scsi_disk_reset(DeviceState *dev) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); uint64_t nb_sectors; scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); bdrv_get_geometry(s->qdev.conf.bs, &nb_sectors); nb_sectors /= s->qdev.blocksize / 512; if (nb_sectors) { nb_sectors--; } s->qdev.max_lba = nb_sectors; /* reset tray statuses */ s->tray_locked = 0; s->tray_open = 0; }"} {"target": 1, "idx": 5060, "func": "static int read_part_of_packet(AVFormatContext *s, int64_t *pts, int *len, int *strid, int read_packet) { AVIOContext *pb = s->pb; PVAContext *pvactx = s->priv_data; int syncword, streamid, reserved, flags, length, pts_flag; int64_t pva_pts = AV_NOPTS_VALUE, startpos; int ret; recover: startpos = avio_tell(pb); syncword = avio_rb16(pb); streamid = avio_r8(pb); avio_r8(pb); /* counter not used */ reserved = avio_r8(pb); flags = avio_r8(pb); length = avio_rb16(pb); pts_flag = flags & 0x10; if (syncword != PVA_MAGIC) { pva_log(s, AV_LOG_ERROR, \"invalid syncword\\n\"); return AVERROR(EIO); } if (streamid != PVA_VIDEO_PAYLOAD && streamid != PVA_AUDIO_PAYLOAD) { pva_log(s, AV_LOG_ERROR, \"invalid streamid\\n\"); return AVERROR(EIO); } if (reserved != 0x55) { pva_log(s, AV_LOG_WARNING, \"expected reserved byte to be 0x55\\n\"); } if (length > PVA_MAX_PAYLOAD_LENGTH) { pva_log(s, AV_LOG_ERROR, \"invalid payload length %u\\n\", length); return AVERROR(EIO); } if (streamid == PVA_VIDEO_PAYLOAD && pts_flag) { pva_pts = avio_rb32(pb); length -= 4; } else if (streamid == PVA_AUDIO_PAYLOAD) { /* PVA Audio Packets either start with a signaled PES packet or * are a continuation of the previous PES packet. New PES packets * always start at the beginning of a PVA Packet, never somewhere in * the middle. */ if (!pvactx->continue_pes) { int pes_signal, pes_header_data_length, pes_packet_length, pes_flags; unsigned char pes_header_data[256]; pes_signal = avio_rb24(pb); avio_r8(pb); pes_packet_length = avio_rb16(pb); pes_flags = avio_rb16(pb); pes_header_data_length = avio_r8(pb); if (pes_signal != 1 || pes_header_data_length == 0) { pva_log(s, AV_LOG_WARNING, \"expected non empty signaled PES packet, \" \"trying to recover\\n\"); avio_skip(pb, length - 9); if (!read_packet) return AVERROR(EIO); goto recover; } ret = avio_read(pb, pes_header_data, pes_header_data_length); if (ret != pes_header_data_length) return ret < 0 ? 
ret : AVERROR_INVALIDDATA; length -= 9 + pes_header_data_length; pes_packet_length -= 3 + pes_header_data_length; pvactx->continue_pes = pes_packet_length; if (pes_flags & 0x80 && (pes_header_data[0] & 0xf0) == 0x20) pva_pts = ff_parse_pes_pts(pes_header_data); } pvactx->continue_pes -= length; if (pvactx->continue_pes < 0) { pva_log(s, AV_LOG_WARNING, \"audio data corruption\\n\"); pvactx->continue_pes = 0; } } if (pva_pts != AV_NOPTS_VALUE) av_add_index_entry(s->streams[streamid-1], startpos, pva_pts, 0, 0, AVINDEX_KEYFRAME); *pts = pva_pts; *len = length; *strid = streamid; return 0; }"} {"target": 1, "idx": 5071, "func": "static void pred8x8_top_dc_rv40_c(uint8_t *src, int stride){ int i; int dc0; dc0=0; for(i=0;i<8; i++) dc0+= src[i-stride]; dc0= 0x01010101*((dc0 + 4)>>3); for(i=0; i<8; i++){ ((uint32_t*)(src+i*stride))[0]= ((uint32_t*)(src+i*stride))[1]= dc0; } }"} {"target": 0, "idx": 5093, "func": "static int virtio_ccw_set_vqs(SubchDev *sch, uint64_t addr, uint32_t align, uint16_t index, uint16_t num) { VirtIODevice *vdev = virtio_ccw_get_vdev(sch); if (index >= VIRTIO_PCI_QUEUE_MAX) { return -EINVAL; } /* Current code in virtio.c relies on 4K alignment. */ if (addr && (align != 4096)) { return -EINVAL; } if (!vdev) { return -EINVAL; } virtio_queue_set_addr(vdev, index, addr); if (!addr) { virtio_queue_set_vector(vdev, index, 0); } else { /* Fail if we don't have a big enough queue. */ /* TODO: Add interface to handle vring.num changing */ if (virtio_queue_get_num(vdev, index) > num) { return -EINVAL; } virtio_queue_set_vector(vdev, index, index); } /* tell notify handler in case of config change */ vdev->config_vector = VIRTIO_PCI_QUEUE_MAX; return 0; }"} {"target": 0, "idx": 5099, "func": "static void rtl8139_receive(void *opaque, const uint8_t *buf, size_t size) { rtl8139_do_receive(opaque, buf, size, 1); }"} {"target": 0, "idx": 5102, "func": "static int local_chown(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp) { char buffer[PATH_MAX]; char *path = fs_path->data; if ((credp->fc_uid == -1 && credp->fc_gid == -1) || (fs_ctx->export_flags & V9FS_SM_PASSTHROUGH) || (fs_ctx->export_flags & V9FS_SM_NONE)) { return lchown(rpath(fs_ctx, path, buffer), credp->fc_uid, credp->fc_gid); } else if (fs_ctx->export_flags & V9FS_SM_MAPPED) { return local_set_xattr(rpath(fs_ctx, path, buffer), credp); } else if (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) { return local_set_mapped_file_attr(fs_ctx, path, credp); } return -1; }"} {"target": 0, "idx": 5104, "func": "static void qdict_crumple_test_recursive(void) { QDict *src, *dst, *rule, *vnc, *acl, *listen; QObject *child, *res; QList *rules; src = qdict_new(); qdict_put(src, \"vnc.listen.addr\", qstring_from_str(\"127.0.0.1\")); qdict_put(src, \"vnc.listen.port\", qstring_from_str(\"5901\")); qdict_put(src, \"vnc.acl.rules.0.match\", qstring_from_str(\"fred\")); qdict_put(src, \"vnc.acl.rules.0.policy\", qstring_from_str(\"allow\")); qdict_put(src, \"vnc.acl.rules.1.match\", qstring_from_str(\"bob\")); qdict_put(src, \"vnc.acl.rules.1.policy\", qstring_from_str(\"deny\")); qdict_put(src, \"vnc.acl.default\", qstring_from_str(\"deny\")); qdict_put(src, \"vnc.acl..name\", qstring_from_str(\"acl0\")); qdict_put(src, \"vnc.acl.rule..name\", qstring_from_str(\"acl0\")); res = qdict_crumple(src, &error_abort); g_assert_cmpint(qobject_type(res), ==, QTYPE_QDICT); dst = qobject_to_qdict(res); g_assert_cmpint(qdict_size(dst), ==, 1); child = qdict_get(dst, \"vnc\"); g_assert_cmpint(qobject_type(child), ==, QTYPE_QDICT); vnc = 
qobject_to_qdict(child); child = qdict_get(vnc, \"listen\"); g_assert_cmpint(qobject_type(child), ==, QTYPE_QDICT); listen = qobject_to_qdict(child); g_assert_cmpstr(\"127.0.0.1\", ==, qdict_get_str(listen, \"addr\")); g_assert_cmpstr(\"5901\", ==, qdict_get_str(listen, \"port\")); child = qdict_get(vnc, \"acl\"); g_assert_cmpint(qobject_type(child), ==, QTYPE_QDICT); acl = qobject_to_qdict(child); child = qdict_get(acl, \"rules\"); g_assert_cmpint(qobject_type(child), ==, QTYPE_QLIST); rules = qobject_to_qlist(child); g_assert_cmpint(qlist_size(rules), ==, 2); rule = qobject_to_qdict(qlist_pop(rules)); g_assert_cmpint(qdict_size(rule), ==, 2); g_assert_cmpstr(\"fred\", ==, qdict_get_str(rule, \"match\")); g_assert_cmpstr(\"allow\", ==, qdict_get_str(rule, \"policy\")); QDECREF(rule); rule = qobject_to_qdict(qlist_pop(rules)); g_assert_cmpint(qdict_size(rule), ==, 2); g_assert_cmpstr(\"bob\", ==, qdict_get_str(rule, \"match\")); g_assert_cmpstr(\"deny\", ==, qdict_get_str(rule, \"policy\")); QDECREF(rule); /* With recursive crumpling, we should see all names unescaped */ g_assert_cmpstr(\"acl0\", ==, qdict_get_str(vnc, \"acl.name\")); child = qdict_get(vnc, \"acl\"); g_assert_cmpint(qobject_type(child), ==, QTYPE_QDICT); acl = qdict_get_qdict(vnc, \"acl\"); g_assert_cmpstr(\"acl0\", ==, qdict_get_str(acl, \"rule.name\")); QDECREF(src); QDECREF(dst); }"} {"target": 1, "idx": 5121, "func": "static void test_validate_fail_alternate(TestInputVisitorData *data, const void *unused) { UserDefAlternate *tmp; Visitor *v; Error *err = NULL; v = validate_test_init(data, \"3.14\"); visit_type_UserDefAlternate(v, NULL, &tmp, &err); error_free_or_abort(&err); qapi_free_UserDefAlternate(tmp); }"} {"target": 1, "idx": 5129, "func": "static int qcow2_co_flush(BlockDriverState *bs) { BDRVQcowState *s = bs->opaque; int ret; qemu_co_mutex_lock(&s->lock); ret = qcow2_cache_flush(bs, s->l2_table_cache); if (ret < 0) { return ret; } ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { return ret; } return bdrv_co_flush(bs->file); }"} {"target": 1, "idx": 5138, "func": "static int vc1_init_common(VC1Context *v) { static int done = 0; int i = 0; v->hrd_rate = v->hrd_buffer = NULL; /* VLC tables */ if(!done) { done = 1; init_vlc(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23, ff_vc1_bfraction_bits, 1, 1, ff_vc1_bfraction_codes, 1, 1, INIT_VLC_USE_STATIC); init_vlc(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4, ff_vc1_norm2_bits, 1, 1, ff_vc1_norm2_codes, 1, 1, INIT_VLC_USE_STATIC); init_vlc(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64, ff_vc1_norm6_bits, 1, 1, ff_vc1_norm6_codes, 2, 2, INIT_VLC_USE_STATIC); init_vlc(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7, ff_vc1_imode_bits, 1, 1, ff_vc1_imode_codes, 1, 1, INIT_VLC_USE_STATIC); for (i=0; i<3; i++) { init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16, ff_vc1_ttmb_bits[i], 1, 1, ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_STATIC); init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8, ff_vc1_ttblk_bits[i], 1, 1, ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_STATIC); init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15, ff_vc1_subblkpat_bits[i], 1, 1, ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_STATIC); } for(i=0; i<4; i++) { init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16, ff_vc1_4mv_block_pattern_bits[i], 1, 1, ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_STATIC); init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64, ff_vc1_cbpcy_p_bits[i], 1, 1, ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_STATIC); 
init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73, ff_vc1_mv_diff_bits[i], 1, 1, ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_STATIC); } for(i=0; i<8; i++) init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i], &vc1_ac_tables[i][0][1], 8, 4, &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_STATIC); init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64, &ff_msmp4_mb_i_table[0][1], 4, 2, &ff_msmp4_mb_i_table[0][0], 4, 2, INIT_VLC_USE_STATIC); } /* Other defaults */ v->pq = -1; v->mvrange = 0; /* 7.1.1.18, p80 */ return 0; }"} {"target": 1, "idx": 5146, "func": "int av_read_packet(AVFormatContext *s, AVPacket *pkt) { int ret, i; AVStream *st; for(;;){ AVPacketList *pktl = s->raw_packet_buffer; if (pktl) { *pkt = pktl->pkt; if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE || !s->streams[pkt->stream_index]->probe_packets || s->raw_packet_buffer_remaining_size < pkt->size){ AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data; av_freep(&pd->buf); pd->buf_size = 0; s->raw_packet_buffer = pktl->next; s->raw_packet_buffer_remaining_size += pkt->size; av_free(pktl); return 0; av_init_packet(pkt); ret= s->iformat->read_packet(s, pkt); if (ret < 0) { if (!pktl || ret == AVERROR(EAGAIN)) return ret; for (i = 0; i < s->nb_streams; i++) s->streams[i]->probe_packets = 0; st= s->streams[pkt->stream_index]; switch(st->codec->codec_type){ case AVMEDIA_TYPE_VIDEO: if(s->video_codec_id) st->codec->codec_id= s->video_codec_id; break; case AVMEDIA_TYPE_AUDIO: if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id; break; case AVMEDIA_TYPE_SUBTITLE: if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id; break; if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE || !st->probe_packets)) return ret; add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end); s->raw_packet_buffer_remaining_size -= pkt->size; if(st->codec->codec_id == CODEC_ID_PROBE){ AVProbeData *pd = &st->probe_data; av_log(s, AV_LOG_DEBUG, \"probing stream %d\\n\", st->index); --st->probe_packets; pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE); memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size); pd->buf_size += pkt->size; memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE); if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){ //FIXME we dont reduce score to 0 for the case of running out of buffer space in bytes set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? 
AVPROBE_SCORE_MAX/4 : 0); if(st->codec->codec_id != CODEC_ID_PROBE){ pd->buf_size=0; av_freep(&pd->buf); av_log(s, AV_LOG_DEBUG, \"probed stream %d\\n\", st->index);"} {"target": 1, "idx": 5171, "func": "static int nbd_can_accept(void) { return nb_fds < shared; }"} {"target": 1, "idx": 5172, "func": "static int net_connect(struct XenDevice *xendev) { struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); int rx_copy; if (xenstore_read_fe_int(&netdev->xendev, \"tx-ring-ref\", &netdev->tx_ring_ref) == -1) { return -1; } if (xenstore_read_fe_int(&netdev->xendev, \"rx-ring-ref\", &netdev->rx_ring_ref) == -1) { return 1; } if (xenstore_read_fe_int(&netdev->xendev, \"event-channel\", &netdev->xendev.remote_port) == -1) { return -1; } if (xenstore_read_fe_int(&netdev->xendev, \"request-rx-copy\", &rx_copy) == -1) { rx_copy = 0; } if (rx_copy == 0) { xen_be_printf(&netdev->xendev, 0, \"frontend doesn't support rx-copy.\\n\"); return -1; } netdev->txs = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, netdev->xendev.dom, netdev->tx_ring_ref, PROT_READ | PROT_WRITE); netdev->rxs = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, netdev->xendev.dom, netdev->rx_ring_ref, PROT_READ | PROT_WRITE); if (!netdev->txs || !netdev->rxs) { return -1; } BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE); BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE); xen_be_bind_evtchn(&netdev->xendev); xen_be_printf(&netdev->xendev, 1, \"ok: tx-ring-ref %d, rx-ring-ref %d, \" \"remote port %d, local port %d\\n\", netdev->tx_ring_ref, netdev->rx_ring_ref, netdev->xendev.remote_port, netdev->xendev.local_port); net_tx_packets(netdev); return 0; }"} {"target": 1, "idx": 5174, "func": "static bool do_modify_softint(CPUSPARCState *env, uint32_t value) { if (env->softint != value) { env->softint = value; #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { cpu_check_irqs(env); } #endif return true; } return false; }"} {"target": 1, "idx": 5190, "func": "static int decodeTonalComponents (GetBitContext *gb, tonal_component *pComponent, int numBands) { int i,j,k,cnt; int components, coding_mode_selector, coding_mode, coded_values_per_component; int sfIndx, coded_values, max_coded_values, quant_step_index, coded_components; int band_flags[4], mantissa[8]; float *pCoef; float scalefactor; int component_count = 0; components = get_bits(gb,5); /* no tonal components */ if (components == 0) return 0; coding_mode_selector = get_bits(gb,2); if (coding_mode_selector == 2) coding_mode = coding_mode_selector & 1; for (i = 0; i < components; i++) { for (cnt = 0; cnt <= numBands; cnt++) band_flags[cnt] = get_bits1(gb); coded_values_per_component = get_bits(gb,3); quant_step_index = get_bits(gb,3); if (quant_step_index <= 1) if (coding_mode_selector == 3) coding_mode = get_bits1(gb); for (j = 0; j < (numBands + 1) * 4; j++) { if (band_flags[j >> 2] == 0) continue; coded_components = get_bits(gb,3); for (k=0; k= 0; i--) { keycode = keycodes[i]; if (keycode & 0x80) kbd_put_keycode(0xe0); kbd_put_keycode(keycode | 0x80); } }"} {"target": 0, "idx": 5198, "func": "void do_migrate_set_speed(Monitor *mon, const QDict *qdict, QObject **ret_data) { double d; char *ptr; FdMigrationState *s; const char *value = qdict_get_str(qdict, \"value\"); d = strtod(value, &ptr); switch (*ptr) { case 'G': case 'g': d *= 1024; case 'M': case 'm': d *= 1024; case 'K': case 'k': d *= 1024; default: break; } max_throttle = (uint32_t)d; s = migrate_to_fms(current_migration); if (s) { qemu_file_set_rate_limit(s->file, 
max_throttle); } }"} {"target": 0, "idx": 5201, "func": "static inline void gen_efdneg(DisasContext *ctx) { if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_APU); return; } #if defined(TARGET_PPC64) tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x8000000000000000LL); #else tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_xori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000); #endif }"} {"target": 0, "idx": 5219, "func": "static void gen_srlq(DisasContext *ctx) { int l1 = gen_new_label(); int l2 = gen_new_label(); TCGv t0 = tcg_temp_local_new(); TCGv t1 = tcg_temp_local_new(); TCGv t2 = tcg_temp_local_new(); tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_movi_tl(t1, 0xFFFFFFFF); tcg_gen_shr_tl(t2, t1, t2); tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20); tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); gen_load_spr(t0, SPR_MQ); tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t2); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2); tcg_gen_and_tl(t0, t0, t2); gen_load_spr(t1, SPR_MQ); tcg_gen_andc_tl(t1, t1, t2); tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); gen_set_label(l2); tcg_temp_free(t0); tcg_temp_free(t1); tcg_temp_free(t2); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); }"} {"target": 0, "idx": 5222, "func": "static size_t send_control_msg(VirtIOSerial *vser, void *buf, size_t len) { VirtQueueElement elem; VirtQueue *vq; vq = vser->c_ivq; if (!virtio_queue_ready(vq)) { return 0; } if (!virtqueue_pop(vq, &elem)) { return 0; } /* TODO: detect a buffer that's too short, set NEEDS_RESET */ iov_from_buf(elem.in_sg, elem.in_num, 0, buf, len); virtqueue_push(vq, &elem, len); virtio_notify(VIRTIO_DEVICE(vser), vq); return len; }"} {"target": 0, "idx": 5229, "func": "void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, qemu_irq sci_irq) { memory_region_init(&pm->io, OBJECT(lpc_pci), \"ich9-pm\", ICH9_PMIO_SIZE); memory_region_set_enabled(&pm->io, false); memory_region_add_subregion(pci_address_space_io(lpc_pci), 0, &pm->io); acpi_pm_tmr_init(&pm->acpi_regs, ich9_pm_update_sci_fn, &pm->io); acpi_pm1_evt_init(&pm->acpi_regs, ich9_pm_update_sci_fn, &pm->io); acpi_pm1_cnt_init(&pm->acpi_regs, &pm->io, pm->s4_val); acpi_gpe_init(&pm->acpi_regs, ICH9_PMIO_GPE0_LEN); memory_region_init_io(&pm->io_gpe, OBJECT(lpc_pci), &ich9_gpe_ops, pm, \"acpi-gpe0\", ICH9_PMIO_GPE0_LEN); memory_region_add_subregion(&pm->io, ICH9_PMIO_GPE0_STS, &pm->io_gpe); memory_region_init_io(&pm->io_smi, OBJECT(lpc_pci), &ich9_smi_ops, pm, \"acpi-smi\", 8); memory_region_add_subregion(&pm->io, ICH9_PMIO_SMI_EN, &pm->io_smi); pm->irq = sci_irq; qemu_register_reset(pm_reset, pm); pm->powerdown_notifier.notify = pm_powerdown_req; qemu_register_powerdown_notifier(&pm->powerdown_notifier); acpi_cpu_hotplug_init(pci_address_space_io(lpc_pci), OBJECT(lpc_pci), &pm->gpe_cpu, ICH9_CPU_HOTPLUG_IO_BASE); if (pm->acpi_memory_hotplug.is_enabled) { acpi_memory_hotplug_init(pci_address_space_io(lpc_pci), OBJECT(lpc_pci), &pm->acpi_memory_hotplug); } }"} {"target": 0, "idx": 5250, "func": "uint32_t omap_badwidth_read8(void *opaque, target_phys_addr_t addr) { uint8_t ret; OMAP_8B_REG(addr); cpu_physical_memory_read(addr, (void *) &ret, 1); return ret; }"} {"target": 1, "idx": 5263, "func": "static inline void IRQ_setbit(IRQ_queue_t *q, int n_IRQ) { q->pending++; set_bit(q->queue, n_IRQ); }"} {"target": 1, "idx": 5277, "func": "static int matroska_decode_buffer(uint8_t** buf, 
int* buf_size, MatroskaTrack *track) { MatroskaTrackEncoding *encodings = track->encodings.elem; uint8_t* data = *buf; int isize = *buf_size; uint8_t* pkt_data = NULL; int pkt_size = isize; int result = 0; int olen; if (pkt_size >= 10000000) return -1; switch (encodings[0].compression.algo) { case MATROSKA_TRACK_ENCODING_COMP_HEADERSTRIP: return encodings[0].compression.settings.size; case MATROSKA_TRACK_ENCODING_COMP_LZO: do { olen = pkt_size *= 3; pkt_data = av_realloc(pkt_data, pkt_size+AV_LZO_OUTPUT_PADDING); result = av_lzo1x_decode(pkt_data, &olen, data, &isize); } while (result==AV_LZO_OUTPUT_FULL && pkt_size<10000000); if (result) goto failed; pkt_size -= olen; break; #if CONFIG_ZLIB case MATROSKA_TRACK_ENCODING_COMP_ZLIB: { z_stream zstream = {0}; if (inflateInit(&zstream) != Z_OK) return -1; zstream.next_in = data; zstream.avail_in = isize; do { pkt_size *= 3; pkt_data = av_realloc(pkt_data, pkt_size); zstream.avail_out = pkt_size - zstream.total_out; zstream.next_out = pkt_data + zstream.total_out; result = inflate(&zstream, Z_NO_FLUSH); } while (result==Z_OK && pkt_size<10000000); pkt_size = zstream.total_out; inflateEnd(&zstream); if (result != Z_STREAM_END) goto failed; break; } #endif #if CONFIG_BZLIB case MATROSKA_TRACK_ENCODING_COMP_BZLIB: { bz_stream bzstream = {0}; if (BZ2_bzDecompressInit(&bzstream, 0, 0) != BZ_OK) return -1; bzstream.next_in = data; bzstream.avail_in = isize; do { pkt_size *= 3; pkt_data = av_realloc(pkt_data, pkt_size); bzstream.avail_out = pkt_size - bzstream.total_out_lo32; bzstream.next_out = pkt_data + bzstream.total_out_lo32; result = BZ2_bzDecompress(&bzstream); } while (result==BZ_OK && pkt_size<10000000); pkt_size = bzstream.total_out_lo32; BZ2_bzDecompressEnd(&bzstream); if (result != BZ_STREAM_END) goto failed; break; } #endif default: return -1; } *buf = pkt_data; *buf_size = pkt_size; return 0; failed: av_free(pkt_data); return -1; }"} {"target": 0, "idx": 5281, "func": "static void ac3_extract_exponents_c(uint8_t *exp, int32_t *coef, int nb_coefs) { int i; for (i = 0; i < nb_coefs; i++) { int e; int v = abs(coef[i]); if (v == 0) e = 24; else { e = 23 - av_log2(v); if (e >= 24) { e = 24; coef[i] = 0; } av_assert2(e >= 0); } exp[i] = e; } }"} {"target": 0, "idx": 5283, "func": "static void vhost_log_stop(MemoryListener *listener, MemoryRegionSection *section) { /* FIXME: implement */ }"} {"target": 0, "idx": 5298, "func": "static void spapr_hotplug_req_event(sPAPRDRConnector *drc, uint8_t hp_action) { struct hp_log_full *new_hp; struct rtas_error_log *hdr; struct rtas_event_log_v6 *v6hdr; struct rtas_event_log_v6_maina *maina; struct rtas_event_log_v6_mainb *mainb; struct rtas_event_log_v6_hp *hp; sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); sPAPRDRConnectorType drc_type = drck->get_type(drc); new_hp = g_malloc0(sizeof(struct hp_log_full)); hdr = &new_hp->hdr; v6hdr = &new_hp->v6hdr; maina = &new_hp->maina; mainb = &new_hp->mainb; hp = &new_hp->hp; hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6 | RTAS_LOG_SEVERITY_EVENT | RTAS_LOG_DISPOSITION_NOT_RECOVERED | RTAS_LOG_OPTIONAL_PART_PRESENT | RTAS_LOG_INITIATOR_HOTPLUG | RTAS_LOG_TYPE_HOTPLUG); hdr->extended_length = cpu_to_be32(sizeof(*new_hp) - sizeof(new_hp->hdr)); spapr_init_v6hdr(v6hdr); spapr_init_maina(maina, 3 /* Main-A, Main-B, HP */); mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB); mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb)); mainb->subsystem_id = 0x80; /* External environment */ mainb->event_severity = 0x00; /* Informational / 
non-error */ mainb->event_subtype = 0x00; /* Normal shutdown */ hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG); hp->hdr.section_length = cpu_to_be16(sizeof(*hp)); hp->hdr.section_version = 1; /* includes extended modifier */ hp->hotplug_action = hp_action; switch (drc_type) { case SPAPR_DR_CONNECTOR_TYPE_PCI: hp->drc.index = cpu_to_be32(drck->get_index(drc)); hp->hotplug_identifier = RTAS_LOG_V6_HP_ID_DRC_INDEX; hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI; break; default: /* we shouldn't be signaling hotplug events for resources * that don't support them */ g_assert(false); return; } rtas_event_log_queue(RTAS_LOG_TYPE_HOTPLUG, new_hp); qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq)); }"} {"target": 0, "idx": 5304, "func": "static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, TCGMemOp s_bits, int which) { const TCGReg r0 = TCG_REG_O0; const TCGReg r1 = TCG_REG_O1; const TCGReg r2 = TCG_REG_O2; int tlb_ofs; /* Shift the page number down. */ tcg_out_arithi(s, r1, addr, TARGET_PAGE_BITS, SHIFT_SRL); /* Mask out the page offset, except for the required alignment. */ tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1, TARGET_PAGE_MASK | ((1 << s_bits) - 1)); /* Mask the tlb index. */ tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND); /* Mask page, part 2. */ tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND); /* Shift the tlb index into place. */ tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL); /* Relative to the current ENV. */ tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD); /* Find a base address that can load both tlb comparator and addend. */ tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]); if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) { tcg_out_addi(s, r1, tlb_ofs & ~0x3ff); tlb_ofs &= 0x3ff; } /* Load the tlb comparator and the addend. */ tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which); tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend)); /* subcc arg0, arg2, %g0 */ tcg_out_cmp(s, r0, r2, 0); /* If the guest address must be zero-extended, do so now. 
*/ if (SPARC64 && TARGET_LONG_BITS == 32) { tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL); return r0; } return addr; }"} {"target": 0, "idx": 5306, "func": "static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action, void *s, void *sp) { DPRINTF(\"CURL (AIO): Sock action %d on fd %d\\n\", action, fd); switch (action) { case CURL_POLL_IN: qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, s); break; case CURL_POLL_OUT: qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, s); break; case CURL_POLL_INOUT: qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do, s); break; case CURL_POLL_REMOVE: qemu_aio_set_fd_handler(fd, NULL, NULL, NULL); break; } return 0; }"} {"target": 1, "idx": 5308, "func": "SwsFunc yuv2rgb_get_func_ptr (SwsContext *c) { #if defined(HAVE_MMX2) || defined(HAVE_MMX) if(c->flags & SWS_CPU_CAPS_MMX2){ switch(c->dstFormat){ case PIX_FMT_RGB32: return yuv420_rgb32_MMX2; case PIX_FMT_BGR24: return yuv420_rgb24_MMX2; case PIX_FMT_BGR565: return yuv420_rgb16_MMX2; case PIX_FMT_BGR555: return yuv420_rgb15_MMX2; } } if(c->flags & SWS_CPU_CAPS_MMX){ switch(c->dstFormat){ case PIX_FMT_RGB32: return yuv420_rgb32_MMX; case PIX_FMT_BGR24: return yuv420_rgb24_MMX; case PIX_FMT_BGR565: return yuv420_rgb16_MMX; case PIX_FMT_BGR555: return yuv420_rgb15_MMX; } } #endif #ifdef HAVE_MLIB { SwsFunc t= yuv2rgb_init_mlib(c); if(t) return t; } #endif #ifdef HAVE_ALTIVEC if (c->flags & SWS_CPU_CAPS_ALTIVEC) { SwsFunc t = yuv2rgb_init_altivec(c); if(t) return t; } #endif av_log(c, AV_LOG_WARNING, \"No accelerated colorspace conversion found\\n\"); switch(c->dstFormat){ case PIX_FMT_BGR32: case PIX_FMT_RGB32: return yuv2rgb_c_32; case PIX_FMT_RGB24: return yuv2rgb_c_24_rgb; case PIX_FMT_BGR24: return yuv2rgb_c_24_bgr; case PIX_FMT_RGB565: case PIX_FMT_BGR565: case PIX_FMT_RGB555: case PIX_FMT_BGR555: return yuv2rgb_c_16; case PIX_FMT_RGB8: case PIX_FMT_BGR8: return yuv2rgb_c_8_ordered_dither; case PIX_FMT_RGB4: case PIX_FMT_BGR4: return yuv2rgb_c_4_ordered_dither; case PIX_FMT_RGB4_BYTE: case PIX_FMT_BGR4_BYTE: return yuv2rgb_c_4b_ordered_dither; case PIX_FMT_MONOBLACK: return yuv2rgb_c_1_ordered_dither; default: assert(0); } return NULL; }"} {"target": 1, "idx": 5314, "func": "static void vnc_set_share_mode(VncState *vs, VncShareMode mode) { #ifdef _VNC_DEBUG static const char *mn[] = { [0] = \"undefined\", [VNC_SHARE_MODE_CONNECTING] = \"connecting\", [VNC_SHARE_MODE_SHARED] = \"shared\", [VNC_SHARE_MODE_EXCLUSIVE] = \"exclusive\", [VNC_SHARE_MODE_DISCONNECTED] = \"disconnected\", }; fprintf(stderr, \"%s/%d: %s -> %s\\n\", __func__, vs->csock, mn[vs->share_mode], mn[mode]); #endif if (vs->share_mode == VNC_SHARE_MODE_EXCLUSIVE) { vs->vd->num_exclusive--; } vs->share_mode = mode; if (vs->share_mode == VNC_SHARE_MODE_EXCLUSIVE) { vs->vd->num_exclusive++; } }"} {"target": 1, "idx": 5327, "func": "uint8_t ff_mlp_calculate_parity(const uint8_t *buf, unsigned int buf_size) { uint32_t scratch = 0; const uint8_t *buf_end = buf + buf_size; for (; buf < buf_end - 3; buf += 4) scratch ^= *((const uint32_t*)buf); scratch = xor_32_to_8(scratch); for (; buf < buf_end; buf++) return scratch; }"} {"target": 0, "idx": 5353, "func": "void ff_er_frame_start(ERContext *s) { if (!s->avctx->err_recognition) return; memset(s->error_status_table, ER_MB_ERROR | VP_START | ER_MB_END, s->mb_stride * s->mb_height * sizeof(uint8_t)); s->error_count = 3 * s->mb_num; s->error_occurred = 0; }"} {"target": 0, "idx": 5356, "func": "av_cold int MPV_common_init(MpegEncContext *s) { int y_size, c_size, yc_size, i, mb_array_size, 
mv_table_size, x, y, threads; if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence) s->mb_height = (s->height + 31) / 32 * 2; else if (s->codec_id != CODEC_ID_H264) s->mb_height = (s->height + 15) / 16; if(s->avctx->pix_fmt == PIX_FMT_NONE){ av_log(s->avctx, AV_LOG_ERROR, \"decoding to PIX_FMT_NONE is not supported.\\n\"); return -1; } if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) && (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){ av_log(s->avctx, AV_LOG_ERROR, \"too many threads\\n\"); return -1; } if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx)) return -1; dsputil_init(&s->dsp, s->avctx); ff_dct_common_init(s); s->flags= s->avctx->flags; s->flags2= s->avctx->flags2; if (s->width && s->height) { s->mb_width = (s->width + 15) / 16; s->mb_stride = s->mb_width + 1; s->b8_stride = s->mb_width*2 + 1; s->b4_stride = s->mb_width*4 + 1; mb_array_size= s->mb_height * s->mb_stride; mv_table_size= (s->mb_height+2) * s->mb_stride + 1; /* set chroma shifts */ avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift), &(s->chroma_y_shift) ); /* set default edge pos, will be overriden in decode_header if needed */ s->h_edge_pos= s->mb_width*16; s->v_edge_pos= s->mb_height*16; s->mb_num = s->mb_width * s->mb_height; s->block_wrap[0]= s->block_wrap[1]= s->block_wrap[2]= s->block_wrap[3]= s->b8_stride; s->block_wrap[4]= s->block_wrap[5]= s->mb_stride; y_size = s->b8_stride * (2 * s->mb_height + 1); c_size = s->mb_stride * (s->mb_height + 1); yc_size = y_size + 2 * c_size; /* convert fourcc to upper case */ s->codec_tag = ff_toupper4(s->avctx->codec_tag); s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag); s->avctx->coded_frame= (AVFrame*)&s->current_picture; FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this for(y=0; ymb_height; y++){ for(x=0; xmb_width; x++){ s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride; } } s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed? 
if (s->encoding) { /* Allocate MV tables */ FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail) s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1; s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1; s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1; s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1; s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1; s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1; if(s->msmpeg4_version){ FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail); } FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail); /* Allocate MB type table */ FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail) if(s->avctx->noise_reduction){ FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail) } } } s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count); FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail) for(i = 0; i < s->picture_count; i++) { avcodec_get_frame_defaults((AVFrame *)&s->picture[i]); } if (s->width && s->height) { FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail) if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){ /* interlaced direct mode decoding tables */ for(i=0; i<2; i++){ int j, k; for(j=0; j<2; j++){ for(k=0; k<2; k++){ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail) s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1; } FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail) s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1; } FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail) } } if (s->out_format == FMT_H263) { /* cbp values */ FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail); s->coded_block= s->coded_block_base + s->b8_stride + 1; /* cbp, ac_pred, pred_dir */ FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), 
fail) } if (s->h263_pred || s->h263_plus || !s->encoding) { /* dc values */ //MN: we need these for error resilience of intra-frames FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail); s->dc_val[0] = s->dc_val_base + s->b8_stride + 1; s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1; s->dc_val[2] = s->dc_val[1] + c_size; for(i=0;idc_val_base[i] = 1024; } /* which mb is a intra block */ FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail); memset(s->mbintra_table, 1, mb_array_size); /* init macroblock skip table */ FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail); //Note the +1 is for a quicker mpeg4 slice_end detection FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail); s->parse_context.state= -1; if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){ s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH); s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH); s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH); } } s->context_initialized = 1; s->thread_context[0]= s; if (s->width && s->height) { if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) { threads = s->avctx->thread_count; for(i=1; ithread_context[i]= av_malloc(sizeof(MpegEncContext)); memcpy(s->thread_context[i], s, sizeof(MpegEncContext)); } for(i=0; ithread_context[i], s) < 0) goto fail; s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count; s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count; } } else { if(init_duplicate_context(s, s) < 0) goto fail; s->start_mb_y = 0; s->end_mb_y = s->mb_height; } } return 0; fail: MPV_common_end(s); return -1; }"} {"target": 0, "idx": 5357, "func": "int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, const char *filename, void *logctx, unsigned int offset, unsigned int max_probe_size) { AVProbeData pd = { filename ? filename : \"\", NULL, -offset }; unsigned char *buf = NULL; int ret = 0, probe_size; if (!max_probe_size) { max_probe_size = PROBE_BUF_MAX; } else if (max_probe_size > PROBE_BUF_MAX) { max_probe_size = PROBE_BUF_MAX; } else if (max_probe_size < PROBE_BUF_MIN) { return AVERROR(EINVAL); } if (offset >= max_probe_size) { return AVERROR(EINVAL); } for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt; probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) { int score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0; int buf_offset = (probe_size == PROBE_BUF_MIN) ? 
0 : probe_size>>1; void *buftmp; if (probe_size < offset) { continue; } /* read probe data */ buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE); if(!buftmp){ av_free(buf); return AVERROR(ENOMEM); } buf=buftmp; if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) { /* fail if error was not end of file, otherwise, lower score */ if (ret != AVERROR_EOF) { av_free(buf); return ret; } score = 0; ret = 0; /* error was end of file, nothing read */ } pd.buf_size += ret; pd.buf = &buf[offset]; memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE); /* guess file format */ *fmt = av_probe_input_format2(&pd, 1, &score); if(*fmt){ if(score <= AVPROBE_SCORE_RETRY){ //this can only be true in the last iteration av_log(logctx, AV_LOG_WARNING, \"Format %s detected only with low score of %d, misdetection possible!\\n\", (*fmt)->name, score); }else av_log(logctx, AV_LOG_DEBUG, \"Format %s probed with size=%d and score=%d\\n\", (*fmt)->name, probe_size, score); } } if (!*fmt) { av_free(buf); return AVERROR_INVALIDDATA; } /* rewind. reuse probe buffer to avoid seeking */ ret = ffio_rewind_with_probe_data(pb, &buf, pd.buf_size); return ret; }"} {"target": 1, "idx": 5361, "func": "static void slirp_smb(SlirpState* s, Monitor *mon, const char *exported_dir, struct in_addr vserver_addr) { static int instance; char smb_conf[128]; char smb_cmdline[128]; FILE *f; snprintf(s->smb_dir, sizeof(s->smb_dir), \"/tmp/qemu-smb.%ld-%d\", (long)getpid(), instance++); if (mkdir(s->smb_dir, 0700) < 0) { config_error(mon, \"could not create samba server dir '%s'\\n\", s->smb_dir); return; } snprintf(smb_conf, sizeof(smb_conf), \"%s/%s\", s->smb_dir, \"smb.conf\"); f = fopen(smb_conf, \"w\"); if (!f) { slirp_smb_cleanup(s); config_error(mon, \"could not create samba server \" \"configuration file '%s'\\n\", smb_conf); return; } fprintf(f, \"[global]\\n\" \"private dir=%s\\n\" \"smb ports=0\\n\" \"socket address=127.0.0.1\\n\" \"pid directory=%s\\n\" \"lock directory=%s\\n\" \"log file=%s/log.smbd\\n\" \"smb passwd file=%s/smbpasswd\\n\" \"security = share\\n\" \"[qemu]\\n\" \"path=%s\\n\" \"read only=no\\n\" \"guest ok=yes\\n\", s->smb_dir, s->smb_dir, s->smb_dir, s->smb_dir, s->smb_dir, exported_dir ); fclose(f); snprintf(smb_cmdline, sizeof(smb_cmdline), \"%s -s %s\", SMBD_COMMAND, smb_conf); if (slirp_add_exec(s->slirp, 0, smb_cmdline, &vserver_addr, 139) < 0) { slirp_smb_cleanup(s); config_error(mon, \"conflicting/invalid smbserver address\\n\"); } }"} {"target": 0, "idx": 5381, "func": "void tlb_set_page(CPUState *env, target_ulong vaddr, target_phys_addr_t paddr, int prot, int mmu_idx, target_ulong size) { PhysPageDesc *p; unsigned long pd; unsigned int index; target_ulong address; target_ulong code_address; unsigned long addend; CPUTLBEntry *te; CPUWatchpoint *wp; target_phys_addr_t iotlb; assert(size >= TARGET_PAGE_SIZE); if (size != TARGET_PAGE_SIZE) { tlb_add_large_page(env, vaddr, size); } p = phys_page_find(paddr >> TARGET_PAGE_BITS); if (!p) { pd = IO_MEM_UNASSIGNED; } else { pd = p->phys_offset; } #if defined(DEBUG_TLB) printf(\"tlb_set_page: vaddr=\" TARGET_FMT_lx \" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\\n\", vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd); #endif address = vaddr; if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { /* IO memory case (romd handled later) */ address |= TLB_MMIO; } addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK); if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) { /* Normal RAM. 
*/ iotlb = pd & TARGET_PAGE_MASK; if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM) iotlb |= IO_MEM_NOTDIRTY; else iotlb |= IO_MEM_ROM; } else { /* IO handlers are currently passed a physical address. It would be nice to pass an offset from the base address of that region. This would avoid having to special case RAM, and avoid full address decoding in every device. We can't use the high bits of pd for this because IO_MEM_ROMD uses these as a ram address. */ iotlb = (pd & ~TARGET_PAGE_MASK); if (p) { iotlb += p->region_offset; } else { iotlb += paddr; } } code_address = address; /* Make accesses to pages with watchpoints go via the watchpoint trap routines. */ QTAILQ_FOREACH(wp, &env->watchpoints, entry) { if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) { iotlb = io_mem_watch + paddr; /* TODO: The memory case can be optimized by not trapping reads of pages with a write breakpoint. */ address |= TLB_MMIO; } } index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); env->iotlb[mmu_idx][index] = iotlb - vaddr; te = &env->tlb_table[mmu_idx][index]; te->addend = addend - vaddr; if (prot & PAGE_READ) { te->addr_read = address; } else { te->addr_read = -1; } if (prot & PAGE_EXEC) { te->addr_code = code_address; } else { te->addr_code = -1; } if (prot & PAGE_WRITE) { if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || (pd & IO_MEM_ROMD)) { /* Write access calls the I/O callback. */ te->addr_write = address | TLB_MMIO; } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && !cpu_physical_memory_is_dirty(pd)) { te->addr_write = address | TLB_NOTDIRTY; } else { te->addr_write = address; } } else { te->addr_write = -1; } }"} {"target": 0, "idx": 5386, "func": "void *slavio_intctl_init(target_phys_addr_t addr, target_phys_addr_t addrg, const uint32_t *intbit_to_level, qemu_irq **irq, qemu_irq **cpu_irq, unsigned int cputimer) { int slavio_intctl_io_memory, slavio_intctlm_io_memory, i; SLAVIO_INTCTLState *s; s = qemu_mallocz(sizeof(SLAVIO_INTCTLState)); if (!s) return NULL; s->intbit_to_level = intbit_to_level; for (i = 0; i < MAX_CPUS; i++) { slavio_intctl_io_memory = cpu_register_io_memory(0, slavio_intctl_mem_read, slavio_intctl_mem_write, s); cpu_register_physical_memory(addr + i * TARGET_PAGE_SIZE, INTCTL_SIZE, slavio_intctl_io_memory); } slavio_intctlm_io_memory = cpu_register_io_memory(0, slavio_intctlm_mem_read, slavio_intctlm_mem_write, s); cpu_register_physical_memory(addrg, INTCTLM_SIZE, slavio_intctlm_io_memory); register_savevm(\"slavio_intctl\", addr, 1, slavio_intctl_save, slavio_intctl_load, s); qemu_register_reset(slavio_intctl_reset, s); *irq = qemu_allocate_irqs(slavio_set_irq, s, 32); *cpu_irq = qemu_allocate_irqs(slavio_set_timer_irq_cpu, s, MAX_CPUS); s->cputimer_bit = 1 << s->intbit_to_level[cputimer]; slavio_intctl_reset(s); return s; }"} {"target": 1, "idx": 5408, "func": "void helper_frndint(void) { ST0 = rint(ST0); }"} {"target": 0, "idx": 5416, "func": "static void start_children(FFStream *feed) { if (no_launch) return; for (; feed; feed = feed->next) { if (feed->child_argv && !feed->pid) { feed->pid_start = time(0); feed->pid = fork(); if (feed->pid < 0) { http_log(\"Unable to create children\\n\"); exit(1); } if (!feed->pid) { /* In child */ char pathname[1024]; char *slash; int i; av_strlcpy(pathname, my_program_name, sizeof(pathname)); slash = strrchr(pathname, '/'); if (!slash) slash = pathname; else slash++; strcpy(slash, \"ffmpeg\"); http_log(\"Launch command line: \"); http_log(\"%s \", pathname); for (i = 1; feed->child_argv[i] && feed->child_argv[i][0]; i++) http_log(\"%s \", 
feed->child_argv[i]); http_log(\"\\n\"); for (i = 3; i < 256; i++) close(i); if (!ffserver_debug) { i = open(\"/dev/null\", O_RDWR); if (i != -1) { dup2(i, 0); dup2(i, 1); dup2(i, 2); close(i); } } /* This is needed to make relative pathnames work */ chdir(my_program_dir); signal(SIGPIPE, SIG_DFL); execvp(pathname, feed->child_argv); _exit(1); } } } }"} {"target": 1, "idx": 5429, "func": "static int mov_seek_fragment(AVFormatContext *s, AVStream *st, int64_t timestamp) { MOVContext *mov = s->priv_data; int i, j; if (!mov->fragment_index_complete) return 0; for (i = 0; i < mov->fragment_index_count; i++) { if (mov->fragment_index_data[i]->track_id == st->id) { MOVFragmentIndex *index = index = mov->fragment_index_data[i]; for (j = index->item_count - 1; j >= 0; j--) { if (index->items[j].time <= timestamp) { if (index->items[j].headers_read) return 0; return mov_switch_root(s, index->items[j].moof_offset); } } } } return 0; }"} {"target": 1, "idx": 5430, "func": "static int mxf_read_track(MXFTrack *track, ByteIOContext *pb, int tag) { switch(tag) { case 0x4801: track->track_id = get_be32(pb); break; case 0x4804: get_buffer(pb, track->track_number, 4); break; case 0x4B01: track->edit_rate.den = get_be32(pb); track->edit_rate.num = get_be32(pb); break; case 0x4803: get_buffer(pb, track->sequence_ref, 16); break; } return 0; }"} {"target": 1, "idx": 5437, "func": "int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p) { int ret = 0; if (av_strstart(p, \"pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,\", &p)) { AVIOContext pb; RTSPState *rt = s->priv_data; AVDictionary *opts = NULL; int len = strlen(p) * 6 / 8; char *buf = av_mallocz(len); AVInputFormat *iformat; if (!buf) return AVERROR(ENOMEM); av_base64_decode(buf, p, len); if (rtp_asf_fix_header(buf, len) < 0) av_log(s, AV_LOG_ERROR, \"Failed to fix invalid RTSP-MS/ASF min_pktsize\\n\"); init_packetizer(&pb, buf, len); if (rt->asf_ctx) { avformat_close_input(&rt->asf_ctx); } if (!(iformat = av_find_input_format(\"asf\"))) return AVERROR_DEMUXER_NOT_FOUND; rt->asf_ctx = avformat_alloc_context(); if (!rt->asf_ctx) { av_free(buf); return AVERROR(ENOMEM); } rt->asf_ctx->pb = &pb; av_dict_set(&opts, \"no_resync_search\", \"1\", 0); if ((ret = ff_copy_whiteblacklists(rt->asf_ctx, s)) < 0) { av_dict_free(&opts); return ret; } ret = avformat_open_input(&rt->asf_ctx, \"\", iformat, &opts); av_dict_free(&opts); if (ret < 0) { av_free(buf); return ret; } av_dict_copy(&s->metadata, rt->asf_ctx->metadata, 0); rt->asf_pb_pos = avio_tell(&pb); av_free(buf); rt->asf_ctx->pb = NULL; } return ret; }"} {"target": 1, "idx": 5452, "func": "static int ra144_encode_frame(AVCodecContext *avctx, uint8_t *frame, int buf_size, void *data) { static const uint8_t sizes[LPC_ORDER] = {64, 32, 32, 16, 16, 8, 8, 8, 8, 4}; static const uint8_t bit_sizes[LPC_ORDER] = {6, 5, 5, 4, 4, 3, 3, 3, 3, 2}; RA144Context *ractx; PutBitContext pb; int32_t lpc_data[NBLOCKS * BLOCKSIZE]; int32_t lpc_coefs[LPC_ORDER][MAX_LPC_ORDER]; int shift[LPC_ORDER]; int16_t block_coefs[NBLOCKS][LPC_ORDER]; int lpc_refl[LPC_ORDER]; /**< reflection coefficients of the frame */ unsigned int refl_rms[NBLOCKS]; /**< RMS of the reflection coefficients */ int energy = 0; int i, idx; if (buf_size < FRAMESIZE) { av_log(avctx, AV_LOG_ERROR, \"output buffer too small\\n\"); return 0; } ractx = avctx->priv_data; /** * Since the LPC coefficients are calculated on a frame centered over the * fourth subframe, to encode a given frame, data from the next frame is * needed. 
In each call to this function, the previous frame (whose data are * saved in the encoder context) is encoded, and data from the current frame * are saved in the encoder context to be used in the next function call. */ for (i = 0; i < (2 * BLOCKSIZE + BLOCKSIZE / 2); i++) { lpc_data[i] = ractx->curr_block[BLOCKSIZE + BLOCKSIZE / 2 + i]; energy += (lpc_data[i] * lpc_data[i]) >> 4; } for (i = 2 * BLOCKSIZE + BLOCKSIZE / 2; i < NBLOCKS * BLOCKSIZE; i++) { lpc_data[i] = *((int16_t *)data + i - 2 * BLOCKSIZE - BLOCKSIZE / 2) >> 2; energy += (lpc_data[i] * lpc_data[i]) >> 4; } energy = ff_energy_tab[quantize(ff_t_sqrt(energy >> 5) >> 10, ff_energy_tab, 32)]; ff_lpc_calc_coefs(&ractx->lpc_ctx, lpc_data, NBLOCKS * BLOCKSIZE, LPC_ORDER, LPC_ORDER, 16, lpc_coefs, shift, FF_LPC_TYPE_LEVINSON, 0, ORDER_METHOD_EST, 12, 0); for (i = 0; i < LPC_ORDER; i++) block_coefs[NBLOCKS - 1][i] = -(lpc_coefs[LPC_ORDER - 1][i] << (12 - shift[LPC_ORDER - 1])); /** * TODO: apply perceptual weighting of the input speech through bandwidth * expansion of the LPC filter. */ if (ff_eval_refl(lpc_refl, block_coefs[NBLOCKS - 1], avctx)) { /** * The filter is unstable: use the coefficients of the previous frame. */ ff_int_to_int16(block_coefs[NBLOCKS - 1], ractx->lpc_coef[1]); ff_eval_refl(lpc_refl, block_coefs[NBLOCKS - 1], avctx); } init_put_bits(&pb, frame, buf_size); for (i = 0; i < LPC_ORDER; i++) { idx = quantize(lpc_refl[i], ff_lpc_refl_cb[i], sizes[i]); put_bits(&pb, bit_sizes[i], idx); lpc_refl[i] = ff_lpc_refl_cb[i][idx]; } ractx->lpc_refl_rms[0] = ff_rms(lpc_refl); ff_eval_coefs(ractx->lpc_coef[0], lpc_refl); refl_rms[0] = ff_interp(ractx, block_coefs[0], 1, 1, ractx->old_energy); refl_rms[1] = ff_interp(ractx, block_coefs[1], 2, energy <= ractx->old_energy, ff_t_sqrt(energy * ractx->old_energy) >> 12); refl_rms[2] = ff_interp(ractx, block_coefs[2], 3, 0, energy); refl_rms[3] = ff_rescale_rms(ractx->lpc_refl_rms[0], energy); ff_int_to_int16(block_coefs[NBLOCKS - 1], ractx->lpc_coef[0]); put_bits(&pb, 5, quantize(energy, ff_energy_tab, 32)); for (i = 0; i < NBLOCKS; i++) ra144_encode_subblock(ractx, ractx->curr_block + i * BLOCKSIZE, block_coefs[i], refl_rms[i], &pb); flush_put_bits(&pb); ractx->old_energy = energy; ractx->lpc_refl_rms[1] = ractx->lpc_refl_rms[0]; FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]); for (i = 0; i < NBLOCKS * BLOCKSIZE; i++) ractx->curr_block[i] = *((int16_t *)data + i) >> 2; return FRAMESIZE; }"} {"target": 0, "idx": 5457, "func": "static void decode0(GetByteContext *gb, RangeCoder *rc, unsigned cumFreq, unsigned freq, unsigned total_freq) { int t = rc->range * (uint64_t)cumFreq / total_freq; rc->code1 += t + 1; rc->range = rc->range * (uint64_t)(freq + cumFreq) / total_freq - (t + 1); while (rc->range < TOP && bytestream2_get_bytes_left(gb) > 0) { unsigned byte = bytestream2_get_byte(gb); rc->code = (rc->code << 8) | byte; rc->code1 <<= 8; rc->range <<= 8; } }"} {"target": 0, "idx": 5482, "func": "static int dmg_open(BlockDriverState *bs, const char *filename, int flags) { BDRVDMGState *s = bs->opaque; off_t info_begin,info_end,last_in_offset,last_out_offset; uint32_t count; uint32_t max_compressed_size=1,max_sectors_per_chunk=1,i; int64_t offset; s->fd = open(filename, O_RDONLY | O_BINARY); if (s->fd < 0) return -errno; bs->read_only = 1; s->n_chunks = 0; s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL; /* read offset of info blocks */ offset = lseek(s->fd, -0x1d8, SEEK_END); if (offset < 0) { goto fail; } info_begin = read_off(s->fd, offset); if 
(info_begin == 0) { goto fail; } if (read_uint32(s->fd, info_begin) != 0x100) { goto fail; } count = read_uint32(s->fd, info_begin + 4); if (count == 0) { goto fail; } info_end = info_begin + count; offset = info_begin + 0x100; /* read offsets */ last_in_offset = last_out_offset = 0; while (offset < info_end) { uint32_t type; count = read_uint32(s->fd, offset); if(count==0) goto fail; offset += 4; type = read_uint32(s->fd, offset); if (type == 0x6d697368 && count >= 244) { int new_size, chunk_count; offset += 4; offset += 200; chunk_count = (count-204)/40; new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count); s->types = qemu_realloc(s->types, new_size/2); s->offsets = qemu_realloc(s->offsets, new_size); s->lengths = qemu_realloc(s->lengths, new_size); s->sectors = qemu_realloc(s->sectors, new_size); s->sectorcounts = qemu_realloc(s->sectorcounts, new_size); for(i=s->n_chunks;in_chunks+chunk_count;i++) { s->types[i] = read_uint32(s->fd, offset); offset += 4; if(s->types[i]!=0x80000005 && s->types[i]!=1 && s->types[i]!=2) { if(s->types[i]==0xffffffff) { last_in_offset = s->offsets[i-1]+s->lengths[i-1]; last_out_offset = s->sectors[i-1]+s->sectorcounts[i-1]; } chunk_count--; i--; offset += 36; continue; } offset += 4; s->sectors[i] = last_out_offset+read_off(s->fd, offset); offset += 8; s->sectorcounts[i] = read_off(s->fd, offset); offset += 8; s->offsets[i] = last_in_offset+read_off(s->fd, offset); offset += 8; s->lengths[i] = read_off(s->fd, offset); offset += 8; if(s->lengths[i]>max_compressed_size) max_compressed_size = s->lengths[i]; if(s->sectorcounts[i]>max_sectors_per_chunk) max_sectors_per_chunk = s->sectorcounts[i]; } s->n_chunks+=chunk_count; } } /* initialize zlib engine */ s->compressed_chunk = qemu_malloc(max_compressed_size+1); s->uncompressed_chunk = qemu_malloc(512*max_sectors_per_chunk); if(inflateInit(&s->zstream) != Z_OK) goto fail; s->current_chunk = s->n_chunks; return 0; fail: close(s->fd); return -1; }"} {"target": 1, "idx": 5490, "func": "bool net_rx_pkt_is_vlan_stripped(struct NetRxPkt *pkt) { assert(pkt); return pkt->vlan_stripped; }"} {"target": 1, "idx": 5500, "func": "static Visitor *validate_test_init_raw(TestInputVisitorData *data, const char *json_string) { Visitor *v; data->obj = qobject_from_json(json_string); g_assert(data->obj != NULL); data->qiv = qmp_input_visitor_new_strict(data->obj); g_assert(data->qiv != NULL); v = qmp_input_get_visitor(data->qiv); g_assert(v != NULL); return v; }"} {"target": 1, "idx": 5508, "func": "static int connect_namedsocket(const char *path) { int sockfd, size; struct sockaddr_un helper; sockfd = socket(AF_UNIX, SOCK_STREAM, 0); if (sockfd < 0) { fprintf(stderr, \"socket %s\\n\", strerror(errno)); return -1; } strcpy(helper.sun_path, path); helper.sun_family = AF_UNIX; size = strlen(helper.sun_path) + sizeof(helper.sun_family); if (connect(sockfd, (struct sockaddr *)&helper, size) < 0) { fprintf(stderr, \"socket error\\n\"); return -1; } /* remove the socket for security reasons */ unlink(path); return sockfd; }"} {"target": 1, "idx": 5509, "func": "static void ehci_mem_writel(void *ptr, target_phys_addr_t addr, uint32_t val) { EHCIState *s = ptr; uint32_t *mmio = (uint32_t *)(&s->mmio[addr]); uint32_t old = *mmio; int i; trace_usb_ehci_mmio_writel(addr, addr2str(addr), val); /* Only aligned reads are allowed on OHCI */ if (addr & 3) { fprintf(stderr, \"usb-ehci: Mis-aligned write to addr 0x\" TARGET_FMT_plx \"\\n\", addr); return; if (addr >= PORTSC && addr < PORTSC + 4 * NB_PORTS) { handle_port_status_write(s, 
(addr-PORTSC)/4, val); trace_usb_ehci_mmio_change(addr, addr2str(addr), *mmio, old); return; if (addr < OPREGBASE) { fprintf(stderr, \"usb-ehci: write attempt to read-only register\" TARGET_FMT_plx \"\\n\", addr); return; /* Do any register specific pre-write processing here. */ switch(addr) { case USBCMD: if (val & USBCMD_HCRESET) { ehci_reset(s); val = s->usbcmd; break; /* not supporting dynamic frame list size at the moment */ if ((val & USBCMD_FLS) && !(s->usbcmd & USBCMD_FLS)) { fprintf(stderr, \"attempt to set frame list size -- value %d\\n\", val & USBCMD_FLS); val &= ~USBCMD_FLS; if (((USBCMD_RUNSTOP | USBCMD_PSE | USBCMD_ASE) & val) != ((USBCMD_RUNSTOP | USBCMD_PSE | USBCMD_ASE) & s->usbcmd)) { if (s->pstate == EST_INACTIVE) { SET_LAST_RUN_CLOCK(s); s->usbcmd = val; /* Set usbcmd for ehci_update_halt() */ ehci_update_halt(s); qemu_mod_timer(s->frame_timer, qemu_get_clock_ns(vm_clock)); break; case USBSTS: val &= USBSTS_RO_MASK; // bits 6 through 31 are RO ehci_clear_usbsts(s, val); // bits 0 through 5 are R/WC val = s->usbsts; ehci_update_irq(s); break; case USBINTR: val &= USBINTR_MASK; break; case FRINDEX: val &= 0x00003ff8; /* frindex is 14bits and always a multiple of 8 */ break; case CONFIGFLAG: val &= 0x1; if (val) { for(i = 0; i < NB_PORTS; i++) handle_port_owner_write(s, i, 0); break; case PERIODICLISTBASE: if (ehci_periodic_enabled(s)) { fprintf(stderr, \"ehci: PERIODIC list base register set while periodic schedule\\n\" \" is enabled and HC is enabled\\n\"); break; case ASYNCLISTADDR: if (ehci_async_enabled(s)) { fprintf(stderr, \"ehci: ASYNC list address register set while async schedule\\n\" \" is enabled and HC is enabled\\n\"); break; *mmio = val; trace_usb_ehci_mmio_change(addr, addr2str(addr), *mmio, old);"} {"target": 0, "idx": 5539, "func": "static av_cold int dnxhd_encode_init(AVCodecContext *avctx) { DNXHDEncContext *ctx = avctx->priv_data; int i, index, bit_depth, ret; switch (avctx->pix_fmt) { case AV_PIX_FMT_YUV422P: bit_depth = 8; break; case AV_PIX_FMT_YUV422P10: bit_depth = 10; break; default: av_log(avctx, AV_LOG_ERROR, \"pixel format is incompatible with DNxHD\\n\"); return AVERROR(EINVAL); } ctx->cid = ff_dnxhd_find_cid(avctx, bit_depth); if (!ctx->cid) { av_log(avctx, AV_LOG_ERROR, \"video parameters incompatible with DNxHD\\n\"); return AVERROR(EINVAL); } av_log(avctx, AV_LOG_DEBUG, \"cid %d\\n\", ctx->cid); index = ff_dnxhd_get_cid_table(ctx->cid); if (index < 0) return index; ctx->cid_table = &ff_dnxhd_cid_table[index]; ctx->m.avctx = avctx; ctx->m.mb_intra = 1; ctx->m.h263_aic = 1; avctx->bits_per_raw_sample = ctx->cid_table->bit_depth; ff_blockdsp_init(&ctx->bdsp, avctx); ff_fdctdsp_init(&ctx->m.fdsp, avctx); ff_mpv_idct_init(&ctx->m); ff_mpegvideoencdsp_init(&ctx->m.mpvencdsp, avctx); ff_pixblockdsp_init(&ctx->m.pdsp, avctx); if (!ctx->m.dct_quantize) ctx->m.dct_quantize = ff_dct_quantize_c; if (ctx->cid_table->bit_depth == 10) { ctx->m.dct_quantize = dnxhd_10bit_dct_quantize; ctx->get_pixels_8x4_sym = dnxhd_10bit_get_pixels_8x4_sym; ctx->block_width_l2 = 4; } else { ctx->get_pixels_8x4_sym = dnxhd_8bit_get_pixels_8x4_sym; ctx->block_width_l2 = 3; } if (ARCH_X86) ff_dnxhdenc_init_x86(ctx); ctx->m.mb_height = (avctx->height + 15) / 16; ctx->m.mb_width = (avctx->width + 15) / 16; if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) { ctx->interlaced = 1; ctx->m.mb_height /= 2; } ctx->m.mb_num = ctx->m.mb_height * ctx->m.mb_width; #if FF_API_QUANT_BIAS FF_DISABLE_DEPRECATION_WARNINGS if (ctx->intra_quant_bias == FF_DEFAULT_QUANT_BIAS && 
avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS) ctx->intra_quant_bias = avctx->intra_quant_bias; FF_ENABLE_DEPRECATION_WARNINGS #endif // XXX tune lbias/cbias if ((ret = dnxhd_init_qmat(ctx, ctx->intra_quant_bias, 0)) < 0) return ret; /* Avid Nitris hardware decoder requires a minimum amount of padding * in the coding unit payload */ if (ctx->nitris_compat) ctx->min_padding = 1600; if ((ret = dnxhd_init_vlc(ctx)) < 0) return ret; if ((ret = dnxhd_init_rc(ctx)) < 0) return ret; FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_size, ctx->m.mb_height * sizeof(uint32_t), fail); FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_offs, ctx->m.mb_height * sizeof(uint32_t), fail); FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_bits, ctx->m.mb_num * sizeof(uint16_t), fail); FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale, ctx->m.mb_num * sizeof(uint8_t), fail); #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS avctx->coded_frame->key_frame = 1; avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; FF_ENABLE_DEPRECATION_WARNINGS #endif if (avctx->thread_count > MAX_THREADS) { av_log(avctx, AV_LOG_ERROR, \"too many threads\\n\"); return AVERROR(EINVAL); } ctx->thread[0] = ctx; for (i = 1; i < avctx->thread_count; i++) { ctx->thread[i] = av_malloc(sizeof(DNXHDEncContext)); memcpy(ctx->thread[i], ctx, sizeof(DNXHDEncContext)); } return 0; fail: // for FF_ALLOCZ_OR_GOTO return AVERROR(ENOMEM); }"} {"target": 0, "idx": 5567, "func": "static int vc1_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size, n_slices = 0, i; VC1Context *v = avctx->priv_data; MpegEncContext *s = &v->s; AVFrame *pict = data; uint8_t *buf2 = NULL; const uint8_t *buf_start = buf, *buf_start_second_field = NULL; int mb_height, n_slices1=-1; struct { uint8_t *buf; GetBitContext gb; int mby_start; } *slices = NULL, *tmp; v->second_field = 0; if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay = 1; /* no supplementary picture */ if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) { /* special case for last picture */ if (s->low_delay == 0 && s->next_picture_ptr) { *pict = s->next_picture_ptr->f; s->next_picture_ptr = NULL; *data_size = sizeof(AVFrame); } return buf_size; } if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) { if (v->profile < PROFILE_ADVANCED) avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3; else avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1; } //for advanced profile we may need to parse and unescape data if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { int buf_size2 = 0; buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */ const uint8_t *start, *end, *next; int size; next = buf; for (start = buf, end = buf + buf_size; next < end; start = next) { next = find_next_marker(start + 4, end); size = next - start - 4; if (size <= 0) continue; switch (AV_RB32(start)) { case VC1_CODE_FRAME: if (avctx->hwaccel || s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) buf_start = start; buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); break; case VC1_CODE_FIELD: { int buf_size3; if (avctx->hwaccel || s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) buf_start_second_field = start; tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); if (!tmp) goto err; slices = tmp; slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!slices[n_slices].buf) goto err; buf_size3 = 
vc1_unescape_buffer(start + 4, size, slices[n_slices].buf); init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, buf_size3 << 3); /* assuming that the field marker is at the exact middle, hope it's correct */ slices[n_slices].mby_start = s->mb_height >> 1; n_slices1 = n_slices - 1; // index of the last slice of the first field n_slices++; break; } case VC1_CODE_ENTRYPOINT: /* it should be before frame data */ buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); init_get_bits(&s->gb, buf2, buf_size2 * 8); ff_vc1_decode_entry_point(avctx, v, &s->gb); break; case VC1_CODE_SLICE: { int buf_size3; tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); if (!tmp) goto err; slices = tmp; slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!slices[n_slices].buf) goto err; buf_size3 = vc1_unescape_buffer(start + 4, size, slices[n_slices].buf); init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, buf_size3 << 3); slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9); n_slices++; break; } } } } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */ const uint8_t *divider; int buf_size3; divider = find_next_marker(buf, buf + buf_size); if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) { av_log(avctx, AV_LOG_ERROR, \"Error in WVC1 interlaced frame\\n\"); goto err; } else { // found field marker, unescape second field if (avctx->hwaccel || s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) buf_start_second_field = divider; tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); if (!tmp) goto err; slices = tmp; slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!slices[n_slices].buf) goto err; buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf); init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, buf_size3 << 3); slices[n_slices].mby_start = s->mb_height >> 1; n_slices1 = n_slices - 1; n_slices++; } buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2); } else { buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2); } init_get_bits(&s->gb, buf2, buf_size2*8); } else init_get_bits(&s->gb, buf, buf_size*8); if (v->res_sprite) { v->new_sprite = !get_bits1(&s->gb); v->two_sprites = get_bits1(&s->gb); /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means we're using the sprite compositor. These are intentionally kept separate so you can get the raw sprites by using the wmv3 decoder for WMVP or the vc1 one for WVP2 */ if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { if (v->new_sprite) { // switch AVCodecContext parameters to those of the sprites avctx->width = avctx->coded_width = v->sprite_width; avctx->height = avctx->coded_height = v->sprite_height; } else { goto image; } } } if (s->context_initialized && (s->width != avctx->coded_width || s->height != avctx->coded_height)) { ff_vc1_decode_end(avctx); } if (!s->context_initialized) { if (ff_msmpeg4_decode_init(avctx) < 0 || ff_vc1_decode_init_alloc_tables(v) < 0) goto err; s->low_delay = !avctx->has_b_frames || v->res_sprite; if (v->profile == PROFILE_ADVANCED) { s->h_edge_pos = avctx->coded_width; s->v_edge_pos = avctx->coded_height; } } /* We need to set current_picture_ptr before reading the header, * otherwise we cannot store anything in there. 
*/ if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) { int i = ff_find_unused_picture(s, 0); if (i < 0) goto err; s->current_picture_ptr = &s->picture[i]; } // do parse frame header v->pic_header_flag = 0; if (v->profile < PROFILE_ADVANCED) { if (ff_vc1_parse_frame_header(v, &s->gb) < 0) { goto err; } } else { if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) { goto err; } } if (avctx->debug & FF_DEBUG_PICT_INFO) av_log(v->s.avctx, AV_LOG_DEBUG, \"pict_type: %c\\n\", av_get_picture_type_char(s->pict_type)); if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) && s->pict_type != AV_PICTURE_TYPE_I) { av_log(v->s.avctx, AV_LOG_ERROR, \"Sprite decoder: expected I-frame\\n\"); goto err; } if ((s->mb_height >> v->field_mode) == 0) { av_log(v->s.avctx, AV_LOG_ERROR, \"image too short\\n\"); goto err; } // process pulldown flags s->current_picture_ptr->f.repeat_pict = 0; // Pulldown flags are only valid when 'broadcast' has been set. // So ticks_per_frame will be 2 if (v->rff) { // repeat field s->current_picture_ptr->f.repeat_pict = 1; } else if (v->rptfrm) { // repeat frames s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2; } // for skipping the frame s->current_picture.f.pict_type = s->pict_type; s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; /* skip B-frames if we don't have reference frames */ if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) { goto err; } if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) || avctx->skip_frame >= AVDISCARD_ALL) { goto end; } if (s->next_p_frame_damaged) { if (s->pict_type == AV_PICTURE_TYPE_B) goto end; else s->next_p_frame_damaged = 0; } if (ff_MPV_frame_start(s, avctx) < 0) { goto err; } v->s.current_picture_ptr->f.interlaced_frame = (v->fcm != PROGRESSIVE); v->s.current_picture_ptr->f.top_field_first = v->tff; s->me.qpel_put = s->dsp.put_qpel_pixels_tab; s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab; if ((CONFIG_VC1_VDPAU_DECODER) &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start); else if (avctx->hwaccel) { if (v->field_mode && buf_start_second_field) { // decode first field s->picture_structure = PICT_BOTTOM_FIELD - v->tff; if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0) goto err; if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0) goto err; if (avctx->hwaccel->end_frame(avctx) < 0) goto err; // decode second field s->gb = slices[n_slices1 + 1].gb; s->picture_structure = PICT_TOP_FIELD + v->tff; v->second_field = 1; v->pic_header_flag = 0; if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) { av_log(avctx, AV_LOG_ERROR, \"parsing header for second field failed\"); goto err; } v->s.current_picture_ptr->f.pict_type = v->s.pict_type; if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0) goto err; if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0) goto err; if (avctx->hwaccel->end_frame(avctx) < 0) goto err; } else { s->picture_structure = PICT_FRAME; if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0) goto err; if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0) goto err; if 
(avctx->hwaccel->end_frame(avctx) < 0) goto err; } } else { if (v->fcm == ILACE_FRAME && s->pict_type == AV_PICTURE_TYPE_B) goto err; // This codepath is still incomplete thus it is disabled ff_er_frame_start(s); v->bits = buf_size * 8; v->end_mb_x = s->mb_width; if (v->field_mode) { uint8_t *tmp[2]; s->current_picture.f.linesize[0] <<= 1; s->current_picture.f.linesize[1] <<= 1; s->current_picture.f.linesize[2] <<= 1; s->linesize <<= 1; s->uvlinesize <<= 1; tmp[0] = v->mv_f_last[0]; tmp[1] = v->mv_f_last[1]; v->mv_f_last[0] = v->mv_f_next[0]; v->mv_f_last[1] = v->mv_f_next[1]; v->mv_f_next[0] = v->mv_f[0]; v->mv_f_next[1] = v->mv_f[1]; v->mv_f[0] = tmp[0]; v->mv_f[1] = tmp[1]; } mb_height = s->mb_height >> v->field_mode; for (i = 0; i <= n_slices; i++) { if (i > 0 && slices[i - 1].mby_start >= mb_height) { if (v->field_mode <= 0) { av_log(v->s.avctx, AV_LOG_ERROR, \"Slice %d starts beyond \" \"picture boundary (%d >= %d)\\n\", i, slices[i - 1].mby_start, mb_height); continue; } v->second_field = 1; v->blocks_off = s->mb_width * s->mb_height << 1; v->mb_off = s->mb_stride * s->mb_height >> 1; } else { v->second_field = 0; v->blocks_off = 0; v->mb_off = 0; } if (i) { v->pic_header_flag = 0; if (v->field_mode && i == n_slices1 + 2) { if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) { av_log(v->s.avctx, AV_LOG_ERROR, \"Field header damaged\\n\"); continue; } } else if (get_bits1(&s->gb)) { v->pic_header_flag = 1; if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) { av_log(v->s.avctx, AV_LOG_ERROR, \"Slice header damaged\\n\"); continue; } } } s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height); if (!v->field_mode || v->second_field) s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height); else s->end_mb_y = (i <= n_slices1 + 1) ? 
mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height); if (s->end_mb_y <= s->start_mb_y) { av_log(v->s.avctx, AV_LOG_ERROR, \"end mb y %d %d invalid\\n\", s->end_mb_y, s->start_mb_y); continue; } ff_vc1_decode_blocks(v); if (i != n_slices) s->gb = slices[i].gb; } if (v->field_mode) { v->second_field = 0; if (s->pict_type == AV_PICTURE_TYPE_B) { memcpy(v->mv_f_base, v->mv_f_next_base, 2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2)); } s->current_picture.f.linesize[0] >>= 1; s->current_picture.f.linesize[1] >>= 1; s->current_picture.f.linesize[2] >>= 1; s->linesize >>= 1; s->uvlinesize >>= 1; } av_dlog(s->avctx, \"Consumed %i/%i bits\\n\", get_bits_count(&s->gb), s->gb.size_in_bits); // if (get_bits_count(&s->gb) > buf_size * 8) // return -1; if(s->error_occurred && s->pict_type == AV_PICTURE_TYPE_B) goto err; if(!v->field_mode) ff_er_frame_end(s); } ff_MPV_frame_end(s); if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { image: avctx->width = avctx->coded_width = v->output_width; avctx->height = avctx->coded_height = v->output_height; if (avctx->skip_frame >= AVDISCARD_NONREF) goto end; #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER if (vc1_decode_sprites(v, &s->gb)) goto err; #endif *pict = v->sprite_output_frame; *data_size = sizeof(AVFrame); } else { if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { *pict = s->current_picture_ptr->f; } else if (s->last_picture_ptr != NULL) { *pict = s->last_picture_ptr->f; } if (s->last_picture_ptr || s->low_delay) { *data_size = sizeof(AVFrame); ff_print_debug_info(s, pict); } } end: av_free(buf2); for (i = 0; i < n_slices; i++) av_free(slices[i].buf); av_free(slices); return buf_size; err: av_free(buf2); for (i = 0; i < n_slices; i++) av_free(slices[i].buf); av_free(slices); return -1; }"} {"target": 0, "idx": 5581, "func": "void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs) { bs->detect_zeroes = blk->root_state.detect_zeroes; }"} {"target": 0, "idx": 5582, "func": "vcard_emul_mirror_card(VReader *vreader) { /* * lookup certs using the C_FindObjects. The Stan Cert handle won't give * us the real certs until we log in. */ PK11GenericObject *firstObj, *thisObj; int cert_count; unsigned char **certs; int *cert_len; VCardKey **keys; PK11SlotInfo *slot; VCard *card; slot = vcard_emul_reader_get_slot(vreader); if (slot == NULL) { return NULL; } firstObj = PK11_FindGenericObjects(slot, CKO_CERTIFICATE); if (firstObj == NULL) { return NULL; } /* count the certs */ cert_count = 0; for (thisObj = firstObj; thisObj; thisObj = PK11_GetNextGenericObject(thisObj)) { cert_count++; } if (cert_count == 0) { PK11_DestroyGenericObjects(firstObj); return NULL; } /* allocate the arrays */ vcard_emul_alloc_arrays(&certs, &cert_len, &keys, cert_count); /* fill in the arrays */ cert_count = 0; for (thisObj = firstObj; thisObj; thisObj = PK11_GetNextGenericObject(thisObj)) { SECItem derCert; CERTCertificate *cert; SECStatus rv; rv = PK11_ReadRawAttribute(PK11_TypeGeneric, thisObj, CKA_VALUE, &derCert); if (rv != SECSuccess) { continue; } /* create floating temp cert. 
This gives us a cert structure even if * the token isn't logged in */ cert = CERT_NewTempCertificate(CERT_GetDefaultCertDB(), &derCert, NULL, PR_FALSE, PR_TRUE); SECITEM_FreeItem(&derCert, PR_FALSE); if (cert == NULL) { continue; } certs[cert_count] = cert->derCert.data; cert_len[cert_count] = cert->derCert.len; keys[cert_count] = vcard_emul_make_key(slot, cert); cert_count++; CERT_DestroyCertificate(cert); /* key obj still has a reference */ } /* now create the card */ card = vcard_emul_make_card(vreader, certs, cert_len, keys, cert_count); g_free(certs); g_free(cert_len); g_free(keys); return card; }"} {"target": 0, "idx": 5587, "func": "static uint64_t megasas_fw_time(void) { struct tm curtime; uint64_t bcd_time; qemu_get_timedate(&curtime, 0); bcd_time = ((uint64_t)curtime.tm_sec & 0xff) << 48 | ((uint64_t)curtime.tm_min & 0xff) << 40 | ((uint64_t)curtime.tm_hour & 0xff) << 32 | ((uint64_t)curtime.tm_mday & 0xff) << 24 | ((uint64_t)curtime.tm_mon & 0xff) << 16 | ((uint64_t)(curtime.tm_year + 1900) & 0xffff); return bcd_time; }"} {"target": 1, "idx": 5607, "func": "static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie) { OfDpaFlow *flow; int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000; flow = g_new0(OfDpaFlow, 1); if (!flow) { return NULL; } flow->cookie = cookie; flow->mask.tbl_id = 0xffffffff; flow->stats.install_time = flow->stats.refresh_time = now; return flow; }"} {"target": 0, "idx": 5617, "func": "static int rtp_open(URLContext *h, const char *uri, int flags) { RTPContext *s; int port, is_output, ttl, local_port; char hostname[256]; char buf[1024]; char path[1024]; const char *p; is_output = (flags & URL_WRONLY); s = av_mallocz(sizeof(RTPContext)); if (!s) return AVERROR(ENOMEM); h->priv_data = s; url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port, path, sizeof(path), uri); /* extract parameters */ ttl = -1; local_port = -1; p = strchr(uri, '?'); if (p) { if (find_info_tag(buf, sizeof(buf), \"ttl\", p)) { ttl = strtol(buf, NULL, 10); } if (find_info_tag(buf, sizeof(buf), \"localport\", p)) { local_port = strtol(buf, NULL, 10); } } build_udp_url(buf, sizeof(buf), hostname, port, local_port, ttl); if (url_open(&s->rtp_hd, buf, flags) < 0) goto fail; local_port = udp_get_local_port(s->rtp_hd); /* XXX: need to open another connection if the port is not even */ /* well, should suppress localport in path */ build_udp_url(buf, sizeof(buf), hostname, port + 1, local_port + 1, ttl); if (url_open(&s->rtcp_hd, buf, flags) < 0) goto fail; /* just to ease handle access. 
XXX: need to suppress direct handle access */ s->rtp_fd = udp_get_file_handle(s->rtp_hd); s->rtcp_fd = udp_get_file_handle(s->rtcp_hd); h->max_packet_size = url_get_max_packet_size(s->rtp_hd); h->is_streamed = 1; return 0; fail: if (s->rtp_hd) url_close(s->rtp_hd); if (s->rtcp_hd) url_close(s->rtcp_hd); av_free(s); return AVERROR(EIO); }"} {"target": 1, "idx": 5621, "func": "static float get_band_cost_UPAIR7_mips(struct AACEncContext *s, PutBitContext *pb, const float *in, const float *scaled, int size, int scale_idx, int cb, const float lambda, const float uplim, int *bits) { const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512]; const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512]; int i; float cost = 0; int qc1, qc2, qc3, qc4; int curbits = 0; uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1]; float *p_codes = (float *)ff_aac_codebook_vectors[cb-1]; for (i = 0; i < size; i += 4) { const float *vec, *vec2; int curidx, curidx2, sign1, count1, sign2, count2; int *in_int = (int *)&in[i]; float *in_pos = (float *)&in[i]; float di0, di1, di2, di3; int t0, t1, t2, t3, t4; qc1 = scaled[i ] * Q34 + ROUND_STANDARD; qc2 = scaled[i+1] * Q34 + ROUND_STANDARD; qc3 = scaled[i+2] * Q34 + ROUND_STANDARD; qc4 = scaled[i+3] * Q34 + ROUND_STANDARD; __asm__ volatile ( \".set push \\n\\t\" \".set noreorder \\n\\t\" \"ori %[t4], $zero, 7 \\n\\t\" \"ori %[sign1], $zero, 0 \\n\\t\" \"ori %[sign2], $zero, 0 \\n\\t\" \"slt %[t0], %[t4], %[qc1] \\n\\t\" \"slt %[t1], %[t4], %[qc2] \\n\\t\" \"slt %[t2], %[t4], %[qc3] \\n\\t\" \"slt %[t3], %[t4], %[qc4] \\n\\t\" \"movn %[qc1], %[t4], %[t0] \\n\\t\" \"movn %[qc2], %[t4], %[t1] \\n\\t\" \"movn %[qc3], %[t4], %[t2] \\n\\t\" \"movn %[qc4], %[t4], %[t3] \\n\\t\" \"lw %[t0], 0(%[in_int]) \\n\\t\" \"lw %[t1], 4(%[in_int]) \\n\\t\" \"lw %[t2], 8(%[in_int]) \\n\\t\" \"lw %[t3], 12(%[in_int]) \\n\\t\" \"slt %[t0], %[t0], $zero \\n\\t\" \"movn %[sign1], %[t0], %[qc1] \\n\\t\" \"slt %[t2], %[t2], $zero \\n\\t\" \"movn %[sign2], %[t2], %[qc3] \\n\\t\" \"slt %[t1], %[t1], $zero \\n\\t\" \"sll %[t0], %[sign1], 1 \\n\\t\" \"or %[t0], %[t0], %[t1] \\n\\t\" \"movn %[sign1], %[t0], %[qc2] \\n\\t\" \"slt %[t3], %[t3], $zero \\n\\t\" \"sll %[t0], %[sign2], 1 \\n\\t\" \"or %[t0], %[t0], %[t3] \\n\\t\" \"movn %[sign2], %[t0], %[qc4] \\n\\t\" \"slt %[count1], $zero, %[qc1] \\n\\t\" \"slt %[t1], $zero, %[qc2] \\n\\t\" \"slt %[count2], $zero, %[qc3] \\n\\t\" \"slt %[t2], $zero, %[qc4] \\n\\t\" \"addu %[count1], %[count1], %[t1] \\n\\t\" \"addu %[count2], %[count2], %[t2] \\n\\t\" \".set pop \\n\\t\" : [qc1]\"+r\"(qc1), [qc2]\"+r\"(qc2), [qc3]\"+r\"(qc3), [qc4]\"+r\"(qc4), [sign1]\"=&r\"(sign1), [count1]\"=&r\"(count1), [sign2]\"=&r\"(sign2), [count2]\"=&r\"(count2), [t0]\"=&r\"(t0), [t1]\"=&r\"(t1), [t2]\"=&r\"(t2), [t3]\"=&r\"(t3), [t4]\"=&r\"(t4) : [in_int]\"r\"(in_int) : \"memory\" ); curidx = 8 * qc1; curidx += qc2; curidx2 = 8 * qc3; curidx2 += qc4; curbits += p_bits[curidx]; curbits += upair7_sign_bits[curidx]; vec = &p_codes[curidx*2]; curbits += p_bits[curidx2]; curbits += upair7_sign_bits[curidx2]; vec2 = &p_codes[curidx2*2]; __asm__ volatile ( \".set push \\n\\t\" \".set noreorder \\n\\t\" \"lwc1 %[di0], 0(%[in_pos]) \\n\\t\" \"lwc1 %[di1], 4(%[in_pos]) \\n\\t\" \"lwc1 %[di2], 8(%[in_pos]) \\n\\t\" \"lwc1 %[di3], 12(%[in_pos]) \\n\\t\" \"abs.s %[di0], %[di0] \\n\\t\" \"abs.s %[di1], %[di1] \\n\\t\" \"abs.s %[di2], %[di2] \\n\\t\" \"abs.s %[di3], %[di3] \\n\\t\" \"lwc1 $f0, 0(%[vec]) \\n\\t\" 
\"lwc1 $f1, 4(%[vec]) \\n\\t\" \"lwc1 $f2, 0(%[vec2]) \\n\\t\" \"lwc1 $f3, 4(%[vec2]) \\n\\t\" \"nmsub.s %[di0], %[di0], $f0, %[IQ] \\n\\t\" \"nmsub.s %[di1], %[di1], $f1, %[IQ] \\n\\t\" \"nmsub.s %[di2], %[di2], $f2, %[IQ] \\n\\t\" \"nmsub.s %[di3], %[di3], $f3, %[IQ] \\n\\t\" \".set pop \\n\\t\" : [di0]\"=&f\"(di0), [di1]\"=&f\"(di1), [di2]\"=&f\"(di2), [di3]\"=&f\"(di3) : [in_pos]\"r\"(in_pos), [vec]\"r\"(vec), [vec2]\"r\"(vec2), [IQ]\"f\"(IQ) : \"$f0\", \"$f1\", \"$f2\", \"$f3\", \"memory\" ); cost += di0 * di0 + di1 * di1 + di2 * di2 + di3 * di3; } if (bits) *bits = curbits; return cost * lambda + curbits; }"} {"target": 1, "idx": 5624, "func": "QObject *json_parser_parse_err(QList *tokens, va_list *ap, Error **errp) { JSONParserContext ctxt = {}; QList *working = qlist_copy(tokens); QObject *result; result = parse_value(&ctxt, &working, ap); QDECREF(working); error_propagate(errp, ctxt.err); return result; }"} {"target": 1, "idx": 5630, "func": "static int init_directories(BDRVVVFATState* s, const char *dirname, int heads, int secs, Error **errp) { bootsector_t* bootsector; mapping_t* mapping; unsigned int i; unsigned int cluster; memset(&(s->first_sectors[0]),0,0x40*0x200); s->cluster_size=s->sectors_per_cluster*0x200; s->cluster_buffer=g_malloc(s->cluster_size); /* * The formula: sc = spf+1+spf*spc*(512*8/fat_type), * where sc is sector_count, * spf is sectors_per_fat, * spc is sectors_per_clusters, and * fat_type = 12, 16 or 32. */ i = 1+s->sectors_per_cluster*0x200*8/s->fat_type; s->sectors_per_fat=(s->sector_count+i)/i; /* round up */ s->offset_to_fat = s->offset_to_bootsector + 1; s->offset_to_root_dir = s->offset_to_fat + s->sectors_per_fat * 2; array_init(&(s->mapping),sizeof(mapping_t)); array_init(&(s->directory),sizeof(direntry_t)); /* add volume label */ { direntry_t* entry=array_get_next(&(s->directory)); entry->attributes=0x28; /* archive | volume label */ memcpy(entry->name, s->volume_label, sizeof(entry->name)); } /* Now build FAT, and write back information into directory */ init_fat(s); s->cluster_count=sector2cluster(s, s->sector_count); mapping = array_get_next(&(s->mapping)); mapping->begin = 0; mapping->dir_index = 0; mapping->info.dir.parent_mapping_index = -1; mapping->first_mapping_index = -1; mapping->path = g_strdup(dirname); i = strlen(mapping->path); if (i > 0 && mapping->path[i - 1] == '/') mapping->path[i - 1] = '\\0'; mapping->mode = MODE_DIRECTORY; mapping->read_only = 0; s->path = mapping->path; for (i = 0, cluster = 0; i < s->mapping.next; i++) { /* MS-DOS expects the FAT to be 0 for the root directory * (except for the media byte). */ /* LATER TODO: still true for FAT32? 
*/ int fix_fat = (i != 0); mapping = array_get(&(s->mapping), i); if (mapping->mode & MODE_DIRECTORY) { mapping->begin = cluster; if(read_directory(s, i)) { error_setg(errp, \"Could not read directory %s\", mapping->path); return -1; } mapping = array_get(&(s->mapping), i); } else { assert(mapping->mode == MODE_UNDEFINED); mapping->mode=MODE_NORMAL; mapping->begin = cluster; if (mapping->end > 0) { direntry_t* direntry = array_get(&(s->directory), mapping->dir_index); mapping->end = cluster + 1 + (mapping->end-1)/s->cluster_size; set_begin_of_direntry(direntry, mapping->begin); } else { mapping->end = cluster + 1; fix_fat = 0; } } assert(mapping->begin < mapping->end); /* next free cluster */ cluster = mapping->end; if(cluster > s->cluster_count) { error_setg(errp, \"Directory does not fit in FAT%d (capacity %.2f MB)\", s->fat_type, s->sector_count / 2000.0); return -1; } /* fix fat for entry */ if (fix_fat) { int j; for(j = mapping->begin; j < mapping->end - 1; j++) fat_set(s, j, j+1); fat_set(s, mapping->end - 1, s->max_fat_value); } } mapping = array_get(&(s->mapping), 0); s->sectors_of_root_directory = mapping->end * s->sectors_per_cluster; s->last_cluster_of_root_directory = mapping->end; /* the FAT signature */ fat_set(s,0,s->max_fat_value); fat_set(s,1,s->max_fat_value); s->current_mapping = NULL; bootsector = (bootsector_t *)(s->first_sectors + s->offset_to_bootsector * 0x200); bootsector->jump[0]=0xeb; bootsector->jump[1]=0x3e; bootsector->jump[2]=0x90; memcpy(bootsector->name,\"QEMU \",8); bootsector->sector_size=cpu_to_le16(0x200); bootsector->sectors_per_cluster=s->sectors_per_cluster; bootsector->reserved_sectors=cpu_to_le16(1); bootsector->number_of_fats=0x2; /* number of FATs */ bootsector->root_entries=cpu_to_le16(s->sectors_of_root_directory*0x10); bootsector->total_sectors16=s->sector_count>0xffff?0:cpu_to_le16(s->sector_count); /* media descriptor: hard disk=0xf8, floppy=0xf0 */ bootsector->media_type = (s->offset_to_bootsector > 0 ? 0xf8 : 0xf0); s->fat.pointer[0] = bootsector->media_type; bootsector->sectors_per_fat=cpu_to_le16(s->sectors_per_fat); bootsector->sectors_per_track = cpu_to_le16(secs); bootsector->number_of_heads = cpu_to_le16(heads); bootsector->hidden_sectors = cpu_to_le32(s->offset_to_bootsector); bootsector->total_sectors=cpu_to_le32(s->sector_count>0xffff?s->sector_count:0); /* LATER TODO: if FAT32, this is wrong */ /* drive_number: fda=0, hda=0x80 */ bootsector->u.fat16.drive_number = s->offset_to_bootsector == 0 ? 0 : 0x80; bootsector->u.fat16.signature=0x29; bootsector->u.fat16.id=cpu_to_le32(0xfabe1afd); memcpy(bootsector->u.fat16.volume_label, s->volume_label, sizeof(bootsector->u.fat16.volume_label)); memcpy(bootsector->u.fat16.fat_type, s->fat_type == 12 ? 
\"FAT12 \" : \"FAT16 \", 8); bootsector->magic[0]=0x55; bootsector->magic[1]=0xaa; return 0; }"} {"target": 0, "idx": 5646, "func": "void pxa25x_timer_init(target_phys_addr_t base, qemu_irq *irqs) { pxa2xx_timer_info *s = pxa2xx_timer_init(base, irqs); s->freq = PXA25X_FREQ; s->tm4 = NULL; }"} {"target": 0, "idx": 5670, "func": "struct omap_mpu_state_s *omap2420_mpu_init(unsigned long sdram_size, const char *core) { struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) g_malloc0(sizeof(struct omap_mpu_state_s)); ram_addr_t sram_base, q2_base; qemu_irq *cpu_irq; qemu_irq dma_irqs[4]; DriveInfo *dinfo; int i; SysBusDevice *busdev; struct omap_target_agent_s *ta; /* Core */ s->mpu_model = omap2420; s->env = cpu_init(core ?: \"arm1136-r2\"); if (!s->env) { fprintf(stderr, \"Unable to find CPU definition\\n\"); exit(1); } s->sdram_size = sdram_size; s->sram_size = OMAP242X_SRAM_SIZE; s->wakeup = qemu_allocate_irqs(omap_mpu_wakeup, s, 1)[0]; /* Clocks */ omap_clk_init(s); /* Memory-mapped stuff */ cpu_register_physical_memory(OMAP2_Q2_BASE, s->sdram_size, (q2_base = qemu_ram_alloc(NULL, \"omap2.dram\", s->sdram_size)) | IO_MEM_RAM); cpu_register_physical_memory(OMAP2_SRAM_BASE, s->sram_size, (sram_base = qemu_ram_alloc(NULL, \"omap2.sram\", s->sram_size)) | IO_MEM_RAM); s->l4 = omap_l4_init(OMAP2_L4_BASE, 54); /* Actually mapped at any 2K boundary in the ARM11 private-peripheral if */ cpu_irq = arm_pic_init_cpu(s->env); s->ih[0] = omap2_inth_init(0x480fe000, 0x1000, 3, &s->irq[0], cpu_irq[ARM_PIC_CPU_IRQ], cpu_irq[ARM_PIC_CPU_FIQ], omap_findclk(s, \"mpu_intc_fclk\"), omap_findclk(s, \"mpu_intc_iclk\")); s->prcm = omap_prcm_init(omap_l4tao(s->l4, 3), s->irq[0][OMAP_INT_24XX_PRCM_MPU_IRQ], NULL, NULL, s); s->sysc = omap_sysctl_init(omap_l4tao(s->l4, 1), omap_findclk(s, \"omapctrl_iclk\"), s); for (i = 0; i < 4; i ++) dma_irqs[i] = s->irq[omap2_dma_irq_map[i].ih][omap2_dma_irq_map[i].intr]; s->dma = omap_dma4_init(0x48056000, dma_irqs, s, 256, 32, omap_findclk(s, \"sdma_iclk\"), omap_findclk(s, \"sdma_fclk\")); s->port->addr_valid = omap2_validate_addr; /* Register SDRAM and SRAM ports for fast DMA transfers. */ soc_dma_port_add_mem(s->dma, qemu_get_ram_ptr(q2_base), OMAP2_Q2_BASE, s->sdram_size); soc_dma_port_add_mem(s->dma, qemu_get_ram_ptr(sram_base), OMAP2_SRAM_BASE, s->sram_size); s->uart[0] = omap2_uart_init(omap_l4ta(s->l4, 19), s->irq[0][OMAP_INT_24XX_UART1_IRQ], omap_findclk(s, \"uart1_fclk\"), omap_findclk(s, \"uart1_iclk\"), s->drq[OMAP24XX_DMA_UART1_TX], s->drq[OMAP24XX_DMA_UART1_RX], \"uart1\", serial_hds[0]); s->uart[1] = omap2_uart_init(omap_l4ta(s->l4, 20), s->irq[0][OMAP_INT_24XX_UART2_IRQ], omap_findclk(s, \"uart2_fclk\"), omap_findclk(s, \"uart2_iclk\"), s->drq[OMAP24XX_DMA_UART2_TX], s->drq[OMAP24XX_DMA_UART2_RX], \"uart2\", serial_hds[0] ? serial_hds[1] : NULL); s->uart[2] = omap2_uart_init(omap_l4ta(s->l4, 21), s->irq[0][OMAP_INT_24XX_UART3_IRQ], omap_findclk(s, \"uart3_fclk\"), omap_findclk(s, \"uart3_iclk\"), s->drq[OMAP24XX_DMA_UART3_TX], s->drq[OMAP24XX_DMA_UART3_RX], \"uart3\", serial_hds[0] && serial_hds[1] ? 
serial_hds[2] : NULL); s->gptimer[0] = omap_gp_timer_init(omap_l4ta(s->l4, 7), s->irq[0][OMAP_INT_24XX_GPTIMER1], omap_findclk(s, \"wu_gpt1_clk\"), omap_findclk(s, \"wu_l4_iclk\")); s->gptimer[1] = omap_gp_timer_init(omap_l4ta(s->l4, 8), s->irq[0][OMAP_INT_24XX_GPTIMER2], omap_findclk(s, \"core_gpt2_clk\"), omap_findclk(s, \"core_l4_iclk\")); s->gptimer[2] = omap_gp_timer_init(omap_l4ta(s->l4, 22), s->irq[0][OMAP_INT_24XX_GPTIMER3], omap_findclk(s, \"core_gpt3_clk\"), omap_findclk(s, \"core_l4_iclk\")); s->gptimer[3] = omap_gp_timer_init(omap_l4ta(s->l4, 23), s->irq[0][OMAP_INT_24XX_GPTIMER4], omap_findclk(s, \"core_gpt4_clk\"), omap_findclk(s, \"core_l4_iclk\")); s->gptimer[4] = omap_gp_timer_init(omap_l4ta(s->l4, 24), s->irq[0][OMAP_INT_24XX_GPTIMER5], omap_findclk(s, \"core_gpt5_clk\"), omap_findclk(s, \"core_l4_iclk\")); s->gptimer[5] = omap_gp_timer_init(omap_l4ta(s->l4, 25), s->irq[0][OMAP_INT_24XX_GPTIMER6], omap_findclk(s, \"core_gpt6_clk\"), omap_findclk(s, \"core_l4_iclk\")); s->gptimer[6] = omap_gp_timer_init(omap_l4ta(s->l4, 26), s->irq[0][OMAP_INT_24XX_GPTIMER7], omap_findclk(s, \"core_gpt7_clk\"), omap_findclk(s, \"core_l4_iclk\")); s->gptimer[7] = omap_gp_timer_init(omap_l4ta(s->l4, 27), s->irq[0][OMAP_INT_24XX_GPTIMER8], omap_findclk(s, \"core_gpt8_clk\"), omap_findclk(s, \"core_l4_iclk\")); s->gptimer[8] = omap_gp_timer_init(omap_l4ta(s->l4, 28), s->irq[0][OMAP_INT_24XX_GPTIMER9], omap_findclk(s, \"core_gpt9_clk\"), omap_findclk(s, \"core_l4_iclk\")); s->gptimer[9] = omap_gp_timer_init(omap_l4ta(s->l4, 29), s->irq[0][OMAP_INT_24XX_GPTIMER10], omap_findclk(s, \"core_gpt10_clk\"), omap_findclk(s, \"core_l4_iclk\")); s->gptimer[10] = omap_gp_timer_init(omap_l4ta(s->l4, 30), s->irq[0][OMAP_INT_24XX_GPTIMER11], omap_findclk(s, \"core_gpt11_clk\"), omap_findclk(s, \"core_l4_iclk\")); s->gptimer[11] = omap_gp_timer_init(omap_l4ta(s->l4, 31), s->irq[0][OMAP_INT_24XX_GPTIMER12], omap_findclk(s, \"core_gpt12_clk\"), omap_findclk(s, \"core_l4_iclk\")); omap_tap_init(omap_l4ta(s->l4, 2), s); s->synctimer = omap_synctimer_init(omap_l4tao(s->l4, 2), s, omap_findclk(s, \"clk32-kHz\"), omap_findclk(s, \"core_l4_iclk\")); s->i2c[0] = omap2_i2c_init(omap_l4tao(s->l4, 5), s->irq[0][OMAP_INT_24XX_I2C1_IRQ], &s->drq[OMAP24XX_DMA_I2C1_TX], omap_findclk(s, \"i2c1.fclk\"), omap_findclk(s, \"i2c1.iclk\")); s->i2c[1] = omap2_i2c_init(omap_l4tao(s->l4, 6), s->irq[0][OMAP_INT_24XX_I2C2_IRQ], &s->drq[OMAP24XX_DMA_I2C2_TX], omap_findclk(s, \"i2c2.fclk\"), omap_findclk(s, \"i2c2.iclk\")); s->gpio = qdev_create(NULL, \"omap2-gpio\"); qdev_prop_set_int32(s->gpio, \"mpu_model\", s->mpu_model); qdev_prop_set_ptr(s->gpio, \"iclk\", omap_findclk(s, \"gpio_iclk\")); qdev_prop_set_ptr(s->gpio, \"fclk0\", omap_findclk(s, \"gpio1_dbclk\")); qdev_prop_set_ptr(s->gpio, \"fclk1\", omap_findclk(s, \"gpio2_dbclk\")); qdev_prop_set_ptr(s->gpio, \"fclk2\", omap_findclk(s, \"gpio3_dbclk\")); qdev_prop_set_ptr(s->gpio, \"fclk3\", omap_findclk(s, \"gpio4_dbclk\")); if (s->mpu_model == omap2430) { qdev_prop_set_ptr(s->gpio, \"fclk4\", omap_findclk(s, \"gpio5_dbclk\")); } qdev_init_nofail(s->gpio); busdev = sysbus_from_qdev(s->gpio); sysbus_connect_irq(busdev, 0, s->irq[0][OMAP_INT_24XX_GPIO_BANK1]); sysbus_connect_irq(busdev, 3, s->irq[0][OMAP_INT_24XX_GPIO_BANK2]); sysbus_connect_irq(busdev, 6, s->irq[0][OMAP_INT_24XX_GPIO_BANK3]); sysbus_connect_irq(busdev, 9, s->irq[0][OMAP_INT_24XX_GPIO_BANK4]); ta = omap_l4ta(s->l4, 3); sysbus_mmio_map(busdev, 0, omap_l4_region_base(ta, 1)); sysbus_mmio_map(busdev, 1, 
omap_l4_region_base(ta, 0)); sysbus_mmio_map(busdev, 2, omap_l4_region_base(ta, 2)); sysbus_mmio_map(busdev, 3, omap_l4_region_base(ta, 4)); sysbus_mmio_map(busdev, 4, omap_l4_region_base(ta, 5)); s->sdrc = omap_sdrc_init(0x68009000); s->gpmc = omap_gpmc_init(s, 0x6800a000, s->irq[0][OMAP_INT_24XX_GPMC_IRQ], s->drq[OMAP24XX_DMA_GPMC]); dinfo = drive_get(IF_SD, 0, 0); if (!dinfo) { fprintf(stderr, \"qemu: missing SecureDigital device\\n\"); exit(1); } s->mmc = omap2_mmc_init(omap_l4tao(s->l4, 9), dinfo->bdrv, s->irq[0][OMAP_INT_24XX_MMC_IRQ], &s->drq[OMAP24XX_DMA_MMC1_TX], omap_findclk(s, \"mmc_fclk\"), omap_findclk(s, \"mmc_iclk\")); s->mcspi[0] = omap_mcspi_init(omap_l4ta(s->l4, 35), 4, s->irq[0][OMAP_INT_24XX_MCSPI1_IRQ], &s->drq[OMAP24XX_DMA_SPI1_TX0], omap_findclk(s, \"spi1_fclk\"), omap_findclk(s, \"spi1_iclk\")); s->mcspi[1] = omap_mcspi_init(omap_l4ta(s->l4, 36), 2, s->irq[0][OMAP_INT_24XX_MCSPI2_IRQ], &s->drq[OMAP24XX_DMA_SPI2_TX0], omap_findclk(s, \"spi2_fclk\"), omap_findclk(s, \"spi2_iclk\")); s->dss = omap_dss_init(omap_l4ta(s->l4, 10), 0x68000800, /* XXX wire M_IRQ_25, D_L2_IRQ_30 and I_IRQ_13 together */ s->irq[0][OMAP_INT_24XX_DSS_IRQ], s->drq[OMAP24XX_DMA_DSS], omap_findclk(s, \"dss_clk1\"), omap_findclk(s, \"dss_clk2\"), omap_findclk(s, \"dss_54m_clk\"), omap_findclk(s, \"dss_l3_iclk\"), omap_findclk(s, \"dss_l4_iclk\")); omap_sti_init(omap_l4ta(s->l4, 18), 0x54000000, s->irq[0][OMAP_INT_24XX_STI], omap_findclk(s, \"emul_ck\"), serial_hds[0] && serial_hds[1] && serial_hds[2] ? serial_hds[3] : NULL); s->eac = omap_eac_init(omap_l4ta(s->l4, 32), s->irq[0][OMAP_INT_24XX_EAC_IRQ], /* Ten consecutive lines */ &s->drq[OMAP24XX_DMA_EAC_AC_RD], omap_findclk(s, \"func_96m_clk\"), omap_findclk(s, \"core_l4_iclk\")); /* All register mappings (includin those not currenlty implemented): * SystemControlMod 48000000 - 48000fff * SystemControlL4 48001000 - 48001fff * 32kHz Timer Mod 48004000 - 48004fff * 32kHz Timer L4 48005000 - 48005fff * PRCM ModA 48008000 - 480087ff * PRCM ModB 48008800 - 48008fff * PRCM L4 48009000 - 48009fff * TEST-BCM Mod 48012000 - 48012fff * TEST-BCM L4 48013000 - 48013fff * TEST-TAP Mod 48014000 - 48014fff * TEST-TAP L4 48015000 - 48015fff * GPIO1 Mod 48018000 - 48018fff * GPIO Top 48019000 - 48019fff * GPIO2 Mod 4801a000 - 4801afff * GPIO L4 4801b000 - 4801bfff * GPIO3 Mod 4801c000 - 4801cfff * GPIO4 Mod 4801e000 - 4801efff * WDTIMER1 Mod 48020000 - 48010fff * WDTIMER Top 48021000 - 48011fff * WDTIMER2 Mod 48022000 - 48012fff * WDTIMER L4 48023000 - 48013fff * WDTIMER3 Mod 48024000 - 48014fff * WDTIMER3 L4 48025000 - 48015fff * WDTIMER4 Mod 48026000 - 48016fff * WDTIMER4 L4 48027000 - 48017fff * GPTIMER1 Mod 48028000 - 48018fff * GPTIMER1 L4 48029000 - 48019fff * GPTIMER2 Mod 4802a000 - 4801afff * GPTIMER2 L4 4802b000 - 4801bfff * L4-Config AP 48040000 - 480407ff * L4-Config IP 48040800 - 48040fff * L4-Config LA 48041000 - 48041fff * ARM11ETB Mod 48048000 - 48049fff * ARM11ETB L4 4804a000 - 4804afff * DISPLAY Top 48050000 - 480503ff * DISPLAY DISPC 48050400 - 480507ff * DISPLAY RFBI 48050800 - 48050bff * DISPLAY VENC 48050c00 - 48050fff * DISPLAY L4 48051000 - 48051fff * CAMERA Top 48052000 - 480523ff * CAMERA core 48052400 - 480527ff * CAMERA DMA 48052800 - 48052bff * CAMERA MMU 48052c00 - 48052fff * CAMERA L4 48053000 - 48053fff * SDMA Mod 48056000 - 48056fff * SDMA L4 48057000 - 48057fff * SSI Top 48058000 - 48058fff * SSI GDD 48059000 - 48059fff * SSI Port1 4805a000 - 4805afff * SSI Port2 4805b000 - 4805bfff * SSI L4 4805c000 - 4805cfff * USB Mod 4805e000 - 
480fefff * USB L4 4805f000 - 480fffff * WIN_TRACER1 Mod 48060000 - 48060fff * WIN_TRACER1 L4 48061000 - 48061fff * WIN_TRACER2 Mod 48062000 - 48062fff * WIN_TRACER2 L4 48063000 - 48063fff * WIN_TRACER3 Mod 48064000 - 48064fff * WIN_TRACER3 L4 48065000 - 48065fff * WIN_TRACER4 Top 48066000 - 480660ff * WIN_TRACER4 ETT 48066100 - 480661ff * WIN_TRACER4 WT 48066200 - 480662ff * WIN_TRACER4 L4 48067000 - 48067fff * XTI Mod 48068000 - 48068fff * XTI L4 48069000 - 48069fff * UART1 Mod 4806a000 - 4806afff * UART1 L4 4806b000 - 4806bfff * UART2 Mod 4806c000 - 4806cfff * UART2 L4 4806d000 - 4806dfff * UART3 Mod 4806e000 - 4806efff * UART3 L4 4806f000 - 4806ffff * I2C1 Mod 48070000 - 48070fff * I2C1 L4 48071000 - 48071fff * I2C2 Mod 48072000 - 48072fff * I2C2 L4 48073000 - 48073fff * McBSP1 Mod 48074000 - 48074fff * McBSP1 L4 48075000 - 48075fff * McBSP2 Mod 48076000 - 48076fff * McBSP2 L4 48077000 - 48077fff * GPTIMER3 Mod 48078000 - 48078fff * GPTIMER3 L4 48079000 - 48079fff * GPTIMER4 Mod 4807a000 - 4807afff * GPTIMER4 L4 4807b000 - 4807bfff * GPTIMER5 Mod 4807c000 - 4807cfff * GPTIMER5 L4 4807d000 - 4807dfff * GPTIMER6 Mod 4807e000 - 4807efff * GPTIMER6 L4 4807f000 - 4807ffff * GPTIMER7 Mod 48080000 - 48080fff * GPTIMER7 L4 48081000 - 48081fff * GPTIMER8 Mod 48082000 - 48082fff * GPTIMER8 L4 48083000 - 48083fff * GPTIMER9 Mod 48084000 - 48084fff * GPTIMER9 L4 48085000 - 48085fff * GPTIMER10 Mod 48086000 - 48086fff * GPTIMER10 L4 48087000 - 48087fff * GPTIMER11 Mod 48088000 - 48088fff * GPTIMER11 L4 48089000 - 48089fff * GPTIMER12 Mod 4808a000 - 4808afff * GPTIMER12 L4 4808b000 - 4808bfff * EAC Mod 48090000 - 48090fff * EAC L4 48091000 - 48091fff * FAC Mod 48092000 - 48092fff * FAC L4 48093000 - 48093fff * MAILBOX Mod 48094000 - 48094fff * MAILBOX L4 48095000 - 48095fff * SPI1 Mod 48098000 - 48098fff * SPI1 L4 48099000 - 48099fff * SPI2 Mod 4809a000 - 4809afff * SPI2 L4 4809b000 - 4809bfff * MMC/SDIO Mod 4809c000 - 4809cfff * MMC/SDIO L4 4809d000 - 4809dfff * MS_PRO Mod 4809e000 - 4809efff * MS_PRO L4 4809f000 - 4809ffff * RNG Mod 480a0000 - 480a0fff * RNG L4 480a1000 - 480a1fff * DES3DES Mod 480a2000 - 480a2fff * DES3DES L4 480a3000 - 480a3fff * SHA1MD5 Mod 480a4000 - 480a4fff * SHA1MD5 L4 480a5000 - 480a5fff * AES Mod 480a6000 - 480a6fff * AES L4 480a7000 - 480a7fff * PKA Mod 480a8000 - 480a9fff * PKA L4 480aa000 - 480aafff * MG Mod 480b0000 - 480b0fff * MG L4 480b1000 - 480b1fff * HDQ/1-wire Mod 480b2000 - 480b2fff * HDQ/1-wire L4 480b3000 - 480b3fff * MPU interrupt 480fe000 - 480fefff * STI channel base 54000000 - 5400ffff * IVA RAM 5c000000 - 5c01ffff * IVA ROM 5c020000 - 5c027fff * IMG_BUF_A 5c040000 - 5c040fff * IMG_BUF_B 5c042000 - 5c042fff * VLCDS 5c048000 - 5c0487ff * IMX_COEF 5c049000 - 5c04afff * IMX_CMD 5c051000 - 5c051fff * VLCDQ 5c053000 - 5c0533ff * VLCDH 5c054000 - 5c054fff * SEQ_CMD 5c055000 - 5c055fff * IMX_REG 5c056000 - 5c0560ff * VLCD_REG 5c056100 - 5c0561ff * SEQ_REG 5c056200 - 5c0562ff * IMG_BUF_REG 5c056300 - 5c0563ff * SEQIRQ_REG 5c056400 - 5c0564ff * OCP_REG 5c060000 - 5c060fff * SYSC_REG 5c070000 - 5c070fff * MMU_REG 5d000000 - 5d000fff * sDMA R 68000400 - 680005ff * sDMA W 68000600 - 680007ff * Display Control 68000800 - 680009ff * DSP subsystem 68000a00 - 68000bff * MPU subsystem 68000c00 - 68000dff * IVA subsystem 68001000 - 680011ff * USB 68001200 - 680013ff * Camera 68001400 - 680015ff * VLYNQ (firewall) 68001800 - 68001bff * VLYNQ 68001e00 - 68001fff * SSI 68002000 - 680021ff * L4 68002400 - 680025ff * DSP (firewall) 68002800 - 68002bff * DSP subsystem 68002e00 - 
68002fff * IVA (firewall) 68003000 - 680033ff * IVA 68003600 - 680037ff * GFX 68003a00 - 68003bff * CMDWR emulation 68003c00 - 68003dff * SMS 68004000 - 680041ff * OCM 68004200 - 680043ff * GPMC 68004400 - 680045ff * RAM (firewall) 68005000 - 680053ff * RAM (err login) 68005400 - 680057ff * ROM (firewall) 68005800 - 68005bff * ROM (err login) 68005c00 - 68005fff * GPMC (firewall) 68006000 - 680063ff * GPMC (err login) 68006400 - 680067ff * SMS (err login) 68006c00 - 68006fff * SMS registers 68008000 - 68008fff * SDRC registers 68009000 - 68009fff * GPMC registers 6800a000 6800afff */ qemu_register_reset(omap2_mpu_reset, s); return s; }"} {"target": 0, "idx": 5672, "func": "static void tcg_out_qemu_ld_slow_path (TCGContext *s, TCGLabelQemuLdst *label) { int s_bits; int ir; int opc = label->opc; int mem_index = label->mem_index; int data_reg = label->datalo_reg; int data_reg2 = label->datahi_reg; int addr_reg = label->addrlo_reg; uint8_t *raddr = label->raddr; uint8_t **label_ptr = &label->label_ptr[0]; s_bits = opc & 3; /* resolve label address */ reloc_pc14 (label_ptr[0], (tcg_target_long) s->code_ptr); /* slow path */ ir = 3; tcg_out_mov (s, TCG_TYPE_I32, ir++, TCG_AREG0); #if TARGET_LONG_BITS == 32 tcg_out_mov (s, TCG_TYPE_I32, ir++, addr_reg); #else #ifdef TCG_TARGET_CALL_ALIGN_ARGS ir |= 1; #endif tcg_out_mov (s, TCG_TYPE_I32, ir++, label->addrhi_reg); tcg_out_mov (s, TCG_TYPE_I32, ir++, addr_reg); #endif tcg_out_movi (s, TCG_TYPE_I32, ir, mem_index); tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1); tcg_out32 (s, B | 8); tcg_out32 (s, (tcg_target_long) raddr); switch (opc) { case 0|4: tcg_out32 (s, EXTSB | RA (data_reg) | RS (3)); break; case 1|4: tcg_out32 (s, EXTSH | RA (data_reg) | RS (3)); break; case 0: case 1: case 2: if (data_reg != 3) tcg_out_mov (s, TCG_TYPE_I32, data_reg, 3); break; case 3: if (data_reg == 3) { if (data_reg2 == 4) { tcg_out_mov (s, TCG_TYPE_I32, 0, 4); tcg_out_mov (s, TCG_TYPE_I32, 4, 3); tcg_out_mov (s, TCG_TYPE_I32, 3, 0); } else { tcg_out_mov (s, TCG_TYPE_I32, data_reg2, 3); tcg_out_mov (s, TCG_TYPE_I32, 3, 4); } } else { if (data_reg != 4) tcg_out_mov (s, TCG_TYPE_I32, data_reg, 4); if (data_reg2 != 3) tcg_out_mov (s, TCG_TYPE_I32, data_reg2, 3); } break; } /* Jump to the code corresponding to next IR of qemu_st */ tcg_out_b (s, 0, (tcg_target_long) raddr); }"} {"target": 0, "idx": 5673, "func": "static int nbd_negotiate_handle_info(NBDClient *client, uint32_t length, uint32_t opt, uint16_t myflags, Error **errp) { int rc; char name[NBD_MAX_NAME_SIZE + 1]; NBDExport *exp; uint16_t requests; uint16_t request; uint32_t namelen; bool sendname = false; bool blocksize = false; uint32_t sizes[3]; char buf[sizeof(uint64_t) + sizeof(uint16_t)]; const char *msg; /* Client sends: 4 bytes: L, name length (can be 0) L bytes: export name 2 bytes: N, number of requests (can be 0) N * 2 bytes: N requests */ if (length < sizeof(namelen) + sizeof(requests)) { msg = \"overall request too short\"; goto invalid; } if (nbd_read(client->ioc, &namelen, sizeof(namelen), errp) < 0) { return -EIO; } be32_to_cpus(&namelen); length -= sizeof(namelen); if (namelen > length - sizeof(requests) || (length - namelen) % 2) { msg = \"name length is incorrect\"; goto invalid; } if (nbd_read(client->ioc, name, namelen, errp) < 0) { return -EIO; } name[namelen] = '\\0'; length -= namelen; trace_nbd_negotiate_handle_export_name_request(name); if (nbd_read(client->ioc, &requests, sizeof(requests), errp) < 0) { return -EIO; } be16_to_cpus(&requests); length -= sizeof(requests); 
trace_nbd_negotiate_handle_info_requests(requests); if (requests != length / sizeof(request)) { msg = \"incorrect number of requests for overall length\"; goto invalid; } while (requests--) { if (nbd_read(client->ioc, &request, sizeof(request), errp) < 0) { return -EIO; } be16_to_cpus(&request); length -= sizeof(request); trace_nbd_negotiate_handle_info_request(request, nbd_info_lookup(request)); /* We care about NBD_INFO_NAME and NBD_INFO_BLOCK_SIZE; * everything else is either a request we don't know or * something we send regardless of request */ switch (request) { case NBD_INFO_NAME: sendname = true; break; case NBD_INFO_BLOCK_SIZE: blocksize = true; break; } } exp = nbd_export_find(name); if (!exp) { return nbd_negotiate_send_rep_err(client->ioc, NBD_REP_ERR_UNKNOWN, opt, errp, \"export '%s' not present\", name); } /* Don't bother sending NBD_INFO_NAME unless client requested it */ if (sendname) { rc = nbd_negotiate_send_info(client, opt, NBD_INFO_NAME, length, name, errp); if (rc < 0) { return rc; } } /* Send NBD_INFO_DESCRIPTION only if available, regardless of * client request */ if (exp->description) { size_t len = strlen(exp->description); rc = nbd_negotiate_send_info(client, opt, NBD_INFO_DESCRIPTION, len, exp->description, errp); if (rc < 0) { return rc; } } /* Send NBD_INFO_BLOCK_SIZE always, but tweak the minimum size * according to whether the client requested it, and according to * whether this is OPT_INFO or OPT_GO. */ /* minimum - 1 for back-compat, or 512 if client is new enough. * TODO: consult blk_bs(blk)->bl.request_alignment? */ sizes[0] = (opt == NBD_OPT_INFO || blocksize) ? BDRV_SECTOR_SIZE : 1; /* preferred - Hard-code to 4096 for now. * TODO: is blk_bs(blk)->bl.opt_transfer appropriate? */ sizes[1] = 4096; /* maximum - At most 32M, but smaller as appropriate. */ sizes[2] = MIN(blk_get_max_transfer(exp->blk), NBD_MAX_BUFFER_SIZE); trace_nbd_negotiate_handle_info_block_size(sizes[0], sizes[1], sizes[2]); cpu_to_be32s(&sizes[0]); cpu_to_be32s(&sizes[1]); cpu_to_be32s(&sizes[2]); rc = nbd_negotiate_send_info(client, opt, NBD_INFO_BLOCK_SIZE, sizeof(sizes), sizes, errp); if (rc < 0) { return rc; } /* Send NBD_INFO_EXPORT always */ trace_nbd_negotiate_new_style_size_flags(exp->size, exp->nbdflags | myflags); stq_be_p(buf, exp->size); stw_be_p(buf + 8, exp->nbdflags | myflags); rc = nbd_negotiate_send_info(client, opt, NBD_INFO_EXPORT, sizeof(buf), buf, errp); if (rc < 0) { return rc; } /* If the client is just asking for NBD_OPT_INFO, but forgot to * request block sizes, return an error. * TODO: consult blk_bs(blk)->request_align, and only error if it * is not 1? 
*/ if (opt == NBD_OPT_INFO && !blocksize) { return nbd_negotiate_send_rep_err(client->ioc, NBD_REP_ERR_BLOCK_SIZE_REQD, opt, errp, \"request NBD_INFO_BLOCK_SIZE to \" \"use this export\"); } /* Final reply */ rc = nbd_negotiate_send_rep(client->ioc, NBD_REP_ACK, opt, errp); if (rc < 0) { return rc; } if (opt == NBD_OPT_GO) { client->exp = exp; QTAILQ_INSERT_TAIL(&client->exp->clients, client, next); nbd_export_get(client->exp); rc = 1; } return rc; invalid: if (nbd_drop(client->ioc, length, errp) < 0) { return -EIO; } return nbd_negotiate_send_rep_err(client->ioc, NBD_REP_ERR_INVALID, opt, errp, \"%s\", msg); }"} {"target": 0, "idx": 5676, "func": "static int scsi_req_stream_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf) { switch (buf[0]) { /* stream commands */ case ERASE_12: case ERASE_16: cmd->xfer = 0; break; case READ_6: case READ_REVERSE: case RECOVER_BUFFERED_DATA: case WRITE_6: cmd->len = 6; cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16); if (buf[1] & 0x01) { /* fixed */ cmd->xfer *= dev->blocksize; } break; case READ_16: case READ_REVERSE_16: case VERIFY_16: case WRITE_16: cmd->len = 16; cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16); if (buf[1] & 0x01) { /* fixed */ cmd->xfer *= dev->blocksize; } break; case REWIND: case LOAD_UNLOAD: cmd->len = 6; cmd->xfer = 0; break; case SPACE_16: cmd->xfer = buf[13] | (buf[12] << 8); break; case READ_POSITION: cmd->xfer = buf[8] | (buf[7] << 8); break; case FORMAT_UNIT: cmd->xfer = buf[4] | (buf[3] << 8); break; /* generic commands */ default: return scsi_req_length(cmd, dev, buf); } return 0; }"} {"target": 0, "idx": 5694, "func": "static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond) { int arith_opt, move_opt; /* TODO: optimize more condition codes. */ /* * If the flags are live, we've gotta look into the bits of CCS. * Otherwise, if we just did an arithmetic operation we try to * evaluate the condition code faster. * * When this function is done, T0 should be non-zero if the condition * code is true. */ arith_opt = arith_cc(dc) && !dc->flags_uptodate; move_opt = (dc->cc_op == CC_OP_MOVE) && dc->flags_uptodate; switch (cond) { case CC_EQ: if (arith_opt || move_opt) { /* If cc_result is zero, T0 should be non-zero otherwise T0 should be zero. 
*/ int l1; l1 = gen_new_label(); tcg_gen_movi_tl(cc, 0); tcg_gen_brcondi_tl(TCG_COND_NE, cc_result, 0, l1); tcg_gen_movi_tl(cc, 1); gen_set_label(l1); } else { cris_evaluate_flags(dc); tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], Z_FLAG); } break; case CC_NE: if (arith_opt || move_opt) tcg_gen_mov_tl(cc, cc_result); else { cris_evaluate_flags(dc); tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], Z_FLAG); tcg_gen_andi_tl(cc, cc, Z_FLAG); } break; case CC_CS: cris_evaluate_flags(dc); tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG); break; case CC_CC: cris_evaluate_flags(dc); tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG); tcg_gen_andi_tl(cc, cc, C_FLAG); break; case CC_VS: cris_evaluate_flags(dc); tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG); break; case CC_VC: cris_evaluate_flags(dc); tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], V_FLAG); tcg_gen_andi_tl(cc, cc, V_FLAG); break; case CC_PL: if (arith_opt || move_opt) { int bits = 31; if (dc->cc_size == 1) bits = 7; else if (dc->cc_size == 2) bits = 15; tcg_gen_shri_tl(cc, cc_result, bits); tcg_gen_xori_tl(cc, cc, 1); } else { cris_evaluate_flags(dc); tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], N_FLAG); tcg_gen_andi_tl(cc, cc, N_FLAG); } break; case CC_MI: if (arith_opt || move_opt) { int bits = 31; if (dc->cc_size == 1) bits = 7; else if (dc->cc_size == 2) bits = 15; tcg_gen_shri_tl(cc, cc_result, 31); } else { cris_evaluate_flags(dc); tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], N_FLAG); } break; case CC_LS: cris_evaluate_flags(dc); tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG | Z_FLAG); break; case CC_HI: cris_evaluate_flags(dc); { TCGv tmp; tmp = tcg_temp_new(TCG_TYPE_TL); tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS], C_FLAG | Z_FLAG); /* Overlay the C flag on top of the Z. */ tcg_gen_shli_tl(cc, tmp, 2); tcg_gen_and_tl(cc, tmp, cc); tcg_gen_andi_tl(cc, cc, Z_FLAG); tcg_temp_free(tmp); } break; case CC_GE: cris_evaluate_flags(dc); /* Overlay the V flag on top of the N. */ tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2); tcg_gen_xor_tl(cc, cpu_PR[PR_CCS], cc); tcg_gen_andi_tl(cc, cc, N_FLAG); tcg_gen_xori_tl(cc, cc, N_FLAG); break; case CC_LT: cris_evaluate_flags(dc); /* Overlay the V flag on top of the N. */ tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2); tcg_gen_xor_tl(cc, cpu_PR[PR_CCS], cc); tcg_gen_andi_tl(cc, cc, N_FLAG); break; case CC_GT: cris_evaluate_flags(dc); { TCGv n, z; n = tcg_temp_new(TCG_TYPE_TL); z = tcg_temp_new(TCG_TYPE_TL); /* To avoid a shift we overlay everything on the V flag. */ tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2); tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1); /* invert Z. */ tcg_gen_xori_tl(z, z, 2); tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]); tcg_gen_xori_tl(n, n, 2); tcg_gen_and_tl(cc, z, n); tcg_gen_andi_tl(cc, cc, 2); tcg_temp_free(n); tcg_temp_free(z); } break; case CC_LE: cris_evaluate_flags(dc); { TCGv n, z; n = tcg_temp_new(TCG_TYPE_TL); z = tcg_temp_new(TCG_TYPE_TL); /* To avoid a shift we overlay everything on the V flag. 
*/ tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2); tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1); tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]); tcg_gen_or_tl(cc, z, n); tcg_gen_andi_tl(cc, cc, 2); tcg_temp_free(n); tcg_temp_free(z); } break; case CC_P: cris_evaluate_flags(dc); tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG); break; case CC_A: tcg_gen_movi_tl(cc, 1); break; default: BUG(); break; }; }"} {"target": 0, "idx": 5707, "func": "void do_smm_enter(X86CPU *cpu) { CPUX86State *env = &cpu->env; CPUState *cs = CPU(cpu); target_ulong sm_state; SegmentCache *dt; int i, offset; qemu_log_mask(CPU_LOG_INT, \"SMM: enter\\n\"); log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP); env->hflags |= HF_SMM_MASK; if (env->hflags2 & HF2_NMI_MASK) { env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK; } else { env->hflags2 |= HF2_NMI_MASK; } cpu_smm_update(env); sm_state = env->smbase + 0x8000; #ifdef TARGET_X86_64 for (i = 0; i < 6; i++) { dt = &env->segs[i]; offset = 0x7e00 + i * 16; x86_stw_phys(cs, sm_state + offset, dt->selector); x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff); x86_stl_phys(cs, sm_state + offset + 4, dt->limit); x86_stq_phys(cs, sm_state + offset + 8, dt->base); } x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base); x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit); x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector); x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base); x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit); x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff); x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base); x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit); x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector); x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base); x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit); x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff); x86_stq_phys(cs, sm_state + 0x7ed0, env->efer); x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]); x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]); x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]); x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]); x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]); x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]); x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]); x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]); for (i = 8; i < 16; i++) { x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]); } x86_stq_phys(cs, sm_state + 0x7f78, env->eip); x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env)); x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]); x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]); x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]); x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]); x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]); x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID); x86_stl_phys(cs, sm_state + 0x7f00, env->smbase); #else x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]); x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]); x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env)); x86_stl_phys(cs, sm_state + 0x7ff0, env->eip); x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]); x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]); x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]); x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]); x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]); x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]); x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]); x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]); x86_stl_phys(cs, 
sm_state + 0x7fcc, env->dr[6]); x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]); x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector); x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base); x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit); x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff); x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector); x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base); x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit); x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff); x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base); x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit); x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base); x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit); for (i = 0; i < 6; i++) { dt = &env->segs[i]; if (i < 3) { offset = 0x7f84 + i * 12; } else { offset = 0x7f2c + (i - 3) * 12; } x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector); x86_stl_phys(cs, sm_state + offset + 8, dt->base); x86_stl_phys(cs, sm_state + offset + 4, dt->limit); x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff); } x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]); x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID); x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase); #endif /* init SMM cpu state */ #ifdef TARGET_X86_64 cpu_load_efer(env, 0); #endif cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); env->eip = 0x00008000; cpu_x86_update_cr0(env, env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK)); cpu_x86_update_cr4(env, 0); env->dr[7] = 0x00000400; cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase, 0xffffffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); }"} {"target": 1, "idx": 5718, "func": "PCIDevice *pci_nic_init(NICInfo *nd, const char *default_model, const char *default_devaddr) { const char *devaddr = nd->devaddr ? nd->devaddr : default_devaddr; PCIDevice *pci_dev; DeviceState *dev; int i; i = qemu_check_nic_model_list(nd, pci_nic_models, default_model); pci_dev = pci_create(pci_nic_names[i], devaddr); dev = &pci_dev->qdev; if (nd->id) dev->id = qemu_strdup(nd->id); dev->nd = nd; qdev_init(dev); nd->private = dev; return pci_dev; }"} {"target": 1, "idx": 5731, "func": "void ff_mlp_init_x86(DSPContext* c, AVCodecContext *avctx) { #if HAVE_7REGS && HAVE_TEN_OPERANDS c->mlp_filter_channel = mlp_filter_channel_x86; #endif }"} {"target": 1, "idx": 5754, "func": "static av_cold int roq_encode_init(AVCodecContext *avctx) { RoqContext *enc = avctx->priv_data; av_lfg_init(&enc->randctx, 1); enc->framesSinceKeyframe = 0; if ((avctx->width & 0xf) || (avctx->height & 0xf)) { av_log(avctx, AV_LOG_ERROR, \"Dimensions must be divisible by 16\\n\"); return AVERROR(EINVAL); if (avctx->width > 65535 || avctx->height > 65535) { av_log(avctx, AV_LOG_ERROR, \"Dimensions are max %d\\n\", enc->quake3_compat ? 
32768 : 65535); return AVERROR(EINVAL); if (((avctx->width)&(avctx->width-1))||((avctx->height)&(avctx->height-1))) av_log(avctx, AV_LOG_ERROR, \"Warning: dimensions not power of two, this is not supported by quake\\n\"); enc->width = avctx->width; enc->height = avctx->height; enc->framesSinceKeyframe = 0; enc->first_frame = 1; enc->last_frame = av_frame_alloc(); enc->current_frame = av_frame_alloc(); if (!enc->last_frame || !enc->current_frame) { enc->tmpData = av_malloc(sizeof(RoqTempdata)); enc->this_motion4 = av_mallocz_array((enc->width*enc->height/16), sizeof(motion_vect)); enc->last_motion4 = av_malloc_array ((enc->width*enc->height/16), sizeof(motion_vect)); enc->this_motion8 = av_mallocz_array((enc->width*enc->height/64), sizeof(motion_vect)); enc->last_motion8 = av_malloc_array ((enc->width*enc->height/64), sizeof(motion_vect)); return 0;"} {"target": 1, "idx": 5755, "func": "void ff_h264_flush_change(H264Context *h) { int i, j; h->next_outputed_poc = INT_MIN; h->prev_interlaced_frame = 1; idr(h); h->poc.prev_frame_num = -1; if (h->cur_pic_ptr) { h->cur_pic_ptr->reference = 0; for (j=i=0; h->delayed_pic[i]; i++) if (h->delayed_pic[i] != h->cur_pic_ptr) h->delayed_pic[j++] = h->delayed_pic[i]; h->delayed_pic[j] = NULL; } ff_h264_unref_picture(h, &h->last_pic_for_ec); h->first_field = 0; ff_h264_sei_uninit(&h->sei); h->recovery_frame = -1; h->frame_recovered = 0; h->current_slice = 0; h->mmco_reset = 1; for (i = 0; i < h->nb_slice_ctx; i++) h->slice_ctx[i].list_count = 0; }"} {"target": 0, "idx": 5762, "func": "static int av_set_number2(void *obj, const char *name, double num, int den, int64_t intnum, const AVOption **o_out) { const AVOption *o = av_opt_find(obj, name, NULL, 0, 0); void *dst; if (o_out) *o_out= o; if (!o || o->offset<=0) return AVERROR_OPTION_NOT_FOUND; if (o->max*den < num*intnum || o->min*den > num*intnum) { av_log(obj, AV_LOG_ERROR, \"Value %lf for parameter '%s' out of range\\n\", num, name); return AVERROR(ERANGE); } dst= ((uint8_t*)obj) + o->offset; switch (o->type) { case FF_OPT_TYPE_FLAGS: case FF_OPT_TYPE_INT: *(int *)dst= llrint(num/den)*intnum; break; case FF_OPT_TYPE_INT64: *(int64_t *)dst= llrint(num/den)*intnum; break; case FF_OPT_TYPE_FLOAT: *(float *)dst= num*intnum/den; break; case FF_OPT_TYPE_DOUBLE:*(double *)dst= num*intnum/den; break; case FF_OPT_TYPE_RATIONAL: if ((int)num == num) *(AVRational*)dst= (AVRational){num*intnum, den}; else *(AVRational*)dst= av_d2q(num*intnum/den, 1<<24); break; default: return AVERROR(EINVAL); } return 0; }"} {"target": 1, "idx": 5767, "func": "static void qxl_realize_common(PCIQXLDevice *qxl, Error **errp) { uint8_t* config = qxl->pci.config; uint32_t pci_device_rev; uint32_t io_size; qxl->mode = QXL_MODE_UNDEFINED; qxl->generation = 1; qxl->num_memslots = NUM_MEMSLOTS; qemu_mutex_init(&qxl->track_lock); qemu_mutex_init(&qxl->async_lock); qxl->current_async = QXL_UNDEFINED_IO; qxl->guest_bug = 0; switch (qxl->revision) { case 1: /* spice 0.4 -- qxl-1 */ pci_device_rev = QXL_REVISION_STABLE_V04; io_size = 8; break; case 2: /* spice 0.6 -- qxl-2 */ pci_device_rev = QXL_REVISION_STABLE_V06; io_size = 16; break; case 3: /* qxl-3 */ pci_device_rev = QXL_REVISION_STABLE_V10; io_size = 32; /* PCI region size must be pow2 */ break; case 4: /* qxl-4 */ pci_device_rev = QXL_REVISION_STABLE_V12; io_size = pow2ceil(QXL_IO_RANGE_SIZE); break; default: error_setg(errp, \"Invalid revision %d for qxl device (max %d)\", qxl->revision, QXL_DEFAULT_REVISION); return; } pci_set_byte(&config[PCI_REVISION_ID], pci_device_rev); 
pci_set_byte(&config[PCI_INTERRUPT_PIN], 1); qxl->rom_size = qxl_rom_size(); memory_region_init_ram(&qxl->rom_bar, OBJECT(qxl), \"qxl.vrom\", qxl->rom_size, &error_abort); vmstate_register_ram(&qxl->rom_bar, &qxl->pci.qdev); init_qxl_rom(qxl); init_qxl_ram(qxl); qxl->guest_surfaces.cmds = g_new0(QXLPHYSICAL, qxl->ssd.num_surfaces); memory_region_init_ram(&qxl->vram_bar, OBJECT(qxl), \"qxl.vram\", qxl->vram_size, &error_abort); vmstate_register_ram(&qxl->vram_bar, &qxl->pci.qdev); memory_region_init_alias(&qxl->vram32_bar, OBJECT(qxl), \"qxl.vram32\", &qxl->vram_bar, 0, qxl->vram32_size); memory_region_init_io(&qxl->io_bar, OBJECT(qxl), &qxl_io_ops, qxl, \"qxl-ioports\", io_size); if (qxl->id == 0) { vga_dirty_log_start(&qxl->vga); } memory_region_set_flush_coalesced(&qxl->io_bar); pci_register_bar(&qxl->pci, QXL_IO_RANGE_INDEX, PCI_BASE_ADDRESS_SPACE_IO, &qxl->io_bar); pci_register_bar(&qxl->pci, QXL_ROM_RANGE_INDEX, PCI_BASE_ADDRESS_SPACE_MEMORY, &qxl->rom_bar); pci_register_bar(&qxl->pci, QXL_RAM_RANGE_INDEX, PCI_BASE_ADDRESS_SPACE_MEMORY, &qxl->vga.vram); pci_register_bar(&qxl->pci, QXL_VRAM_RANGE_INDEX, PCI_BASE_ADDRESS_SPACE_MEMORY, &qxl->vram32_bar); if (qxl->vram32_size < qxl->vram_size) { /* * Make the 64bit vram bar show up only in case it is * configured to be larger than the 32bit vram bar. */ pci_register_bar(&qxl->pci, QXL_VRAM64_RANGE_INDEX, PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 | PCI_BASE_ADDRESS_MEM_PREFETCH, &qxl->vram_bar); } /* print pci bar details */ dprint(qxl, 1, \"ram/%s: %d MB [region 0]\\n\", qxl->id == 0 ? \"pri\" : \"sec\", qxl->vga.vram_size / (1024*1024)); dprint(qxl, 1, \"vram/32: %d MB [region 1]\\n\", qxl->vram32_size / (1024*1024)); dprint(qxl, 1, \"vram/64: %d MB %s\\n\", qxl->vram_size / (1024*1024), qxl->vram32_size < qxl->vram_size ? 
\"[region 4]\" : \"[unmapped]\"); qxl->ssd.qxl.base.sif = &qxl_interface.base; if (qemu_spice_add_display_interface(&qxl->ssd.qxl, qxl->vga.con) != 0) { error_setg(errp, \"qxl interface %d.%d not supported by spice-server\", SPICE_INTERFACE_QXL_MAJOR, SPICE_INTERFACE_QXL_MINOR); return; } qemu_add_vm_change_state_handler(qxl_vm_change_state_handler, qxl); qxl->update_irq = qemu_bh_new(qxl_update_irq_bh, qxl); qxl_reset_state(qxl); qxl->update_area_bh = qemu_bh_new(qxl_render_update_area_bh, qxl); qxl->ssd.cursor_bh = qemu_bh_new(qemu_spice_cursor_refresh_bh, &qxl->ssd); }"} {"target": 1, "idx": 5781, "func": "static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, DCTELEM *block) { int i; int dc = block[0]; const uint8_t *cm; dc = ( 3 * dc + 1) >> 1; dc = (17 * dc + 64) >> 7; cm = ff_cropTbl + MAX_NEG_CROP + dc; for(i = 0; i < 4; i++){ dest[0] = cm[dest[0]]; dest[1] = cm[dest[1]]; dest[2] = cm[dest[2]]; dest[3] = cm[dest[3]]; dest[4] = cm[dest[4]]; dest[5] = cm[dest[5]]; dest[6] = cm[dest[6]]; dest[7] = cm[dest[7]]; dest += linesize; } }"} {"target": 1, "idx": 5789, "func": "static always_inline uint8_t vc1_mspel_filter(const uint8_t *src, int stride, int mode, int r) { switch(mode){ case 0: //no shift return src[0]; case 1: // 1/4 shift return (-4*src[-stride] + 53*src[0] + 18*src[stride] - 3*src[stride*2] + 32 - r) >> 6; case 2: // 1/2 shift return (-src[-stride] + 9*src[0] + 9*src[stride] - src[stride*2] + 8 - r) >> 4; case 3: // 3/4 shift return (-3*src[-stride] + 18*src[0] + 53*src[stride] - 4*src[stride*2] + 32 - r) >> 6; } return 0; //should not occur }"} {"target": 1, "idx": 5797, "func": "static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len) { OpenPICState *opp = opaque; uint32_t retval = -1; int idx; DPRINTF(\"%s: addr %08x\\n\", __func__, addr); if (addr & 0xF) { goto out; } idx = (addr >> 6) & 0x3; if (addr == 0x0) { /* TIFR (TFRR) */ retval = opp->tifr; goto out; } switch (addr & 0x30) { case 0x00: /* TICC (GTCCR) */ retval = opp->timers[idx].ticc; break; case 0x10: /* TIBC (GTBCR) */ retval = opp->timers[idx].tibc; break; case 0x20: /* TIPV (TIPV) */ retval = read_IRQreg_ipvp(opp, opp->irq_tim0 + idx); break; case 0x30: /* TIDE (TIDR) */ retval = read_IRQreg_ide(opp, opp->irq_tim0 + idx); break; } out: DPRINTF(\"%s: => %08x\\n\", __func__, retval); return retval; }"} {"target": 0, "idx": 5810, "func": "static void gen_exception_insn(DisasContext *s, int offset, int excp) { gen_a64_set_pc_im(s->pc - offset); gen_exception(excp); s->is_jmp = DISAS_JUMP; }"} {"target": 0, "idx": 5820, "func": "static QDict *qmp_dispatch_check_obj(const QObject *request, Error **errp) { const QDictEntry *ent; const char *arg_name; const QObject *arg_obj; bool has_exec_key = false; QDict *dict = NULL; if (qobject_type(request) != QTYPE_QDICT) { error_setg(errp, QERR_QMP_BAD_INPUT_OBJECT, \"request is not a dictionary\"); return NULL; } dict = qobject_to_qdict(request); for (ent = qdict_first(dict); ent; ent = qdict_next(dict, ent)) { arg_name = qdict_entry_key(ent); arg_obj = qdict_entry_value(ent); if (!strcmp(arg_name, \"execute\")) { if (qobject_type(arg_obj) != QTYPE_QSTRING) { error_setg(errp, QERR_QMP_BAD_INPUT_OBJECT_MEMBER, \"execute\", \"string\"); return NULL; } has_exec_key = true; } else if (strcmp(arg_name, \"arguments\")) { error_setg(errp, QERR_QMP_EXTRA_MEMBER, arg_name); return NULL; } } if (!has_exec_key) { error_setg(errp, QERR_QMP_BAD_INPUT_OBJECT, \"execute\"); return NULL; } return dict; }"} {"target": 0, "idx": 5826, "func": "static void 
sha1_transform(uint32_t state[5], const uint8_t buffer[64]) { uint32_t block[80]; unsigned int i, a, b, c, d, e; a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; #if CONFIG_SMALL for (i = 0; i < 80; i++) { int t; if (i < 16) t = AV_RB32(buffer + 4 * i); else t = rol(block[i-3] ^ block[i-8] ^ block[i-14] ^ block[i-16], 1); block[i] = t; t += e + rol(a, 5); if (i < 40) { if (i < 20) t += ((b&(c^d))^d) + 0x5A827999; else t += ( b^c ^d) + 0x6ED9EBA1; } else { if (i < 60) t += (((b|c)&d)|(b&c)) + 0x8F1BBCDC; else t += ( b^c ^d) + 0xCA62C1D6; } e = d; d = c; c = rol(b, 30); b = a; a = t; } #else for (i = 0; i < 15; i += 5) { R0(a, b, c, d, e, 0 + i); R0(e, a, b, c, d, 1 + i); R0(d, e, a, b, c, 2 + i); R0(c, d, e, a, b, 3 + i); R0(b, c, d, e, a, 4 + i); } R0(a, b, c, d, e, 15); R1(e, a, b, c, d, 16); R1(d, e, a, b, c, 17); R1(c, d, e, a, b, 18); R1(b, c, d, e, a, 19); for (i = 20; i < 40; i += 5) { R2(a, b, c, d, e, 0 + i); R2(e, a, b, c, d, 1 + i); R2(d, e, a, b, c, 2 + i); R2(c, d, e, a, b, 3 + i); R2(b, c, d, e, a, 4 + i); } for (; i < 60; i += 5) { R3(a, b, c, d, e, 0 + i); R3(e, a, b, c, d, 1 + i); R3(d, e, a, b, c, 2 + i); R3(c, d, e, a, b, 3 + i); R3(b, c, d, e, a, 4 + i); } for (; i < 80; i += 5) { R4(a, b, c, d, e, 0 + i); R4(e, a, b, c, d, 1 + i); R4(d, e, a, b, c, 2 + i); R4(c, d, e, a, b, 3 + i); R4(b, c, d, e, a, 4 + i); } #endif state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; }"} {"target": 1, "idx": 5834, "func": "static int get_int64(QEMUFile *f, void *pv, size_t size) { int64_t *v = pv; qemu_get_sbe64s(f, v); return 0; }"} {"target": 1, "idx": 5837, "func": "static void acpi_build_update(void *build_opaque, uint32_t offset) { AcpiBuildState *build_state = build_opaque; AcpiBuildTables tables; /* No state to update or already patched? Nothing to do. */ if (!build_state || build_state->patched) { return; } build_state->patched = 1; acpi_build_tables_init(&tables); acpi_build(build_state->guest_info, &tables); assert(acpi_data_len(tables.table_data) == build_state->table_size); memcpy(build_state->table_ram, tables.table_data->data, build_state->table_size); acpi_build_tables_cleanup(&tables, true); }"} {"target": 1, "idx": 5845, "func": "static av_cold int amr_nb_encode_init(AVCodecContext *avctx) { AMRContext *s = avctx->priv_data; if (avctx->sample_rate != 8000) { av_log(avctx, AV_LOG_ERROR, \"Only 8000Hz sample rate supported\\n\"); return AVERROR(ENOSYS); } if (avctx->channels != 1) { av_log(avctx, AV_LOG_ERROR, \"Only mono supported\\n\"); return AVERROR(ENOSYS); } avctx->frame_size = 160; avctx->coded_frame = avcodec_alloc_frame(); s->enc_state = Encoder_Interface_init(s->enc_dtx); if (!s->enc_state) { av_log(avctx, AV_LOG_ERROR, \"Encoder_Interface_init error\\n\"); return -1; } s->enc_mode = get_bitrate_mode(avctx->bit_rate, avctx); s->enc_bitrate = avctx->bit_rate; return 0; }"} {"target": 1, "idx": 5846, "func": "void help(void) { printf(\"dct-test [-i] []\\n\" \"test-number 0 -> test with random matrixes\\n\" \" 1 -> test with random sparse matrixes\\n\" \" 2 -> do 3. 
test from mpeg4 std\\n\" \"-i test IDCT implementations\\n\" \"-4 test IDCT248 implementations\\n\"); exit(1); }"} {"target": 1, "idx": 5855, "func": "PPC_OP(slw) { if (T1 & 0x20) { T0 = 0; } else { T0 = T0 << T1; } RETURN(); }"} {"target": 1, "idx": 5864, "func": "static int get_num(ByteIOContext *pb, int *len) { int n, n1; n = get_be16(pb); (*len)-=2; // n &= 0x7FFF; if (n >= 0x4000) { return n - 0x4000; } else { n1 = get_be16(pb); (*len)-=2; return (n << 16) | n1; } }"} {"target": 0, "idx": 5875, "func": "CPUState *ppc440ep_init(MemoryRegion *address_space_mem, ram_addr_t *ram_size, PCIBus **pcip, const unsigned int pci_irq_nrs[4], int do_init, const char *cpu_model) { MemoryRegion *ram_memories = g_malloc(PPC440EP_SDRAM_NR_BANKS * sizeof(*ram_memories)); target_phys_addr_t ram_bases[PPC440EP_SDRAM_NR_BANKS]; target_phys_addr_t ram_sizes[PPC440EP_SDRAM_NR_BANKS]; CPUState *env; qemu_irq *pic; qemu_irq *irqs; qemu_irq *pci_irqs; if (cpu_model == NULL) { cpu_model = \"440-Xilinx\"; // XXX: should be 440EP } env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"Unable to initialize CPU!\\n\"); exit(1); } ppc_booke_timers_init(env, 400000000, 0); ppc_dcr_init(env, NULL, NULL); /* interrupt controller */ irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB); irqs[PPCUIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]; irqs[PPCUIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]; pic = ppcuic_init(env, irqs, 0x0C0, 0, 1); /* SDRAM controller */ memset(ram_bases, 0, sizeof(ram_bases)); memset(ram_sizes, 0, sizeof(ram_sizes)); *ram_size = ppc4xx_sdram_adjust(*ram_size, PPC440EP_SDRAM_NR_BANKS, ram_memories, ram_bases, ram_sizes, ppc440ep_sdram_bank_sizes); /* XXX 440EP's ECC interrupts are on UIC1, but we've only created UIC0. 
*/ ppc4xx_sdram_init(env, pic[14], PPC440EP_SDRAM_NR_BANKS, ram_memories, ram_bases, ram_sizes, do_init); /* PCI */ pci_irqs = g_malloc(sizeof(qemu_irq) * 4); pci_irqs[0] = pic[pci_irq_nrs[0]]; pci_irqs[1] = pic[pci_irq_nrs[1]]; pci_irqs[2] = pic[pci_irq_nrs[2]]; pci_irqs[3] = pic[pci_irq_nrs[3]]; *pcip = ppc4xx_pci_init(env, pci_irqs, PPC440EP_PCI_CONFIG, PPC440EP_PCI_INTACK, PPC440EP_PCI_SPECIAL, PPC440EP_PCI_REGS); if (!*pcip) printf(\"couldn't create PCI controller!\\n\"); isa_mmio_init(PPC440EP_PCI_IO, PPC440EP_PCI_IOLEN); if (serial_hds[0] != NULL) { serial_mm_init(address_space_mem, 0xef600300, 0, pic[0], PPC_SERIAL_MM_BAUDBASE, serial_hds[0], DEVICE_BIG_ENDIAN); } if (serial_hds[1] != NULL) { serial_mm_init(address_space_mem, 0xef600400, 0, pic[1], PPC_SERIAL_MM_BAUDBASE, serial_hds[1], DEVICE_BIG_ENDIAN); } return env; }"} {"target": 0, "idx": 5899, "func": "static int piix4_initfn(PCIDevice *dev) { PIIX4State *d = DO_UPCAST(PIIX4State, dev, dev); uint8_t *pci_conf; isa_bus_new(&d->dev.qdev); pci_conf = d->dev.config; pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL); pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_INTEL_82371AB_0); // 82371AB/EB/MB PIIX4 PCI-to-ISA bridge pci_config_set_class(pci_conf, PCI_CLASS_BRIDGE_ISA); piix4_dev = &d->dev; qemu_register_reset(piix4_reset, d); return 0; }"} {"target": 1, "idx": 5905, "func": "static void FUNCC(ff_h264_add_pixels8)(uint8_t *_dst, int16_t *_src, int stride) { int i; pixel *dst = (pixel *) _dst; dctcoef *src = (dctcoef *) _src; stride /= sizeof(pixel); for (i = 0; i < 8; i++) { dst[0] += src[0]; dst[1] += src[1]; dst[2] += src[2]; dst[3] += src[3]; dst[4] += src[4]; dst[5] += src[5]; dst[6] += src[6]; dst[7] += src[7]; dst += stride; src += 8; } memset(_src, 0, sizeof(dctcoef) * 64); }"} {"target": 1, "idx": 5908, "func": "static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src) { int i; int ret = av_frame_ref(dst, src); if (ret < 0) return ret; if (!h->sps.crop) return 0; for (i = 0; i < 3; i++) { int hshift = (i > 0) ? h->chroma_x_shift : 0; int vshift = (i > 0) ? h->chroma_y_shift : 0; int off = ((h->sps.crop_left >> hshift) << h->pixel_shift) + (h->sps.crop_top >> vshift) * dst->linesize[i]; dst->data[i] += off; } return 0; }"} {"target": 1, "idx": 5920, "func": "void throttle_account(ThrottleState *ts, bool is_write, uint64_t size) { double units = 1.0; /* if cfg.op_size is defined and smaller than size we compute unit count */ if (ts->cfg.op_size && size > ts->cfg.op_size) { units = (double) size / ts->cfg.op_size; } ts->cfg.buckets[THROTTLE_BPS_TOTAL].level += size; ts->cfg.buckets[THROTTLE_OPS_TOTAL].level += units; if (is_write) { ts->cfg.buckets[THROTTLE_BPS_WRITE].level += size; ts->cfg.buckets[THROTTLE_OPS_WRITE].level += units; } else { ts->cfg.buckets[THROTTLE_BPS_READ].level += size; ts->cfg.buckets[THROTTLE_OPS_READ].level += units; } }"} {"target": 1, "idx": 5931, "func": "void bdrv_remove_aio_context_notifier(BlockDriverState *bs, void (*attached_aio_context)(AioContext *, void *), void (*detach_aio_context)(void *), void *opaque) { BdrvAioNotifier *ban, *ban_next; QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) { if (ban->attached_aio_context == attached_aio_context && ban->detach_aio_context == detach_aio_context && ban->opaque == opaque) { QLIST_REMOVE(ban, list); g_free(ban); return; } } abort(); }"} {"target": 1, "idx": 5962, "func": "static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow) { TCGv_i64 tmp; TCGv tmp2; /* Load value and extend to 64 bits. 
*/ tmp = tcg_temp_new_i64(); tmp2 = load_reg(s, rlow); tcg_gen_extu_i32_i64(tmp, tmp2); dead_tmp(tmp2); tcg_gen_add_i64(val, val, tmp); tcg_temp_free_i64(tmp); }"} {"target": 1, "idx": 5963, "func": "static void test_bmdma_simple_rw(void) { QPCIDevice *dev; void *bmdma_base, *ide_base; uint8_t status; uint8_t *buf; uint8_t *cmpbuf; size_t len = 512; uintptr_t guest_buf = guest_alloc(guest_malloc, len); PrdtEntry prdt[] = { { .addr = cpu_to_le32(guest_buf), .size = cpu_to_le32(len | PRDT_EOT), }, }; dev = get_pci_device(&bmdma_base, &ide_base); buf = g_malloc(len); cmpbuf = g_malloc(len); /* Write 0x55 pattern to sector 0 */ memset(buf, 0x55, len); memwrite(guest_buf, buf, len); status = send_dma_request(CMD_WRITE_DMA, 0, 1, prdt, ARRAY_SIZE(prdt), NULL); g_assert_cmphex(status, ==, BM_STS_INTR); assert_bit_clear(qpci_io_readb(dev, ide_base + reg_status), DF | ERR); /* Write 0xaa pattern to sector 1 */ memset(buf, 0xaa, len); memwrite(guest_buf, buf, len); status = send_dma_request(CMD_WRITE_DMA, 1, 1, prdt, ARRAY_SIZE(prdt), NULL); g_assert_cmphex(status, ==, BM_STS_INTR); assert_bit_clear(qpci_io_readb(dev, ide_base + reg_status), DF | ERR); /* Read and verify 0x55 pattern in sector 0 */ memset(cmpbuf, 0x55, len); status = send_dma_request(CMD_READ_DMA, 0, 1, prdt, ARRAY_SIZE(prdt), NULL); g_assert_cmphex(status, ==, BM_STS_INTR); assert_bit_clear(qpci_io_readb(dev, ide_base + reg_status), DF | ERR); memread(guest_buf, buf, len); g_assert(memcmp(buf, cmpbuf, len) == 0); /* Read and verify 0xaa pattern in sector 1 */ memset(cmpbuf, 0xaa, len); status = send_dma_request(CMD_READ_DMA, 1, 1, prdt, ARRAY_SIZE(prdt), NULL); g_assert_cmphex(status, ==, BM_STS_INTR); assert_bit_clear(qpci_io_readb(dev, ide_base + reg_status), DF | ERR); memread(guest_buf, buf, len); g_assert(memcmp(buf, cmpbuf, len) == 0); g_free(buf); g_free(cmpbuf); }"} {"target": 1, "idx": 5965, "func": "static int idcin_read_packet(AVFormatContext *s, AVPacket *pkt) { int ret; unsigned int command; unsigned int chunk_size; IdcinDemuxContext *idcin = s->priv_data; AVIOContext *pb = s->pb; int i; int palette_scale; unsigned char r, g, b; unsigned char palette_buffer[768]; uint32_t palette[256]; if (s->pb->eof_reached) return AVERROR(EIO); if (idcin->next_chunk_is_video) { command = avio_rl32(pb); if (command == 2) { return AVERROR(EIO); } else if (command == 1) { /* trigger a palette change */ if (avio_read(pb, palette_buffer, 768) != 768) return AVERROR(EIO); /* scale the palette as necessary */ palette_scale = 2; for (i = 0; i < 768; i++) if (palette_buffer[i] > 63) { palette_scale = 0; break; for (i = 0; i < 256; i++) { r = palette_buffer[i * 3 ] << palette_scale; g = palette_buffer[i * 3 + 1] << palette_scale; b = palette_buffer[i * 3 + 2] << palette_scale; palette[i] = (r << 16) | (g << 8) | (b); chunk_size = avio_rl32(pb); /* skip the number of decoded bytes (always equal to width * height) */ avio_skip(pb, 4); chunk_size -= 4; ret= av_get_packet(pb, pkt, chunk_size); if (ret < 0) return ret; if (command == 1) { uint8_t *pal; pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE); if (ret < 0) return ret; memcpy(pal, palette, AVPALETTE_SIZE); pkt->flags |= AV_PKT_FLAG_KEY; pkt->stream_index = idcin->video_stream_index; pkt->duration = 1; } else { /* send out the audio chunk */ if (idcin->current_audio_chunk) chunk_size = idcin->audio_chunk_size2; else chunk_size = idcin->audio_chunk_size1; ret= av_get_packet(pb, pkt, chunk_size); if (ret < 0) return ret; pkt->stream_index = idcin->audio_stream_index; 
pkt->duration = chunk_size / idcin->block_align; idcin->current_audio_chunk ^= 1; if (idcin->audio_present) idcin->next_chunk_is_video ^= 1; return ret;"} {"target": 1, "idx": 5981, "func": "static inline void RENAME(yuv2rgb555_2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *ubuf0, const uint16_t *ubuf1, const uint16_t *vbuf0, const uint16_t *vbuf1, const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) { x86_reg uv_off = c->uv_off << 1; //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( __asm__ volatile( \"mov %%\"REG_b\", \"ESP_OFFSET\"(%5) \\n\\t\" \"mov %4, %%\"REG_b\" \\n\\t\" \"push %%\"REG_BP\" \\n\\t\" YSCALEYUV2RGB(%%REGBP, %5, %6) \"pxor %%mm7, %%mm7 \\n\\t\" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP \"paddusb \"BLUE_DITHER\"(%5), %%mm2 \\n\\t\" \"paddusb \"GREEN_DITHER\"(%5), %%mm4 \\n\\t\" \"paddusb \"RED_DITHER\"(%5), %%mm5 \\n\\t\" #endif WRITERGB15(%%REGb, 8280(%5), %%REGBP) \"pop %%\"REG_BP\" \\n\\t\" \"mov \"ESP_OFFSET\"(%5), %%\"REG_b\" \\n\\t\" :: \"c\" (buf0), \"d\" (buf1), \"S\" (ubuf0), \"D\" (ubuf1), \"m\" (dest), \"a\" (&c->redDither), \"m\"(uv_off) ); }"} {"target": 0, "idx": 6007, "func": "static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s) { VirtIOBlockReq *req = virtio_blk_alloc_request(s); if (!virtqueue_pop(s->vq, &req->elem)) { virtio_blk_free_request(req); return NULL; } return req; }"} {"target": 0, "idx": 6008, "func": "static int output_packet(InputStream *ist, const AVPacket *pkt) { int ret = 0, i; int got_output; AVPacket avpkt; if (!ist->saw_first_ts) { ist->dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0; ist->pts = 0; if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) { ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q); ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong } ist->saw_first_ts = 1; } if (ist->next_dts == AV_NOPTS_VALUE) ist->next_dts = ist->dts; if (ist->next_pts == AV_NOPTS_VALUE) ist->next_pts = ist->pts; if (pkt == NULL) { /* EOF handling */ av_init_packet(&avpkt); avpkt.data = NULL; avpkt.size = 0; goto handle_eof; } else { avpkt = *pkt; } if (pkt->dts != AV_NOPTS_VALUE) { ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed) ist->next_pts = ist->pts = ist->dts; } // while we have more to decode or while the decoder did output something on EOF while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) { int duration; handle_eof: ist->pts = ist->next_pts; ist->dts = ist->next_dts; if (avpkt.size && avpkt.size != pkt->size) { av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING, \"Multiple frames in a packet from stream %d\\n\", pkt->stream_index); ist->showed_multi_packet_warning = 1; } switch (ist->st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ret = decode_audio (ist, &avpkt, &got_output); break; case AVMEDIA_TYPE_VIDEO: ret = decode_video (ist, &avpkt, &got_output); if (avpkt.duration) { duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q); } else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) { int ticks= ist->st->parser ? 
ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame; duration = ((int64_t)AV_TIME_BASE * ist->st->codec->time_base.num * ticks) / ist->st->codec->time_base.den; } else duration = 0; if(ist->dts != AV_NOPTS_VALUE && duration) { ist->next_dts += duration; }else ist->next_dts = AV_NOPTS_VALUE; if (got_output) ist->next_pts += duration; //FIXME the duration is not correct in some cases break; case AVMEDIA_TYPE_SUBTITLE: ret = transcode_subtitles(ist, &avpkt, &got_output); break; default: return -1; } if (ret < 0) return ret; avpkt.dts= avpkt.pts= AV_NOPTS_VALUE; // touch data and size only if not EOF if (pkt) { if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO) ret = avpkt.size; avpkt.data += ret; avpkt.size -= ret; } if (!got_output) { continue; } } /* handle stream copy */ if (!ist->decoding_needed) { rate_emu_sleep(ist); ist->dts = ist->next_dts; switch (ist->st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) / ist->st->codec->sample_rate; break; case AVMEDIA_TYPE_VIDEO: if (pkt->duration) { ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); } else if(ist->st->codec->time_base.num != 0) { int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame; ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->time_base.num * ticks) / ist->st->codec->time_base.den; } break; } ist->pts = ist->dts; ist->next_pts = ist->next_dts; } for (i = 0; pkt && i < nb_output_streams; i++) { OutputStream *ost = output_streams[i]; if (!check_output_constraints(ist, ost) || ost->encoding_needed) continue; do_streamcopy(ist, ost, pkt); } return 0; }"} {"target": 1, "idx": 6025, "func": "void qemu_system_reset(void) { QEMUResetEntry *re, *nre; /* reset all devices */ QTAILQ_FOREACH_SAFE(re, &reset_handlers, entry, nre) { re->func(re->opaque); } }"} {"target": 0, "idx": 6032, "func": "static void sd_reset(SDState *sd, BlockDriverState *bdrv) { uint64_t size; uint64_t sect; if (bdrv) { bdrv_get_geometry(bdrv, §); } else { sect = 0; } size = sect << 9; sect = (size >> (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT)) + 1; sd->state = sd_idle_state; sd->rca = 0x0000; sd_set_ocr(sd); sd_set_scr(sd); sd_set_cid(sd); sd_set_csd(sd, size); sd_set_cardstatus(sd); sd_set_sdstatus(sd); sd->bdrv = bdrv; if (sd->wp_groups) g_free(sd->wp_groups); sd->wp_switch = bdrv ? bdrv_is_read_only(bdrv) : 0; sd->wp_groups = (int *) g_malloc0(sizeof(int) * sect); memset(sd->function_group, 0, sizeof(int) * 6); sd->erase_start = 0; sd->erase_end = 0; sd->size = size; sd->blk_len = 0x200; sd->pwd_len = 0; sd->expecting_acmd = 0; }"} {"target": 0, "idx": 6033, "func": "static void phys_page_set(AddressSpaceDispatch *d, target_phys_addr_t index, target_phys_addr_t nb, uint16_t leaf) { /* Wildly overreserve - it doesn't matter much. 
*/ phys_map_node_reserve(3 * P_L2_LEVELS); phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); }"} {"target": 0, "idx": 6043, "func": "static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) { int c; switch (opc) { case INDEX_op_exit_tb: if (check_fit_tl(args[0], 13)) { tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); tcg_out_movi_imm13(s, TCG_REG_O0, args[0]); } else { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0] & ~0x3ff); tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, args[0] & 0x3ff, ARITH_OR); } break; case INDEX_op_goto_tb: if (s->tb_jmp_offset) { /* direct jump method */ uint32_t old_insn = *(uint32_t *)s->code_ptr; s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf; /* Make sure to preserve links during retranslation. */ tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1))); } else { /* indirect jump method */ tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + args[0])); tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL); } tcg_out_nop(s); s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf; break; case INDEX_op_call: if (const_args[0]) { tcg_out_calli(s, args[0]); } else { tcg_out_arithi(s, TCG_REG_O7, args[0], 0, JMPL); } /* delay slot */ tcg_out_nop(s); break; case INDEX_op_br: tcg_out_bpcc(s, COND_A, BPCC_PT, args[0]); tcg_out_nop(s); break; case INDEX_op_movi_i32: tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]); break; #define OP_32_64(x) \\ glue(glue(case INDEX_op_, x), _i32): \\ glue(glue(case INDEX_op_, x), _i64) OP_32_64(ld8u): tcg_out_ldst(s, args[0], args[1], args[2], LDUB); break; OP_32_64(ld8s): tcg_out_ldst(s, args[0], args[1], args[2], LDSB); break; OP_32_64(ld16u): tcg_out_ldst(s, args[0], args[1], args[2], LDUH); break; OP_32_64(ld16s): tcg_out_ldst(s, args[0], args[1], args[2], LDSH); break; case INDEX_op_ld_i32: case INDEX_op_ld32u_i64: tcg_out_ldst(s, args[0], args[1], args[2], LDUW); break; OP_32_64(st8): tcg_out_ldst(s, args[0], args[1], args[2], STB); break; OP_32_64(st16): tcg_out_ldst(s, args[0], args[1], args[2], STH); break; case INDEX_op_st_i32: case INDEX_op_st32_i64: tcg_out_ldst(s, args[0], args[1], args[2], STW); break; OP_32_64(add): c = ARITH_ADD; goto gen_arith; OP_32_64(sub): c = ARITH_SUB; goto gen_arith; OP_32_64(and): c = ARITH_AND; goto gen_arith; OP_32_64(andc): c = ARITH_ANDN; goto gen_arith; OP_32_64(or): c = ARITH_OR; goto gen_arith; OP_32_64(orc): c = ARITH_ORN; goto gen_arith; OP_32_64(xor): c = ARITH_XOR; goto gen_arith; case INDEX_op_shl_i32: c = SHIFT_SLL; do_shift32: /* Limit immediate shift count lest we create an illegal insn. 
*/ tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c); break; case INDEX_op_shr_i32: c = SHIFT_SRL; goto do_shift32; case INDEX_op_sar_i32: c = SHIFT_SRA; goto do_shift32; case INDEX_op_mul_i32: c = ARITH_UMUL; goto gen_arith; OP_32_64(neg): c = ARITH_SUB; goto gen_arith1; OP_32_64(not): c = ARITH_ORN; goto gen_arith1; case INDEX_op_div_i32: tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0); break; case INDEX_op_divu_i32: tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1); break; case INDEX_op_brcond_i32: tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1], args[3]); break; case INDEX_op_setcond_i32: tcg_out_setcond_i32(s, args[3], args[0], args[1], args[2], const_args[2]); break; case INDEX_op_movcond_i32: tcg_out_movcond_i32(s, args[5], args[0], args[1], args[2], const_args[2], args[3], const_args[3]); break; case INDEX_op_add2_i32: tcg_out_addsub2(s, args[0], args[1], args[2], args[3], args[4], const_args[4], args[5], const_args[5], ARITH_ADDCC, ARITH_ADDX); break; case INDEX_op_sub2_i32: tcg_out_addsub2(s, args[0], args[1], args[2], args[3], args[4], const_args[4], args[5], const_args[5], ARITH_SUBCC, ARITH_SUBX); break; case INDEX_op_mulu2_i32: c = ARITH_UMUL; goto do_mul2; case INDEX_op_muls2_i32: c = ARITH_SMUL; do_mul2: /* The 32-bit multiply insns produce a full 64-bit result. If the destination register can hold it, we can avoid the slower RDY. */ tcg_out_arithc(s, args[0], args[2], args[3], const_args[3], c); if (SPARC64 || args[0] <= TCG_REG_O7) { tcg_out_arithi(s, args[1], args[0], 32, SHIFT_SRLX); } else { tcg_out_rdy(s, args[1]); } break; case INDEX_op_qemu_ld_i32: tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], false); break; case INDEX_op_qemu_ld_i64: tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], true); break; case INDEX_op_qemu_st_i32: tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]); break; case INDEX_op_qemu_st_i64: tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]); break; case INDEX_op_movi_i64: tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]); break; case INDEX_op_ld32s_i64: tcg_out_ldst(s, args[0], args[1], args[2], LDSW); break; case INDEX_op_ld_i64: tcg_out_ldst(s, args[0], args[1], args[2], LDX); break; case INDEX_op_st_i64: tcg_out_ldst(s, args[0], args[1], args[2], STX); break; case INDEX_op_shl_i64: c = SHIFT_SLLX; do_shift64: /* Limit immediate shift count lest we create an illegal insn. 
*/ tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c); break; case INDEX_op_shr_i64: c = SHIFT_SRLX; goto do_shift64; case INDEX_op_sar_i64: c = SHIFT_SRAX; goto do_shift64; case INDEX_op_mul_i64: c = ARITH_MULX; goto gen_arith; case INDEX_op_div_i64: c = ARITH_SDIVX; goto gen_arith; case INDEX_op_divu_i64: c = ARITH_UDIVX; goto gen_arith; case INDEX_op_ext32s_i64: tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA); break; case INDEX_op_ext32u_i64: tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL); break; case INDEX_op_trunc_shr_i32: if (args[2] == 0) { tcg_out_mov(s, TCG_TYPE_I32, args[0], args[1]); } else { tcg_out_arithi(s, args[0], args[1], args[2], SHIFT_SRLX); } break; case INDEX_op_brcond_i64: tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1], args[3]); break; case INDEX_op_setcond_i64: tcg_out_setcond_i64(s, args[3], args[0], args[1], args[2], const_args[2]); break; case INDEX_op_movcond_i64: tcg_out_movcond_i64(s, args[5], args[0], args[1], args[2], const_args[2], args[3], const_args[3]); break; gen_arith: tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c); break; gen_arith1: tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c); break; default: fprintf(stderr, \"unknown opcode 0x%x\\n\", opc); tcg_abort(); } }"} {"target": 0, "idx": 6063, "func": "static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { EightSvxContext *esc = avctx->priv_data; int n, out_data_size; int ch, ret; uint8_t *src; /* decode and interleave the first packet */ if (!esc->samples && avpkt) { int packet_size = avpkt->size; if (packet_size % avctx->channels) { av_log(avctx, AV_LOG_WARNING, \"Packet with odd size, ignoring last byte\\n\"); if (packet_size < avctx->channels) return packet_size; packet_size -= packet_size % avctx->channels; } esc->samples_size = !esc->table ? packet_size : avctx->channels + (packet_size-avctx->channels) * 2; if (!(esc->samples = av_malloc(esc->samples_size))) return AVERROR(ENOMEM); /* decompress */ if (esc->table) { const uint8_t *buf = avpkt->data; uint8_t *dst; int buf_size = avpkt->size; int i, n = esc->samples_size; if (buf_size < 2) { av_log(avctx, AV_LOG_ERROR, \"packet size is too small\\n\"); return AVERROR(EINVAL); } /* the uncompressed starting value is contained in the first byte */ dst = esc->samples; for (i = 0; i < avctx->channels; i++) { *(dst++) = buf[0]+128; delta_decode(dst, buf + 1, buf_size / avctx->channels - 1, (buf[0]+128)&0xFF, esc->table); buf += buf_size / avctx->channels; dst += n / avctx->channels - 1; } } else { raw_decode(esc->samples, avpkt->data, esc->samples_size); } } /* get output buffer */ av_assert1(!(esc->samples_size % avctx->channels || esc->samples_idx % avctx->channels)); esc->frame.nb_samples = FFMIN(MAX_FRAME_SIZE, esc->samples_size - esc->samples_idx) / avctx->channels; if ((ret = avctx->get_buffer(avctx, &esc->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; } *got_frame_ptr = 1; *(AVFrame *)data = esc->frame; out_data_size = esc->frame.nb_samples; for (ch = 0; chchannels; ch++) { src = esc->samples + esc->samples_idx / avctx->channels + ch * esc->samples_size / avctx->channels; memcpy(esc->frame.data[ch], src, out_data_size); } out_data_size *= avctx->channels; esc->samples_idx += out_data_size; return esc->table ? 
(avctx->frame_number == 0)*2 + out_data_size / 2 : out_data_size; }"} {"target": 0, "idx": 6069, "func": "static int virtio_serial_load(QEMUFile *f, void *opaque, int version_id) { VirtIOSerial *s = opaque; VirtIOSerialPort *port; uint32_t max_nr_ports, nr_active_ports, ports_map; unsigned int i; int ret; if (version_id > 3) { return -EINVAL; } /* The virtio device */ ret = virtio_load(&s->vdev, f); if (ret) { return ret; } if (version_id < 2) { return 0; } /* The config space */ qemu_get_be16s(f, &s->config.cols); qemu_get_be16s(f, &s->config.rows); qemu_get_be32s(f, &max_nr_ports); tswap32s(&max_nr_ports); if (max_nr_ports > tswap32(s->config.max_nr_ports)) { /* Source could have had more ports than us. Fail migration. */ return -EINVAL; } for (i = 0; i < (max_nr_ports + 31) / 32; i++) { qemu_get_be32s(f, &ports_map); if (ports_map != s->ports_map[i]) { /* * Ports active on source and destination don't * match. Fail migration. */ return -EINVAL; } } qemu_get_be32s(f, &nr_active_ports); /* Items in struct VirtIOSerialPort */ for (i = 0; i < nr_active_ports; i++) { uint32_t id; bool host_connected; id = qemu_get_be32(f); port = find_port_by_id(s, id); if (!port) { return -EINVAL; } port->guest_connected = qemu_get_byte(f); host_connected = qemu_get_byte(f); if (host_connected != port->host_connected) { /* * We have to let the guest know of the host connection * status change */ send_control_event(port, VIRTIO_CONSOLE_PORT_OPEN, port->host_connected); } if (version_id > 2) { uint32_t elem_popped; qemu_get_be32s(f, &elem_popped); if (elem_popped) { qemu_get_be32s(f, &port->iov_idx); qemu_get_be64s(f, &port->iov_offset); qemu_get_buffer(f, (unsigned char *)&port->elem, sizeof(port->elem)); virtqueue_map_sg(port->elem.in_sg, port->elem.in_addr, port->elem.in_num, 1); virtqueue_map_sg(port->elem.out_sg, port->elem.out_addr, port->elem.out_num, 1); /* * Port was throttled on source machine. Let's * unthrottle it here so data starts flowing again. 
*/ virtio_serial_throttle_port(port, false); } } } return 0; }"} {"target": 0, "idx": 6092, "func": "static CharDriverState *qemu_chr_open_spice_port(const char *id, ChardevBackend *backend, ChardevReturn *ret, Error **errp) { const char *name = backend->spiceport->fqdn; CharDriverState *chr; SpiceCharDriver *s; if (name == NULL) { fprintf(stderr, \"spice-qemu-char: missing name parameter\\n\"); return NULL; } chr = chr_open(\"port\", spice_port_set_fe_open); s = chr->opaque; s->sin.portname = g_strdup(name); return chr; }"} {"target": 0, "idx": 6098, "func": "void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint) { #if defined(TARGET_HAS_ICE) QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry); breakpoint_invalidate(cpu, breakpoint->pc); g_free(breakpoint); #endif }"} {"target": 1, "idx": 6103, "func": "static int flv_write_trailer(AVFormatContext *s) { int64_t file_size; AVIOContext *pb = s->pb; FLVContext *flv = s->priv_data; int i; /* Add EOS tag */ for (i = 0; i < s->nb_streams; i++) { AVCodecContext *enc = s->streams[i]->codec; FLVStreamContext *sc = s->streams[i]->priv_data; if (enc->codec_type == AVMEDIA_TYPE_VIDEO && enc->codec_id == AV_CODEC_ID_H264) put_avc_eos_tag(pb, sc->last_ts); } file_size = avio_tell(pb); /* update information */ avio_seek(pb, flv->duration_offset, SEEK_SET); put_amf_double(pb, flv->duration / (double)1000); avio_seek(pb, flv->filesize_offset, SEEK_SET); put_amf_double(pb, file_size); avio_seek(pb, file_size, SEEK_SET); return 0; }"} {"target": 1, "idx": 6108, "func": "static const void *boston_fdt_filter(void *opaque, const void *fdt_orig, const void *match_data, hwaddr *load_addr) { BostonState *s = BOSTON(opaque); MachineState *machine = s->mach; const char *cmdline; int err; void *fdt; size_t fdt_sz, ram_low_sz, ram_high_sz; fdt_sz = fdt_totalsize(fdt_orig) * 2; fdt = g_malloc0(fdt_sz); err = fdt_open_into(fdt_orig, fdt, fdt_sz); if (err) { fprintf(stderr, \"unable to open FDT\\n\"); return NULL; } cmdline = (machine->kernel_cmdline && machine->kernel_cmdline[0]) ? 
machine->kernel_cmdline : \" \"; err = qemu_fdt_setprop_string(fdt, \"/chosen\", \"bootargs\", cmdline); if (err < 0) { fprintf(stderr, \"couldn't set /chosen/bootargs\\n\"); return NULL; } ram_low_sz = MIN(256 * M_BYTE, machine->ram_size); ram_high_sz = machine->ram_size - ram_low_sz; qemu_fdt_setprop_sized_cells(fdt, \"/memory@0\", \"reg\", 1, 0x00000000, 1, ram_low_sz, 1, 0x90000000, 1, ram_high_sz); fdt = g_realloc(fdt, fdt_totalsize(fdt)); qemu_fdt_dumpdtb(fdt, fdt_sz); s->fdt_base = *load_addr; return fdt; }"} {"target": 1, "idx": 6112, "func": "static int mov_read_cmov(MOVContext *c, AVIOContext *pb, MOVAtom atom) { #if CONFIG_ZLIB AVIOContext ctx; uint8_t *cmov_data; uint8_t *moov_data; /* uncompressed data */ long cmov_len, moov_len; int ret = -1; avio_rb32(pb); /* dcom atom */ if (avio_rl32(pb) != MKTAG('d','c','o','m')) return AVERROR_INVALIDDATA; if (avio_rl32(pb) != MKTAG('z','l','i','b')) { av_log(c->fc, AV_LOG_ERROR, \"unknown compression for cmov atom !\"); return AVERROR_INVALIDDATA; } avio_rb32(pb); /* cmvd atom */ if (avio_rl32(pb) != MKTAG('c','m','v','d')) return AVERROR_INVALIDDATA; moov_len = avio_rb32(pb); /* uncompressed size */ cmov_len = atom.size - 6 * 4; cmov_data = av_malloc(cmov_len); if (!cmov_data) return AVERROR(ENOMEM); moov_data = av_malloc(moov_len); if (!moov_data) { av_free(cmov_data); return AVERROR(ENOMEM); } avio_read(pb, cmov_data, cmov_len); if (uncompress (moov_data, (uLongf *) &moov_len, (const Bytef *)cmov_data, cmov_len) != Z_OK) goto free_and_return; if (ffio_init_context(&ctx, moov_data, moov_len, 0, NULL, NULL, NULL, NULL) != 0) goto free_and_return; atom.type = MKTAG('m','o','o','v'); atom.size = moov_len; ret = mov_read_default(c, &ctx, atom); free_and_return: av_free(moov_data); av_free(cmov_data); return ret; #else av_log(c->fc, AV_LOG_ERROR, \"this file requires zlib support compiled in\\n\"); return AVERROR(ENOSYS); #endif }"} {"target": 1, "idx": 6125, "func": "static int sdp_parse_rtpmap(AVFormatContext *s, AVCodecContext *codec, RTSPStream *rtsp_st, int payload_type, const char *p) { char buf[256]; int i; AVCodec *c; const char *c_name; /* Loop into AVRtpDynamicPayloadTypes[] and AVRtpPayloadTypes[] and * see if we can handle this kind of payload. * The space should normally not be there but some Real streams or * particular servers (\"RealServer Version 6.1.3.970\", see issue 1658) * have a trailing space. */ get_word_sep(buf, sizeof(buf), \"/ \", &p); if (payload_type >= RTP_PT_PRIVATE) { RTPDynamicProtocolHandler *handler; for (handler = RTPFirstDynamicPayloadHandler; handler; handler = handler->next) { if (!strcasecmp(buf, handler->enc_name) && codec->codec_type == handler->codec_type) { codec->codec_id = handler->codec_id; rtsp_st->dynamic_handler = handler; if (handler->open) rtsp_st->dynamic_protocol_context = handler->open(); break; } } } else { /* We are in a standard case * (from http://www.iana.org/assignments/rtp-parameters). 
*/ /* search into AVRtpPayloadTypes[] */ codec->codec_id = ff_rtp_codec_id(buf, codec->codec_type); } c = avcodec_find_decoder(codec->codec_id); if (c && c->name) c_name = c->name; else c_name = (char *) NULL; if (c_name) { get_word_sep(buf, sizeof(buf), \"/\", &p); i = atoi(buf); switch (codec->codec_type) { case CODEC_TYPE_AUDIO: av_log(s, AV_LOG_DEBUG, \"audio codec set to: %s\\n\", c_name); codec->sample_rate = RTSP_DEFAULT_AUDIO_SAMPLERATE; codec->channels = RTSP_DEFAULT_NB_AUDIO_CHANNELS; if (i > 0) { codec->sample_rate = i; get_word_sep(buf, sizeof(buf), \"/\", &p); i = atoi(buf); if (i > 0) codec->channels = i; // TODO: there is a bug here; if it is a mono stream, and // less than 22000Hz, faad upconverts to stereo and twice // the frequency. No problem, but the sample rate is being // set here by the sdp line. Patch on its way. (rdm) } av_log(s, AV_LOG_DEBUG, \"audio samplerate set to: %i\\n\", codec->sample_rate); av_log(s, AV_LOG_DEBUG, \"audio channels set to: %i\\n\", codec->channels); break; case CODEC_TYPE_VIDEO: av_log(s, AV_LOG_DEBUG, \"video codec set to: %s\\n\", c_name); break; default: break; } return 0; } return -1; }"} {"target": 0, "idx": 6140, "func": "void ff_mpv_frame_end(MpegEncContext *s) { #if FF_API_XVMC FF_DISABLE_DEPRECATION_WARNINGS /* redraw edges for the frame if decoding didn't complete */ // just to make sure that all data is rendered. if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) { ff_xvmc_field_end(s); } else FF_ENABLE_DEPRECATION_WARNINGS #endif /* FF_API_XVMC */ emms_c(); if (s->current_picture.reference) ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0); }"} {"target": 1, "idx": 6160, "func": "static int read_ffserver_streams(AVFormatContext *s, const char *filename) { int i, err; AVFormatContext *ic; int nopts = 0; err = av_open_input_file(&ic, filename, NULL, FFM_PACKET_SIZE, NULL); if (err < 0) return err; /* copy stream format */ s->nb_streams = ic->nb_streams; for(i=0;inb_streams;i++) { AVStream *st; AVCodec *codec; // FIXME: a more elegant solution is needed st = av_mallocz(sizeof(AVStream)); memcpy(st, ic->streams[i], sizeof(AVStream)); st->codec = avcodec_alloc_context(); if (!st->codec) { print_error(filename, AVERROR(ENOMEM)); ffmpeg_exit(1); } avcodec_copy_context(st->codec, ic->streams[i]->codec); s->streams[i] = st; codec = avcodec_find_encoder(st->codec->codec_id); if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if (audio_stream_copy) { st->stream_copy = 1; } else choose_sample_fmt(st, codec); } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { if (video_stream_copy) { st->stream_copy = 1; } else choose_pixel_fmt(st, codec); } if(!st->codec->thread_count) st->codec->thread_count = 1; if(st->codec->thread_count>1) avcodec_thread_init(st->codec, st->codec->thread_count); if(st->codec->flags & CODEC_FLAG_BITEXACT) nopts = 1; } if (!nopts) s->timestamp = av_gettime(); av_close_input_file(ic); return 0; }"} {"target": 1, "idx": 6165, "func": "static void uninit(AVFilterContext *ctx) { ZScaleContext *s = ctx->priv; zimg_filter_graph_free(s->graph); av_freep(&s->tmp); s->tmp_size = 0; }"} {"target": 0, "idx": 6186, "func": "static void decode_bo_addrmode_post_pre_base(CPUTriCoreState *env, DisasContext *ctx) { uint32_t op2; uint32_t off10; int32_t r1, r2; TCGv temp; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); op2 = MASK_OP_BO_OP2(ctx->opcode); switch (op2) { case OPC2_32_BO_CACHEA_WI_SHORTOFF: case OPC2_32_BO_CACHEA_W_SHORTOFF: case 
OPC2_32_BO_CACHEA_I_SHORTOFF: /* instruction to access the cache */ break; case OPC2_32_BO_CACHEA_WI_POSTINC: case OPC2_32_BO_CACHEA_W_POSTINC: case OPC2_32_BO_CACHEA_I_POSTINC: /* instruction to access the cache, but we still need to handle the addressing mode */ tcg_gen_addi_tl(cpu_gpr_d[r2], cpu_gpr_d[r2], off10); break; case OPC2_32_BO_CACHEA_WI_PREINC: case OPC2_32_BO_CACHEA_W_PREINC: case OPC2_32_BO_CACHEA_I_PREINC: /* instruction to access the cache, but we still need to handle the addressing mode */ tcg_gen_addi_tl(cpu_gpr_d[r2], cpu_gpr_d[r2], off10); break; case OPC2_32_BO_CACHEI_WI_SHORTOFF: case OPC2_32_BO_CACHEI_W_SHORTOFF: /* TODO: Raise illegal opcode trap, if tricore_feature(TRICORE_FEATURE_13) */ break; case OPC2_32_BO_CACHEI_W_POSTINC: case OPC2_32_BO_CACHEI_WI_POSTINC: if (!tricore_feature(env, TRICORE_FEATURE_13)) { tcg_gen_addi_tl(cpu_gpr_d[r2], cpu_gpr_d[r2], off10); } /* TODO: else raise illegal opcode trap */ break; case OPC2_32_BO_CACHEI_W_PREINC: case OPC2_32_BO_CACHEI_WI_PREINC: if (!tricore_feature(env, TRICORE_FEATURE_13)) { tcg_gen_addi_tl(cpu_gpr_d[r2], cpu_gpr_d[r2], off10); } /* TODO: else raise illegal opcode trap */ break; case OPC2_32_BO_ST_A_SHORTOFF: gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL); break; case OPC2_32_BO_ST_A_POSTINC: tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_A_PREINC: gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL); break; case OPC2_32_BO_ST_B_SHORTOFF: gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB); break; case OPC2_32_BO_ST_B_POSTINC: tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_B_PREINC: gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB); break; case OPC2_32_BO_ST_D_SHORTOFF: gen_offset_st_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], off10, ctx); break; case OPC2_32_BO_ST_D_POSTINC: gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx); tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_D_PREINC: temp = tcg_temp_new(); tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); tcg_gen_mov_tl(cpu_gpr_a[r2], temp); tcg_temp_free(temp); break; case OPC2_32_BO_ST_DA_SHORTOFF: gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], off10, ctx); break; case OPC2_32_BO_ST_DA_POSTINC: gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx); tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_DA_PREINC: temp = tcg_temp_new(); tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); tcg_gen_mov_tl(cpu_gpr_a[r2], temp); tcg_temp_free(temp); break; case OPC2_32_BO_ST_H_SHORTOFF: gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); break; case OPC2_32_BO_ST_H_POSTINC: tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_H_PREINC: gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); break; case OPC2_32_BO_ST_Q_SHORTOFF: temp = tcg_temp_new(); tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW); tcg_temp_free(temp); break; case OPC2_32_BO_ST_Q_POSTINC: temp = tcg_temp_new(); tcg_gen_shri_tl(temp, 
cpu_gpr_d[r1], 16); tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); tcg_temp_free(temp); break; case OPC2_32_BO_ST_Q_PREINC: temp = tcg_temp_new(); tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW); tcg_temp_free(temp); break; case OPC2_32_BO_ST_W_SHORTOFF: gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL); break; case OPC2_32_BO_ST_W_POSTINC: tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_W_PREINC: gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL); break; } }"} {"target": 0, "idx": 6189, "func": "static int kvm_mips_get_fpu_registers(CPUState *cs) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; int err, ret = 0; unsigned int i; /* Only get FPU state if we're emulating a CPU with an FPU */ if (env->CP0_Config1 & (1 << CP0C1_FP)) { /* FPU Control Registers */ err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR, &env->active_fpu.fcr0); if (err < 0) { DPRINTF(\"%s: Failed to get FCR_IR (%d)\\n\", __func__, err); ret = err; } err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR, &env->active_fpu.fcr31); if (err < 0) { DPRINTF(\"%s: Failed to get FCR_CSR (%d)\\n\", __func__, err); ret = err; } else { restore_fp_status(env); } /* Floating point registers */ for (i = 0; i < 32; ++i) { if (env->CP0_Status & (1 << CP0St_FR)) { err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i), &env->active_fpu.fpr[i].d); } else { err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i), &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]); } if (err < 0) { DPRINTF(\"%s: Failed to get FPR%u (%d)\\n\", __func__, i, err); ret = err; } } } return ret; }"} {"target": 0, "idx": 6193, "func": "static void gen_loongson_integer(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) { const char *opn = \"loongson\"; TCGv t0, t1; if (rd == 0) { /* Treat as NOP. 
*/ MIPS_DEBUG(\"NOP\"); return; } switch (opc) { case OPC_MULT_G_2E: case OPC_MULT_G_2F: case OPC_MULTU_G_2E: case OPC_MULTU_G_2F: #if defined(TARGET_MIPS64) case OPC_DMULT_G_2E: case OPC_DMULT_G_2F: case OPC_DMULTU_G_2E: case OPC_DMULTU_G_2F: #endif t0 = tcg_temp_new(); t1 = tcg_temp_new(); break; default: t0 = tcg_temp_local_new(); t1 = tcg_temp_local_new(); break; } gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); switch (opc) { case OPC_MULT_G_2E: case OPC_MULT_G_2F: tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); opn = \"mult.g\"; break; case OPC_MULTU_G_2E: case OPC_MULTU_G_2F: tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); opn = \"multu.g\"; break; case OPC_DIV_G_2E: case OPC_DIV_G_2F: { int l1 = gen_new_label(); int l2 = gen_new_label(); int l3 = gen_new_label(); tcg_gen_ext32s_tl(t0, t0); tcg_gen_ext32s_tl(t1, t1); tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); tcg_gen_movi_tl(cpu_gpr[rd], 0); tcg_gen_br(l3); gen_set_label(l1); tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2); tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2); tcg_gen_mov_tl(cpu_gpr[rd], t0); tcg_gen_br(l3); gen_set_label(l2); tcg_gen_div_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); gen_set_label(l3); } opn = \"div.g\"; break; case OPC_DIVU_G_2E: case OPC_DIVU_G_2F: { int l1 = gen_new_label(); int l2 = gen_new_label(); tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); tcg_gen_movi_tl(cpu_gpr[rd], 0); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); gen_set_label(l2); } opn = \"divu.g\"; break; case OPC_MOD_G_2E: case OPC_MOD_G_2F: { int l1 = gen_new_label(); int l2 = gen_new_label(); int l3 = gen_new_label(); tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2); tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2); gen_set_label(l1); tcg_gen_movi_tl(cpu_gpr[rd], 0); tcg_gen_br(l3); gen_set_label(l2); tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); gen_set_label(l3); } opn = \"mod.g\"; break; case OPC_MODU_G_2E: case OPC_MODU_G_2F: { int l1 = gen_new_label(); int l2 = gen_new_label(); tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); tcg_gen_movi_tl(cpu_gpr[rd], 0); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_remu_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); gen_set_label(l2); } opn = \"modu.g\"; break; #if defined(TARGET_MIPS64) case OPC_DMULT_G_2E: case OPC_DMULT_G_2F: tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); opn = \"dmult.g\"; break; case OPC_DMULTU_G_2E: case OPC_DMULTU_G_2F: tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); opn = \"dmultu.g\"; break; case OPC_DDIV_G_2E: case OPC_DDIV_G_2F: { int l1 = gen_new_label(); int l2 = gen_new_label(); int l3 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); tcg_gen_movi_tl(cpu_gpr[rd], 0); tcg_gen_br(l3); gen_set_label(l1); tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2); tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2); tcg_gen_mov_tl(cpu_gpr[rd], t0); tcg_gen_br(l3); gen_set_label(l2); tcg_gen_div_tl(cpu_gpr[rd], t0, t1); gen_set_label(l3); } opn = \"ddiv.g\"; break; case OPC_DDIVU_G_2E: case OPC_DDIVU_G_2F: { int l1 = gen_new_label(); int l2 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); tcg_gen_movi_tl(cpu_gpr[rd], 0); 
tcg_gen_br(l2); gen_set_label(l1); tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); gen_set_label(l2); } opn = \"ddivu.g\"; break; case OPC_DMOD_G_2E: case OPC_DMOD_G_2F: { int l1 = gen_new_label(); int l2 = gen_new_label(); int l3 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2); tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2); gen_set_label(l1); tcg_gen_movi_tl(cpu_gpr[rd], 0); tcg_gen_br(l3); gen_set_label(l2); tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); gen_set_label(l3); } opn = \"dmod.g\"; break; case OPC_DMODU_G_2E: case OPC_DMODU_G_2F: { int l1 = gen_new_label(); int l2 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); tcg_gen_movi_tl(cpu_gpr[rd], 0); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_remu_tl(cpu_gpr[rd], t0, t1); gen_set_label(l2); } opn = \"dmodu.g\"; break; #endif } (void)opn; /* avoid a compiler warning */ MIPS_DEBUG(\"%s %s, %s\", opn, regnames[rd], regnames[rs]); tcg_temp_free(t0); tcg_temp_free(t1); }"} {"target": 0, "idx": 6196, "func": "static void vfio_pci_load_rom(VFIOPCIDevice *vdev) { struct vfio_region_info reg_info = { .argsz = sizeof(reg_info), .index = VFIO_PCI_ROM_REGION_INDEX }; uint64_t size; off_t off = 0; size_t bytes; if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, ®_info)) { error_report(\"vfio: Error getting ROM info: %m\"); return; } trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info.size, (unsigned long)reg_info.offset, (unsigned long)reg_info.flags); vdev->rom_size = size = reg_info.size; vdev->rom_offset = reg_info.offset; if (!vdev->rom_size) { vdev->rom_read_failed = true; error_report(\"vfio-pci: Cannot read device rom at \" \"%s\", vdev->vbasedev.name); error_printf(\"Device option ROM contents are probably invalid \" \"(check dmesg).\\nSkip option ROM probe with rombar=0, \" \"or load from file with romfile=\\n\"); return; } vdev->rom = g_malloc(size); memset(vdev->rom, 0xff, size); while (size) { bytes = pread(vdev->vbasedev.fd, vdev->rom + off, size, vdev->rom_offset + off); if (bytes == 0) { break; } else if (bytes > 0) { off += bytes; size -= bytes; } else { if (errno == EINTR || errno == EAGAIN) { continue; } error_report(\"vfio: Error reading device ROM: %m\"); break; } } }"} {"target": 1, "idx": 6206, "func": "static bool try_poll_mode(AioContext *ctx, bool blocking) { if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) { /* See qemu_soonest_timeout() uint64_t hack */ int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx), (uint64_t)ctx->poll_max_ns); if (max_ns) { if (run_poll_handlers(ctx, max_ns)) { return true; } } } return false; }"} {"target": 1, "idx": 6217, "func": "static void pci_basic(gconstpointer data) { QVirtioPCIDevice *dev; QPCIBus *bus; QVirtQueuePCI *tx, *rx; QGuestAllocator *alloc; void (*func) (const QVirtioBus *bus, QVirtioDevice *dev, QGuestAllocator *alloc, QVirtQueue *rvq, QVirtQueue *tvq, int socket) = data; int sv[2], ret; ret = socketpair(PF_UNIX, SOCK_STREAM, 0, sv); g_assert_cmpint(ret, !=, -1); bus = pci_test_start(sv[1]); dev = virtio_net_pci_init(bus, PCI_SLOT); alloc = pc_alloc_init(); rx = (QVirtQueuePCI *)qvirtqueue_setup(&qvirtio_pci, &dev->vdev, alloc, 0); tx = (QVirtQueuePCI *)qvirtqueue_setup(&qvirtio_pci, &dev->vdev, alloc, 1); driver_init(&qvirtio_pci, &dev->vdev); func(&qvirtio_pci, &dev->vdev, alloc, &rx->vq, &tx->vq, sv[0]); /* End test */ close(sv[0]); guest_free(alloc, tx->vq.desc); pc_alloc_uninit(alloc); qvirtio_pci_device_disable(dev); g_free(dev); qpci_free_pc(bus); test_end(); }"} 
{"target": 0, "idx": 6241, "func": "static void test_qemu_strtoul_full_negative(void) { const char *str = \" \\t -321\"; unsigned long res = 999; int err; err = qemu_strtoul(str, NULL, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, -321ul); }"} {"target": 0, "idx": 6243, "func": "static int vnc_validate_certificate(struct VncState *vs) { int ret; unsigned int status; const gnutls_datum_t *certs; unsigned int nCerts, i; time_t now; VNC_DEBUG(\"Validating client certificate\\n\"); if ((ret = gnutls_certificate_verify_peers2 (vs->tls_session, &status)) < 0) { VNC_DEBUG(\"Verify failed %s\\n\", gnutls_strerror(ret)); return -1; } if ((now = time(NULL)) == ((time_t)-1)) { return -1; } if (status != 0) { if (status & GNUTLS_CERT_INVALID) VNC_DEBUG(\"The certificate is not trusted.\\n\"); if (status & GNUTLS_CERT_SIGNER_NOT_FOUND) VNC_DEBUG(\"The certificate hasn't got a known issuer.\\n\"); if (status & GNUTLS_CERT_REVOKED) VNC_DEBUG(\"The certificate has been revoked.\\n\"); if (status & GNUTLS_CERT_INSECURE_ALGORITHM) VNC_DEBUG(\"The certificate uses an insecure algorithm\\n\"); return -1; } else { VNC_DEBUG(\"Certificate is valid!\\n\"); } /* Only support x509 for now */ if (gnutls_certificate_type_get(vs->tls_session) != GNUTLS_CRT_X509) return -1; if (!(certs = gnutls_certificate_get_peers(vs->tls_session, &nCerts))) return -1; for (i = 0 ; i < nCerts ; i++) { gnutls_x509_crt_t cert; VNC_DEBUG (\"Checking certificate chain %d\\n\", i); if (gnutls_x509_crt_init (&cert) < 0) return -1; if (gnutls_x509_crt_import(cert, &certs[i], GNUTLS_X509_FMT_DER) < 0) { gnutls_x509_crt_deinit (cert); return -1; } if (gnutls_x509_crt_get_expiration_time (cert) < now) { VNC_DEBUG(\"The certificate has expired\\n\"); gnutls_x509_crt_deinit (cert); return -1; } if (gnutls_x509_crt_get_activation_time (cert) > now) { VNC_DEBUG(\"The certificate is not yet activated\\n\"); gnutls_x509_crt_deinit (cert); return -1; } if (gnutls_x509_crt_get_activation_time (cert) > now) { VNC_DEBUG(\"The certificate is not yet activated\\n\"); gnutls_x509_crt_deinit (cert); return -1; } gnutls_x509_crt_deinit (cert); } return 0; }"} {"target": 0, "idx": 6251, "func": "int fw_cfg_add_callback(void *opaque, uint16_t key, FWCfgCallback callback, void *callback_opaque, uint8_t *data, size_t len) { FWCfgState *s = opaque; int arch = !!(key & FW_CFG_ARCH_LOCAL); key &= FW_CFG_ENTRY_MASK; if (key >= FW_CFG_MAX_ENTRY || !(key & FW_CFG_WRITE_CHANNEL) || len > 65535) return 0; s->entries[arch][key].data = data; s->entries[arch][key].len = len; s->entries[arch][key].callback_opaque = callback_opaque; s->entries[arch][key].callback = callback; return 1; }"} {"target": 1, "idx": 6272, "func": "static void lpc_analyze_remodulate(int32_t *decoded, const int coeffs[32], int order, int qlevel, int len, int bps) { int i, j; int ebps = 1 << (bps-1); unsigned sigma = 0; for (i = order; i < len; i++) sigma |= decoded[i] + ebps; if (sigma < 2*ebps) return; for (i = len - 1; i >= order; i--) { int64_t p = 0; for (j = 0; j < order; j++) p += coeffs[j] * (int64_t)decoded[i-order+j]; decoded[i] -= p >> qlevel; } for (i = order; i < len; i++, decoded++) { int32_t p = 0; for (j = 0; j < order; j++) p += coeffs[j] * (uint32_t)decoded[j]; decoded[j] += p >> qlevel; } }"} {"target": 0, "idx": 6277, "func": "static int read_thread(void *arg) { VideoState *is = arg; AVFormatContext *ic = NULL; int err, i, ret; int st_index[AVMEDIA_TYPE_NB]; AVPacket pkt1, *pkt = &pkt1; int eof = 0; int pkt_in_play_range = 0; AVDictionaryEntry *t; AVDictionary 
**opts; int orig_nb_streams; SDL_mutex *wait_mutex = SDL_CreateMutex(); memset(st_index, -1, sizeof(st_index)); is->last_video_stream = is->video_stream = -1; is->last_audio_stream = is->audio_stream = -1; is->last_subtitle_stream = is->subtitle_stream = -1; ic = avformat_alloc_context(); ic->interrupt_callback.callback = decode_interrupt_cb; ic->interrupt_callback.opaque = is; err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts); if (err < 0) { print_error(is->filename, err); ret = -1; goto fail; } if ((t = av_dict_get(format_opts, \"\", NULL, AV_DICT_IGNORE_SUFFIX))) { av_log(NULL, AV_LOG_ERROR, \"Option %s not found.\\n\", t->key); ret = AVERROR_OPTION_NOT_FOUND; goto fail; } is->ic = ic; if (genpts) ic->flags |= AVFMT_FLAG_GENPTS; opts = setup_find_stream_info_opts(ic, codec_opts); orig_nb_streams = ic->nb_streams; err = avformat_find_stream_info(ic, opts); if (err < 0) { fprintf(stderr, \"%s: could not find codec parameters\\n\", is->filename); ret = -1; goto fail; } for (i = 0; i < orig_nb_streams; i++) av_dict_free(&opts[i]); av_freep(&opts); if (ic->pb) ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end if (seek_by_bytes < 0) seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT); is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0; /* if seeking requested, we execute it */ if (start_time != AV_NOPTS_VALUE) { int64_t timestamp; timestamp = start_time; /* add the stream start time */ if (ic->start_time != AV_NOPTS_VALUE) timestamp += ic->start_time; ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0); if (ret < 0) { fprintf(stderr, \"%s: could not seek to position %0.3f\\n\", is->filename, (double)timestamp / AV_TIME_BASE); } } is->realtime = is_realtime(ic); for (i = 0; i < ic->nb_streams; i++) ic->streams[i]->discard = AVDISCARD_ALL; if (!video_disable) st_index[AVMEDIA_TYPE_VIDEO] = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0); if (!audio_disable) st_index[AVMEDIA_TYPE_AUDIO] = av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO, wanted_stream[AVMEDIA_TYPE_AUDIO], st_index[AVMEDIA_TYPE_VIDEO], NULL, 0); if (!video_disable) st_index[AVMEDIA_TYPE_SUBTITLE] = av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE, wanted_stream[AVMEDIA_TYPE_SUBTITLE], (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ? st_index[AVMEDIA_TYPE_AUDIO] : st_index[AVMEDIA_TYPE_VIDEO]), NULL, 0); if (show_status) { av_dump_format(ic, 0, is->filename, 0); } is->show_mode = show_mode; /* open the streams */ if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) { stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]); } ret = -1; if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) { ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]); } if (is->show_mode == SHOW_MODE_NONE) is->show_mode = ret >= 0 ? 
SHOW_MODE_VIDEO : SHOW_MODE_RDFT; is->refresh_tid = SDL_CreateThread(refresh_thread, is); if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) { stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]); } if (is->video_stream < 0 && is->audio_stream < 0) { fprintf(stderr, \"%s: could not open codecs\\n\", is->filename); ret = -1; goto fail; } if (infinite_buffer < 0 && is->realtime) infinite_buffer = 1; for (;;) { if (is->abort_request) break; if (is->paused != is->last_paused) { is->last_paused = is->paused; if (is->paused) is->read_pause_return = av_read_pause(ic); else av_read_play(ic); } #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL if (is->paused && (!strcmp(ic->iformat->name, \"rtsp\") || (ic->pb && !strncmp(input_filename, \"mmsh:\", 5)))) { /* wait 10 ms to avoid trying to get another packet */ /* XXX: horrible */ SDL_Delay(10); continue; } #endif if (is->seek_req) { int64_t seek_target = is->seek_pos; int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN; int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX; // FIXME the +-2 is due to rounding being not done in the correct direction in generation // of the seek_pos/seek_rel variables ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags); if (ret < 0) { fprintf(stderr, \"%s: error while seeking\\n\", is->ic->filename); } else { if (is->audio_stream >= 0) { packet_queue_flush(&is->audioq); packet_queue_put(&is->audioq, &flush_pkt); } if (is->subtitle_stream >= 0) { packet_queue_flush(&is->subtitleq); packet_queue_put(&is->subtitleq, &flush_pkt); } if (is->video_stream >= 0) { packet_queue_flush(&is->videoq); packet_queue_put(&is->videoq, &flush_pkt); } if (is->seek_flags & AVSEEK_FLAG_BYTE) { //FIXME: use a cleaner way to signal obsolete external clock... update_external_clock_pts(is, (double)AV_NOPTS_VALUE); } else { update_external_clock_pts(is, seek_target / (double)AV_TIME_BASE); } } is->seek_req = 0; eof = 0; if (is->paused) step_to_next_frame(is); } if (is->queue_attachments_req) { avformat_queue_attached_pictures(ic); is->queue_attachments_req = 0; } /* if the queue are full, no need to read more */ if (infinite_buffer<1 && (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request) && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request) && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) { /* wait 10 ms */ SDL_LockMutex(wait_mutex); SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10); SDL_UnlockMutex(wait_mutex); continue; } if (eof) { if (is->video_stream >= 0) { av_init_packet(pkt); pkt->data = NULL; pkt->size = 0; pkt->stream_index = is->video_stream; packet_queue_put(&is->videoq, pkt); } if (is->audio_stream >= 0 && is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) { av_init_packet(pkt); pkt->data = NULL; pkt->size = 0; pkt->stream_index = is->audio_stream; packet_queue_put(&is->audioq, pkt); } SDL_Delay(10); if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) { if (loop != 1 && (!loop || --loop)) { stream_seek(is, start_time != AV_NOPTS_VALUE ? 
start_time : 0, 0, 0); } else if (autoexit) { ret = AVERROR_EOF; goto fail; } } eof=0; continue; } ret = av_read_frame(ic, pkt); if (ret < 0) { if (ret == AVERROR_EOF || url_feof(ic->pb)) eof = 1; if (ic->pb && ic->pb->error) break; SDL_LockMutex(wait_mutex); SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10); SDL_UnlockMutex(wait_mutex); continue; } /* check if packet is in play range specified by user, then queue, otherwise discard */ pkt_in_play_range = duration == AV_NOPTS_VALUE || (pkt->pts - ic->streams[pkt->stream_index]->start_time) * av_q2d(ic->streams[pkt->stream_index]->time_base) - (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000 <= ((double)duration / 1000000); if (pkt->stream_index == is->audio_stream && pkt_in_play_range) { packet_queue_put(&is->audioq, pkt); } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) { packet_queue_put(&is->videoq, pkt); } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) { packet_queue_put(&is->subtitleq, pkt); } else { av_free_packet(pkt); } } /* wait until the end */ while (!is->abort_request) { SDL_Delay(100); } ret = 0; fail: /* close each stream */ if (is->audio_stream >= 0) stream_component_close(is, is->audio_stream); if (is->video_stream >= 0) stream_component_close(is, is->video_stream); if (is->subtitle_stream >= 0) stream_component_close(is, is->subtitle_stream); if (is->ic) { avformat_close_input(&is->ic); } if (ret != 0) { SDL_Event event; event.type = FF_QUIT_EVENT; event.user.data1 = is; SDL_PushEvent(&event); } SDL_DestroyMutex(wait_mutex); return 0; }"} {"target": 0, "idx": 6293, "func": "int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx) { struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.data[2]; const int mb_block_count = 4 + (1 << s->chroma_format); assert(avctx); if (!render || render->xvmc_id != AV_XVMC_ID || !render->data_blocks || !render->mv_blocks) { av_log(avctx, AV_LOG_ERROR, \"Render token doesn't look as expected.\\n\"); return -1; // make sure that this is a render packet } if (render->filled_mv_blocks_num) { av_log(avctx, AV_LOG_ERROR, \"Rendering surface contains %i unprocessed blocks.\\n\", render->filled_mv_blocks_num); return -1; } if (render->allocated_mv_blocks < 1 || render->allocated_data_blocks < render->allocated_mv_blocks*mb_block_count || render->start_mv_blocks_num >= render->allocated_mv_blocks || render->next_free_data_block_num > render->allocated_data_blocks - mb_block_count*(render->allocated_mv_blocks-render->start_mv_blocks_num)) { av_log(avctx, AV_LOG_ERROR, \"Rendering surface doesn't provide enough block structures to work with.\\n\"); return -1; } render->picture_structure = s->picture_structure; render->flags = s->first_field ? 
0 : XVMC_SECOND_FIELD; render->p_future_surface = NULL; render->p_past_surface = NULL; switch(s->pict_type) { case FF_I_TYPE: return 0; // no prediction from other frames case FF_B_TYPE: next = (struct xvmc_pix_fmt*)s->next_picture.data[2]; if (!next) return -1; if (next->xvmc_id != AV_XVMC_ID) return -1; render->p_future_surface = next->p_surface; // no return here, going to set forward prediction case FF_P_TYPE: last = (struct xvmc_pix_fmt*)s->last_picture.data[2]; if (!last) last = render; // predict second field from the first if (last->xvmc_id != AV_XVMC_ID) return -1; render->p_past_surface = last->p_surface; return 0; } return -1; }"} {"target": 1, "idx": 6307, "func": "static int calculate_bitrate(AVFormatContext *s) { AVIContext *avi = s->priv_data; int i, j; int64_t lensum = 0; int64_t maxpos = 0; for (i = 0; inb_streams; i++) { int64_t len = 0; AVStream *st = s->streams[i]; if (!st->nb_index_entries) continue; for (j = 0; j < st->nb_index_entries; j++) len += st->index_entries[j].size; maxpos = FFMAX(maxpos, st->index_entries[j-1].pos); lensum += len; } if (maxpos < avi->io_fsize*9/10) // index does not cover the whole file return 0; if (lensum*9/10 > maxpos || lensum < maxpos*9/10) // frame sum and filesize mismatch return 0; for (i = 0; inb_streams; i++) { int64_t len = 0; AVStream *st = s->streams[i]; int64_t duration; for (j = 0; j < st->nb_index_entries; j++) len += st->index_entries[j].size; if (st->nb_index_entries < 2 || st->codec->bit_rate > 0) continue; duration = st->index_entries[j-1].timestamp - st->index_entries[0].timestamp; st->codec->bit_rate = av_rescale(8*len, st->time_base.den, duration * st->time_base.num); } return 1; }"} {"target": 1, "idx": 6335, "func": "static inline void gen_st16(TCGv val, TCGv addr, int index) { tcg_gen_qemu_st16(val, addr, index); dead_tmp(val); }"} {"target": 1, "idx": 6346, "func": "static void q35_host_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); hc->root_bus_path = q35_host_root_bus_path; dc->realize = q35_host_realize; dc->props = mch_props; /* Reason: needs to be wired up by pc_q35_init */ dc->user_creatable = false; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->fw_name = \"pci\"; }"} {"target": 1, "idx": 6374, "func": "static struct addrinfo *inet_parse_connect_saddr(InetSocketAddress *saddr, Error **errp) { struct addrinfo ai, *res; int rc; Error *err = NULL; memset(&ai, 0, sizeof(ai)); ai.ai_flags = AI_CANONNAME | AI_V4MAPPED | AI_ADDRCONFIG; ai.ai_family = inet_ai_family_from_address(saddr, &err); ai.ai_socktype = SOCK_STREAM; if (err) { error_propagate(errp, err); return NULL; } if (saddr->host == NULL || saddr->port == NULL) { error_setg(errp, \"host and/or port not specified\"); return NULL; } /* lookup */ rc = getaddrinfo(saddr->host, saddr->port, &ai, &res); if (rc != 0) { error_setg(errp, \"address resolution failed for %s:%s: %s\", saddr->host, saddr->port, gai_strerror(rc)); return NULL; } return res; }"} {"target": 1, "idx": 6385, "func": "static void bamboo_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; unsigned int pci_irq_nrs[4] = { 28, 27, 26, 25 }; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *isa = g_new(MemoryRegion, 1); MemoryRegion *ram_memories = g_malloc(PPC440EP_SDRAM_NR_BANKS * 
sizeof(*ram_memories)); hwaddr ram_bases[PPC440EP_SDRAM_NR_BANKS]; hwaddr ram_sizes[PPC440EP_SDRAM_NR_BANKS]; qemu_irq *pic; qemu_irq *irqs; PCIBus *pcibus; PowerPCCPU *cpu; CPUPPCState *env; uint64_t elf_entry; uint64_t elf_lowaddr; hwaddr loadaddr = 0; target_long initrd_size = 0; DeviceState *dev; int success; int i; /* Setup CPU. */ if (machine->cpu_model == NULL) { machine->cpu_model = \"440EP\"; } cpu = POWERPC_CPU(cpu_generic_init(TYPE_POWERPC_CPU, machine->cpu_model)); if (cpu == NULL) { fprintf(stderr, \"Unable to initialize CPU!\\n\"); exit(1); } env = &cpu->env; if (env->mmu_model != POWERPC_MMU_BOOKE) { fprintf(stderr, \"MMU model %i not supported by this machine.\\n\", env->mmu_model); exit(1); } qemu_register_reset(main_cpu_reset, cpu); ppc_booke_timers_init(cpu, 400000000, 0); ppc_dcr_init(env, NULL, NULL); /* interrupt controller */ irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB); irqs[PPCUIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]; irqs[PPCUIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]; pic = ppcuic_init(env, irqs, 0x0C0, 0, 1); /* SDRAM controller */ memset(ram_bases, 0, sizeof(ram_bases)); memset(ram_sizes, 0, sizeof(ram_sizes)); ram_size = ppc4xx_sdram_adjust(ram_size, PPC440EP_SDRAM_NR_BANKS, ram_memories, ram_bases, ram_sizes, ppc440ep_sdram_bank_sizes); /* XXX 440EP's ECC interrupts are on UIC1, but we've only created UIC0. */ ppc4xx_sdram_init(env, pic[14], PPC440EP_SDRAM_NR_BANKS, ram_memories, ram_bases, ram_sizes, 1); /* PCI */ dev = sysbus_create_varargs(TYPE_PPC4xx_PCI_HOST_BRIDGE, PPC440EP_PCI_CONFIG, pic[pci_irq_nrs[0]], pic[pci_irq_nrs[1]], pic[pci_irq_nrs[2]], pic[pci_irq_nrs[3]], NULL); pcibus = (PCIBus *)qdev_get_child_bus(dev, \"pci.0\"); if (!pcibus) { fprintf(stderr, \"couldn't create PCI controller!\\n\"); exit(1); } memory_region_init_alias(isa, NULL, \"isa_mmio\", get_system_io(), 0, PPC440EP_PCI_IOLEN); memory_region_add_subregion(get_system_memory(), PPC440EP_PCI_IO, isa); if (serial_hds[0] != NULL) { serial_mm_init(address_space_mem, 0xef600300, 0, pic[0], PPC_SERIAL_MM_BAUDBASE, serial_hds[0], DEVICE_BIG_ENDIAN); } if (serial_hds[1] != NULL) { serial_mm_init(address_space_mem, 0xef600400, 0, pic[1], PPC_SERIAL_MM_BAUDBASE, serial_hds[1], DEVICE_BIG_ENDIAN); } if (pcibus) { /* Register network interfaces. */ for (i = 0; i < nb_nics; i++) { /* There are no PCI NICs on the Bamboo board, but there are * PCI slots, so we can pick whatever default model we want. */ pci_nic_init_nofail(&nd_table[i], pcibus, \"e1000\", NULL); } } /* Load kernel. */ if (kernel_filename) { success = load_uimage(kernel_filename, &entry, &loadaddr, NULL, NULL, NULL); if (success < 0) { success = load_elf(kernel_filename, NULL, NULL, &elf_entry, &elf_lowaddr, NULL, 1, PPC_ELF_MACHINE, 0, 0); entry = elf_entry; loadaddr = elf_lowaddr; } /* XXX try again as binary */ if (success < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } } /* Load initrd. */ if (initrd_filename) { initrd_size = load_image_targphys(initrd_filename, RAMDISK_ADDR, ram_size - RAMDISK_ADDR); if (initrd_size < 0) { fprintf(stderr, \"qemu: could not load ram disk '%s' at %x\\n\", initrd_filename, RAMDISK_ADDR); exit(1); } } /* If we're loading a kernel directly, we must load the device tree too. 
*/ if (kernel_filename) { if (bamboo_load_device_tree(FDT_ADDR, ram_size, RAMDISK_ADDR, initrd_size, kernel_cmdline) < 0) { fprintf(stderr, \"couldn't load device tree\\n\"); exit(1); } } }"} {"target": 0, "idx": 6408, "func": "void HELPER(stpq)(CPUS390XState *env, uint64_t addr, uint64_t low, uint64_t high) { uintptr_t ra = GETPC(); if (parallel_cpus) { #ifndef CONFIG_ATOMIC128 cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); #else int mem_idx = cpu_mmu_index(env, false); TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); Int128 v = int128_make128(low, high); helper_atomic_sto_be_mmu(env, addr, v, oi, ra); #endif } else { check_alignment(env, addr, 16, ra); cpu_stq_data_ra(env, addr + 0, high, ra); cpu_stq_data_ra(env, addr + 8, low, ra); } }"} {"target": 1, "idx": 6432, "func": "static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width) { #ifdef HAVE_MMX asm volatile( \"movq \"MANGLE(bm01010101)\", %%mm4\\n\\t\" \"mov %0, %%\"REG_a\" \\n\\t\" \"1: \\n\\t\" \"movq (%1, %%\"REG_a\",4), %%mm0 \\n\\t\" \"movq 8(%1, %%\"REG_a\",4), %%mm1 \\n\\t\" \"psrlw $8, %%mm0 \\n\\t\" \"psrlw $8, %%mm1 \\n\\t\" \"packuswb %%mm1, %%mm0 \\n\\t\" \"movq %%mm0, %%mm1 \\n\\t\" \"psrlw $8, %%mm0 \\n\\t\" \"pand %%mm4, %%mm1 \\n\\t\" \"packuswb %%mm0, %%mm0 \\n\\t\" \"packuswb %%mm1, %%mm1 \\n\\t\" \"movd %%mm0, (%3, %%\"REG_a\") \\n\\t\" \"movd %%mm1, (%2, %%\"REG_a\") \\n\\t\" \"add $4, %%\"REG_a\" \\n\\t\" \" js 1b \\n\\t\" : : \"g\" (-width), \"r\" (src1+width*4), \"r\" (dstU+width), \"r\" (dstV+width) : \"%\"REG_a ); #else int i; for(i=0; i>=2; dest[xy] = (uint8_t)res; } } }"} {"target": 1, "idx": 6455, "func": "static inline void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src, int width, uint32_t *unused) { #if COMPILE_TEMPLATE_MMX __asm__ volatile( \"mov %0, %%\"REG_a\" \\n\\t\" \"1: \\n\\t\" \"movq (%1, %%\"REG_a\",2), %%mm0 \\n\\t\" \"movq 8(%1, %%\"REG_a\",2), %%mm1 \\n\\t\" \"psrlw $8, %%mm0 \\n\\t\" \"psrlw $8, %%mm1 \\n\\t\" \"packuswb %%mm1, %%mm0 \\n\\t\" \"movq %%mm0, (%2, %%\"REG_a\") \\n\\t\" \"add $8, %%\"REG_a\" \\n\\t\" \" js 1b \\n\\t\" : : \"g\" ((x86_reg)-width), \"r\" (src+width*2), \"r\" (dst+width) : \"%\"REG_a ); #else int i; for (i=0; ivideo_st->codec->width; int last_h = is->video_st->codec->height; if ((ret = configure_video_filters(graph, is, vfilters)) < 0) goto the_end; filt_out = is->out_video_filter; #endif for (;;) { #if !CONFIG_AVFILTER AVPacket pkt; #else AVFilterBufferRef *picref; AVRational tb; #endif while (is->paused && !is->videoq.abort_request) SDL_Delay(10); #if CONFIG_AVFILTER if ( last_w != is->video_st->codec->width || last_h != is->video_st->codec->height) { av_dlog(NULL, \"Changing size %dx%d -> %dx%d\\n\", last_w, last_h, is->video_st->codec->width, is->video_st->codec->height); avfilter_graph_free(&graph); graph = avfilter_graph_alloc(); if ((ret = configure_video_filters(graph, is, vfilters)) < 0) goto the_end; filt_out = is->out_video_filter; last_w = is->video_st->codec->width; last_h = is->video_st->codec->height; } ret = get_filtered_video_frame(filt_out, frame, &picref, &tb); if (picref) { pts_int = picref->pts; pos = picref->pos; frame->opaque = picref; } if (av_cmp_q(tb, is->video_st->time_base)) { av_unused int64_t pts1 = pts_int; pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base); av_dlog(NULL, \"video_thread(): \" \"tb:%d/%d pts:%\"PRId64\" -> tb:%d/%d pts:%\"PRId64\"\\n\", tb.num, tb.den, pts1, is->video_st->time_base.num, is->video_st->time_base.den, pts_int); } #else ret = 
get_video_frame(is, frame, &pts_int, &pkt); #endif if (ret < 0) goto the_end; if (!ret) continue; pts = pts_int * av_q2d(is->video_st->time_base); #if CONFIG_AVFILTER ret = output_picture2(is, frame, pts, pos); #else ret = output_picture2(is, frame, pts, pkt.pos); av_free_packet(&pkt); #endif if (ret < 0) goto the_end; if (step) if (cur_stream) stream_pause(cur_stream); } the_end: #if CONFIG_AVFILTER avfilter_graph_free(&graph); #endif av_free(frame); return 0; }"} {"target": 1, "idx": 6500, "func": "static int ccid_initfn(USBDevice *dev) { USBCCIDState *s = DO_UPCAST(USBCCIDState, dev, dev); s->bus = ccid_bus_new(&dev->qdev); s->card = NULL; s->cardinfo = NULL; s->migration_state = MIGRATION_NONE; s->migration_target_ip = 0; s->migration_target_port = 0; s->dev.speed = USB_SPEED_FULL; s->notify_slot_change = false; s->powered = true; s->pending_answers_num = 0; s->last_answer_error = 0; s->bulk_in_pending_start = 0; s->bulk_in_pending_end = 0; s->current_bulk_in = NULL; ccid_reset_error_status(s); s->bulk_out_pos = 0; ccid_reset_parameters(s); ccid_reset(s); return 0; }"} {"target": 1, "idx": 6507, "func": "static void ppc4xx_pob_reset (void *opaque) { ppc4xx_pob_t *pob; pob = opaque; /* No error */ pob->bear = 0x00000000; pob->besr[0] = 0x0000000; pob->besr[1] = 0x0000000; }"} {"target": 1, "idx": 6509, "func": "int av_aes_init(AVAES *a, const uint8_t *key, int key_bits, int decrypt) { int i, j, t, rconpointer = 0; uint8_t tk[8][4]; int KC = key_bits >> 5; int rounds = KC + 6; uint8_t log8[256]; uint8_t alog8[512]; if (!enc_multbl[FF_ARRAY_ELEMS(enc_multbl)-1][FF_ARRAY_ELEMS(enc_multbl[0])-1]) { j = 1; for (i = 0; i < 255; i++) { alog8[i] = alog8[i + 255] = j; log8[j] = i; j ^= j + j; if (j > 255) j ^= 0x11B; } for (i = 0; i < 256; i++) { j = i ? 
alog8[255 - log8[i]] : 0; j ^= (j << 1) ^ (j << 2) ^ (j << 3) ^ (j << 4); j = (j ^ (j >> 8) ^ 99) & 255; inv_sbox[j] = i; sbox[i] = j; } init_multbl2(dec_multbl[0], (const int[4]) { 0xe, 0x9, 0xd, 0xb }, log8, alog8, inv_sbox); init_multbl2(enc_multbl[0], (const int[4]) { 0x2, 0x1, 0x1, 0x3 }, log8, alog8, sbox); } if (key_bits != 128 && key_bits != 192 && key_bits != 256) return -1; a->rounds = rounds; memcpy(tk, key, KC * 4); for (t = 0; t < (rounds + 1) * 16;) { memcpy(a->round_key[0].u8 + t, tk, KC * 4); t += KC * 4; for (i = 0; i < 4; i++) tk[0][i] ^= sbox[tk[KC - 1][(i + 1) & 3]]; tk[0][0] ^= rcon[rconpointer++]; for (j = 1; j < KC; j++) { if (KC != 8 || j != KC >> 1) for (i = 0; i < 4; i++) tk[j][i] ^= tk[j - 1][i]; else for (i = 0; i < 4; i++) tk[j][i] ^= sbox[tk[j - 1][i]]; } } if (decrypt) { for (i = 1; i < rounds; i++) { av_aes_block tmp[3]; tmp[2] = a->round_key[i]; subshift(&tmp[1], 0, sbox); mix(tmp, dec_multbl, 1, 3); a->round_key[i] = tmp[0]; } } else { for (i = 0; i < (rounds + 1) >> 1; i++) { FFSWAP(av_aes_block, a->round_key[i], a->round_key[rounds-i]); } } return 0; }"} {"target": 1, "idx": 6519, "func": "static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, unsigned int width, unsigned int height, int lumStride, int chromStride, int dstStride) { //FIXME interpolate chroma RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2); }"} {"target": 0, "idx": 6524, "func": "static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset, sPAPRMachineState *spapr) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); int index = ppc_get_vcpu_dt_id(cpu); uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), 0xffffffff, 0xffffffff}; uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() : SPAPR_TIMEBASE_FREQ; uint32_t cpufreq = kvm_enabled() ? 
kvmppc_get_clockfreq() : 1000000000; uint32_t page_sizes_prop[64]; size_t page_sizes_prop_size; uint32_t vcpus_per_socket = smp_threads * smp_cores; uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)}; int compat_smt = MIN(smp_threads, ppc_compat_max_threads(cpu)); sPAPRDRConnector *drc; int drc_index; uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ]; int i; drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index); if (drc) { drc_index = spapr_drc_index(drc); _FDT((fdt_setprop_cell(fdt, offset, \"ibm,my-drc-index\", drc_index))); } _FDT((fdt_setprop_cell(fdt, offset, \"reg\", index))); _FDT((fdt_setprop_string(fdt, offset, \"device_type\", \"cpu\"))); _FDT((fdt_setprop_cell(fdt, offset, \"cpu-version\", env->spr[SPR_PVR]))); _FDT((fdt_setprop_cell(fdt, offset, \"d-cache-block-size\", env->dcache_line_size))); _FDT((fdt_setprop_cell(fdt, offset, \"d-cache-line-size\", env->dcache_line_size))); _FDT((fdt_setprop_cell(fdt, offset, \"i-cache-block-size\", env->icache_line_size))); _FDT((fdt_setprop_cell(fdt, offset, \"i-cache-line-size\", env->icache_line_size))); if (pcc->l1_dcache_size) { _FDT((fdt_setprop_cell(fdt, offset, \"d-cache-size\", pcc->l1_dcache_size))); } else { error_report(\"Warning: Unknown L1 dcache size for cpu\"); } if (pcc->l1_icache_size) { _FDT((fdt_setprop_cell(fdt, offset, \"i-cache-size\", pcc->l1_icache_size))); } else { error_report(\"Warning: Unknown L1 icache size for cpu\"); } _FDT((fdt_setprop_cell(fdt, offset, \"timebase-frequency\", tbfreq))); _FDT((fdt_setprop_cell(fdt, offset, \"clock-frequency\", cpufreq))); _FDT((fdt_setprop_cell(fdt, offset, \"slb-size\", env->slb_nr))); _FDT((fdt_setprop_cell(fdt, offset, \"ibm,slb-size\", env->slb_nr))); _FDT((fdt_setprop_string(fdt, offset, \"status\", \"okay\"))); _FDT((fdt_setprop(fdt, offset, \"64-bit\", NULL, 0))); if (env->spr_cb[SPR_PURR].oea_read) { _FDT((fdt_setprop(fdt, offset, \"ibm,purr\", NULL, 0))); } if (env->mmu_model & POWERPC_MMU_1TSEG) { _FDT((fdt_setprop(fdt, offset, \"ibm,processor-segment-sizes\", segs, sizeof(segs)))); } /* Advertise VMX/VSX (vector extensions) if available * 0 / no property == no vector extensions * 1 == VMX / Altivec available * 2 == VSX available */ if (env->insns_flags & PPC_ALTIVEC) { uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 
2 : 1; _FDT((fdt_setprop_cell(fdt, offset, \"ibm,vmx\", vmx))); } /* Advertise DFP (Decimal Floating Point) if available * 0 / no property == no DFP * 1 == DFP available */ if (env->insns_flags2 & PPC2_DFP) { _FDT((fdt_setprop_cell(fdt, offset, \"ibm,dfp\", 1))); } page_sizes_prop_size = ppc_create_page_sizes_prop(env, page_sizes_prop, sizeof(page_sizes_prop)); if (page_sizes_prop_size) { _FDT((fdt_setprop(fdt, offset, \"ibm,segment-page-sizes\", page_sizes_prop, page_sizes_prop_size))); } spapr_populate_pa_features(env, fdt, offset, false); _FDT((fdt_setprop_cell(fdt, offset, \"ibm,chip-id\", cs->cpu_index / vcpus_per_socket))); _FDT((fdt_setprop(fdt, offset, \"ibm,pft-size\", pft_size_prop, sizeof(pft_size_prop)))); if (nb_numa_nodes > 1) { _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cpu)); } _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt)); if (pcc->radix_page_info) { for (i = 0; i < pcc->radix_page_info->count; i++) { radix_AP_encodings[i] = cpu_to_be32(pcc->radix_page_info->entries[i]); } _FDT((fdt_setprop(fdt, offset, \"ibm,processor-radix-AP-encodings\", radix_AP_encodings, pcc->radix_page_info->count * sizeof(radix_AP_encodings[0])))); } }"} {"target": 0, "idx": 6532, "func": "static void xhci_kick_epctx(XHCIEPContext *epctx, unsigned int streamid) { XHCIState *xhci = epctx->xhci; XHCIStreamContext *stctx; XHCITransfer *xfer; XHCIRing *ring; USBEndpoint *ep = NULL; uint64_t mfindex; int length; int i; trace_usb_xhci_ep_kick(epctx->slotid, epctx->epid, streamid); /* If the device has been detached, but the guest has not noticed this yet the 2 above checks will succeed, but we must NOT continue */ if (!xhci->slots[epctx->slotid - 1].uport || !xhci->slots[epctx->slotid - 1].uport->dev || !xhci->slots[epctx->slotid - 1].uport->dev->attached) { return; } if (epctx->retry) { XHCITransfer *xfer = epctx->retry; trace_usb_xhci_xfer_retry(xfer); assert(xfer->running_retry); if (xfer->timed_xfer) { /* time to kick the transfer? 
*/ mfindex = xhci_mfindex_get(xhci); xhci_check_intr_iso_kick(xhci, xfer, epctx, mfindex); if (xfer->running_retry) { return; } xfer->timed_xfer = 0; xfer->running_retry = 1; } if (xfer->iso_xfer) { /* retry iso transfer */ if (xhci_setup_packet(xfer) < 0) { return; } usb_handle_packet(xfer->packet.ep->dev, &xfer->packet); assert(xfer->packet.status != USB_RET_NAK); xhci_complete_packet(xfer); } else { /* retry nak'ed transfer */ if (xhci_setup_packet(xfer) < 0) { return; } usb_handle_packet(xfer->packet.ep->dev, &xfer->packet); if (xfer->packet.status == USB_RET_NAK) { return; } xhci_complete_packet(xfer); } assert(!xfer->running_retry); xhci_ep_free_xfer(epctx->retry); epctx->retry = NULL; } if (epctx->state == EP_HALTED) { DPRINTF(\"xhci: ep halted, not running schedule\\n\"); return; } if (epctx->nr_pstreams) { uint32_t err; stctx = xhci_find_stream(epctx, streamid, &err); if (stctx == NULL) { return; } ring = &stctx->ring; xhci_set_ep_state(xhci, epctx, stctx, EP_RUNNING); } else { ring = &epctx->ring; streamid = 0; xhci_set_ep_state(xhci, epctx, NULL, EP_RUNNING); } assert(ring->dequeue != 0); while (1) { length = xhci_ring_chain_length(xhci, ring); if (length <= 0) { break; } xfer = xhci_ep_alloc_xfer(epctx, length); if (xfer == NULL) { break; } for (i = 0; i < length; i++) { TRBType type; type = xhci_ring_fetch(xhci, ring, &xfer->trbs[i], NULL); assert(type); } xfer->streamid = streamid; if (epctx->epid == 1) { xhci_fire_ctl_transfer(xhci, xfer); } else { xhci_fire_transfer(xhci, xfer, epctx); } if (xfer->complete) { xhci_ep_free_xfer(xfer); xfer = NULL; } if (epctx->state == EP_HALTED) { break; } if (xfer != NULL && xfer->running_retry) { DPRINTF(\"xhci: xfer nacked, stopping schedule\\n\"); epctx->retry = xfer; break; } } ep = xhci_epid_to_usbep(epctx); if (ep) { usb_device_flush_ep_queue(ep->dev, ep); } }"} {"target": 0, "idx": 6535, "func": "static int opt_preset(const char *opt, const char *arg) { FILE *f=NULL; char filename[1000], tmp[1000], tmp2[1000], line[1000]; int i; const char *base[3]= { getenv(\"HOME\"), \"/usr/local/share\", \"/usr/share\", }; for(i=!base[0]; i<3 && !f; i++){ snprintf(filename, sizeof(filename), \"%s/%sffmpeg/%s.ffpreset\", base[i], i ? \"\" : \".\", arg); f= fopen(filename, \"r\"); if(!f){ char *codec_name= *opt == 'v' ? video_codec_name : *opt == 'a' ? audio_codec_name : subtitle_codec_name; snprintf(filename, sizeof(filename), \"%s/%sffmpeg/%s-%s.ffpreset\", base[i], i ? \"\" : \".\", codec_name, arg); f= fopen(filename, \"r\"); } } if(!f && ((arg[0]=='.' 
&& arg[1]=='/') || arg[0]=='/' || is_dos_path(arg))){ snprintf(filename, sizeof(filename), arg); f= fopen(filename, \"r\"); } if(!f){ fprintf(stderr, \"File for preset '%s' not found\\n\", arg); av_exit(1); } while(!feof(f)){ int e= fscanf(f, \"%999[^\\n]\\n\", line) - 1; if(line[0] == '#' && !e) continue; e|= sscanf(line, \"%999[^=]=%999[^\\n]\\n\", tmp, tmp2) - 2; if(e){ fprintf(stderr, \"%s: Preset file invalid\\n\", filename); av_exit(1); } if(!strcmp(tmp, \"acodec\")){ opt_audio_codec(tmp2); }else if(!strcmp(tmp, \"vcodec\")){ opt_video_codec(tmp2); }else if(!strcmp(tmp, \"scodec\")){ opt_subtitle_codec(tmp2); }else if(opt_default(tmp, tmp2) < 0){ fprintf(stderr, \"%s: Invalid option or argument: %s=%s\\n\", filename, tmp, tmp2); av_exit(1); } } fclose(f); return 0; }"} {"target": 1, "idx": 6543, "func": "static int asf_read_ext_content(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; uint64_t size = avio_rl64(pb); uint16_t nb_desc = avio_rl16(pb); int i, ret; for (i = 0; i < nb_desc; i++) { uint16_t name_len, type, val_len; uint8_t *name = NULL; name_len = avio_rl16(pb); if (!name_len) return AVERROR_INVALIDDATA; name = av_malloc(name_len); if (!name) return AVERROR(ENOMEM); avio_get_str16le(pb, name_len, name, name_len); type = avio_rl16(pb); val_len = avio_rl16(pb); if ((ret = process_metadata(s, name, name_len, val_len, type, &s->metadata)) < 0) return ret; } align_position(pb, asf->offset, size); return 0; }"} {"target": 1, "idx": 6545, "func": "static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s, void *data, uint32_t length, uint64_t offset) { int ret = 0; void *buffer = NULL; void *merged_sector = NULL; void *data_tmp, *sector_write; unsigned int i; int sector_offset; uint32_t desc_sectors, sectors, total_length; uint32_t sectors_written = 0; uint32_t aligned_length; uint32_t leading_length = 0; uint32_t trailing_length = 0; uint32_t partial_sectors = 0; uint32_t bytes_written = 0; uint64_t file_offset; VHDXHeader *header; VHDXLogEntryHeader new_hdr; VHDXLogDescriptor *new_desc = NULL; VHDXLogDataSector *data_sector = NULL; MSGUID new_guid = { 0 }; header = s->headers[s->curr_header]; /* need to have offset read data, and be on 4096 byte boundary */ if (length > header->log_length) { /* no log present. we could create a log here instead of failing */ ret = -EINVAL; goto exit; } if (guid_eq(header->log_guid, zero_guid)) { vhdx_guid_generate(&new_guid); vhdx_update_headers(bs, s, false, &new_guid); } else { /* currently, we require that the log be flushed after * every write. */ ret = -ENOTSUP; goto exit; } /* 0 is an invalid sequence number, but may also represent the first * log write (or a wrapped seq) */ if (s->log.sequence == 0) { s->log.sequence = 1; } sector_offset = offset % VHDX_LOG_SECTOR_SIZE; file_offset = (offset / VHDX_LOG_SECTOR_SIZE) * VHDX_LOG_SECTOR_SIZE; aligned_length = length; /* add in the unaligned head and tail bytes */ if (sector_offset) { leading_length = (VHDX_LOG_SECTOR_SIZE - sector_offset); leading_length = leading_length > length ? 
length : leading_length; aligned_length -= leading_length; partial_sectors++; } sectors = aligned_length / VHDX_LOG_SECTOR_SIZE; trailing_length = aligned_length - (sectors * VHDX_LOG_SECTOR_SIZE); if (trailing_length) { partial_sectors++; } sectors += partial_sectors; /* sectors is now how many sectors the data itself takes, not * including the header and descriptor metadata */ new_hdr = (VHDXLogEntryHeader) { .signature = VHDX_LOG_SIGNATURE, .tail = s->log.tail, .sequence_number = s->log.sequence, .descriptor_count = sectors, .reserved = 0, .flushed_file_offset = bdrv_getlength(bs->file->bs), .last_file_offset = bdrv_getlength(bs->file->bs), }; new_hdr.log_guid = header->log_guid; desc_sectors = vhdx_compute_desc_sectors(new_hdr.descriptor_count); total_length = (desc_sectors + sectors) * VHDX_LOG_SECTOR_SIZE; new_hdr.entry_length = total_length; vhdx_log_entry_hdr_le_export(&new_hdr); buffer = qemu_blockalign(bs, total_length); memcpy(buffer, &new_hdr, sizeof(new_hdr)); new_desc = buffer + sizeof(new_hdr); data_sector = buffer + (desc_sectors * VHDX_LOG_SECTOR_SIZE); data_tmp = data; /* All log sectors are 4KB, so for any partial sectors we must * merge the data with preexisting data from the final file * destination */ merged_sector = qemu_blockalign(bs, VHDX_LOG_SECTOR_SIZE); for (i = 0; i < sectors; i++) { new_desc->signature = VHDX_LOG_DESC_SIGNATURE; new_desc->sequence_number = s->log.sequence; new_desc->file_offset = file_offset; if (i == 0 && leading_length) { /* partial sector at the front of the buffer */ ret = bdrv_pread(bs->file, file_offset, merged_sector, VHDX_LOG_SECTOR_SIZE); if (ret < 0) { goto exit; } memcpy(merged_sector + sector_offset, data_tmp, leading_length); bytes_written = leading_length; sector_write = merged_sector; } else if (i == sectors - 1 && trailing_length) { /* partial sector at the end of the buffer */ ret = bdrv_pread(bs->file, file_offset, merged_sector + trailing_length, VHDX_LOG_SECTOR_SIZE - trailing_length); if (ret < 0) { goto exit; } memcpy(merged_sector, data_tmp, trailing_length); bytes_written = trailing_length; sector_write = merged_sector; } else { bytes_written = VHDX_LOG_SECTOR_SIZE; sector_write = data_tmp; } /* populate the raw sector data into the proper structures, * as well as update the descriptor, and convert to proper * endianness */ vhdx_log_raw_to_le_sector(new_desc, data_sector, sector_write, s->log.sequence); data_tmp += bytes_written; data_sector++; new_desc++; file_offset += VHDX_LOG_SECTOR_SIZE; } /* checksum covers entire entry, from the log header through the * last data sector */ vhdx_update_checksum(buffer, total_length, offsetof(VHDXLogEntryHeader, checksum)); /* now write to the log */ ret = vhdx_log_write_sectors(bs, &s->log, §ors_written, buffer, desc_sectors + sectors); if (ret < 0) { goto exit; } if (sectors_written != desc_sectors + sectors) { /* instead of failing, we could flush the log here */ ret = -EINVAL; goto exit; } s->log.sequence++; /* write new tail */ s->log.tail = s->log.write; exit: qemu_vfree(buffer); qemu_vfree(merged_sector); return ret; }"} {"target": 1, "idx": 6547, "func": "static void vmxnet3_net_uninit(VMXNET3State *s) { g_free(s->mcast_list); vmxnet_tx_pkt_reset(s->tx_pkt); vmxnet_tx_pkt_uninit(s->tx_pkt); vmxnet_rx_pkt_uninit(s->rx_pkt); qemu_del_nic(s->nic); }"} {"target": 1, "idx": 6556, "func": "static int http_prepare_data(HTTPContext *c) { int i; switch(c->state) { case HTTPSTATE_SEND_DATA_HEADER: memset(&c->fmt_ctx, 0, sizeof(c->fmt_ctx)); if (c->stream->feed) { /* open output 
stream by using specified codecs */ c->fmt_ctx.oformat = c->stream->fmt; c->fmt_ctx.nb_streams = c->stream->nb_streams; for(i=0;ifmt_ctx.nb_streams;i++) { AVStream *st; st = av_mallocz(sizeof(AVStream)); c->fmt_ctx.streams[i] = st; if (c->stream->feed == c->stream) memcpy(st, c->stream->streams[i], sizeof(AVStream)); else memcpy(st, c->stream->feed->streams[c->stream->feed_streams[i]], sizeof(AVStream)); st->codec.frame_number = 0; /* XXX: should be done in AVStream, not in codec */ } c->got_key_frame = 0; } else { /* open output stream by using codecs in specified file */ c->fmt_ctx.oformat = c->stream->fmt; c->fmt_ctx.nb_streams = c->fmt_in->nb_streams; for(i=0;ifmt_ctx.nb_streams;i++) { AVStream *st; st = av_mallocz(sizeof(AVStream)); c->fmt_ctx.streams[i] = st; memcpy(st, c->fmt_in->streams[i], sizeof(AVStream)); st->codec.frame_number = 0; /* XXX: should be done in AVStream, not in codec */ } c->got_key_frame = 0; } init_put_byte(&c->fmt_ctx.pb, c->pbuffer, PACKET_MAX_SIZE, 1, c, NULL, http_write_packet, NULL); c->fmt_ctx.pb.is_streamed = 1; /* prepare header */ av_write_header(&c->fmt_ctx); c->state = HTTPSTATE_SEND_DATA; c->last_packet_sent = 0; break; case HTTPSTATE_SEND_DATA: /* find a new packet */ #if 0 fifo_total_size = http_fifo_write_count - c->last_http_fifo_write_count; if (fifo_total_size >= ((3 * FIFO_MAX_SIZE) / 4)) { /* overflow : resync. We suppose that wptr is at this point a pointer to a valid packet */ c->rptr = http_fifo.wptr; c->got_key_frame = 0; } start_rptr = c->rptr; if (fifo_read(&http_fifo, (UINT8 *)&hdr, sizeof(hdr), &c->rptr) < 0) return 0; payload_size = ntohs(hdr.payload_size); payload = av_malloc(payload_size); if (fifo_read(&http_fifo, payload, payload_size, &c->rptr) < 0) { /* cannot read all the payload */ av_free(payload); c->rptr = start_rptr; return 0; } c->last_http_fifo_write_count = http_fifo_write_count - fifo_size(&http_fifo, c->rptr); if (c->stream->stream_type != STREAM_TYPE_MASTER) { /* test if the packet can be handled by this format */ ret = 0; for(i=0;ifmt_ctx.nb_streams;i++) { AVStream *st = c->fmt_ctx.streams[i]; if (test_header(&hdr, &st->codec)) { /* only begin sending when got a key frame */ if (st->codec.key_frame) c->got_key_frame |= 1 << i; if (c->got_key_frame & (1 << i)) { ret = c->fmt_ctx.format->write_packet(&c->fmt_ctx, i, payload, payload_size); } break; } } if (ret) { /* must send trailer now */ c->state = HTTPSTATE_SEND_DATA_TRAILER; } } else { /* master case : send everything */ char *q; q = c->buffer; memcpy(q, &hdr, sizeof(hdr)); q += sizeof(hdr); memcpy(q, payload, payload_size); q += payload_size; c->buffer_ptr = c->buffer; c->buffer_end = q; } av_free(payload); #endif { AVPacket pkt; /* read a packet from the input stream */ if (c->stream->feed) { ffm_set_write_index(c->fmt_in, c->stream->feed->feed_write_index, c->stream->feed->feed_size); } if (av_read_packet(c->fmt_in, &pkt) < 0) { if (c->stream->feed && c->stream->feed->feed_opened) { /* if coming from feed, it means we reached the end of the ffm file, so must wait for more data */ c->state = HTTPSTATE_WAIT_FEED; return 1; /* state changed */ } else { /* must send trailer now because eof or error */ c->state = HTTPSTATE_SEND_DATA_TRAILER; } } else { /* send it to the appropriate stream */ if (c->stream->feed) { /* if coming from a feed, select the right stream */ for(i=0;istream->nb_streams;i++) { if (c->stream->feed_streams[i] == pkt.stream_index) { pkt.stream_index = i; if (pkt.flags & PKT_FLAG_KEY) { c->got_key_frame |= 1 << i; } /* See if we have all the 
key frames, then * we start to send. This logic is not quite * right, but it works for the case of a * single video stream with one or more * audio streams (for which every frame is * typically a key frame). */ if (!c->stream->send_on_key || ((c->got_key_frame + 1) >> c->stream->nb_streams)) { goto send_it; } } } } else { AVCodecContext *codec; send_it: /* Fudge here */ codec = &c->fmt_ctx.streams[pkt.stream_index]->codec; codec->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0); #ifdef PJSG if (codec->codec_type == CODEC_TYPE_AUDIO) { codec->frame_size = (codec->sample_rate * pkt.duration + 500000) / 1000000; /* printf(\"Calculated size %d, from sr %d, duration %d\\n\", codec->frame_size, codec->sample_rate, pkt.duration); */ } #endif if (av_write_packet(&c->fmt_ctx, &pkt, 0)) c->state = HTTPSTATE_SEND_DATA_TRAILER; codec->frame_number++; } av_free_packet(&pkt); } } break; default: case HTTPSTATE_SEND_DATA_TRAILER: /* last packet test ? */ if (c->last_packet_sent) return -1; /* prepare header */ av_write_trailer(&c->fmt_ctx); c->last_packet_sent = 1; break; } return 0; }"} {"target": 1, "idx": 6575, "func": "static void input_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer) { if (!buffer->cmd) { AVBufferRef *buf = buffer->user_data; av_buffer_unref(&buf); } mmal_buffer_header_release(buffer); }"} {"target": 1, "idx": 6577, "func": "CPUArchState *cpu_copy(CPUArchState *env) { CPUState *cpu = ENV_GET_CPU(env); CPUState *new_cpu = cpu_init(cpu_model); CPUArchState *new_env = cpu->env_ptr; CPUBreakpoint *bp; CPUWatchpoint *wp; /* Reset non arch specific state */ cpu_reset(new_cpu); memcpy(new_env, env, sizeof(CPUArchState)); /* Clone all break/watchpoints. Note: Once we support ptrace with hw-debug register access, make sure BP_CPU break/watchpoints are handled correctly on clone. 
*/ QTAILQ_INIT(&cpu->breakpoints); QTAILQ_INIT(&cpu->watchpoints); QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL); } QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL); } return new_env; }"} {"target": 0, "idx": 6580, "func": "static void avc_luma_vt_and_aver_dst_16x16_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride) { int32_t loop_cnt; int16_t filt_const0 = 0xfb01; int16_t filt_const1 = 0x1414; int16_t filt_const2 = 0x1fb; v16u8 dst0, dst1, dst2, dst3; v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r; v16i8 src87_r, src10_l, src32_l, src54_l, src76_l, src21_l, src43_l; v16i8 src65_l, src87_l; v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l; v16i8 filt0, filt1, filt2; v16u8 res0, res1, res2, res3; filt0 = (v16i8) __msa_fill_h(filt_const0); filt1 = (v16i8) __msa_fill_h(filt_const1); filt2 = (v16i8) __msa_fill_h(filt_const2); LD_SB5(src, src_stride, src0, src1, src2, src3, src4); src += (5 * src_stride); XORI_B5_128_SB(src0, src1, src2, src3, src4); ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, src32_r, src43_r); ILVL_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_l, src21_l, src32_l, src43_l); for (loop_cnt = 4; loop_cnt--;) { LD_SB4(src, src_stride, src5, src6, src7, src8); src += (4 * src_stride); XORI_B4_128_SB(src5, src6, src7, src8); ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r, src76_r, src87_r); ILVL_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_l, src65_l, src76_l, src87_l); out0_r = DPADD_SH3_SH(src10_r, src32_r, src54_r, filt0, filt1, filt2); out1_r = DPADD_SH3_SH(src21_r, src43_r, src65_r, filt0, filt1, filt2); out2_r = DPADD_SH3_SH(src32_r, src54_r, src76_r, filt0, filt1, filt2); out3_r = DPADD_SH3_SH(src43_r, src65_r, src87_r, filt0, filt1, filt2); out0_l = DPADD_SH3_SH(src10_l, src32_l, src54_l, filt0, filt1, filt2); out1_l = DPADD_SH3_SH(src21_l, src43_l, src65_l, filt0, filt1, filt2); out2_l = DPADD_SH3_SH(src32_l, src54_l, src76_l, filt0, filt1, filt2); out3_l = DPADD_SH3_SH(src43_l, src65_l, src87_l, filt0, filt1, filt2); SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, 5); SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, 5); SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7); LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, out3_r, res0, res1, res2, res3); XORI_B4_128_UB(res0, res1, res2, res3); AVER_UB4_UB(res0, dst0, res1, dst1, res2, dst2, res3, dst3, res0, res1, res2, res3); ST_UB4(res0, res1, res2, res3, dst, dst_stride); dst += (4 * dst_stride); src10_r = src54_r; src32_r = src76_r; src21_r = src65_r; src43_r = src87_r; src10_l = src54_l; src32_l = src76_l; src21_l = src65_l; src43_l = src87_l; src4 = src8; } }"} {"target": 0, "idx": 6594, "func": "static int net_socket_can_send(void *opaque) { NetSocketState *s = opaque; return qemu_can_send_packet(&s->nc); }"} {"target": 0, "idx": 6605, "func": "static void ppc_prep_init (ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env = NULL; char *filename; nvram_t nvram; M48t59State *m48t59; int PPC_io_memory; int linux_boot, i, nb_nics1, bios_size; ram_addr_t ram_offset, 
bios_offset; uint32_t kernel_base, initrd_base; long kernel_size, initrd_size; PCIBus *pci_bus; qemu_irq *i8259; qemu_irq *cpu_exit_irq; int ppc_boot_device; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; DriveInfo *fd[MAX_FD]; sysctrl = qemu_mallocz(sizeof(sysctrl_t)); linux_boot = (kernel_filename != NULL); /* init CPUs */ if (cpu_model == NULL) cpu_model = \"602\"; for (i = 0; i < smp_cpus; i++) { env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"Unable to find PowerPC CPU definition\\n\"); exit(1); } if (env->flags & POWERPC_FLAG_RTC_CLK) { /* POWER / PowerPC 601 RTC clock frequency is 7.8125 MHz */ cpu_ppc_tb_init(env, 7812500UL); } else { /* Set time-base frequency to 100 Mhz */ cpu_ppc_tb_init(env, 100UL * 1000UL * 1000UL); } qemu_register_reset((QEMUResetHandler*)&cpu_reset, env); } /* allocate RAM */ ram_offset = qemu_ram_alloc(NULL, \"ppc_prep.ram\", ram_size); cpu_register_physical_memory(0, ram_size, ram_offset); /* allocate and load BIOS */ bios_offset = qemu_ram_alloc(NULL, \"ppc_prep.bios\", BIOS_SIZE); if (bios_name == NULL) bios_name = BIOS_FILENAME; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (filename) { bios_size = get_image_size(filename); } else { bios_size = -1; } if (bios_size > 0 && bios_size <= BIOS_SIZE) { target_phys_addr_t bios_addr; bios_size = (bios_size + 0xfff) & ~0xfff; bios_addr = (uint32_t)(-bios_size); cpu_register_physical_memory(bios_addr, bios_size, bios_offset | IO_MEM_ROM); bios_size = load_image_targphys(filename, bios_addr, bios_size); } if (bios_size < 0 || bios_size > BIOS_SIZE) { hw_error(\"qemu: could not load PPC PREP bios '%s'\\n\", bios_name); } if (filename) { qemu_free(filename); } if (linux_boot) { kernel_base = KERNEL_LOAD_ADDR; /* now we can load the kernel */ kernel_size = load_image_targphys(kernel_filename, kernel_base, ram_size - kernel_base); if (kernel_size < 0) { hw_error(\"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } /* load initrd */ if (initrd_filename) { initrd_base = INITRD_LOAD_ADDR; initrd_size = load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); if (initrd_size < 0) { hw_error(\"qemu: could not load initial ram disk '%s'\\n\", initrd_filename); } } else { initrd_base = 0; initrd_size = 0; } ppc_boot_device = 'm'; } else { kernel_base = 0; kernel_size = 0; initrd_base = 0; initrd_size = 0; ppc_boot_device = '\\0'; /* For now, OHW cannot boot from the network. */ for (i = 0; boot_device[i] != '\\0'; i++) { if (boot_device[i] >= 'a' && boot_device[i] <= 'f') { ppc_boot_device = boot_device[i]; break; } } if (ppc_boot_device == '\\0') { fprintf(stderr, \"No valid boot device for Mac99 machine\\n\"); exit(1); } } isa_mem_base = 0xc0000000; if (PPC_INPUT(env) != PPC_FLAGS_INPUT_6xx) { hw_error(\"Only 6xx bus is supported on PREP machine\\n\"); } i8259 = i8259_init(first_cpu->irq_inputs[PPC6xx_INPUT_INT]); pci_bus = pci_prep_init(i8259); /* Hmm, prep has no pci-isa bridge ??? 
*/ isa_bus_new(NULL); isa_bus_irqs(i8259); // pci_bus = i440fx_init(); /* Register 8 MB of ISA IO space (needed for non-contiguous map) */ PPC_io_memory = cpu_register_io_memory(PPC_prep_io_read, PPC_prep_io_write, sysctrl, DEVICE_LITTLE_ENDIAN); cpu_register_physical_memory(0x80000000, 0x00800000, PPC_io_memory); /* init basic PC hardware */ pci_vga_init(pci_bus); // openpic = openpic_init(0x00000000, 0xF0000000, 1); // pit = pit_init(0x40, i8259[0]); rtc_init(2000, NULL); if (serial_hds[0]) serial_isa_init(0, serial_hds[0]); nb_nics1 = nb_nics; if (nb_nics1 > NE2000_NB_MAX) nb_nics1 = NE2000_NB_MAX; for(i = 0; i < nb_nics1; i++) { if (nd_table[i].model == NULL) { nd_table[i].model = qemu_strdup(\"ne2k_isa\"); } if (strcmp(nd_table[i].model, \"ne2k_isa\") == 0) { isa_ne2000_init(ne2000_io[i], ne2000_irq[i], &nd_table[i]); } else { pci_nic_init_nofail(&nd_table[i], \"ne2k_pci\", NULL); } } if (drive_get_max_bus(IF_IDE) >= MAX_IDE_BUS) { fprintf(stderr, \"qemu: too many IDE bus\\n\"); exit(1); } for(i = 0; i < MAX_IDE_BUS * MAX_IDE_DEVS; i++) { hd[i] = drive_get(IF_IDE, i / MAX_IDE_DEVS, i % MAX_IDE_DEVS); } for(i = 0; i < 1/*MAX_IDE_BUS*/; i++) { isa_ide_init(ide_iobase[i], ide_iobase2[i], ide_irq[i], hd[2 * i], hd[2 * i + 1]); } isa_create_simple(\"i8042\"); cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1); DMA_init(1, cpu_exit_irq); // SB16_init(); for(i = 0; i < MAX_FD; i++) { fd[i] = drive_get(IF_FLOPPY, 0, i); } fdctrl_init_isa(fd); /* Register speaker port */ register_ioport_read(0x61, 1, 1, speaker_ioport_read, NULL); register_ioport_write(0x61, 1, 1, speaker_ioport_write, NULL); /* Register fake IO ports for PREP */ sysctrl->reset_irq = first_cpu->irq_inputs[PPC6xx_INPUT_HRESET]; register_ioport_read(0x398, 2, 1, &PREP_io_read, sysctrl); register_ioport_write(0x398, 2, 1, &PREP_io_write, sysctrl); /* System control ports */ register_ioport_read(0x0092, 0x01, 1, &PREP_io_800_readb, sysctrl); register_ioport_write(0x0092, 0x01, 1, &PREP_io_800_writeb, sysctrl); register_ioport_read(0x0800, 0x52, 1, &PREP_io_800_readb, sysctrl); register_ioport_write(0x0800, 0x52, 1, &PREP_io_800_writeb, sysctrl); /* PCI intack location */ PPC_io_memory = cpu_register_io_memory(PPC_intack_read, PPC_intack_write, NULL, DEVICE_LITTLE_ENDIAN); cpu_register_physical_memory(0xBFFFFFF0, 0x4, PPC_io_memory); /* PowerPC control and status register group */ #if 0 PPC_io_memory = cpu_register_io_memory(PPC_XCSR_read, PPC_XCSR_write, NULL, DEVICE_LITTLE_ENDIAN); cpu_register_physical_memory(0xFEFF0000, 0x1000, PPC_io_memory); #endif if (usb_enabled) { usb_ohci_init_pci(pci_bus, -1); } m48t59 = m48t59_init(i8259[8], 0, 0x0074, NVRAM_SIZE, 59); if (m48t59 == NULL) return; sysctrl->nvram = m48t59; /* Initialise NVRAM */ nvram.opaque = m48t59; nvram.read_fn = &m48t59_read; nvram.write_fn = &m48t59_write; PPC_NVRAM_set_params(&nvram, NVRAM_SIZE, \"PREP\", ram_size, ppc_boot_device, kernel_base, kernel_size, kernel_cmdline, initrd_base, initrd_size, /* XXX: need an option to load a NVRAM image */ 0, graphic_width, graphic_height, graphic_depth); /* Special port to get debug messages from Open-Firmware */ register_ioport_write(0x0F00, 4, 1, &PPC_debug_write, NULL); }"} {"target": 0, "idx": 6622, "func": "static void copy_picture_field(InterlaceContext *s, AVFrame *src_frame, AVFrame *dst_frame, AVFilterLink *inlink, enum FieldType field_type, int lowpass) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); int hsub = desc->log2_chroma_w; int vsub = desc->log2_chroma_h; int plane, j; for 
(plane = 0; plane < desc->nb_components; plane++) { int cols = (plane == 1 || plane == 2) ? -(-inlink->w) >> hsub : inlink->w; int lines = (plane == 1 || plane == 2) ? -(-inlink->h) >> vsub : inlink->h; uint8_t *dstp = dst_frame->data[plane]; const uint8_t *srcp = src_frame->data[plane]; av_assert0(cols >= 0 || lines >= 0); lines = (lines + (field_type == FIELD_UPPER)) / 2; if (field_type == FIELD_LOWER) srcp += src_frame->linesize[plane]; if (field_type == FIELD_LOWER) dstp += dst_frame->linesize[plane]; if (lowpass) { int srcp_linesize = src_frame->linesize[plane] * 2; int dstp_linesize = dst_frame->linesize[plane] * 2; for (j = lines; j > 0; j--) { const uint8_t *srcp_above = srcp - src_frame->linesize[plane]; const uint8_t *srcp_below = srcp + src_frame->linesize[plane]; if (j == lines) srcp_above = srcp; // there is no line above if (j == 1) srcp_below = srcp; // there is no line below s->lowpass_line(dstp, cols, srcp, srcp_above, srcp_below); dstp += dstp_linesize; srcp += srcp_linesize; } } else { av_image_copy_plane(dstp, dst_frame->linesize[plane] * 2, srcp, src_frame->linesize[plane] * 2, cols, lines); } } }"} {"target": 1, "idx": 6628, "func": "target_ulong helper_rdhwr_cpunum(CPUMIPSState *env) { check_hwrena(env, 0); return env->CP0_EBase & 0x3ff; }"} {"target": 1, "idx": 6635, "func": "void qmp_inject_nmi(Error **errp) { #if defined(TARGET_I386) CPUState *cs; CPU_FOREACH(cs) { X86CPU *cpu = X86_CPU(cs); if (!cpu->apic_state) { cpu_interrupt(cs, CPU_INTERRUPT_NMI); } else { apic_deliver_nmi(cpu->apic_state); } } #elif defined(TARGET_S390X) CPUState *cs; S390CPU *cpu; CPU_FOREACH(cs) { cpu = S390_CPU(cs); if (cpu->env.cpu_num == monitor_get_cpu_index()) { if (s390_cpu_restart(S390_CPU(cs)) == -1) { error_set(errp, QERR_UNSUPPORTED); return; } break; } } #else error_set(errp, QERR_UNSUPPORTED); #endif }"} {"target": 1, "idx": 6641, "func": "void helper_single_step(CPUX86State *env) { #ifndef CONFIG_USER_ONLY check_hw_breakpoints(env, 1); env->dr[6] |= DR6_BS; #endif raise_exception(env, EXCP01_DB); }"} {"target": 1, "idx": 6646, "func": "static av_always_inline void rv40_weak_loop_filter(uint8_t *src, const int step, const int stride, const int filter_p1, const int filter_q1, const int alpha, const int beta, const int lim_p0q0, const int lim_q1, const int lim_p1) { uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; int i, t, u, diff; for (i = 0; i < 4; i++, src += stride) { int diff_p1p0 = src[-2*step] - src[-1*step]; int diff_q1q0 = src[ 1*step] - src[ 0*step]; int diff_p1p2 = src[-2*step] - src[-3*step]; int diff_q1q2 = src[ 1*step] - src[ 2*step]; t = src[0*step] - src[-1*step]; if (!t) continue; u = (alpha * FFABS(t)) >> 7; if (u > 3 - (filter_p1 && filter_q1)) continue; t <<= 2; if (filter_p1 && filter_q1) t += src[-2*step] - src[1*step]; diff = CLIP_SYMM((t + 4) >> 3, lim_p0q0); src[-1*step] = cm[src[-1*step] + diff]; src[ 0*step] = cm[src[ 0*step] - diff]; if (filter_p1 && FFABS(diff_p1p2) <= beta) { t = (diff_p1p0 + diff_p1p2 - diff) >> 1; src[-2*step] = cm[src[-2*step] - CLIP_SYMM(t, lim_p1)]; } if (filter_q1 && FFABS(diff_q1q2) <= beta) { t = (diff_q1q0 + diff_q1q2 + diff) >> 1; src[ 1*step] = cm[src[ 1*step] - CLIP_SYMM(t, lim_q1)]; } } }"} {"target": 1, "idx": 6650, "func": "static void con_disconnect(struct XenDevice *xendev) { struct XenConsole *con = container_of(xendev, struct XenConsole, xendev); if (con->chr) { qemu_chr_add_handlers(con->chr, NULL, NULL, NULL, NULL); qemu_chr_fe_release(con->chr); } xen_be_unbind_evtchn(&con->xendev); if (con->sring) { if (!xendev->dev) 
{ munmap(con->sring, XC_PAGE_SIZE); } else { xengnttab_unmap(xendev->gnttabdev, con->sring, 1); } con->sring = NULL; } }"} {"target": 1, "idx": 6670, "func": "static int normalize_bits(int num, int width) { if (!num) return 0; if (num == -1) return width; if (num < 0) num = ~num; return width - av_log2(num); }"} {"target": 1, "idx": 6674, "func": "static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->realize = xen_pt_realize; k->exit = xen_pt_unregister_device; k->config_read = xen_pt_pci_read_config; k->config_write = xen_pt_pci_write_config; set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->desc = \"Assign an host PCI device with Xen\"; dc->props = xen_pci_passthrough_properties; };"} {"target": 1, "idx": 6685, "func": "static int ohci_bus_start(OHCIState *ohci) { ohci->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ohci_frame_boundary, ohci); if (ohci->eof_timer == NULL) { trace_usb_ohci_bus_eof_timer_failed(ohci->name); ohci_die(ohci); return 0; } trace_usb_ohci_start(ohci->name); /* Delay the first SOF event by one frame time as * linux driver is not ready to receive it and * can meet some race conditions */ ohci_eof_timer(ohci); return 1; }"} {"target": 1, "idx": 6699, "func": "void bdrv_image_info_specific_dump(fprintf_function func_fprintf, void *f, ImageInfoSpecific *info_spec) { QObject *obj, *data; Visitor *v = qmp_output_visitor_new(&obj); visit_type_ImageInfoSpecific(v, NULL, &info_spec, &error_abort); visit_complete(v, &obj); assert(qobject_type(obj) == QTYPE_QDICT); data = qdict_get(qobject_to_qdict(obj), \"data\"); dump_qobject(func_fprintf, f, 1, data); visit_free(v); }"} {"target": 0, "idx": 6708, "func": "static int srt_decode_frame(AVCodecContext *avctx, void *data, int *got_sub_ptr, AVPacket *avpkt) { AVSubtitle *sub = data; AVBPrint buffer; int x1 = -1, y1 = -1, x2 = -1, y2 = -1; int size, ret; const uint8_t *p = av_packet_get_side_data(avpkt, AV_PKT_DATA_SUBTITLE_POSITION, &size); FFASSDecoderContext *s = avctx->priv_data; if (p && size == 16) { x1 = AV_RL32(p ); y1 = AV_RL32(p + 4); x2 = AV_RL32(p + 8); y2 = AV_RL32(p + 12); } if (avpkt->size <= 0) return avpkt->size; av_bprint_init(&buffer, 0, AV_BPRINT_SIZE_UNLIMITED); srt_to_ass(avctx, &buffer, avpkt->data, x1, y1, x2, y2); ret = ff_ass_add_rect(sub, buffer.str, s->readorder++, 0, NULL, NULL); av_bprint_finalize(&buffer, NULL); if (ret < 0) return ret; *got_sub_ptr = sub->num_rects > 0; return avpkt->size; }"} {"target": 1, "idx": 6727, "func": "static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) { YADIFContext *s = ctx->priv; ThreadData *td = arg; int refs = s->cur->linesize[td->plane]; int df = (s->csp->comp[td->plane].depth_minus1 + 8) / 8; int pix_3 = 3 * df; int slice_h = td->h / nb_jobs; int slice_start = jobnr * slice_h; int slice_end = (jobnr == nb_jobs - 1) ? td->h : (jobnr + 1) * slice_h; int y; /* filtering reads 3 pixels to the left/right; to avoid invalid reads, * we need to call the c variant which avoids this for border pixels */ for (y = slice_start; y < slice_end; y++) { if ((y ^ td->parity) & 1) { uint8_t *prev = &s->prev->data[td->plane][y * refs]; uint8_t *cur = &s->cur ->data[td->plane][y * refs]; uint8_t *next = &s->next->data[td->plane][y * refs]; uint8_t *dst = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]]; int mode = y == 1 || y + 2 == td->h ? 
2 : s->mode; s->filter_line(dst + pix_3, prev + pix_3, cur + pix_3, next + pix_3, td->w - 6, y + 1 < td->h ? refs : -refs, y ? -refs : refs, td->parity ^ td->tff, mode); s->filter_edges(dst, prev, cur, next, td->w, y + 1 < td->h ? refs : -refs, y ? -refs : refs, td->parity ^ td->tff, mode); } else { memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]], &s->cur->data[td->plane][y * refs], td->w * df); } } return 0; }"} {"target": 0, "idx": 6753, "func": "int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx) { if (ppc64_radix_guest(cpu)) { /* Guest uses radix */ /* TODO - Unsupported */ error_report(\"Guest Radix Support Unimplemented\"); exit(1); } else { /* Guest uses hash */ return ppc_hash64_handle_mmu_fault(cpu, eaddr, rwx, mmu_idx); } }"} {"target": 1, "idx": 6763, "func": "int ff_img_read_packet(AVFormatContext *s1, AVPacket *pkt) { VideoDemuxData *s = s1->priv_data; char filename_bytes[1024]; char *filename = filename_bytes; int i; int size[3] = { 0 }, ret[3] = { 0 }; AVIOContext *f[3] = { NULL }; AVCodecContext *codec = s1->streams[0]->codec; if (!s->is_pipe) { /* loop over input */ if (s->loop && s->img_number > s->img_last) { s->img_number = s->img_first; } if (s->img_number > s->img_last) return AVERROR_EOF; if (s->use_glob) { #if HAVE_GLOB filename = s->globstate.gl_pathv[s->img_number]; #endif } else { if (av_get_frame_filename(filename_bytes, sizeof(filename_bytes), s->path, s->img_number) < 0 && s->img_number > 1) return AVERROR(EIO); } for (i = 0; i < 3; i++) { if (avio_open2(&f[i], filename, AVIO_FLAG_READ, &s1->interrupt_callback, NULL) < 0) { if (i >= 1) break; av_log(s1, AV_LOG_ERROR, \"Could not open file : %s\\n\", filename); return AVERROR(EIO); } size[i] = avio_size(f[i]); if (!s->split_planes) break; filename[strlen(filename) - 1] = 'U' + i; } if (codec->codec_id == AV_CODEC_ID_NONE) { AVProbeData pd; AVInputFormat *ifmt; uint8_t header[PROBE_BUF_MIN + AVPROBE_PADDING_SIZE]; int ret; int score = 0; ret = avio_read(f[0], header, PROBE_BUF_MIN); if (ret < 0) return ret; avio_skip(f[0], -ret); pd.buf = header; pd.buf_size = ret; pd.filename = filename; ifmt = av_probe_input_format3(&pd, 1, &score); if (ifmt && ifmt->read_packet == ff_img_read_packet && ifmt->raw_codec_id) codec->codec_id = ifmt->raw_codec_id; } if (codec->codec_id == AV_CODEC_ID_RAWVIDEO && !codec->width) infer_size(&codec->width, &codec->height, size[0]); } else { f[0] = s1->pb; if (url_feof(f[0])) return AVERROR(EIO); if (s->frame_size > 0) { size[0] = s->frame_size; } else { size[0] = 4096; } } if (av_new_packet(pkt, size[0] + size[1] + size[2]) < 0) return AVERROR(ENOMEM); pkt->stream_index = 0; pkt->flags |= AV_PKT_FLAG_KEY; if (s->ts_from_file) { struct stat img_stat; if (stat(filename, &img_stat)) return AVERROR(EIO); pkt->pts = (int64_t)img_stat.st_mtime; av_add_index_entry(s1->streams[0], s->img_number, pkt->pts, 0, 0, AVINDEX_KEYFRAME); } else if (!s->is_pipe) { pkt->pts = s->pts; } pkt->size = 0; for (i = 0; i < 3; i++) { if (f[i]) { ret[i] = avio_read(f[i], pkt->data + pkt->size, size[i]); if (!s->is_pipe) avio_close(f[i]); if (ret[i] > 0) pkt->size += ret[i]; } } if (ret[0] <= 0 || ret[1] < 0 || ret[2] < 0) { av_free_packet(pkt); return AVERROR(EIO); /* signal EOF */ } else { s->img_count++; s->img_number++; s->pts++; return 0; } }"} {"target": 1, "idx": 6775, "func": "void ide_data_writew(void *opaque, uint32_t addr, uint32_t val) { IDEBus *bus = opaque; IDEState *s = idebus_active_if(bus); uint8_t *p; /* PIO data access allowed only when DRQ 
bit is set */ if (!(s->status & DRQ_STAT)) return; p = s->data_ptr; *(uint16_t *)p = le16_to_cpu(val); p += 2; s->data_ptr = p; if (p >= s->data_end) s->end_transfer_func(s); }"} {"target": 1, "idx": 6778, "func": "static void quantize_and_encode_band_cost_SPAIR_mips(struct AACEncContext *s, PutBitContext *pb, const float *in, float *out, const float *scaled, int size, int scale_idx, int cb, const float lambda, const float uplim, int *bits, const float ROUNDING) { const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512]; const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512]; int i; int qc1, qc2, qc3, qc4; uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1]; uint16_t *p_codes = (uint16_t *)ff_aac_spectral_codes[cb-1]; float *p_vec = (float *)ff_aac_codebook_vectors[cb-1]; abs_pow34_v(s->scoefs, in, size); scaled = s->scoefs; for (i = 0; i < size; i += 4) { int curidx, curidx2; int *in_int = (int *)&in[i]; uint8_t v_bits; unsigned int v_codes; int t0, t1, t2, t3, t4, t5, t6, t7; const float *vec1, *vec2; qc1 = scaled[i ] * Q34 + ROUND_STANDARD; qc2 = scaled[i+1] * Q34 + ROUND_STANDARD; qc3 = scaled[i+2] * Q34 + ROUND_STANDARD; qc4 = scaled[i+3] * Q34 + ROUND_STANDARD; __asm__ volatile ( \".set push \\n\\t\" \".set noreorder \\n\\t\" \"ori %[t4], $zero, 4 \\n\\t\" \"slt %[t0], %[t4], %[qc1] \\n\\t\" \"slt %[t1], %[t4], %[qc2] \\n\\t\" \"slt %[t2], %[t4], %[qc3] \\n\\t\" \"slt %[t3], %[t4], %[qc4] \\n\\t\" \"movn %[qc1], %[t4], %[t0] \\n\\t\" \"movn %[qc2], %[t4], %[t1] \\n\\t\" \"movn %[qc3], %[t4], %[t2] \\n\\t\" \"movn %[qc4], %[t4], %[t3] \\n\\t\" \"lw %[t0], 0(%[in_int]) \\n\\t\" \"lw %[t1], 4(%[in_int]) \\n\\t\" \"lw %[t2], 8(%[in_int]) \\n\\t\" \"lw %[t3], 12(%[in_int]) \\n\\t\" \"srl %[t0], %[t0], 31 \\n\\t\" \"srl %[t1], %[t1], 31 \\n\\t\" \"srl %[t2], %[t2], 31 \\n\\t\" \"srl %[t3], %[t3], 31 \\n\\t\" \"subu %[t4], $zero, %[qc1] \\n\\t\" \"subu %[t5], $zero, %[qc2] \\n\\t\" \"subu %[t6], $zero, %[qc3] \\n\\t\" \"subu %[t7], $zero, %[qc4] \\n\\t\" \"movn %[qc1], %[t4], %[t0] \\n\\t\" \"movn %[qc2], %[t5], %[t1] \\n\\t\" \"movn %[qc3], %[t6], %[t2] \\n\\t\" \"movn %[qc4], %[t7], %[t3] \\n\\t\" \".set pop \\n\\t\" : [qc1]\"+r\"(qc1), [qc2]\"+r\"(qc2), [qc3]\"+r\"(qc3), [qc4]\"+r\"(qc4), [t0]\"=&r\"(t0), [t1]\"=&r\"(t1), [t2]\"=&r\"(t2), [t3]\"=&r\"(t3), [t4]\"=&r\"(t4), [t5]\"=&r\"(t5), [t6]\"=&r\"(t6), [t7]\"=&r\"(t7) : [in_int]\"r\"(in_int) : \"memory\" ); curidx = 9 * qc1; curidx += qc2 + 40; curidx2 = 9 * qc3; curidx2 += qc4 + 40; v_codes = (p_codes[curidx] << p_bits[curidx2]) | (p_codes[curidx2]); v_bits = p_bits[curidx] + p_bits[curidx2]; put_bits(pb, v_bits, v_codes); if (out) { vec1 = &p_vec[curidx*2 ]; vec2 = &p_vec[curidx2*2]; out[i+0] = vec1[0] * IQ; out[i+1] = vec1[1] * IQ; out[i+2] = vec2[0] * IQ; out[i+3] = vec2[1] * IQ; } } }"} {"target": 0, "idx": 6801, "func": "static int stream_component_open(PlayerState *is, int stream_index) { AVFormatContext *ic = is->ic; AVCodecContext *avctx; AVCodec *codec; SDL_AudioSpec wanted_spec, spec; AVDictionary *opts; AVDictionaryEntry *t = NULL; int ret = 0; if (stream_index < 0 || stream_index >= ic->nb_streams) return -1; avctx = ic->streams[stream_index]->codec; opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL); codec = avcodec_find_decoder(avctx->codec_id); avctx->workaround_bugs = workaround_bugs; avctx->idct_algo = idct; avctx->skip_frame = skip_frame; avctx->skip_idct = skip_idct; avctx->skip_loop_filter = 
skip_loop_filter; avctx->error_concealment = error_concealment; if (fast) avctx->flags2 |= AV_CODEC_FLAG2_FAST; if (!av_dict_get(opts, \"threads\", NULL, 0)) av_dict_set(&opts, \"threads\", \"auto\", 0); if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) av_dict_set(&opts, \"refcounted_frames\", \"1\", 0); if (!codec || (ret = avcodec_open2(avctx, codec, &opts)) < 0) { goto fail; } if ((t = av_dict_get(opts, \"\", NULL, AV_DICT_IGNORE_SUFFIX))) { av_log(NULL, AV_LOG_ERROR, \"Option %s not found.\\n\", t->key); ret = AVERROR_OPTION_NOT_FOUND; goto fail; } /* prepare audio output */ if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { is->sdl_sample_rate = avctx->sample_rate; if (!avctx->channel_layout) avctx->channel_layout = av_get_default_channel_layout(avctx->channels); if (!avctx->channel_layout) { fprintf(stderr, \"unable to guess channel layout\\n\"); ret = AVERROR_INVALIDDATA; goto fail; } if (avctx->channels == 1) is->sdl_channel_layout = AV_CH_LAYOUT_MONO; else is->sdl_channel_layout = AV_CH_LAYOUT_STEREO; is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout); wanted_spec.format = AUDIO_S16SYS; wanted_spec.freq = is->sdl_sample_rate; wanted_spec.channels = is->sdl_channels; wanted_spec.silence = 0; wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; wanted_spec.callback = sdl_audio_callback; wanted_spec.userdata = is; if (SDL_OpenAudio(&wanted_spec, &spec) < 0) { fprintf(stderr, \"SDL_OpenAudio: %s\\n\", SDL_GetError()); ret = AVERROR_UNKNOWN; goto fail; } is->audio_hw_buf_size = spec.size; is->sdl_sample_fmt = AV_SAMPLE_FMT_S16; is->resample_sample_fmt = is->sdl_sample_fmt; is->resample_channel_layout = avctx->channel_layout; is->resample_sample_rate = avctx->sample_rate; } ic->streams[stream_index]->discard = AVDISCARD_DEFAULT; switch (avctx->codec_type) { case AVMEDIA_TYPE_AUDIO: is->audio_stream = stream_index; is->audio_st = ic->streams[stream_index]; is->audio_buf_size = 0; is->audio_buf_index = 0; /* init averaging filter */ is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB); is->audio_diff_avg_count = 0; /* since we do not have a precise anough audio fifo fullness, we correct audio sync only if larger than this threshold */ is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate; memset(&is->audio_pkt, 0, sizeof(is->audio_pkt)); packet_queue_init(&is->audioq); SDL_PauseAudio(0); break; case AVMEDIA_TYPE_VIDEO: is->video_stream = stream_index; is->video_st = ic->streams[stream_index]; packet_queue_init(&is->videoq); is->video_tid = SDL_CreateThread(video_thread, is); break; case AVMEDIA_TYPE_SUBTITLE: is->subtitle_stream = stream_index; is->subtitle_st = ic->streams[stream_index]; packet_queue_init(&is->subtitleq); is->subtitle_tid = SDL_CreateThread(subtitle_thread, is); break; default: break; } fail: av_dict_free(&opts); return ret; }"} {"target": 1, "idx": 6816, "func": "static void keyword_literal(void) { QObject *obj; QBool *qbool; QObject *null; QString *str; obj = qobject_from_json(\"true\", NULL); qbool = qobject_to_qbool(obj); g_assert(qbool); g_assert(qbool_get_bool(qbool) == true); str = qobject_to_json(obj); g_assert(strcmp(qstring_get_str(str), \"true\") == 0); QDECREF(str); QDECREF(qbool); obj = qobject_from_json(\"false\", NULL); qbool = qobject_to_qbool(obj); g_assert(qbool); g_assert(qbool_get_bool(qbool) == false); str = qobject_to_json(obj); g_assert(strcmp(qstring_get_str(str), \"false\") == 0); QDECREF(str); QDECREF(qbool); qbool = qobject_to_qbool(qobject_from_jsonf(\"%i\", false)); g_assert(qbool); 
g_assert(qbool_get_bool(qbool) == false); QDECREF(qbool); /* Test that non-zero values other than 1 get collapsed to true */ qbool = qobject_to_qbool(qobject_from_jsonf(\"%i\", 2)); g_assert(qbool); g_assert(qbool_get_bool(qbool) == true); QDECREF(qbool); obj = qobject_from_json(\"null\", NULL); g_assert(obj != NULL); g_assert(qobject_type(obj) == QTYPE_QNULL); null = qnull(); g_assert(null == obj); qobject_decref(obj); qobject_decref(null); }"} {"target": 0, "idx": 6829, "func": "static void ppc_core99_init (ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env = NULL, *envs[MAX_CPUS]; char *filename; qemu_irq *pic, **openpic_irqs; int unin_memory; int linux_boot, i; ram_addr_t ram_offset, bios_offset, vga_bios_offset; uint32_t kernel_base, kernel_size, initrd_base, initrd_size; PCIBus *pci_bus; MacIONVRAMState *nvr; int nvram_mem_index; int vga_bios_size, bios_size; int pic_mem_index, dbdma_mem_index, cuda_mem_index, escc_mem_index; int ide_mem_index[3]; int ppc_boot_device; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; void *fw_cfg; void *dbdma; uint8_t *vga_bios_ptr; int machine_arch; linux_boot = (kernel_filename != NULL); /* init CPUs */ if (cpu_model == NULL) #ifdef TARGET_PPC64 cpu_model = \"970fx\"; #else cpu_model = \"G4\"; #endif for (i = 0; i < smp_cpus; i++) { env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"Unable to find PowerPC CPU definition\\n\"); exit(1); } /* Set time-base frequency to 100 Mhz */ cpu_ppc_tb_init(env, 100UL * 1000UL * 1000UL); #if 0 env->osi_call = vga_osi_call; #endif qemu_register_reset((QEMUResetHandler*)&cpu_reset, env); envs[i] = env; } /* allocate RAM */ ram_offset = qemu_ram_alloc(NULL, \"ppc_core99.ram\", ram_size); cpu_register_physical_memory(0, ram_size, ram_offset); /* allocate and load BIOS */ bios_offset = qemu_ram_alloc(NULL, \"ppc_core99.bios\", BIOS_SIZE); if (bios_name == NULL) bios_name = PROM_FILENAME; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); cpu_register_physical_memory(PROM_ADDR, BIOS_SIZE, bios_offset | IO_MEM_ROM); /* Load OpenBIOS (ELF) */ if (filename) { bios_size = load_elf(filename, NULL, NULL, NULL, NULL, NULL, 1, ELF_MACHINE, 0); qemu_free(filename); } else { bios_size = -1; } if (bios_size < 0 || bios_size > BIOS_SIZE) { hw_error(\"qemu: could not load PowerPC bios '%s'\\n\", bios_name); exit(1); } /* allocate and load VGA BIOS */ vga_bios_offset = qemu_ram_alloc(NULL, \"ppc_core99.vbios\", VGA_BIOS_SIZE); vga_bios_ptr = qemu_get_ram_ptr(vga_bios_offset); filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, VGABIOS_FILENAME); if (filename) { vga_bios_size = load_image(filename, vga_bios_ptr + 8); qemu_free(filename); } else { vga_bios_size = -1; } if (vga_bios_size < 0) { /* if no bios is present, we can still work */ fprintf(stderr, \"qemu: warning: could not load VGA bios '%s'\\n\", VGABIOS_FILENAME); vga_bios_size = 0; } else { /* set a specific header (XXX: find real Apple format for NDRV drivers) */ vga_bios_ptr[0] = 'N'; vga_bios_ptr[1] = 'D'; vga_bios_ptr[2] = 'R'; vga_bios_ptr[3] = 'V'; cpu_to_be32w((uint32_t *)(vga_bios_ptr + 4), vga_bios_size); vga_bios_size += 8; /* Round to page boundary */ vga_bios_size = (vga_bios_size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK; } if (linux_boot) { uint64_t lowaddr = 0; int bswap_needed; #ifdef BSWAP_NEEDED bswap_needed = 1; #else bswap_needed = 0; #endif kernel_base = KERNEL_LOAD_ADDR; kernel_size = load_elf(kernel_filename, 
translate_kernel_address, NULL, NULL, &lowaddr, NULL, 1, ELF_MACHINE, 0); if (kernel_size < 0) kernel_size = load_aout(kernel_filename, kernel_base, ram_size - kernel_base, bswap_needed, TARGET_PAGE_SIZE); if (kernel_size < 0) kernel_size = load_image_targphys(kernel_filename, kernel_base, ram_size - kernel_base); if (kernel_size < 0) { hw_error(\"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } /* load initrd */ if (initrd_filename) { initrd_base = INITRD_LOAD_ADDR; initrd_size = load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); if (initrd_size < 0) { hw_error(\"qemu: could not load initial ram disk '%s'\\n\", initrd_filename); exit(1); } } else { initrd_base = 0; initrd_size = 0; } ppc_boot_device = 'm'; } else { kernel_base = 0; kernel_size = 0; initrd_base = 0; initrd_size = 0; ppc_boot_device = '\\0'; /* We consider that NewWorld PowerMac never have any floppy drive * For now, OHW cannot boot from the network. */ for (i = 0; boot_device[i] != '\\0'; i++) { if (boot_device[i] >= 'c' && boot_device[i] <= 'f') { ppc_boot_device = boot_device[i]; break; } } if (ppc_boot_device == '\\0') { fprintf(stderr, \"No valid boot device for Mac99 machine\\n\"); exit(1); } } isa_mem_base = 0x80000000; /* Register 8 MB of ISA IO space */ isa_mmio_init(0xf2000000, 0x00800000, 1); /* UniN init */ unin_memory = cpu_register_io_memory(unin_read, unin_write, NULL); cpu_register_physical_memory(0xf8000000, 0x00001000, unin_memory); openpic_irqs = qemu_mallocz(smp_cpus * sizeof(qemu_irq *)); openpic_irqs[0] = qemu_mallocz(smp_cpus * sizeof(qemu_irq) * OPENPIC_OUTPUT_NB); for (i = 0; i < smp_cpus; i++) { /* Mac99 IRQ connection between OpenPIC outputs pins * and PowerPC input pins */ switch (PPC_INPUT(env)) { case PPC_FLAGS_INPUT_6xx: openpic_irqs[i] = openpic_irqs[0] + (i * OPENPIC_OUTPUT_NB); openpic_irqs[i][OPENPIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; openpic_irqs[i][OPENPIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; openpic_irqs[i][OPENPIC_OUTPUT_MCK] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_MCP]; /* Not connected ? */ openpic_irqs[i][OPENPIC_OUTPUT_DEBUG] = NULL; /* Check this */ openpic_irqs[i][OPENPIC_OUTPUT_RESET] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_HRESET]; break; #if defined(TARGET_PPC64) case PPC_FLAGS_INPUT_970: openpic_irqs[i] = openpic_irqs[0] + (i * OPENPIC_OUTPUT_NB); openpic_irqs[i][OPENPIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; openpic_irqs[i][OPENPIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; openpic_irqs[i][OPENPIC_OUTPUT_MCK] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_MCP]; /* Not connected ? 
*/ openpic_irqs[i][OPENPIC_OUTPUT_DEBUG] = NULL; /* Check this */ openpic_irqs[i][OPENPIC_OUTPUT_RESET] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_HRESET]; break; #endif /* defined(TARGET_PPC64) */ default: hw_error(\"Bus model not supported on mac99 machine\\n\"); exit(1); } } pic = openpic_init(NULL, &pic_mem_index, smp_cpus, openpic_irqs, NULL); if (PPC_INPUT(env) == PPC_FLAGS_INPUT_970) { /* 970 gets a U3 bus */ pci_bus = pci_pmac_u3_init(pic); machine_arch = ARCH_MAC99_U3; } else { pci_bus = pci_pmac_init(pic); machine_arch = ARCH_MAC99; } /* init basic PC hardware */ pci_vga_init(pci_bus, vga_bios_offset, vga_bios_size); escc_mem_index = escc_init(0x80013000, pic[0x25], pic[0x24], serial_hds[0], serial_hds[1], ESCC_CLOCK, 4); for(i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], \"ne2k_pci\", NULL); if (drive_get_max_bus(IF_IDE) >= MAX_IDE_BUS) { fprintf(stderr, \"qemu: too many IDE bus\\n\"); exit(1); } dbdma = DBDMA_init(&dbdma_mem_index); /* We only emulate 2 out of 3 IDE controllers for now */ ide_mem_index[0] = -1; hd[0] = drive_get(IF_IDE, 0, 0); hd[1] = drive_get(IF_IDE, 0, 1); ide_mem_index[1] = pmac_ide_init(hd, pic[0x0d], dbdma, 0x16, pic[0x02]); hd[0] = drive_get(IF_IDE, 1, 0); hd[1] = drive_get(IF_IDE, 1, 1); ide_mem_index[2] = pmac_ide_init(hd, pic[0x0e], dbdma, 0x1a, pic[0x02]); /* cuda also initialize ADB */ if (machine_arch == ARCH_MAC99_U3) { usb_enabled = 1; } cuda_init(&cuda_mem_index, pic[0x19]); adb_kbd_init(&adb_bus); adb_mouse_init(&adb_bus); macio_init(pci_bus, PCI_DEVICE_ID_APPLE_UNI_N_KEYL, 0, pic_mem_index, dbdma_mem_index, cuda_mem_index, NULL, 3, ide_mem_index, escc_mem_index); if (usb_enabled) { usb_ohci_init_pci(pci_bus, -1); } /* U3 needs to use USB for input because Linux doesn't support via-cuda on PPC64 */ if (machine_arch == ARCH_MAC99_U3) { usbdevice_create(\"keyboard\"); usbdevice_create(\"mouse\"); } if (graphic_depth != 15 && graphic_depth != 32 && graphic_depth != 8) graphic_depth = 15; /* The NewWorld NVRAM is not located in the MacIO device */ nvr = macio_nvram_init(&nvram_mem_index, 0x2000, 1); pmac_format_nvram_partition(nvr, 0x2000); macio_nvram_map(nvr, 0xFFF04000); /* No PCI init: the BIOS will do it */ fw_cfg = fw_cfg_init(0, 0, CFG_ADDR, CFG_ADDR + 2); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, machine_arch); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); if (kernel_cmdline) { fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, CMDLINE_ADDR); pstrcpy_targphys(\"cmdline\", CMDLINE_ADDR, TARGET_PAGE_SIZE, kernel_cmdline); } else { fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0); } fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_base); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ppc_boot_device); fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_WIDTH, graphic_width); fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_HEIGHT, graphic_height); fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_DEPTH, graphic_depth); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled()); if (kvm_enabled()) { #ifdef CONFIG_KVM uint8_t *hypercall; fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, kvmppc_get_tbfreq()); hypercall = qemu_malloc(16); kvmppc_get_hypercall(env, hypercall, 16); fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid()); #endif } else { fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, get_ticks_per_sec()); } 
qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); }"} {"target": 0, "idx": 6838, "func": "void DMA_run(void) { /* XXXXX */ }"} {"target": 0, "idx": 6844, "func": "INLINE int16 extractFloat64Exp( float64 a ) { return ( a>>52 ) & 0x7FF; }"} {"target": 0, "idx": 6852, "func": "uint32_t helper_fcmp_un(uint32_t a, uint32_t b) { CPU_FloatU fa, fb; uint32_t r = 0; fa.l = a; fb.l = b; if (float32_is_signaling_nan(fa.f) || float32_is_signaling_nan(fb.f)) { update_fpu_flags(float_flag_invalid); r = 1; } if (float32_is_nan(fa.f) || float32_is_nan(fb.f)) { r = 1; } return r; }"} {"target": 0, "idx": 6867, "func": "static int vhost_user_start(VhostUserState *s) { VhostNetOptions options; if (vhost_user_running(s)) { return 0; } options.backend_type = VHOST_BACKEND_TYPE_USER; options.net_backend = &s->nc; options.opaque = s->chr; s->vhost_net = vhost_net_init(&options); return vhost_user_running(s) ? 0 : -1; }"} {"target": 0, "idx": 6879, "func": "static int alac_set_info(ALACContext *alac) { GetByteContext gb; bytestream2_init(&gb, alac->avctx->extradata, alac->avctx->extradata_size); bytestream2_skipu(&gb, 12); // size:4, alac:4, version:4 alac->max_samples_per_frame = bytestream2_get_be32u(&gb); if (alac->max_samples_per_frame >= UINT_MAX/4){ av_log(alac->avctx, AV_LOG_ERROR, \"max_samples_per_frame too large\\n\"); return AVERROR_INVALIDDATA; } bytestream2_skipu(&gb, 1); // compatible version alac->sample_size = bytestream2_get_byteu(&gb); alac->rice_history_mult = bytestream2_get_byteu(&gb); alac->rice_initial_history = bytestream2_get_byteu(&gb); alac->rice_limit = bytestream2_get_byteu(&gb); alac->channels = bytestream2_get_byteu(&gb); bytestream2_get_be16u(&gb); // maxRun bytestream2_get_be32u(&gb); // max coded frame size bytestream2_get_be32u(&gb); // average bitrate bytestream2_get_be32u(&gb); // samplerate return 0; }"} {"target": 1, "idx": 6891, "func": "static void sdhci_send_command(SDHCIState *s) { SDRequest request; uint8_t response[16]; int rlen; s->errintsts = 0; s->acmd12errsts = 0; request.cmd = s->cmdreg >> 8; request.arg = s->argument; DPRINT_L1(\"sending CMD%u ARG[0x%08x]\\n\", request.cmd, request.arg); rlen = sdbus_do_command(&s->sdbus, &request, response); if (s->cmdreg & SDHC_CMD_RESPONSE) { if (rlen == 4) { s->rspreg[0] = (response[0] << 24) | (response[1] << 16) | (response[2] << 8) | response[3]; s->rspreg[1] = s->rspreg[2] = s->rspreg[3] = 0; DPRINT_L1(\"Response: RSPREG[31..0]=0x%08x\\n\", s->rspreg[0]); } else if (rlen == 16) { s->rspreg[0] = (response[11] << 24) | (response[12] << 16) | (response[13] << 8) | response[14]; s->rspreg[1] = (response[7] << 24) | (response[8] << 16) | (response[9] << 8) | response[10]; s->rspreg[2] = (response[3] << 24) | (response[4] << 16) | (response[5] << 8) | response[6]; s->rspreg[3] = (response[0] << 16) | (response[1] << 8) | response[2]; DPRINT_L1(\"Response received:\\n RSPREG[127..96]=0x%08x, RSPREG[95..\" \"64]=0x%08x,\\n RSPREG[63..32]=0x%08x, RSPREG[31..0]=0x%08x\\n\", s->rspreg[3], s->rspreg[2], s->rspreg[1], s->rspreg[0]); } else { ERRPRINT(\"Timeout waiting for command response\\n\"); if (s->errintstsen & SDHC_EISEN_CMDTIMEOUT) { s->errintsts |= SDHC_EIS_CMDTIMEOUT; s->norintsts |= SDHC_NIS_ERR; } } if ((s->norintstsen & SDHC_NISEN_TRSCMP) && (s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY) { s->norintsts |= SDHC_NIS_TRSCMP; } } if (s->norintstsen & SDHC_NISEN_CMDCMP) { s->norintsts |= SDHC_NIS_CMDCMP; } sdhci_update_irq(s); if (s->blksize && (s->cmdreg & SDHC_CMD_DATA_PRESENT)) { s->data_count = 0; 
sdhci_data_transfer(s); } }"} {"target": 0, "idx": 6919, "func": "static void test_dealloc_types(void) { UserDefOne *ud1test, *ud1a, *ud1b; UserDefOneList *ud1list; ud1test = g_malloc0(sizeof(UserDefOne)); ud1test->base = g_new0(UserDefZero, 1); ud1test->base->integer = 42; ud1test->string = g_strdup(\"hi there 42\"); qapi_free_UserDefOne(ud1test); ud1a = g_malloc0(sizeof(UserDefOne)); ud1a->base = g_new0(UserDefZero, 1); ud1a->base->integer = 43; ud1a->string = g_strdup(\"hi there 43\"); ud1b = g_malloc0(sizeof(UserDefOne)); ud1b->base = g_new0(UserDefZero, 1); ud1b->base->integer = 44; ud1b->string = g_strdup(\"hi there 44\"); ud1list = g_malloc0(sizeof(UserDefOneList)); ud1list->value = ud1a; ud1list->next = g_malloc0(sizeof(UserDefOneList)); ud1list->next->value = ud1b; qapi_free_UserDefOneList(ud1list); }"} {"target": 0, "idx": 6927, "func": "void net_checksum_calculate(uint8_t *data, int length) { int hlen, plen, proto, csum_offset; uint16_t csum; if ((data[14] & 0xf0) != 0x40) return; /* not IPv4 */ hlen = (data[14] & 0x0f) * 4; plen = (data[16] << 8 | data[17]) - hlen; proto = data[23]; switch (proto) { case PROTO_TCP: csum_offset = 16; break; case PROTO_UDP: csum_offset = 6; break; default: return; } if (plen < csum_offset+2) return; data[14+hlen+csum_offset] = 0; data[14+hlen+csum_offset+1] = 0; csum = net_checksum_tcpudp(plen, proto, data+14+12, data+14+hlen); data[14+hlen+csum_offset] = csum >> 8; data[14+hlen+csum_offset+1] = csum & 0xff; }"} {"target": 0, "idx": 6935, "func": "static void x86_cpu_parse_featurestr(const char *typename, char *features, Error **errp) { char *featurestr; /* Single 'key=value\" string being parsed */ static bool cpu_globals_initialized; bool ambiguous = false; if (cpu_globals_initialized) { return; } cpu_globals_initialized = true; if (!features) { return; } for (featurestr = strtok(features, \",\"); featurestr; featurestr = strtok(NULL, \",\")) { const char *name; const char *val = NULL; char *eq = NULL; char num[32]; GlobalProperty *prop; /* Compatibility syntax: */ if (featurestr[0] == '+') { plus_features = g_list_append(plus_features, g_strdup(featurestr + 1)); continue; } else if (featurestr[0] == '-') { minus_features = g_list_append(minus_features, g_strdup(featurestr + 1)); continue; } eq = strchr(featurestr, '='); if (eq) { *eq++ = 0; val = eq; } else { val = \"on\"; } feat2prop(featurestr); name = featurestr; if (g_list_find_custom(plus_features, name, compare_string)) { error_report(\"warning: Ambiguous CPU model string. \" \"Don't mix both \\\"+%s\\\" and \\\"%s=%s\\\"\", name, name, val); ambiguous = true; } if (g_list_find_custom(minus_features, name, compare_string)) { error_report(\"warning: Ambiguous CPU model string. 
\" \"Don't mix both \\\"-%s\\\" and \\\"%s=%s\\\"\", name, name, val); ambiguous = true; } /* Special case: */ if (!strcmp(name, \"tsc-freq\")) { int64_t tsc_freq; tsc_freq = qemu_strtosz_metric(val, NULL); if (tsc_freq < 0) { error_setg(errp, \"bad numerical value %s\", val); return; } snprintf(num, sizeof(num), \"%\" PRId64, tsc_freq); val = num; name = \"tsc-frequency\"; } prop = g_new0(typeof(*prop), 1); prop->driver = typename; prop->property = g_strdup(name); prop->value = g_strdup(val); prop->errp = &error_fatal; qdev_prop_register_global(prop); } if (ambiguous) { error_report(\"warning: Compatibility of ambiguous CPU model \" \"strings won't be kept on future QEMU versions\"); } }"} {"target": 1, "idx": 6946, "func": "static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, EventNotifierHandler *handler, int n) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); VirtIOSCSIVring *r = g_slice_new(VirtIOSCSIVring); int rc; /* Set up virtqueue notify */ rc = k->set_host_notifier(qbus->parent, n, true); if (rc != 0) { fprintf(stderr, \"virtio-scsi: Failed to set host notifier (%d)\\n\", rc); exit(1); } r->host_notifier = *virtio_queue_get_host_notifier(vq); r->guest_notifier = *virtio_queue_get_guest_notifier(vq); aio_set_event_notifier(s->ctx, &r->host_notifier, handler); r->parent = s; if (!vring_setup(&r->vring, VIRTIO_DEVICE(s), n)) { fprintf(stderr, \"virtio-scsi: VRing setup failed\\n\"); exit(1); } return r; }"} {"target": 1, "idx": 6951, "func": "void qemu_net_queue_purge(NetQueue *queue, NetClientState *from) { NetPacket *packet, *next; QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) { if (packet->sender == from) { QTAILQ_REMOVE(&queue->packets, packet, entry); g_free(packet); } } }"} {"target": 1, "idx": 6953, "func": "static ssize_t vnc_client_read_tls(gnutls_session_t *session, uint8_t *data, size_t datalen) { ssize_t ret = gnutls_read(*session, data, datalen); if (ret < 0) { if (ret == GNUTLS_E_AGAIN) { errno = EAGAIN; } else { errno = EIO; } ret = -1; } return ret; }"} {"target": 1, "idx": 6956, "func": "static int slice_end(AVCodecContext *avctx, AVFrame *pict) { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr) return 0; if (s->avctx->hwaccel) { if (s->avctx->hwaccel->end_frame(s->avctx) < 0) av_log(avctx, AV_LOG_ERROR, \"hardware accelerator failed to decode picture\\n\"); } #if FF_API_XVMC FF_DISABLE_DEPRECATION_WARNINGS if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) ff_xvmc_field_end(s); FF_ENABLE_DEPRECATION_WARNINGS #endif /* FF_API_XVMC */ /* end of slice reached */ if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field) { /* end of image */ ff_er_frame_end(&s->er); ff_MPV_frame_end(s); if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { int ret = av_frame_ref(pict, &s->current_picture_ptr->f); if (ret < 0) return ret; ff_print_debug_info(s, s->current_picture_ptr); } else { if (avctx->active_thread_type & FF_THREAD_FRAME) s->picture_number++; /* latency of 1 frame for I- and P-frames */ /* XXX: use another variable than picture_number */ if (s->last_picture_ptr != NULL) { int ret = av_frame_ref(pict, &s->last_picture_ptr->f); if (ret < 0) return ret; ff_print_debug_info(s, s->last_picture_ptr); } } return 1; } else { return 0; } }"} {"target": 1, "idx": 6972, "func": "static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n) { int 
level, i, j, run; RLTable *rl = &ff_rl_mpeg1; uint8_t * const scantable = s->intra_scantable.permutated; const int qscale = s->qscale; OPEN_READER(re, &s->gb); i = -1; // special case for first coefficient, no need to add second VLC table UPDATE_CACHE(re, &s->gb); if (((int32_t)GET_CACHE(re, &s->gb)) < 0) { level = (3 * qscale) >> 1; if (GET_CACHE(re, &s->gb) & 0x40000000) level = -level; block[0] = level; i++; SKIP_BITS(re, &s->gb, 2); if (((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) goto end; } /* now quantify & encode AC coefficients */ for (;;) { GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); if (level != 0) { i += run; j = scantable[i]; level = ((level * 2 + 1) * qscale) >> 1; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); SKIP_BITS(re, &s->gb, 1); } else { /* escape */ run = SHOW_UBITS(re, &s->gb, 6) + 1; LAST_SKIP_BITS(re, &s->gb, 6); UPDATE_CACHE(re, &s->gb); level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12); i += run; j = scantable[i]; if (level < 0) { level = ((-level * 2 + 1) * qscale) >> 1; level = -level; } else { level = ((level * 2 + 1) * qscale) >> 1; } } block[j] = level; if (((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) break; UPDATE_CACHE(re, &s->gb); } end: LAST_SKIP_BITS(re, &s->gb, 2); CLOSE_READER(re, &s->gb); s->block_last_index[n] = i; return 0; }"} {"target": 0, "idx": 6997, "func": "static void stream_close(VideoState *is) { VideoPicture *vp; int i; /* XXX: use a special url_shutdown call to abort parse cleanly */ is->abort_request = 1; SDL_WaitThread(is->read_tid, NULL); SDL_WaitThread(is->refresh_tid, NULL); packet_queue_destroy(&is->videoq); packet_queue_destroy(&is->audioq); packet_queue_destroy(&is->subtitleq); /* free all pictures */ for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) { vp = &is->pictq[i]; #if CONFIG_AVFILTER avfilter_unref_bufferp(&vp->picref); #endif if (vp->bmp) { SDL_FreeYUVOverlay(vp->bmp); vp->bmp = NULL; } } SDL_DestroyMutex(is->pictq_mutex); SDL_DestroyCond(is->pictq_cond); SDL_DestroyMutex(is->subpq_mutex); SDL_DestroyCond(is->subpq_cond); SDL_DestroyCond(is->continue_read_thread); #if !CONFIG_AVFILTER sws_freeContext(is->img_convert_ctx); #endif av_free(is); }"} {"target": 1, "idx": 6999, "func": "static void init_gain_table(COOKContext *q) { int i; q->gain_size_factor = q->samples_per_channel/8; for (i=0 ; i<23 ; i++) { q->gain_table[i] = pow((double)q->pow2tab[i+52] , (1.0/(double)q->gain_size_factor)); } memset(&q->gain_copy, 0, sizeof(COOKgain)); memset(&q->gain_current, 0, sizeof(COOKgain)); memset(&q->gain_now, 0, sizeof(COOKgain)); memset(&q->gain_previous, 0, sizeof(COOKgain)); }"} {"target": 1, "idx": 7004, "func": "static void qpa_fini_out (HWVoiceOut *hw) { void *ret; PAVoiceOut *pa = (PAVoiceOut *) hw; audio_pt_lock (&pa->pt, AUDIO_FUNC); pa->done = 1; audio_pt_unlock_and_signal (&pa->pt, AUDIO_FUNC); audio_pt_join (&pa->pt, &ret, AUDIO_FUNC); if (pa->s) { pa_simple_free (pa->s); pa->s = NULL; } audio_pt_fini (&pa->pt, AUDIO_FUNC); g_free (pa->pcm_buf); pa->pcm_buf = NULL; }"} {"target": 0, "idx": 7017, "func": "static int scsi_device_init(SCSIDevice *s) { SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s); if (sc->init) { return sc->init(s); } return 0; }"} {"target": 0, "idx": 7019, "func": "static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int dirty) { int64_t start, end; unsigned long val, idx, bit; start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK; end = (sector_num + nb_sectors - 1) / 
BDRV_SECTORS_PER_DIRTY_CHUNK; for (; start <= end; start++) { idx = start / (sizeof(unsigned long) * 8); bit = start % (sizeof(unsigned long) * 8); val = bs->dirty_bitmap[idx]; if (dirty) { val |= 1 << bit; } else { val &= ~(1 << bit); } bs->dirty_bitmap[idx] = val; } }"} {"target": 0, "idx": 7027, "func": "static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt, int *src_idx, size_t *src_offset, struct iovec *dst, int *dst_idx) { size_t fetched = 0; struct iovec *src = pkt->vec; *dst_idx = NET_TX_PKT_FRAGMENT_HEADER_NUM; while (fetched < pkt->virt_hdr.gso_size) { /* no more place in fragment iov */ if (*dst_idx == NET_MAX_FRAG_SG_LIST) { break; } /* no more data in iovec */ if (*src_idx == (pkt->payload_frags + NET_TX_PKT_PL_START_FRAG)) { break; } dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset; dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset, pkt->virt_hdr.gso_size - fetched); *src_offset += dst[*dst_idx].iov_len; fetched += dst[*dst_idx].iov_len; if (*src_offset == src[*src_idx].iov_len) { *src_offset = 0; (*src_idx)++; } (*dst_idx)++; } return fetched; }"} {"target": 0, "idx": 7033, "func": "static void lsi_ram_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { LSIState *s = opaque; uint32_t newval; uint32_t mask; int shift; newval = s->script_ram[addr >> 2]; shift = (addr & 3) * 8; mask = ((uint64_t)1 << (size * 8)) - 1; newval &= ~(mask << shift); newval |= val << shift; s->script_ram[addr >> 2] = newval; }"} {"target": 1, "idx": 7048, "func": "static int get_buffer_sao(HEVCContext *s, AVFrame *frame, const HEVCSPS *sps) { int ret, i; frame->width = s->avctx->width + 2; frame->height = s->avctx->height + 2; if ((ret = ff_get_buffer(s->avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) return ret; for (i = 0; frame->data[i]; i++) { int offset = frame->linesize[i] + (1 << sps->pixel_shift); frame->data[i] += offset; } frame->width = s->avctx->width; frame->height = s->avctx->height; return 0; }"} {"target": 0, "idx": 7066, "func": "static void avc_luma_vt_16w_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height) { int32_t loop_cnt; int16_t filt_const0 = 0xfb01; int16_t filt_const1 = 0x1414; int16_t filt_const2 = 0x1fb; v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r; v16i8 src87_r, src10_l, src32_l, src54_l, src76_l, src21_l, src43_l; v16i8 src65_l, src87_l; v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l; v16u8 res0, res1, res2, res3; v16i8 filt0, filt1, filt2; filt0 = (v16i8) __msa_fill_h(filt_const0); filt1 = (v16i8) __msa_fill_h(filt_const1); filt2 = (v16i8) __msa_fill_h(filt_const2); LD_SB5(src, src_stride, src0, src1, src2, src3, src4); src += (5 * src_stride); XORI_B5_128_SB(src0, src1, src2, src3, src4); ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, src32_r, src43_r); ILVL_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_l, src21_l, src32_l, src43_l); for (loop_cnt = (height >> 2); loop_cnt--;) { LD_SB4(src, src_stride, src5, src6, src7, src8); src += (4 * src_stride); XORI_B4_128_SB(src5, src6, src7, src8); ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r, src76_r, src87_r); ILVL_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_l, src65_l, src76_l, src87_l); out0_r = DPADD_SH3_SH(src10_r, src32_r, src54_r, filt0, filt1, filt2); out1_r = DPADD_SH3_SH(src21_r, src43_r, src65_r, filt0, filt1, filt2); out2_r = 
DPADD_SH3_SH(src32_r, src54_r, src76_r, filt0, filt1, filt2); out3_r = DPADD_SH3_SH(src43_r, src65_r, src87_r, filt0, filt1, filt2); out0_l = DPADD_SH3_SH(src10_l, src32_l, src54_l, filt0, filt1, filt2); out1_l = DPADD_SH3_SH(src21_l, src43_l, src65_l, filt0, filt1, filt2); out2_l = DPADD_SH3_SH(src32_l, src54_l, src76_l, filt0, filt1, filt2); out3_l = DPADD_SH3_SH(src43_l, src65_l, src87_l, filt0, filt1, filt2); SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, 5); SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, 5); SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7); PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, out3_r, res0, res1, res2, res3); XORI_B4_128_UB(res0, res1, res2, res3); ST_UB4(res0, res1, res2, res3, dst, dst_stride); dst += (4 * dst_stride); src10_r = src54_r; src32_r = src76_r; src21_r = src65_r; src43_r = src87_r; src10_l = src54_l; src32_l = src76_l; src21_l = src65_l; src43_l = src87_l; src4 = src8; } }"} {"target": 0, "idx": 7069, "func": "static int img_commit(int argc, char **argv) { int c, ret, flags; const char *filename, *fmt, *cache, *base; BlockBackend *blk; BlockDriverState *bs, *base_bs; bool progress = false, quiet = false, drop = false; bool writethrough; Error *local_err = NULL; CommonBlockJobCBInfo cbi; bool image_opts = false; AioContext *aio_context; fmt = NULL; cache = BDRV_DEFAULT_CACHE; base = NULL; for(;;) { static const struct option long_options[] = { {\"help\", no_argument, 0, 'h'}, {\"object\", required_argument, 0, OPTION_OBJECT}, {\"image-opts\", no_argument, 0, OPTION_IMAGE_OPTS}, {0, 0, 0, 0} }; c = getopt_long(argc, argv, \"f:ht:b:dpq\", long_options, NULL); if (c == -1) { break; } switch(c) { case '?': case 'h': help(); break; case 'f': fmt = optarg; break; case 't': cache = optarg; break; case 'b': base = optarg; /* -b implies -d */ drop = true; break; case 'd': drop = true; break; case 'p': progress = true; break; case 'q': quiet = true; break; case OPTION_OBJECT: { QemuOpts *opts; opts = qemu_opts_parse_noisily(&qemu_object_opts, optarg, true); if (!opts) { return 1; } } break; case OPTION_IMAGE_OPTS: image_opts = true; break; } } /* Progress is not shown in Quiet mode */ if (quiet) { progress = false; } if (optind != argc - 1) { error_exit(\"Expecting one image file name\"); } filename = argv[optind++]; if (qemu_opts_foreach(&qemu_object_opts, user_creatable_add_opts_foreach, NULL, NULL)) { return 1; } flags = BDRV_O_RDWR | BDRV_O_UNMAP; ret = bdrv_parse_cache_mode(cache, &flags, &writethrough); if (ret < 0) { error_report(\"Invalid cache option: %s\", cache); return 1; } blk = img_open(image_opts, filename, fmt, flags, writethrough, quiet); if (!blk) { return 1; } bs = blk_bs(blk); qemu_progress_init(progress, 1.f); qemu_progress_print(0.f, 100); if (base) { base_bs = bdrv_find_backing_image(bs, base); if (!base_bs) { error_setg(&local_err, QERR_BASE_NOT_FOUND, base); goto done; } } else { /* This is different from QMP, which by default uses the deepest file in * the backing chain (i.e., the very base); however, the traditional * behavior of qemu-img commit is using the immediate backing file. 
*/ base_bs = backing_bs(bs); if (!base_bs) { error_setg(&local_err, \"Image does not have a backing file\"); goto done; } } cbi = (CommonBlockJobCBInfo){ .errp = &local_err, .bs = bs, }; aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); commit_active_start(\"commit\", bs, base_bs, BLOCK_JOB_DEFAULT, 0, BLOCKDEV_ON_ERROR_REPORT, common_block_job_cb, &cbi, &local_err, false); aio_context_release(aio_context); if (local_err) { goto done; } /* When the block job completes, the BlockBackend reference will point to * the old backing file. In order to avoid that the top image is already * deleted, so we can still empty it afterwards, increment the reference * counter here preemptively. */ if (!drop) { bdrv_ref(bs); } run_block_job(bs->job, &local_err); if (local_err) { goto unref_backing; } if (!drop && bs->drv->bdrv_make_empty) { ret = bs->drv->bdrv_make_empty(bs); if (ret) { error_setg_errno(&local_err, -ret, \"Could not empty %s\", filename); goto unref_backing; } } unref_backing: if (!drop) { bdrv_unref(bs); } done: qemu_progress_end(); blk_unref(blk); if (local_err) { error_report_err(local_err); return 1; } qprintf(quiet, \"Image committed.\\n\"); return 0; }"} {"target": 0, "idx": 7099, "func": "unsigned long setup_arg_pages(void * mh, char ** argv, char ** env) { unsigned long stack_base, error, size; int i; int * stack; int argc, envc; /* Create enough stack to hold everything. If we don't use * it for args, we'll use it for something else... */ size = stack_size; error = target_mmap(0, size + qemu_host_page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (error == -1) qerror(\"stk mmap\"); /* we reserve one extra page at the top of the stack as guard */ target_mprotect(error + size, qemu_host_page_size, PROT_NONE); stack_base = error + size; stack = (void*)stack_base; /* * | STRING AREA | * +-------------+ * | 0 | * +-------------+ * | apple[n] | * +-------------+ * : * +-------------+ * | apple[0] | * +-------------+ * | 0 | * +-------------+ * | env[n] | * +-------------+ * : * : * +-------------+ * | env[0] | * +-------------+ * | 0 | * +-------------+ * | arg[argc-1] | * +-------------+ * : * : * +-------------+ * | arg[0] | * +-------------+ * | argc | * +-------------+ * sp-> | mh | address of where the a.out's file offset 0 is in memory * +-------------+ */ /* Construct the stack Stack grows down */ stack--; /* XXX: string should go up there */ *stack = 0; stack--; /* Push the absolute path of our executable */ DPRINTF(\"pushing apple %s (0x%x)\\n\", (char*)argv[0], (int)argv[0]); stl(stack, (int) argv[0]); stack--; stl(stack, 0); stack--; /* Get envc */ for(envc = 0; env[envc]; envc++); for(i = envc-1; i >= 0; i--) { DPRINTF(\"pushing env %s (0x%x)\\n\", (char*)env[i], (int)env[i]); stl(stack, (int)env[i]); stack--; /* XXX: remove that when string will be on top of the stack */ page_set_flags((int)env[i], (int)(env[i]+strlen(env[i])), PROT_READ | PAGE_VALID); } /* Add on the stack the interp_prefix choosen if so */ if(interp_prefix[0]) { char *dyld_root; asprintf(&dyld_root, \"DYLD_ROOT_PATH=%s\", interp_prefix); page_set_flags((int)dyld_root, (int)(dyld_root+strlen(interp_prefix)+1), PROT_READ | PAGE_VALID); stl(stack, (int)dyld_root); stack--; } #ifdef DONT_USE_DYLD_SHARED_MAP { char *shared_map_mode; asprintf(&shared_map_mode, \"DYLD_SHARED_REGION=avoid\"); page_set_flags((int)shared_map_mode, (int)(shared_map_mode+strlen(shared_map_mode)+1), PROT_READ | PAGE_VALID); stl(stack, (int)shared_map_mode); stack--; } #endif #ifdef 
ACTIVATE_DYLD_TRACE char * extra_env_static[] = {\"DYLD_DEBUG_TRACE=yes\", \"DYLD_PREBIND_DEBUG=3\", \"DYLD_UNKNOW_TRACE=yes\", \"DYLD_PRINT_INITIALIZERS=yes\", \"DYLD_PRINT_SEGMENTS=yes\", \"DYLD_PRINT_REBASINGS=yes\", \"DYLD_PRINT_BINDINGS=yes\", \"DYLD_PRINT_INITIALIZERS=yes\", \"DYLD_PRINT_WARNINGS=yes\" }; char ** extra_env = malloc(sizeof(extra_env_static)); bcopy(extra_env_static, extra_env, sizeof(extra_env_static)); page_set_flags((int)extra_env, (int)((void*)extra_env+sizeof(extra_env_static)), PROT_READ | PAGE_VALID); for(i = 0; i<9; i++) { DPRINTF(\"pushing (extra) env %s (0x%x)\\n\", (char*)extra_env[i], (int)extra_env[i]); stl(stack, (int) extra_env[i]); stack--; } #endif stl(stack, 0); stack--; /* Get argc */ for(argc = 0; argv[argc]; argc++); for(i = argc-1; i >= 0; i--) { DPRINTF(\"pushing arg %s (0x%x)\\n\", (char*)argv[i], (int)argv[i]); stl(stack, (int) argv[i]); stack--; /* XXX: remove that when string will be on top of the stack */ page_set_flags((int)argv[i], (int)(argv[i]+strlen(argv[i])), PROT_READ | PAGE_VALID); } DPRINTF(\"pushing argc %d \\n\", argc); stl(stack, argc); stack--; DPRINTF(\"pushing mh 0x%x \\n\", (int)mh); stl(stack, (int) mh); /* Stack points on the mh */ return (unsigned long)stack; }"} {"target": 0, "idx": 7105, "func": "int sd_do_command(SDState *sd, SDRequest *req, uint8_t *response) { uint32_t last_status = sd->card_status; sd_rsp_type_t rtype; int rsplen; if (!sd->bdrv || !bdrv_is_inserted(sd->bdrv) || !sd->enable) { return 0; } if (sd_req_crc_validate(req)) { sd->card_status |= COM_CRC_ERROR; rtype = sd_illegal; goto send_response; } sd->card_status &= ~CARD_STATUS_B; sd_set_status(sd); if (last_status & CARD_IS_LOCKED) { if (!cmd_valid_while_locked(sd, req)) { sd->card_status |= ILLEGAL_COMMAND; fprintf(stderr, \"SD: Card is locked\\n\"); rtype = sd_illegal; goto send_response; } } if (last_status & APP_CMD) { rtype = sd_app_command(sd, *req); sd->card_status &= ~APP_CMD; } else rtype = sd_normal_command(sd, *req); if (rtype == sd_illegal) { sd->card_status |= ILLEGAL_COMMAND; } sd->current_cmd = req->cmd; send_response: switch (rtype) { case sd_r1: case sd_r1b: sd_response_r1_make(sd, response, last_status); rsplen = 4; break; case sd_r2_i: memcpy(response, sd->cid, sizeof(sd->cid)); rsplen = 16; break; case sd_r2_s: memcpy(response, sd->csd, sizeof(sd->csd)); rsplen = 16; break; case sd_r3: sd_response_r3_make(sd, response); rsplen = 4; break; case sd_r6: sd_response_r6_make(sd, response); rsplen = 4; break; case sd_r7: sd_response_r7_make(sd, response); rsplen = 4; break; case sd_r0: case sd_illegal: default: rsplen = 0; break; } #ifdef DEBUG_SD if (rsplen) { int i; DPRINTF(\"Response:\"); for (i = 0; i < rsplen; i++) printf(\" %02x\", response[i]); printf(\" state %d\\n\", sd->state); } else { DPRINTF(\"No response %d\\n\", sd->state); } #endif return rsplen; }"} {"target": 0, "idx": 7109, "func": "void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) { X86CPU *cpu = x86_env_get_cpu(env); CPUState *cs = CPU(cpu); uint32_t pkg_offset; /* test if maximum index reached */ if (index & 0x80000000) { if (index > env->cpuid_xlevel) { if (env->cpuid_xlevel2 > 0) { /* Handle the Centaur's CPUID instruction. */ if (index > env->cpuid_xlevel2) { index = env->cpuid_xlevel2; } else if (index < 0xC0000000) { index = env->cpuid_xlevel; } } else { /* Intel documentation states that invalid EAX input will * return the same information as EAX=cpuid_level * (Intel SDM Vol. 
2A - Instruction Set Reference - CPUID) */ index = env->cpuid_level; } } } else { if (index > env->cpuid_level) index = env->cpuid_level; } switch(index) { case 0: *eax = env->cpuid_level; *ebx = env->cpuid_vendor1; *edx = env->cpuid_vendor2; *ecx = env->cpuid_vendor3; break; case 1: *eax = env->cpuid_version; *ebx = (cpu->apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ *ecx = env->features[FEAT_1_ECX]; if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { *ecx |= CPUID_EXT_OSXSAVE; } *edx = env->features[FEAT_1_EDX]; if (cs->nr_cores * cs->nr_threads > 1) { *ebx |= (cs->nr_cores * cs->nr_threads) << 16; *edx |= CPUID_HT; } break; case 2: /* cache info: needed for Pentium Pro compatibility */ if (cpu->cache_info_passthrough) { host_cpuid(index, 0, eax, ebx, ecx, edx); break; } *eax = 1; /* Number of CPUID[EAX=2] calls required */ *ebx = 0; if (!cpu->enable_l3_cache) { *ecx = 0; } else { *ecx = L3_N_DESCRIPTOR; } *edx = (L1D_DESCRIPTOR << 16) | \\ (L1I_DESCRIPTOR << 8) | \\ (L2_DESCRIPTOR); break; case 4: /* cache info: needed for Core compatibility */ if (cpu->cache_info_passthrough) { host_cpuid(index, count, eax, ebx, ecx, edx); *eax &= ~0xFC000000; } else { *eax = 0; switch (count) { case 0: /* L1 dcache info */ *eax |= CPUID_4_TYPE_DCACHE | \\ CPUID_4_LEVEL(1) | \\ CPUID_4_SELF_INIT_LEVEL; *ebx = (L1D_LINE_SIZE - 1) | \\ ((L1D_PARTITIONS - 1) << 12) | \\ ((L1D_ASSOCIATIVITY - 1) << 22); *ecx = L1D_SETS - 1; *edx = CPUID_4_NO_INVD_SHARING; break; case 1: /* L1 icache info */ *eax |= CPUID_4_TYPE_ICACHE | \\ CPUID_4_LEVEL(1) | \\ CPUID_4_SELF_INIT_LEVEL; *ebx = (L1I_LINE_SIZE - 1) | \\ ((L1I_PARTITIONS - 1) << 12) | \\ ((L1I_ASSOCIATIVITY - 1) << 22); *ecx = L1I_SETS - 1; *edx = CPUID_4_NO_INVD_SHARING; break; case 2: /* L2 cache info */ *eax |= CPUID_4_TYPE_UNIFIED | \\ CPUID_4_LEVEL(2) | \\ CPUID_4_SELF_INIT_LEVEL; if (cs->nr_threads > 1) { *eax |= (cs->nr_threads - 1) << 14; } *ebx = (L2_LINE_SIZE - 1) | \\ ((L2_PARTITIONS - 1) << 12) | \\ ((L2_ASSOCIATIVITY - 1) << 22); *ecx = L2_SETS - 1; *edx = CPUID_4_NO_INVD_SHARING; break; case 3: /* L3 cache info */ if (!cpu->enable_l3_cache) { *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; break; } *eax |= CPUID_4_TYPE_UNIFIED | \\ CPUID_4_LEVEL(3) | \\ CPUID_4_SELF_INIT_LEVEL; pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads); *eax |= ((1 << pkg_offset) - 1) << 14; *ebx = (L3_N_LINE_SIZE - 1) | \\ ((L3_N_PARTITIONS - 1) << 12) | \\ ((L3_N_ASSOCIATIVITY - 1) << 22); *ecx = L3_N_SETS - 1; *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX; break; default: /* end of info */ *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; break; } } /* QEMU gives out its own APIC IDs, never pass down bits 31..26. 
*/ if ((*eax & 31) && cs->nr_cores > 1) { *eax |= (cs->nr_cores - 1) << 26; } break; case 5: /* mwait info: needed for Core compatibility */ *eax = 0; /* Smallest monitor-line size in bytes */ *ebx = 0; /* Largest monitor-line size in bytes */ *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; *edx = 0; break; case 6: /* Thermal and Power Leaf */ *eax = env->features[FEAT_6_EAX]; *ebx = 0; *ecx = 0; *edx = 0; break; case 7: /* Structured Extended Feature Flags Enumeration Leaf */ if (count == 0) { *eax = 0; /* Maximum ECX value for sub-leaves */ *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { *ecx |= CPUID_7_0_ECX_OSPKE; } *edx = 0; /* Reserved */ } else { *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; } break; case 9: /* Direct Cache Access Information Leaf */ *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ *ebx = 0; *ecx = 0; *edx = 0; break; case 0xA: /* Architectural Performance Monitoring Leaf */ if (kvm_enabled() && cpu->enable_pmu) { KVMState *s = cs->kvm_state; *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); } else { *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; } break; case 0xB: /* Extended Topology Enumeration Leaf */ if (!cpu->enable_cpuid_0xb) { *eax = *ebx = *ecx = *edx = 0; break; } *ecx = count & 0xff; *edx = cpu->apic_id; switch (count) { case 0: *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads); *ebx = cs->nr_threads; *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; break; case 1: *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads); *ebx = cs->nr_cores * cs->nr_threads; *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; break; default: *eax = 0; *ebx = 0; *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; } assert(!(*eax & ~0x1f)); *ebx &= 0xffff; /* The count doesn't need to be reliable. */ break; case 0xD: { KVMState *s = cs->kvm_state; uint64_t ena_mask; int i; /* Processor Extended State */ *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { break; } if (kvm_enabled()) { ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX); ena_mask <<= 32; ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX); } else { ena_mask = -1; } if (count == 0) { *ecx = 0x240; for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { const ExtSaveArea *esa = &x86_ext_save_areas[i]; if ((env->features[esa->feature] & esa->bits) == esa->bits && ((ena_mask >> i) & 1) != 0) { if (i < 32) { *eax |= 1u << i; } else { *edx |= 1u << (i - 32); } *ecx = MAX(*ecx, esa->offset + esa->size); } } *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK); *ebx = *ecx; } else if (count == 1) { *eax = env->features[FEAT_XSAVE]; } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { const ExtSaveArea *esa = &x86_ext_save_areas[count]; if ((env->features[esa->feature] & esa->bits) == esa->bits && ((ena_mask >> count) & 1) != 0) { *eax = esa->size; *ebx = esa->offset; } } break; } case 0x80000000: *eax = env->cpuid_xlevel; *ebx = env->cpuid_vendor1; *edx = env->cpuid_vendor2; *ecx = env->cpuid_vendor3; break; case 0x80000001: *eax = env->cpuid_version; *ebx = 0; *ecx = env->features[FEAT_8000_0001_ECX]; *edx = env->features[FEAT_8000_0001_EDX]; /* The Linux kernel checks for the CMPLegacy bit and * discards multiple thread information if it is set. * So don't set it here for Intel to make Linux guests happy. 
*/ if (cs->nr_cores * cs->nr_threads > 1) { if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { *ecx |= 1 << 1; /* CmpLegacy bit */ } } break; case 0x80000002: case 0x80000003: case 0x80000004: *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; break; case 0x80000005: /* cache info (L1 cache) */ if (cpu->cache_info_passthrough) { host_cpuid(index, 0, eax, ebx, ecx, edx); break; } *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \\ (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \\ (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \\ (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE); *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \\ (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE); break; case 0x80000006: /* cache info (L2 cache) */ if (cpu->cache_info_passthrough) { host_cpuid(index, 0, eax, ebx, ecx, edx); break; } *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \\ (L2_DTLB_2M_ENTRIES << 16) | \\ (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \\ (L2_ITLB_2M_ENTRIES); *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \\ (L2_DTLB_4K_ENTRIES << 16) | \\ (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \\ (L2_ITLB_4K_ENTRIES); *ecx = (L2_SIZE_KB_AMD << 16) | \\ (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \\ (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE); if (!cpu->enable_l3_cache) { *edx = ((L3_SIZE_KB / 512) << 18) | \\ (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \\ (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE); } else { *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \\ (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \\ (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE); } break; case 0x80000007: *eax = 0; *ebx = 0; *ecx = 0; *edx = env->features[FEAT_8000_0007_EDX]; break; case 0x80000008: /* virtual & phys address size in low 2 bytes. */ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { /* 64 bit processor, 48 bits virtual, configurable * physical bits. 
*/ *eax = 0x00003000 + cpu->phys_bits; } else { *eax = cpu->phys_bits; } *ebx = 0; *ecx = 0; *edx = 0; if (cs->nr_cores * cs->nr_threads > 1) { *ecx |= (cs->nr_cores * cs->nr_threads) - 1; } break; case 0x8000000A: if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { *eax = 0x00000001; /* SVM Revision */ *ebx = 0x00000010; /* nr of ASIDs */ *ecx = 0; *edx = env->features[FEAT_SVM]; /* optional features */ } else { *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; } break; case 0xC0000000: *eax = env->cpuid_xlevel2; *ebx = 0; *ecx = 0; *edx = 0; break; case 0xC0000001: /* Support for VIA CPU's CPUID instruction */ *eax = env->cpuid_version; *ebx = 0; *ecx = 0; *edx = env->features[FEAT_C000_0001_EDX]; break; case 0xC0000002: case 0xC0000003: case 0xC0000004: /* Reserved for the future, and now filled with zero */ *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; break; default: /* reserved values: zero */ *eax = 0; *ebx = 0; *ecx = 0; *edx = 0; break; } }"} {"target": 0, "idx": 7111, "func": "static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr) { char *argstr_flat; wchar_t **argv_w; int i, buffsize = 0, offset = 0; if (win32_argv_utf8) { *argc_ptr = win32_argc; *argv_ptr = win32_argv_utf8; return; } win32_argc = 0; argv_w = CommandLineToArgvW(GetCommandLineW(), &win32_argc); if (win32_argc <= 0 || !argv_w) return; /* determine the UTF-8 buffer size (including NULL-termination symbols) */ for (i = 0; i < win32_argc; i++) buffsize += WideCharToMultiByte(CP_UTF8, 0, argv_w[i], -1, NULL, 0, NULL, NULL); win32_argv_utf8 = av_mallocz(sizeof(char *) * (win32_argc + 1) + buffsize); argstr_flat = (char *)win32_argv_utf8 + sizeof(char *) * (win32_argc + 1); if (win32_argv_utf8 == NULL) { LocalFree(argv_w); return; } for (i = 0; i < win32_argc; i++) { win32_argv_utf8[i] = &argstr_flat[offset]; offset += WideCharToMultiByte(CP_UTF8, 0, argv_w[i], -1, &argstr_flat[offset], buffsize - offset, NULL, NULL); } win32_argv_utf8[i] = NULL; LocalFree(argv_w); *argc_ptr = win32_argc; *argv_ptr = win32_argv_utf8; }"} {"target": 0, "idx": 7119, "func": "static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size) { switch (size) { case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break; case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break; default: abort(); } }"} {"target": 0, "idx": 7122, "func": "int MP3lame_encode_frame(AVCodecContext *avctx, unsigned char *frame, int buf_size, void *data) { Mp3AudioContext *s = avctx->priv_data; int num, i; //av_log(avctx, AV_LOG_DEBUG, \"%X %d %X\\n\", (int)frame, buf_size, (int)data); // if(data==NULL) // return lame_encode_flush(s->gfp, frame, buf_size); /* lame 3.91 dies on '1-channel interleaved' data */ if (s->stereo) { num = lame_encode_buffer_interleaved(s->gfp, data, MPA_FRAME_SIZE, frame, buf_size); } else { num = lame_encode_buffer(s->gfp, data, data, MPA_FRAME_SIZE, frame, buf_size); /*av_log(avctx, AV_LOG_DEBUG, \"in:%d out:%d\\n\", MPA_FRAME_SIZE, num); for(i=0; ichr_sync_read) { return 0; } if (s->replay && replay_mode == REPLAY_MODE_PLAY) { return replay_char_read_all_load(buf); } while (offset < len) { do { res = s->chr_sync_read(s, buf + offset, len - offset); if (res == -1 && errno == EAGAIN) { g_usleep(100); } } while (res == -1 && errno == EAGAIN); if (res == 0) { break; } if (res < 0) { if (s->replay && replay_mode == REPLAY_MODE_RECORD) { replay_char_read_all_save_error(res); } return res; } offset += res; if (!counter--) { break; } } if (s->replay && replay_mode == REPLAY_MODE_RECORD) { 
replay_char_read_all_save_buf(buf, offset); } return offset; }"} {"target": 1, "idx": 7130, "func": "static int decode_b_picture_secondary_header(VC9Context *v) { GetBitContext *gb = &v->s.gb; int status; bitplane_decoding(&v->skip_mb_plane, v); if (status < 0) return -1; #if TRACE if (v->mv_mode == MV_PMODE_MIXED_MV) { status = bitplane_decoding(&v->mv_type_mb_plane, v); if (status < 0) return -1; #if TRACE av_log(v->s.avctx, AV_LOG_DEBUG, \"MB MV Type plane encoding: \" \"Imode: %i, Invert: %i\\n\", status>>1, status&1); #endif } //bitplane status = bitplane_decoding(&v->direct_mb_plane, v); if (status < 0) return -1; #if TRACE av_log(v->s.avctx, AV_LOG_DEBUG, \"MB Direct plane encoding: \" \"Imode: %i, Invert: %i\\n\", status>>1, status&1); #endif av_log(v->s.avctx, AV_LOG_DEBUG, \"Skip MB plane encoding: \" \"Imode: %i, Invert: %i\\n\", status>>1, status&1); #endif /* FIXME: what is actually chosen for B frames ? */ v->s.mv_table_index = get_bits(gb, 2); //but using vc9_ tables v->cbpcy_vlc = &vc9_cbpcy_p_vlc[get_bits(gb, 2)]; if (v->dquant) { vop_dquant_decoding(v); } if (v->vstransform) { v->ttmbf = get_bits(gb, 1); if (v->ttmbf) { v->ttfrm = get_bits(gb, 2); av_log(v->s.avctx, AV_LOG_INFO, \"Transform used: %ix%i\\n\", (v->ttfrm & 2) ? 4 : 8, (v->ttfrm & 1) ? 4 : 8); } } /* Epilog (AC/DC syntax) should be done in caller */ return 0; }"} {"target": 1, "idx": 7150, "func": "static int mxf_get_stream_index(AVFormatContext *s, KLVPacket *klv) { int i; for (i = 0; i < s->nb_streams; i++) { MXFTrack *track = s->streams[i]->priv_data; /* SMPTE 379M 7.3 */ if (!memcmp(klv->key + sizeof(mxf_essence_element_key), track->track_number, sizeof(track->track_number))) return i; } /* return 0 if only one stream, for OP Atom files with 0 as track number */ return s->nb_streams == 1 ? 0 : -1; }"} {"target": 1, "idx": 7168, "func": "static void vc1_mc_1mv(VC1Context *v, int dir) { MpegEncContext *s = &v->s; H264ChromaContext *h264chroma = &v->h264chroma; uint8_t *srcY, *srcU, *srcV; int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y; int v_edge_pos = s->v_edge_pos >> v->field_mode; int i; uint8_t (*luty)[256], (*lutuv)[256]; int use_ic; if ((!v->field_mode || (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) && !v->s.last_picture.f.data[0]) return; mx = s->mv[dir][0][0]; my = s->mv[dir][0][1]; // store motion vectors for further use in B frames if (s->pict_type == AV_PICTURE_TYPE_P) { for (i = 0; i < 4; i++) { s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx; s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my; } } uvmx = (mx + ((mx & 3) == 3)) >> 1; uvmy = (my + ((my & 3) == 3)) >> 1; v->luma_mv[s->mb_x][0] = uvmx; v->luma_mv[s->mb_x][1] = uvmy; if (v->field_mode && v->cur_field_type != v->ref_field_type[dir]) { my = my - 2 + 4 * v->cur_field_type; uvmy = uvmy - 2 + 4 * v->cur_field_type; } // fastuvmc shall be ignored for interlaced frame picture if (v->fastuvmc && (v->fcm != ILACE_FRAME)) { uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1)); uvmy = uvmy + ((uvmy < 0) ? 
(uvmy & 1) : -(uvmy & 1)); } if (!dir) { if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) { srcY = s->current_picture.f.data[0]; srcU = s->current_picture.f.data[1]; srcV = s->current_picture.f.data[2]; luty = v->curr_luty; lutuv = v->curr_lutuv; use_ic = v->curr_use_ic; } else { srcY = s->last_picture.f.data[0]; srcU = s->last_picture.f.data[1]; srcV = s->last_picture.f.data[2]; luty = v->last_luty; lutuv = v->last_lutuv; use_ic = v->last_use_ic; } } else { srcY = s->next_picture.f.data[0]; srcU = s->next_picture.f.data[1]; srcV = s->next_picture.f.data[2]; luty = v->next_luty; lutuv = v->next_lutuv; use_ic = v->next_use_ic; } if (!srcY || !srcU) { av_log(v->s.avctx, AV_LOG_ERROR, \"Referenced frame missing.\\n\"); return; } src_x = s->mb_x * 16 + (mx >> 2); src_y = s->mb_y * 16 + (my >> 2); uvsrc_x = s->mb_x * 8 + (uvmx >> 2); uvsrc_y = s->mb_y * 8 + (uvmy >> 2); if (v->profile != PROFILE_ADVANCED) { src_x = av_clip( src_x, -16, s->mb_width * 16); src_y = av_clip( src_y, -16, s->mb_height * 16); uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8); uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8); } else { src_x = av_clip( src_x, -17, s->avctx->coded_width); src_y = av_clip( src_y, -18, s->avctx->coded_height + 1); uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1); uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1); } srcY += src_y * s->linesize + src_x; srcU += uvsrc_y * s->uvlinesize + uvsrc_x; srcV += uvsrc_y * s->uvlinesize + uvsrc_x; if (v->field_mode && v->ref_field_type[dir]) { srcY += s->current_picture_ptr->f.linesize[0]; srcU += s->current_picture_ptr->f.linesize[1]; srcV += s->current_picture_ptr->f.linesize[2]; } /* for grayscale we should not try to read from unknown area */ if (s->flags & CODEC_FLAG_GRAY) { srcU = s->edge_emu_buffer + 18 * s->linesize; srcV = s->edge_emu_buffer + 18 * s->linesize; } if (v->rangeredfrm || use_ic || s->h_edge_pos < 22 || v_edge_pos < 22 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3 || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) { uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize; srcY -= s->mspel * (1 + s->linesize); s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, s->linesize, 17 + s->mspel * 2, 17 + s->mspel * 2, src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, v_edge_pos); srcY = s->edge_emu_buffer; s->vdsp.emulated_edge_mc(uvbuf, srcU, s->uvlinesize, s->uvlinesize, 8 + 1, 8 + 1, uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1); s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, s->uvlinesize, 8 + 1, 8 + 1, uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1); srcU = uvbuf; srcV = uvbuf + 16; /* if we deal with range reduction we need to scale source blocks */ if (v->rangeredfrm) { int i, j; uint8_t *src, *src2; src = srcY; for (j = 0; j < 17 + s->mspel * 2; j++) { for (i = 0; i < 17 + s->mspel * 2; i++) src[i] = ((src[i] - 128) >> 1) + 128; src += s->linesize; } src = srcU; src2 = srcV; for (j = 0; j < 9; j++) { for (i = 0; i < 9; i++) { src[i] = ((src[i] - 128) >> 1) + 128; src2[i] = ((src2[i] - 128) >> 1) + 128; } src += s->uvlinesize; src2 += s->uvlinesize; } } /* if we deal with intensity compensation we need to scale source blocks */ if (use_ic) { int i, j; uint8_t *src, *src2; src = srcY; for (j = 0; j < 17 + s->mspel * 2; j++) { int f = v->field_mode ? 
v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ; for (i = 0; i < 17 + s->mspel * 2; i++) src[i] = luty[f][src[i]]; src += s->linesize; } src = srcU; src2 = srcV; for (j = 0; j < 9; j++) { int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1); for (i = 0; i < 9; i++) { src[i] = lutuv[f][src[i]]; src2[i] = lutuv[f][src2[i]]; } src += s->uvlinesize; src2 += s->uvlinesize; } } srcY += s->mspel * (1 + s->linesize); } if (s->mspel) { dxy = ((my & 3) << 2) | (mx & 3); v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd); v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd); srcY += s->linesize * 8; v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd); v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd); } else { // hpel mc - always used for luma dxy = (my & 2) | ((mx & 2) >> 1); if (!v->rnd) s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16); else s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16); } if (s->flags & CODEC_FLAG_GRAY) return; /* Chroma MC always uses qpel bilinear */ uvmx = (uvmx & 3) << 1; uvmy = (uvmy & 3) << 1; if (!v->rnd) { h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy); h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy); } else { v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy); v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy); } }"} {"target": 1, "idx": 7174, "func": "static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) { #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) asm volatile( \"movq \"MANGLE(bm01010101)\", %%mm4\\n\\t\" \"mov %0, %%\"REG_a\" \\n\\t\" \"1: \\n\\t\" \"movq (%1, %%\"REG_a\",4), %%mm0 \\n\\t\" \"movq 8(%1, %%\"REG_a\",4), %%mm1 \\n\\t\" \"movq (%2, %%\"REG_a\",4), %%mm2 \\n\\t\" \"movq 8(%2, %%\"REG_a\",4), %%mm3 \\n\\t\" PAVGB(%%mm2, %%mm0) PAVGB(%%mm3, %%mm1) \"psrlw $8, %%mm0 \\n\\t\" \"psrlw $8, %%mm1 \\n\\t\" \"packuswb %%mm1, %%mm0 \\n\\t\" \"movq %%mm0, %%mm1 \\n\\t\" \"psrlw $8, %%mm0 \\n\\t\" \"pand %%mm4, %%mm1 \\n\\t\" \"packuswb %%mm0, %%mm0 \\n\\t\" \"packuswb %%mm1, %%mm1 \\n\\t\" \"movd %%mm0, (%4, %%\"REG_a\") \\n\\t\" \"movd %%mm1, (%3, %%\"REG_a\") \\n\\t\" \"add $4, %%\"REG_a\" \\n\\t\" \" js 1b \\n\\t\" : : \"g\" ((long)-width), \"r\" (src1+width*4), \"r\" (src2+width*4), \"r\" (dstU+width), \"r\" (dstV+width) : \"%\"REG_a ); #else int i; for(i=0; i>1; dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1; } #endif }"} {"target": 0, "idx": 7177, "func": "static int mtv_read_header(AVFormatContext *s) { MTVDemuxContext *mtv = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; unsigned int audio_subsegments; avio_skip(pb, 3); mtv->file_size = avio_rl32(pb); mtv->segments = avio_rl32(pb); avio_skip(pb, 32); mtv->audio_identifier = avio_rl24(pb); mtv->audio_br = avio_rl16(pb); mtv->img_colorfmt = avio_rl24(pb); mtv->img_bpp = avio_r8(pb); mtv->img_width = avio_rl16(pb); mtv->img_height = avio_rl16(pb); mtv->img_segment_size = avio_rl16(pb); /* Calculate width and height if missing from header */ if(mtv->img_bpp>>3){ if(!mtv->img_width && mtv->img_height) mtv->img_width=mtv->img_segment_size / (mtv->img_bpp>>3) / mtv->img_height; if(!mtv->img_height && mtv->img_width) mtv->img_height=mtv->img_segment_size / 
(mtv->img_bpp>>3) / mtv->img_width; } if(!mtv->img_height || !mtv->img_width || !mtv->img_segment_size){ av_log(s, AV_LOG_ERROR, \"width or height or segment_size is invalid and I cannot calculate them from other information\\n\"); return AVERROR(EINVAL); } avio_skip(pb, 4); audio_subsegments = avio_rl16(pb); if (audio_subsegments == 0) { avpriv_request_sample(s, \"MTV files without audio\"); return AVERROR_PATCHWELCOME; } mtv->full_segment_size = audio_subsegments * (MTV_AUDIO_PADDING_SIZE + MTV_ASUBCHUNK_DATA_SIZE) + mtv->img_segment_size; mtv->video_fps = (mtv->audio_br / 4) / audio_subsegments; // FIXME Add sanity check here // all systems go! init decoders // video - raw rgb565 st = avformat_new_stream(s, NULL); if(!st) return AVERROR(ENOMEM); avpriv_set_pts_info(st, 64, 1, mtv->video_fps); st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = AV_CODEC_ID_RAWVIDEO; st->codec->pix_fmt = AV_PIX_FMT_RGB565BE; st->codec->width = mtv->img_width; st->codec->height = mtv->img_height; st->codec->sample_rate = mtv->video_fps; st->codec->extradata = av_strdup(\"BottomUp\"); st->codec->extradata_size = 9; // audio - mp3 st = avformat_new_stream(s, NULL); if(!st) return AVERROR(ENOMEM); avpriv_set_pts_info(st, 64, 1, AUDIO_SAMPLING_RATE); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = AV_CODEC_ID_MP3; st->codec->bit_rate = mtv->audio_br; st->need_parsing = AVSTREAM_PARSE_FULL; // Jump over header if(avio_seek(pb, MTV_HEADER_SIZE, SEEK_SET) != MTV_HEADER_SIZE) return AVERROR(EIO); return 0; }"} {"target": 1, "idx": 7191, "func": "static bool qht_insert__locked(struct qht *ht, struct qht_map *map, struct qht_bucket *head, void *p, uint32_t hash, bool *needs_resize) { struct qht_bucket *b = head; struct qht_bucket *prev = NULL; struct qht_bucket *new = NULL; int i; do { for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { if (b->pointers[i]) { if (unlikely(b->pointers[i] == p)) { return false; } } else { goto found; } } prev = b; b = b->next; } while (b); b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b)); memset(b, 0, sizeof(*b)); new = b; i = 0; atomic_inc(&map->n_added_buckets); if (unlikely(qht_map_needs_resize(map)) && needs_resize) { *needs_resize = true; } found: /* found an empty key: acquire the seqlock and write */ seqlock_write_begin(&head->sequence); if (new) { atomic_rcu_set(&prev->next, b); } b->hashes[i] = hash; /* smp_wmb() implicit in seqlock_write_begin. */ atomic_set(&b->pointers[i], p); seqlock_write_end(&head->sequence); return true; }"} {"target": 1, "idx": 7204, "func": "static int copy_packet_data(AVPacket *pkt, AVPacket *src, int dup) { pkt->data = NULL; pkt->side_data = NULL; if (pkt->buf) { AVBufferRef *ref = av_buffer_ref(src->buf); if (!ref) return AVERROR(ENOMEM); pkt->buf = ref; pkt->data = ref->data; } else { DUP_DATA(pkt->data, src->data, pkt->size, 1, ALLOC_BUF); } #if FF_API_DESTRUCT_PACKET FF_DISABLE_DEPRECATION_WARNINGS pkt->destruct = dummy_destruct_packet; FF_ENABLE_DEPRECATION_WARNINGS #endif if (pkt->side_data_elems && dup) pkt->side_data = src->side_data; if (pkt->side_data_elems && !dup) { return av_copy_packet_side_data(pkt, src); } return 0; failed_alloc: av_destruct_packet(pkt); return AVERROR(ENOMEM); }"} {"target": 0, "idx": 7213, "func": "static void virtio_crypto_instance_init(Object *obj) { VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(obj); /* * The default config_size is sizeof(struct virtio_crypto_config). * Can be overriden with virtio_crypto_set_config_size. 
*/ vcrypto->config_size = sizeof(struct virtio_crypto_config); object_property_add_link(obj, \"cryptodev\", TYPE_CRYPTODEV_BACKEND, (Object **)&vcrypto->conf.cryptodev, virtio_crypto_check_cryptodev_is_used, OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL); }"} {"target": 0, "idx": 7222, "func": "int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw, int mmu_idx, int is_softmmu) { target_ulong physical; int prot, ret, access_type; access_type = ACCESS_INT; ret = get_physical_address(env, &physical, &prot, address, rw, access_type); if (ret != MMU_OK) { env->tea = address; switch (ret) { case MMU_ITLB_MISS: case MMU_DTLB_MISS_READ: env->exception_index = 0x040; break; case MMU_DTLB_MULTIPLE: case MMU_ITLB_MULTIPLE: env->exception_index = 0x140; break; case MMU_ITLB_VIOLATION: env->exception_index = 0x0a0; break; case MMU_DTLB_MISS_WRITE: env->exception_index = 0x060; break; case MMU_DTLB_INITIAL_WRITE: env->exception_index = 0x080; break; case MMU_DTLB_VIOLATION_READ: env->exception_index = 0x0a0; break; case MMU_DTLB_VIOLATION_WRITE: env->exception_index = 0x0c0; break; case MMU_IADDR_ERROR: case MMU_DADDR_ERROR_READ: env->exception_index = 0x0c0; break; case MMU_DADDR_ERROR_WRITE: env->exception_index = 0x100; break; default: assert(0); } return 1; } address &= TARGET_PAGE_MASK; physical &= TARGET_PAGE_MASK; return tlb_set_page(env, address, physical, prot, mmu_idx, is_softmmu); }"} {"target": 1, "idx": 7243, "func": "static int virtio_blk_device_exit(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIOBlock *s = VIRTIO_BLK(dev); #ifdef CONFIG_VIRTIO_BLK_DATA_PLANE remove_migration_state_change_notifier(&s->migration_state_notifier); virtio_blk_data_plane_destroy(s->dataplane); s->dataplane = NULL; #endif qemu_del_vm_change_state_handler(s->change); unregister_savevm(dev, \"virtio-blk\", s); blockdev_mark_auto_del(s->bs); virtio_cleanup(vdev); return 0; }"} {"target": 1, "idx": 7267, "func": "static void pc_dimm_get_size(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { uint64_t value; MemoryRegion *mr; PCDIMMDevice *dimm = PC_DIMM(obj); PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(obj); mr = ddc->get_memory_region(dimm); value = memory_region_size(mr); visit_type_uint64(v, name, &value, errp); }"} {"target": 1, "idx": 7269, "func": "target_ulong spapr_rtas_call(sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { if ((token >= TOKEN_BASE) && ((token - TOKEN_BASE) < TOKEN_MAX)) { struct rtas_call *call = rtas_table + (token - TOKEN_BASE); if (call->fn) { call->fn(spapr, token, nargs, args, nret, rets); hcall_dprintf(\"Unknown RTAS token 0x%x\\n\", token); rtas_st(rets, 0, -3); return H_PARAMETER;"} {"target": 0, "idx": 7315, "func": "static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd) { SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, req->dev); SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); int ret; if (cmd[0] != REQUEST_SENSE && req->lun != s->qdev.lun) { DPRINTF(\"Unimplemented LUN %d\\n\", req->lun); scsi_req_build_sense(&r->req, SENSE_CODE(LUN_NOT_SUPPORTED)); scsi_req_complete(&r->req, CHECK_CONDITION); return 0; } if (-1 == scsi_req_parse(&r->req, cmd)) { BADF(\"Unsupported command length, command %x\\n\", cmd[0]); scsi_command_complete(r, -EINVAL); return 0; } scsi_req_fixup(&r->req); DPRINTF(\"Command: lun=%d tag=0x%x len %zd data=0x%02x\", lun, tag, r->req.cmd.xfer, cmd[0]); #ifdef DEBUG_SCSI { int i; for (i = 1; i < r->req.cmd.len; i++) { 
printf(\" 0x%02x\", cmd[i]); } printf(\"\\n\"); } #endif if (r->req.cmd.xfer == 0) { if (r->buf != NULL) qemu_free(r->buf); r->buflen = 0; r->buf = NULL; ret = execute_command(s->bs, r, SG_DXFER_NONE, scsi_command_complete); if (ret < 0) { scsi_command_complete(r, ret); return 0; } return 0; } if (r->buflen != r->req.cmd.xfer) { if (r->buf != NULL) qemu_free(r->buf); r->buf = qemu_malloc(r->req.cmd.xfer); r->buflen = r->req.cmd.xfer; } memset(r->buf, 0, r->buflen); r->len = r->req.cmd.xfer; if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { r->len = 0; return -r->req.cmd.xfer; } else { return r->req.cmd.xfer; } }"} {"target": 0, "idx": 7324, "func": "void ip6_input(struct mbuf *m) { struct ip6 *ip6; DEBUG_CALL(\"ip6_input\"); DEBUG_ARG(\"m = %lx\", (long)m); DEBUG_ARG(\"m_len = %d\", m->m_len); if (m->m_len < sizeof(struct ip6)) { goto bad; } ip6 = mtod(m, struct ip6 *); if (ip6->ip_v != IP6VERSION) { goto bad; } /* check ip_ttl for a correct ICMP reply */ if (ip6->ip_hl == 0) { /*icmp_send_error(m, ICMP_TIMXCEED,ICMP_TIMXCEED_INTRANS, 0,\"ttl\");*/ goto bad; } /* * Switch out to protocol's input routine. */ switch (ip6->ip_nh) { case IPPROTO_TCP: /*tcp_input(m, hlen, (struct socket *)NULL);*/ break; case IPPROTO_UDP: /*udp_input(m, hlen);*/ break; case IPPROTO_ICMPV6: icmp6_input(m); break; default: m_free(m); } return; bad: m_free(m); }"} {"target": 0, "idx": 7326, "func": "void fork_start(void) { pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock); pthread_mutex_lock(&exclusive_lock); mmap_fork_start(); }"} {"target": 0, "idx": 7327, "func": "static int write_dump_pages(DumpState *s) { int ret = 0; DataCache page_desc, page_data; size_t len_buf_out, size_out; #ifdef CONFIG_LZO lzo_bytep wrkmem = NULL; #endif uint8_t *buf_out = NULL; off_t offset_desc, offset_data; PageDescriptor pd, pd_zero; uint8_t *buf; int endian = s->dump_info.d_endian; GuestPhysBlock *block_iter = NULL; uint64_t pfn_iter; /* get offset of page_desc and page_data in dump file */ offset_desc = s->offset_page; offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable; prepare_data_cache(&page_desc, s, offset_desc); prepare_data_cache(&page_data, s, offset_data); /* prepare buffer to store compressed data */ len_buf_out = get_len_buf_out(s->page_size, s->flag_compress); if (len_buf_out == 0) { dump_error(s, \"dump: failed to get length of output buffer.\\n\"); goto out; } #ifdef CONFIG_LZO wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS); #endif buf_out = g_malloc(len_buf_out); /* * init zero page's page_desc and page_data, because every zero page * uses the same page_data */ pd_zero.size = cpu_convert_to_target32(s->page_size, endian); pd_zero.flags = cpu_convert_to_target32(0, endian); pd_zero.offset = cpu_convert_to_target64(offset_data, endian); pd_zero.page_flags = cpu_convert_to_target64(0, endian); buf = g_malloc0(s->page_size); ret = write_cache(&page_data, buf, s->page_size, false); g_free(buf); if (ret < 0) { dump_error(s, \"dump: failed to write page data(zero page).\\n\"); goto out; } offset_data += s->page_size; /* * dump memory to vmcore page by page. zero page will all be resided in the * first page of page section */ while (get_next_page(&block_iter, &pfn_iter, &buf, s)) { /* check zero page */ if (is_zero_page(buf, s->page_size)) { ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor), false); if (ret < 0) { dump_error(s, \"dump: failed to write page desc.\\n\"); goto out; } } else { /* * not zero page, then: * 1. compress the page * 2. write the compressed page into the cache of page_data * 3. 
get page desc of the compressed page and write it into the * cache of page_desc * * only one compression format will be used here, for * s->flag_compress is set. But when compression fails to work, * we fall back to save in plaintext. */ size_out = len_buf_out; if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) && (compress2(buf_out, (uLongf *)&size_out, buf, s->page_size, Z_BEST_SPEED) == Z_OK) && (size_out < s->page_size)) { pd.flags = cpu_convert_to_target32(DUMP_DH_COMPRESSED_ZLIB, endian); pd.size = cpu_convert_to_target32(size_out, endian); ret = write_cache(&page_data, buf_out, size_out, false); if (ret < 0) { dump_error(s, \"dump: failed to write page data.\\n\"); goto out; } #ifdef CONFIG_LZO } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) && (lzo1x_1_compress(buf, s->page_size, buf_out, (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) && (size_out < s->page_size)) { pd.flags = cpu_convert_to_target32(DUMP_DH_COMPRESSED_LZO, endian); pd.size = cpu_convert_to_target32(size_out, endian); ret = write_cache(&page_data, buf_out, size_out, false); if (ret < 0) { dump_error(s, \"dump: failed to write page data.\\n\"); goto out; } #endif #ifdef CONFIG_SNAPPY } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) && (snappy_compress((char *)buf, s->page_size, (char *)buf_out, &size_out) == SNAPPY_OK) && (size_out < s->page_size)) { pd.flags = cpu_convert_to_target32( DUMP_DH_COMPRESSED_SNAPPY, endian); pd.size = cpu_convert_to_target32(size_out, endian); ret = write_cache(&page_data, buf_out, size_out, false); if (ret < 0) { dump_error(s, \"dump: failed to write page data.\\n\"); goto out; } #endif } else { /* * fall back to save in plaintext, size_out should be * assigned to s->page_size */ pd.flags = cpu_convert_to_target32(0, endian); size_out = s->page_size; pd.size = cpu_convert_to_target32(size_out, endian); ret = write_cache(&page_data, buf, s->page_size, false); if (ret < 0) { dump_error(s, \"dump: failed to write page data.\\n\"); goto out; } } /* get and write page desc here */ pd.page_flags = cpu_convert_to_target64(0, endian); pd.offset = cpu_convert_to_target64(offset_data, endian); offset_data += size_out; ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false); if (ret < 0) { dump_error(s, \"dump: failed to write page desc.\\n\"); goto out; } } } ret = write_cache(&page_desc, NULL, 0, true); if (ret < 0) { dump_error(s, \"dump: failed to sync cache for page_desc.\\n\"); goto out; } ret = write_cache(&page_data, NULL, 0, true); if (ret < 0) { dump_error(s, \"dump: failed to sync cache for page_data.\\n\"); goto out; } out: free_data_cache(&page_desc); free_data_cache(&page_data); #ifdef CONFIG_LZO g_free(wrkmem); #endif g_free(buf_out); return ret; }"} {"target": 0, "idx": 7332, "func": "static void do_ext_interrupt(CPUS390XState *env) { S390CPU *cpu = s390_env_get_cpu(env); uint64_t mask, addr; LowCore *lowcore; ExtQueue *q; if (!(env->psw.mask & PSW_MASK_EXT)) { cpu_abort(CPU(cpu), \"Ext int w/o ext mask\\n\"); } lowcore = cpu_map_lowcore(env); if (env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) { lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP); lowcore->cpu_addr = 0; env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR; } else if (env->pending_int & INTERRUPT_EXT_CPU_TIMER) { lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER); lowcore->cpu_addr = 0; env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER; } else if (env->pending_int & INTERRUPT_EXT_SERVICE) { g_assert(env->ext_index >= 0); /* * FIXME: floating IRQs should be considered by all CPUs and * shuld not 
get cleared by CPU reset. */ q = &env->ext_queue[env->ext_index]; lowcore->ext_int_code = cpu_to_be16(q->code); lowcore->ext_params = cpu_to_be32(q->param); lowcore->ext_params2 = cpu_to_be64(q->param64); lowcore->cpu_addr = cpu_to_be16(env->core_id | VIRTIO_SUBCODE_64); env->ext_index--; if (env->ext_index == -1) { env->pending_int &= ~INTERRUPT_EXT_SERVICE; } } else { g_assert_not_reached(); } mask = be64_to_cpu(lowcore->external_new_psw.mask); addr = be64_to_cpu(lowcore->external_new_psw.addr); lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env)); lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr); cpu_unmap_lowcore(lowcore); DPRINTF(\"%s: %\" PRIx64 \" %\" PRIx64 \"\\n\", __func__, env->psw.mask, env->psw.addr); load_psw(env, mask, addr); }"} {"target": 0, "idx": 7334, "func": "static void verdex_init(MachineState *machine) { const char *cpu_model = machine->cpu_model; PXA2xxState *cpu; DriveInfo *dinfo; int be; MemoryRegion *address_space_mem = get_system_memory(); uint32_t verdex_rom = 0x02000000; uint32_t verdex_ram = 0x10000000; cpu = pxa270_init(address_space_mem, verdex_ram, cpu_model ?: \"pxa270-c0\"); dinfo = drive_get(IF_PFLASH, 0, 0); if (!dinfo && !qtest_enabled()) { fprintf(stderr, \"A flash image must be given with the \" \"'pflash' parameter\\n\"); exit(1); } #ifdef TARGET_WORDS_BIGENDIAN be = 1; #else be = 0; #endif if (!pflash_cfi01_register(0x00000000, NULL, \"verdex.rom\", verdex_rom, dinfo ? blk_bs(blk_by_legacy_dinfo(dinfo)) : NULL, sector_len, verdex_rom / sector_len, 2, 0, 0, 0, 0, be)) { fprintf(stderr, \"qemu: Error registering flash memory.\\n\"); exit(1); } /* Interrupt line of NIC is connected to GPIO line 99 */ smc91c111_init(&nd_table[0], 0x04000300, qdev_get_gpio_in(cpu->gpio, 99)); }"} {"target": 1, "idx": 7347, "func": "static int bochs_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVBochsState *s = bs->opaque; uint32_t i; struct bochs_header bochs; int ret; bs->read_only = 1; // no write support yet ret = bdrv_pread(bs->file, 0, &bochs, sizeof(bochs)); if (ret < 0) { return ret; if (strcmp(bochs.magic, HEADER_MAGIC) || strcmp(bochs.type, REDOLOG_TYPE) || strcmp(bochs.subtype, GROWING_TYPE) || ((le32_to_cpu(bochs.version) != HEADER_VERSION) && (le32_to_cpu(bochs.version) != HEADER_V1))) { error_setg(errp, \"Image not in Bochs format\"); return -EINVAL; if (le32_to_cpu(bochs.version) == HEADER_V1) { bs->total_sectors = le64_to_cpu(bochs.extra.redolog_v1.disk) / 512; } else { bs->total_sectors = le64_to_cpu(bochs.extra.redolog.disk) / 512; s->catalog_size = le32_to_cpu(bochs.catalog); s->catalog_bitmap = g_malloc(s->catalog_size * 4); ret = bdrv_pread(bs->file, le32_to_cpu(bochs.header), s->catalog_bitmap, s->catalog_size * 4); if (ret < 0) { goto fail; for (i = 0; i < s->catalog_size; i++) le32_to_cpus(&s->catalog_bitmap[i]); s->data_offset = le32_to_cpu(bochs.header) + (s->catalog_size * 4); s->bitmap_blocks = 1 + (le32_to_cpu(bochs.bitmap) - 1) / 512; s->extent_blocks = 1 + (le32_to_cpu(bochs.extent) - 1) / 512; s->extent_size = le32_to_cpu(bochs.extent); if (s->catalog_size < bs->total_sectors / s->extent_size) { error_setg(errp, \"Catalog size is too small for this disk size\"); ret = -EINVAL; goto fail; qemu_co_mutex_init(&s->lock); return 0; fail: g_free(s->catalog_bitmap); return ret;"} {"target": 1, "idx": 7351, "func": "offset_t url_fseek(ByteIOContext *s, offset_t offset, int whence) { offset_t offset1; offset_t pos= s->pos - (s->write_flag ? 
0 : (s->buf_end - s->buffer)); if (whence != SEEK_CUR && whence != SEEK_SET) return -EINVAL; if (whence == SEEK_CUR) { offset1 = pos + (s->buf_ptr - s->buffer); if (offset == 0) return offset1; offset += offset1; } offset1 = offset - pos; if (!s->must_flush && offset1 >= 0 && offset1 < (s->buf_end - s->buffer)) { /* can do the seek inside the buffer */ s->buf_ptr = s->buffer + offset1; } else { if (!s->seek) return -EPIPE; #ifdef CONFIG_MUXERS if (s->write_flag) { flush_buffer(s); s->must_flush = 1; } else #endif //CONFIG_MUXERS { s->buf_end = s->buffer; } s->buf_ptr = s->buffer; if (s->seek(s->opaque, offset, SEEK_SET) == (offset_t)-EPIPE) return -EPIPE; s->pos = offset; } s->eof_reached = 0; return offset; }"} {"target": 0, "idx": 7360, "func": "void ppc_translate_init(void) { int i; char* p; size_t cpu_reg_names_size; static int done_init = 0; if (done_init) return; cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, \"env\"); p = cpu_reg_names; cpu_reg_names_size = sizeof(cpu_reg_names); for (i = 0; i < 8; i++) { snprintf(p, cpu_reg_names_size, \"crf%d\", i); cpu_crf[i] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, crf[i]), p); p += 5; cpu_reg_names_size -= 5; } for (i = 0; i < 32; i++) { snprintf(p, cpu_reg_names_size, \"r%d\", i); cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, gpr[i]), p); p += (i < 10) ? 3 : 4; cpu_reg_names_size -= (i < 10) ? 3 : 4; #if !defined(TARGET_PPC64) snprintf(p, cpu_reg_names_size, \"r%dH\", i); cpu_gprh[i] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, gprh[i]), p); p += (i < 10) ? 4 : 5; cpu_reg_names_size -= (i < 10) ? 4 : 5; #endif snprintf(p, cpu_reg_names_size, \"fp%d\", i); cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, fpr[i]), p); p += (i < 10) ? 4 : 5; cpu_reg_names_size -= (i < 10) ? 4 : 5; snprintf(p, cpu_reg_names_size, \"avr%dH\", i); #ifdef HOST_WORDS_BIGENDIAN cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, avr[i].u64[0]), p); #else cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, avr[i].u64[1]), p); #endif p += (i < 10) ? 6 : 7; cpu_reg_names_size -= (i < 10) ? 6 : 7; snprintf(p, cpu_reg_names_size, \"avr%dL\", i); #ifdef HOST_WORDS_BIGENDIAN cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, avr[i].u64[1]), p); #else cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, avr[i].u64[0]), p); #endif p += (i < 10) ? 6 : 7; cpu_reg_names_size -= (i < 10) ? 
6 : 7; } cpu_nip = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, nip), \"nip\"); cpu_msr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, msr), \"msr\"); cpu_ctr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, ctr), \"ctr\"); cpu_lr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, lr), \"lr\"); cpu_xer = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, xer), \"xer\"); cpu_reserve = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, reserve), \"reserve\"); cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, fpscr), \"fpscr\"); cpu_access_type = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, access_type), \"access_type\"); /* register helpers */ #define GEN_HELPER 2 #include \"helper.h\" done_init = 1; }"} {"target": 0, "idx": 7380, "func": "static void gen_mtfsfi(DisasContext *ctx) { int bf, sh; TCGv_i64 t0; TCGv_i32 t1; if (unlikely(!ctx->fpu_enabled)) { gen_exception(ctx, POWERPC_EXCP_FPU); return; } bf = crbD(ctx->opcode) >> 2; sh = 7 - bf; /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); gen_reset_fpstatus(); t0 = tcg_const_i64(FPIMM(ctx->opcode) << (4 * sh)); t1 = tcg_const_i32(1 << sh); gen_helper_store_fpscr(cpu_env, t0, t1); tcg_temp_free_i64(t0); tcg_temp_free_i32(t1); if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); } /* We can raise a differed exception */ gen_helper_float_check_status(cpu_env); }"} {"target": 1, "idx": 7393, "func": "void os_mem_prealloc(int fd, char *area, size_t memory, Error **errp) { int i; size_t pagesize = getpagesize(); memory = (memory + pagesize - 1) & -pagesize; for (i = 0; i < memory / pagesize; i++) { memset(area + pagesize * i, 0, 1); } }"} {"target": 1, "idx": 7398, "func": "static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { CuvidContext *ctx = avctx->priv_data; AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data; AVCUDADeviceContext *device_hwctx = device_ctx->hwctx; CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx; AVFrame *frame = data; CUVIDSOURCEDATAPACKET cupkt; AVPacket filter_packet = { 0 }; AVPacket filtered_packet = { 0 }; CUdeviceptr mapped_frame = 0; int ret = 0, eret = 0; if (ctx->bsf && avpkt->size) { if ((ret = av_packet_ref(&filter_packet, avpkt)) < 0) { av_log(avctx, AV_LOG_ERROR, \"av_packet_ref failed\\n\"); return ret; } if ((ret = av_bsf_send_packet(ctx->bsf, &filter_packet)) < 0) { av_log(avctx, AV_LOG_ERROR, \"av_bsf_send_packet failed\\n\"); av_packet_unref(&filter_packet); return ret; } if ((ret = av_bsf_receive_packet(ctx->bsf, &filtered_packet)) < 0) { av_log(avctx, AV_LOG_ERROR, \"av_bsf_receive_packet failed\\n\"); return ret; } avpkt = &filtered_packet; } ret = CHECK_CU(cuCtxPushCurrent(cuda_ctx)); if (ret < 0) { av_packet_unref(&filtered_packet); return ret; } memset(&cupkt, 0, sizeof(cupkt)); if (avpkt->size) { cupkt.payload_size = avpkt->size; cupkt.payload = avpkt->data; if (avpkt->pts != AV_NOPTS_VALUE) { cupkt.flags = CUVID_PKT_TIMESTAMP; if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) cupkt.timestamp = av_rescale_q(avpkt->pts, avctx->pkt_timebase, (AVRational){1, 10000000}); else cupkt.timestamp = avpkt->pts; } } else { cupkt.flags = CUVID_PKT_ENDOFSTREAM; } ret = CHECK_CU(cuvidParseVideoData(ctx->cuparser, &cupkt)); av_packet_unref(&filtered_packet); if (ret < 0) { if (ctx->internal_error) ret = ctx->internal_error; goto error; } if 
(av_fifo_size(ctx->frame_queue)) { CUVIDPARSERDISPINFO dispinfo; CUVIDPROCPARAMS params; unsigned int pitch = 0; int offset = 0; int i; av_fifo_generic_read(ctx->frame_queue, &dispinfo, sizeof(CUVIDPARSERDISPINFO), NULL); memset(¶ms, 0, sizeof(params)); params.progressive_frame = dispinfo.progressive_frame; params.second_field = 0; params.top_field_first = dispinfo.top_field_first; ret = CHECK_CU(cuvidMapVideoFrame(ctx->cudecoder, dispinfo.picture_index, &mapped_frame, &pitch, ¶ms)); if (ret < 0) goto error; if (avctx->pix_fmt == AV_PIX_FMT_CUDA) { ret = av_hwframe_get_buffer(ctx->hwframe, frame, 0); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"av_hwframe_get_buffer failed\\n\"); goto error; } ret = ff_decode_frame_props(avctx, frame); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"ff_decode_frame_props failed\\n\"); goto error; } for (i = 0; i < 2; i++) { CUDA_MEMCPY2D cpy = { .srcMemoryType = CU_MEMORYTYPE_DEVICE, .dstMemoryType = CU_MEMORYTYPE_DEVICE, .srcDevice = mapped_frame, .dstDevice = (CUdeviceptr)frame->data[i], .srcPitch = pitch, .dstPitch = frame->linesize[i], .srcY = offset, .WidthInBytes = FFMIN(pitch, frame->linesize[i]), .Height = avctx->coded_height >> (i ? 1 : 0), }; ret = CHECK_CU(cuMemcpy2D(&cpy)); if (ret < 0) goto error; offset += avctx->coded_height; } } else if (avctx->pix_fmt == AV_PIX_FMT_NV12) { AVFrame *tmp_frame = av_frame_alloc(); if (!tmp_frame) { av_log(avctx, AV_LOG_ERROR, \"av_frame_alloc failed\\n\"); ret = AVERROR(ENOMEM); goto error; } tmp_frame->format = AV_PIX_FMT_CUDA; tmp_frame->hw_frames_ctx = av_buffer_ref(ctx->hwframe); tmp_frame->data[0] = (uint8_t*)mapped_frame; tmp_frame->linesize[0] = pitch; tmp_frame->data[1] = (uint8_t*)(mapped_frame + avctx->coded_height * pitch); tmp_frame->linesize[1] = pitch; tmp_frame->width = avctx->width; tmp_frame->height = avctx->height; ret = ff_get_buffer(avctx, frame, 0); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"ff_get_buffer failed\\n\"); av_frame_free(&tmp_frame); goto error; } ret = av_hwframe_transfer_data(frame, tmp_frame, 0); if (ret) { av_log(avctx, AV_LOG_ERROR, \"av_hwframe_transfer_data failed\\n\"); av_frame_free(&tmp_frame); goto error; } av_frame_free(&tmp_frame); } else { ret = AVERROR_BUG; goto error; } frame->width = avctx->width; frame->height = avctx->height; if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) frame->pts = av_rescale_q(dispinfo.timestamp, (AVRational){1, 10000000}, avctx->pkt_timebase); else frame->pts = dispinfo.timestamp; /* CUVIDs opaque reordering breaks the internal pkt logic. * So set pkt_pts and clear all the other pkt_ fields. 
*/ frame->pkt_pts = frame->pts; av_frame_set_pkt_pos(frame, -1); av_frame_set_pkt_duration(frame, 0); av_frame_set_pkt_size(frame, -1); frame->interlaced_frame = !dispinfo.progressive_frame; if (!dispinfo.progressive_frame) frame->top_field_first = dispinfo.top_field_first; *got_frame = 1; } else { *got_frame = 0; } error: if (mapped_frame) eret = CHECK_CU(cuvidUnmapVideoFrame(ctx->cudecoder, mapped_frame)); eret = CHECK_CU(cuCtxPopCurrent(&dummy)); if (eret < 0) return eret; else return ret; }"} {"target": 0, "idx": 7420, "func": "static void hotplug(void) { qtest_start(\"-device virtio-net-pci\"); qpci_plug_device_test(\"virtio-net-pci\", \"net1\", PCI_SLOT_HP, NULL); qpci_unplug_acpi_device_test(\"net1\", PCI_SLOT_HP); test_end(); }"} {"target": 0, "idx": 7428, "func": "static void ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride) { __asm__ volatile( \"movdqa 0x10(%1), %%xmm1 \\n\" \"movdqa 0x20(%1), %%xmm2 \\n\" \"movdqa 0x30(%1), %%xmm3 \\n\" \"movdqa 0x50(%1), %%xmm5 \\n\" \"movdqa 0x60(%1), %%xmm6 \\n\" \"movdqa 0x70(%1), %%xmm7 \\n\" H264_IDCT8_1D_SSE2(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7) TRANSPOSE8(%%xmm4, %%xmm1, %%xmm7, %%xmm3, %%xmm5, %%xmm0, %%xmm2, %%xmm6, (%1)) \"paddw %4, %%xmm4 \\n\" \"movdqa %%xmm4, 0x00(%1) \\n\" \"movdqa %%xmm2, 0x40(%1) \\n\" H264_IDCT8_1D_SSE2(%%xmm4, %%xmm0, %%xmm6, %%xmm3, %%xmm2, %%xmm5, %%xmm7, %%xmm1) \"movdqa %%xmm6, 0x60(%1) \\n\" \"movdqa %%xmm7, 0x70(%1) \\n\" \"pxor %%xmm7, %%xmm7 \\n\" STORE_DIFF_8P(%%xmm2, (%0), %%xmm6, %%xmm7) STORE_DIFF_8P(%%xmm0, (%0,%2), %%xmm6, %%xmm7) STORE_DIFF_8P(%%xmm1, (%0,%2,2), %%xmm6, %%xmm7) STORE_DIFF_8P(%%xmm3, (%0,%3), %%xmm6, %%xmm7) \"lea (%0,%2,4), %0 \\n\" STORE_DIFF_8P(%%xmm5, (%0), %%xmm6, %%xmm7) STORE_DIFF_8P(%%xmm4, (%0,%2), %%xmm6, %%xmm7) \"movdqa 0x60(%1), %%xmm0 \\n\" \"movdqa 0x70(%1), %%xmm1 \\n\" STORE_DIFF_8P(%%xmm0, (%0,%2,2), %%xmm6, %%xmm7) STORE_DIFF_8P(%%xmm1, (%0,%3), %%xmm6, %%xmm7) :\"+r\"(dst) :\"r\"(block), \"r\"((x86_reg)stride), \"r\"((x86_reg)3L*stride), \"m\"(ff_pw_32) ); }"} {"target": 0, "idx": 7430, "func": "static void ide_identify(IDEState *s) { uint16_t *p; unsigned int oldsize; memset(s->io_buffer, 0, 512); p = (uint16_t *)s->io_buffer; stw(p + 0, 0x0040); stw(p + 1, s->cylinders); stw(p + 3, s->heads); stw(p + 4, 512 * s->sectors); /* sectors */ stw(p + 5, 512); /* sector size */ stw(p + 6, s->sectors); stw(p + 20, 3); /* buffer type */ stw(p + 21, 512); /* cache size in sectors */ stw(p + 22, 4); /* ecc bytes */ padstr((uint8_t *)(p + 27), \"QEMU HARDDISK\", 40); #if MAX_MULT_SECTORS > 1 stw(p + 47, MAX_MULT_SECTORS); #endif stw(p + 48, 1); /* dword I/O */ stw(p + 49, 1 << 9); /* LBA supported, no DMA */ stw(p + 51, 0x200); /* PIO transfer cycle */ stw(p + 52, 0x200); /* DMA transfer cycle */ stw(p + 54, s->cylinders); stw(p + 55, s->heads); stw(p + 56, s->sectors); oldsize = s->cylinders * s->heads * s->sectors; stw(p + 57, oldsize); stw(p + 58, oldsize >> 16); if (s->mult_sectors) stw(p + 59, 0x100 | s->mult_sectors); stw(p + 60, s->nb_sectors); stw(p + 61, s->nb_sectors >> 16); stw(p + 80, (1 << 1) | (1 << 2)); stw(p + 82, (1 << 14)); stw(p + 83, (1 << 14)); stw(p + 84, (1 << 14)); stw(p + 85, (1 << 14)); stw(p + 86, 0); stw(p + 87, (1 << 14)); }"} {"target": 0, "idx": 7448, "func": "static int encode_apng(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet) { PNGEncContext *s = avctx->priv_data; int ret; int enc_row_size; size_t max_packet_size; APNGFctlChunk fctl_chunk = {0}; if (pict && 
avctx->codec_id == AV_CODEC_ID_APNG && s->color_type == PNG_COLOR_TYPE_PALETTE) { uint32_t checksum = ~av_crc(av_crc_get_table(AV_CRC_32_IEEE_LE), ~0U, pict->data[1], 256 * sizeof(uint32_t)); if (avctx->frame_number == 0) { s->palette_checksum = checksum; } else if (checksum != s->palette_checksum) { av_log(avctx, AV_LOG_ERROR, \"Input contains more than one unique palette. APNG does not support multiple palettes.\\n\"); return -1; } } enc_row_size = deflateBound(&s->zstream, (avctx->width * s->bits_per_pixel + 7) >> 3); max_packet_size = AV_INPUT_BUFFER_MIN_SIZE + // headers avctx->height * ( enc_row_size + (4 + 12) * (((int64_t)enc_row_size + IOBUF_SIZE - 1) / IOBUF_SIZE) // fdAT * ceil(enc_row_size / IOBUF_SIZE) ); if (max_packet_size > INT_MAX) return AVERROR(ENOMEM); if (avctx->frame_number == 0) { if (!pict) return AVERROR(EINVAL); s->bytestream = avctx->extradata = av_malloc(FF_MIN_BUFFER_SIZE); if (!avctx->extradata) return AVERROR(ENOMEM); ret = encode_headers(avctx, pict); if (ret < 0) return ret; avctx->extradata_size = s->bytestream - avctx->extradata; s->last_frame_packet = av_malloc(max_packet_size); if (!s->last_frame_packet) return AVERROR(ENOMEM); } else if (s->last_frame) { ret = ff_alloc_packet2(avctx, pkt, max_packet_size, 0); if (ret < 0) return ret; memcpy(pkt->data, s->last_frame_packet, s->last_frame_packet_size); pkt->size = s->last_frame_packet_size; pkt->pts = pkt->dts = s->last_frame->pts; } if (pict) { s->bytestream_start = s->bytestream = s->last_frame_packet; s->bytestream_end = s->bytestream + max_packet_size; // We're encoding the frame first, so we have to do a bit of shuffling around // to have the image data write to the correct place in the buffer fctl_chunk.sequence_number = s->sequence_number; ++s->sequence_number; s->bytestream += 26 + 12; ret = apng_encode_frame(avctx, pict, &fctl_chunk, &s->last_frame_fctl); if (ret < 0) return ret; fctl_chunk.delay_num = 0; // delay filled in during muxing fctl_chunk.delay_den = 0; } else { s->last_frame_fctl.dispose_op = APNG_DISPOSE_OP_NONE; } if (s->last_frame) { uint8_t* last_fctl_chunk_start = pkt->data; uint8_t buf[26]; AV_WB32(buf + 0, s->last_frame_fctl.sequence_number); AV_WB32(buf + 4, s->last_frame_fctl.width); AV_WB32(buf + 8, s->last_frame_fctl.height); AV_WB32(buf + 12, s->last_frame_fctl.x_offset); AV_WB32(buf + 16, s->last_frame_fctl.y_offset); AV_WB16(buf + 20, s->last_frame_fctl.delay_num); AV_WB16(buf + 22, s->last_frame_fctl.delay_den); buf[24] = s->last_frame_fctl.dispose_op; buf[25] = s->last_frame_fctl.blend_op; png_write_chunk(&last_fctl_chunk_start, MKTAG('f', 'c', 'T', 'L'), buf, 26); *got_packet = 1; } if (pict) { if (!s->last_frame) { s->last_frame = av_frame_alloc(); if (!s->last_frame) return AVERROR(ENOMEM); } else if (s->last_frame_fctl.dispose_op != APNG_DISPOSE_OP_PREVIOUS) { if (!s->prev_frame) { s->prev_frame = av_frame_alloc(); if (!s->prev_frame) return AVERROR(ENOMEM); s->prev_frame->format = pict->format; s->prev_frame->width = pict->width; s->prev_frame->height = pict->height; if ((ret = av_frame_get_buffer(s->prev_frame, 32)) < 0) return ret; } // Do disposal, but not blending memcpy(s->prev_frame->data[0], s->last_frame->data[0], s->last_frame->linesize[0] * s->last_frame->height); if (s->last_frame_fctl.dispose_op == APNG_DISPOSE_OP_BACKGROUND) { uint32_t y; uint8_t bpp = (s->bits_per_pixel + 7) >> 3; for (y = s->last_frame_fctl.y_offset; y < s->last_frame_fctl.y_offset + s->last_frame_fctl.height; ++y) { size_t row_start = s->last_frame->linesize[0] * y + bpp * 
s->last_frame_fctl.x_offset; memset(s->prev_frame->data[0] + row_start, 0, bpp * s->last_frame_fctl.width); } } } av_frame_unref(s->last_frame); ret = av_frame_ref(s->last_frame, (AVFrame*)pict); if (ret < 0) return ret; s->last_frame_fctl = fctl_chunk; s->last_frame_packet_size = s->bytestream - s->bytestream_start; } else { av_frame_free(&s->last_frame); } return 0; }"} {"target": 0, "idx": 7449, "func": "BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { BlockDriver *drv = bs->drv; BlockDriverAIOCB *ret; trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); if (!drv) return NULL; if (bdrv_check_request(bs, sector_num, nb_sectors)) return NULL; ret = drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors, cb, opaque); if (ret) { /* Update stats even though technically transfer has not happened. */ bs->rd_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE; bs->rd_ops ++; } return ret; }"} {"target": 0, "idx": 7458, "func": "static void migrate_set_downtime(QTestState *who, const char *value) { QDict *rsp; gchar *cmd; cmd = g_strdup_printf(\"{ 'execute': 'migrate_set_downtime',\" \"'arguments': { 'value': %s } }\", value); rsp = qtest_qmp(who, cmd); g_free(cmd); g_assert(qdict_haskey(rsp, \"return\")); QDECREF(rsp); }"} {"target": 0, "idx": 7462, "func": "target_ulong do_arm_semihosting(CPUARMState *env) { ARMCPU *cpu = arm_env_get_cpu(env); CPUState *cs = CPU(cpu); target_ulong args; target_ulong arg0, arg1, arg2, arg3; char * s; int nr; uint32_t ret; uint32_t len; #ifdef CONFIG_USER_ONLY TaskState *ts = cs->opaque; #else CPUARMState *ts = env; #endif if (is_a64(env)) { /* Note that the syscall number is in W0, not X0 */ nr = env->xregs[0] & 0xffffffffU; args = env->xregs[1]; } else { nr = env->regs[0]; args = env->regs[1]; } switch (nr) { case TARGET_SYS_OPEN: GET_ARG(0); GET_ARG(1); GET_ARG(2); s = lock_user_string(arg0); if (!s) { /* FIXME - should this error code be -TARGET_EFAULT ? */ return (uint32_t)-1; } if (arg1 >= 12) { unlock_user(s, arg0, 0); return (uint32_t)-1; } if (strcmp(s, \":tt\") == 0) { int result_fileno = arg1 < 4 ? STDIN_FILENO : STDOUT_FILENO; unlock_user(s, arg0, 0); return result_fileno; } if (use_gdb_syscalls()) { ret = arm_gdb_syscall(cpu, arm_semi_cb, \"open,%s,%x,1a4\", arg0, (int)arg2+1, gdb_open_modeflags[arg1]); } else { ret = set_swi_errno(ts, open(s, open_modeflags[arg1], 0644)); } unlock_user(s, arg0, 0); return ret; case TARGET_SYS_CLOSE: GET_ARG(0); if (use_gdb_syscalls()) { return arm_gdb_syscall(cpu, arm_semi_cb, \"close,%x\", arg0); } else { return set_swi_errno(ts, close(arg0)); } case TARGET_SYS_WRITEC: { char c; if (get_user_u8(c, args)) /* FIXME - should this error code be -TARGET_EFAULT ? */ return (uint32_t)-1; /* Write to debug console. stderr is near enough. */ if (use_gdb_syscalls()) { return arm_gdb_syscall(cpu, arm_semi_cb, \"write,2,%x,1\", args); } else { return write(STDERR_FILENO, &c, 1); } } case TARGET_SYS_WRITE0: if (!(s = lock_user_string(args))) /* FIXME - should this error code be -TARGET_EFAULT ? 
*/ return (uint32_t)-1; len = strlen(s); if (use_gdb_syscalls()) { return arm_gdb_syscall(cpu, arm_semi_cb, \"write,2,%x,%x\", args, len); } else { ret = write(STDERR_FILENO, s, len); } unlock_user(s, args, 0); return ret; case TARGET_SYS_WRITE: GET_ARG(0); GET_ARG(1); GET_ARG(2); len = arg2; if (use_gdb_syscalls()) { arm_semi_syscall_len = len; return arm_gdb_syscall(cpu, arm_semi_cb, \"write,%x,%x,%x\", arg0, arg1, len); } else { s = lock_user(VERIFY_READ, arg1, len, 1); if (!s) { /* FIXME - should this error code be -TARGET_EFAULT ? */ return (uint32_t)-1; } ret = set_swi_errno(ts, write(arg0, s, len)); unlock_user(s, arg1, 0); if (ret == (uint32_t)-1) return -1; return len - ret; } case TARGET_SYS_READ: GET_ARG(0); GET_ARG(1); GET_ARG(2); len = arg2; if (use_gdb_syscalls()) { arm_semi_syscall_len = len; return arm_gdb_syscall(cpu, arm_semi_cb, \"read,%x,%x,%x\", arg0, arg1, len); } else { s = lock_user(VERIFY_WRITE, arg1, len, 0); if (!s) { /* FIXME - should this error code be -TARGET_EFAULT ? */ return (uint32_t)-1; } do { ret = set_swi_errno(ts, read(arg0, s, len)); } while (ret == -1 && errno == EINTR); unlock_user(s, arg1, len); if (ret == (uint32_t)-1) return -1; return len - ret; } case TARGET_SYS_READC: /* XXX: Read from debug console. Not implemented. */ return 0; case TARGET_SYS_ISTTY: GET_ARG(0); if (use_gdb_syscalls()) { return arm_gdb_syscall(cpu, arm_semi_cb, \"isatty,%x\", arg0); } else { return isatty(arg0); } case TARGET_SYS_SEEK: GET_ARG(0); GET_ARG(1); if (use_gdb_syscalls()) { return arm_gdb_syscall(cpu, arm_semi_cb, \"lseek,%x,%x,0\", arg0, arg1); } else { ret = set_swi_errno(ts, lseek(arg0, arg1, SEEK_SET)); if (ret == (uint32_t)-1) return -1; return 0; } case TARGET_SYS_FLEN: GET_ARG(0); if (use_gdb_syscalls()) { return arm_gdb_syscall(cpu, arm_semi_flen_cb, \"fstat,%x,%x\", arg0, arm_flen_buf(cpu)); } else { struct stat buf; ret = set_swi_errno(ts, fstat(arg0, &buf)); if (ret == (uint32_t)-1) return -1; return buf.st_size; } case TARGET_SYS_TMPNAM: /* XXX: Not implemented. */ return -1; case TARGET_SYS_REMOVE: GET_ARG(0); GET_ARG(1); if (use_gdb_syscalls()) { ret = arm_gdb_syscall(cpu, arm_semi_cb, \"unlink,%s\", arg0, (int)arg1+1); } else { s = lock_user_string(arg0); if (!s) { /* FIXME - should this error code be -TARGET_EFAULT ? */ return (uint32_t)-1; } ret = set_swi_errno(ts, remove(s)); unlock_user(s, arg0, 0); } return ret; case TARGET_SYS_RENAME: GET_ARG(0); GET_ARG(1); GET_ARG(2); GET_ARG(3); if (use_gdb_syscalls()) { return arm_gdb_syscall(cpu, arm_semi_cb, \"rename,%s,%s\", arg0, (int)arg1+1, arg2, (int)arg3+1); } else { char *s2; s = lock_user_string(arg0); s2 = lock_user_string(arg2); if (!s || !s2) /* FIXME - should this error code be -TARGET_EFAULT ? */ ret = (uint32_t)-1; else ret = set_swi_errno(ts, rename(s, s2)); if (s2) unlock_user(s2, arg2, 0); if (s) unlock_user(s, arg0, 0); return ret; } case TARGET_SYS_CLOCK: return clock() / (CLOCKS_PER_SEC / 100); case TARGET_SYS_TIME: return set_swi_errno(ts, time(NULL)); case TARGET_SYS_SYSTEM: GET_ARG(0); GET_ARG(1); if (use_gdb_syscalls()) { return arm_gdb_syscall(cpu, arm_semi_cb, \"system,%s\", arg0, (int)arg1+1); } else { s = lock_user_string(arg0); if (!s) { /* FIXME - should this error code be -TARGET_EFAULT ? */ return (uint32_t)-1; } ret = set_swi_errno(ts, system(s)); unlock_user(s, arg0, 0); return ret; } case TARGET_SYS_ERRNO: #ifdef CONFIG_USER_ONLY return ts->swi_errno; #else return syscall_err; #endif case TARGET_SYS_GET_CMDLINE: { /* Build a command-line from the original argv. 
* * The inputs are: * * arg0, pointer to a buffer of at least the size * specified in arg1. * * arg1, size of the buffer pointed to by arg0 in * bytes. * * The outputs are: * * arg0, pointer to null-terminated string of the * command line. * * arg1, length of the string pointed to by arg0. */ char *output_buffer; size_t input_size; size_t output_size; int status = 0; #if !defined(CONFIG_USER_ONLY) const char *cmdline; #endif GET_ARG(0); GET_ARG(1); input_size = arg1; /* Compute the size of the output string. */ #if !defined(CONFIG_USER_ONLY) cmdline = semihosting_get_cmdline(); if (cmdline == NULL) { cmdline = \"\"; /* Default to an empty line. */ } output_size = strlen(cmdline) + 1; /* Count terminating 0. */ #else unsigned int i; output_size = ts->info->arg_end - ts->info->arg_start; if (!output_size) { /* We special-case the \"empty command line\" case (argc==0). Just provide the terminating 0. */ output_size = 1; } #endif if (output_size > input_size) { /* Not enough space to store command-line arguments. */ return -1; } /* Adjust the command-line length. */ if (SET_ARG(1, output_size - 1)) { /* Couldn't write back to argument block */ return -1; } /* Lock the buffer on the ARM side. */ output_buffer = lock_user(VERIFY_WRITE, arg0, output_size, 0); if (!output_buffer) { return -1; } /* Copy the command-line arguments. */ #if !defined(CONFIG_USER_ONLY) pstrcpy(output_buffer, output_size, cmdline); #else if (output_size == 1) { /* Empty command-line. */ output_buffer[0] = '\\0'; goto out; } if (copy_from_user(output_buffer, ts->info->arg_start, output_size)) { status = -1; goto out; } /* Separate arguments by white spaces. */ for (i = 0; i < output_size - 1; i++) { if (output_buffer[i] == 0) { output_buffer[i] = ' '; } } out: #endif /* Unlock the buffer on the ARM side. */ unlock_user(output_buffer, arg0, output_size); return status; } case TARGET_SYS_HEAPINFO: { target_ulong retvals[4]; uint32_t limit; int i; GET_ARG(0); #ifdef CONFIG_USER_ONLY /* Some C libraries assume the heap immediately follows .bss, so allocate it using sbrk. */ if (!ts->heap_limit) { abi_ulong ret; ts->heap_base = do_brk(0); limit = ts->heap_base + ARM_ANGEL_HEAP_SIZE; /* Try a big heap, and reduce the size if that fails. */ for (;;) { ret = do_brk(limit); if (ret >= limit) { break; } limit = (ts->heap_base >> 1) + (limit >> 1); } ts->heap_limit = limit; } retvals[0] = ts->heap_base; retvals[1] = ts->heap_limit; retvals[2] = ts->stack_base; retvals[3] = 0; /* Stack limit. */ #else limit = ram_size; /* TODO: Make this use the limit of the loaded application. */ retvals[0] = limit / 2; retvals[1] = limit; retvals[2] = limit; /* Stack base */ retvals[3] = 0; /* Stack limit. */ #endif for (i = 0; i < ARRAY_SIZE(retvals); i++) { bool fail; if (is_a64(env)) { fail = put_user_u64(retvals[i], arg0 + i * 8); } else { fail = put_user_u32(retvals[i], arg0 + i * 4); } if (fail) { /* Couldn't write back to argument block */ return -1; } } return 0; } case TARGET_SYS_EXIT: if (is_a64(env)) { /* The A64 version of this call takes a parameter block, * so the application-exit type can return a subcode which * is the exit status code from the application. */ GET_ARG(0); GET_ARG(1); if (arg0 == ADP_Stopped_ApplicationExit) { ret = arg1; } else { ret = 1; } } else { /* ARM specifies only Stopped_ApplicationExit as normal * exit, everything else is considered an error */ ret = (args == ADP_Stopped_ApplicationExit) ? 
0 : 1; } gdb_exit(env, ret); exit(ret); case TARGET_SYS_SYNCCACHE: /* Clean the D-cache and invalidate the I-cache for the specified * virtual address range. This is a nop for us since we don't * implement caches. This is only present on A64. */ if (is_a64(env)) { return 0; } /* fall through -- invalid for A32/T32 */ default: fprintf(stderr, \"qemu: Unsupported SemiHosting SWI 0x%02x\\n\", nr); cpu_dump_state(cs, stderr, fprintf, 0); abort(); } }"} {"target": 0, "idx": 7471, "func": "static int decode_unregistered_user_data(H264SEIUnregistered *h, GetBitContext *gb, void *logctx, int size) { uint8_t *user_data; int e, build, i; if (size < 16 || size >= INT_MAX - 16) return AVERROR_INVALIDDATA; user_data = av_malloc(16 + size + 1); if (!user_data) return AVERROR(ENOMEM); for (i = 0; i < size + 16; i++) user_data[i] = get_bits(gb, 8); user_data[i] = 0; e = sscanf(user_data + 16, \"x264 - core %d\", &build); if (e == 1 && build > 0) h->x264_build = build; if (e == 1 && build == 1 && !strncmp(user_data+16, \"x264 - core 0000\", 16)) h->x264_build = 67; if (strlen(user_data + 16) > 0) av_log(logctx, AV_LOG_DEBUG, \"user data:\\\"%s\\\"\\n\", user_data + 16); av_free(user_data); return 0; }"} {"target": 0, "idx": 7472, "func": "void avfilter_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref) { void (*filter_samples)(AVFilterLink *, AVFilterBufferRef *); AVFilterPad *dst = link->dstpad; int i; FF_DPRINTF_START(NULL, filter_samples); ff_dlog_link(NULL, link, 1); if (!(filter_samples = dst->filter_samples)) filter_samples = avfilter_default_filter_samples; /* prepare to copy the samples if the buffer has insufficient permissions */ if ((dst->min_perms & samplesref->perms) != dst->min_perms || dst->rej_perms & samplesref->perms) { av_log(link->dst, AV_LOG_DEBUG, \"Copying audio data in avfilter (have perms %x, need %x, reject %x)\\n\", samplesref->perms, link->dstpad->min_perms, link->dstpad->rej_perms); link->cur_buf = avfilter_default_get_audio_buffer(link, dst->min_perms, samplesref->audio->nb_samples); link->cur_buf->pts = samplesref->pts; link->cur_buf->audio->sample_rate = samplesref->audio->sample_rate; /* Copy actual data into new samples buffer */ for (i = 0; samplesref->data[i]; i++) memcpy(link->cur_buf->data[i], samplesref->data[i], samplesref->linesize[0]); avfilter_unref_buffer(samplesref); } else link->cur_buf = samplesref; filter_samples(link, link->cur_buf); }"} {"target": 0, "idx": 7480, "func": "static qemu_irq *ppce500_init_mpic(PPCE500Params *params, MemoryRegion *ccsr, qemu_irq **irqs) { qemu_irq *mpic; DeviceState *dev; SysBusDevice *s; int i, j, k; mpic = g_new(qemu_irq, 256); dev = qdev_create(NULL, \"openpic\"); qdev_prop_set_uint32(dev, \"nb_cpus\", smp_cpus); qdev_prop_set_uint32(dev, \"model\", params->mpic_version); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); k = 0; for (i = 0; i < smp_cpus; i++) { for (j = 0; j < OPENPIC_OUTPUT_NB; j++) { sysbus_connect_irq(s, k++, irqs[i][j]); } } for (i = 0; i < 256; i++) { mpic[i] = qdev_get_gpio_in(dev, i); } memory_region_add_subregion(ccsr, MPC8544_MPIC_REGS_OFFSET, s->mmio[0].memory); return mpic; }"} {"target": 0, "idx": 7481, "func": "CharDriverState *qemu_chr_new(const char *label, const char *filename, void (*init)(struct CharDriverState *s)) { const char *p; CharDriverState *chr; QemuOpts *opts; Error *err = NULL; if (strstart(filename, \"chardev:\", &p)) { return qemu_chr_find(p); } opts = qemu_chr_parse_compat(label, filename); if (!opts) return NULL; chr = qemu_chr_new_from_opts(opts, init, &err); if 
(err) { error_report_err(err); } if (chr && qemu_opt_get_bool(opts, \"mux\", 0)) { qemu_chr_fe_claim_no_fail(chr); monitor_init(chr, MONITOR_USE_READLINE); } return chr; }"} {"target": 0, "idx": 7490, "func": "static int net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, const char *model, const char *name, const char *ifname, const char *script, const char *downscript, const char *vhostfdname, int vnet_hdr, int fd) { TAPState *s; s = net_tap_fd_init(peer, model, name, fd, vnet_hdr); if (!s) { close(fd); return -1; } if (tap_set_sndbuf(s->fd, tap) < 0) { return -1; } if (tap->has_fd || tap->has_fds) { snprintf(s->nc.info_str, sizeof(s->nc.info_str), \"fd=%d\", fd); } else if (tap->has_helper) { snprintf(s->nc.info_str, sizeof(s->nc.info_str), \"helper=%s\", tap->helper); } else { snprintf(s->nc.info_str, sizeof(s->nc.info_str), \"ifname=%s,script=%s,downscript=%s\", ifname, script, downscript); if (strcmp(downscript, \"no\") != 0) { snprintf(s->down_script, sizeof(s->down_script), \"%s\", downscript); snprintf(s->down_script_arg, sizeof(s->down_script_arg), \"%s\", ifname); } } if (tap->has_vhost ? tap->vhost : vhostfdname || (tap->has_vhostforce && tap->vhostforce)) { int vhostfd; if (tap->has_vhostfd) { vhostfd = monitor_handle_fd_param(cur_mon, vhostfdname); if (vhostfd == -1) { return -1; } } else { vhostfd = -1; } s->vhost_net = vhost_net_init(&s->nc, vhostfd, tap->has_vhostforce && tap->vhostforce); if (!s->vhost_net) { error_report(\"vhost-net requested but could not be initialized\"); return -1; } } else if (tap->has_vhostfd || tap->has_vhostfds) { error_report(\"vhostfd= is not valid without vhost\"); return -1; } return 0; }"} {"target": 0, "idx": 7491, "func": "int qdev_prop_check_globals(void) { GList *l; int ret = 0; for (l = global_props; l; l = l->next) { GlobalProperty *prop = l->data; ObjectClass *oc; DeviceClass *dc; if (prop->used) { continue; } if (!prop->user_provided) { continue; } oc = object_class_by_name(prop->driver); oc = object_class_dynamic_cast(oc, TYPE_DEVICE); if (!oc) { error_report(\"Warning: global %s.%s has invalid class name\", prop->driver, prop->property); ret = 1; continue; } dc = DEVICE_CLASS(oc); if (!dc->hotpluggable && !prop->used) { error_report(\"Warning: global %s.%s=%s not used\", prop->driver, prop->property, prop->value); ret = 1; continue; } } return ret; }"} {"target": 0, "idx": 7498, "func": "static int qesd_init_out (HWVoiceOut *hw, audsettings_t *as) { ESDVoiceOut *esd = (ESDVoiceOut *) hw; audsettings_t obt_as = *as; int esdfmt = ESD_STREAM | ESD_PLAY; int err; sigset_t set, old_set; sigfillset (&set); esdfmt |= (as->nchannels == 2) ? 
ESD_STEREO : ESD_MONO; switch (as->fmt) { case AUD_FMT_S8: case AUD_FMT_U8: esdfmt |= ESD_BITS8; obt_as.fmt = AUD_FMT_U8; break; case AUD_FMT_S32: case AUD_FMT_U32: dolog (\"Will use 16 instead of 32 bit samples\\n\"); case AUD_FMT_S16: case AUD_FMT_U16: deffmt: esdfmt |= ESD_BITS16; obt_as.fmt = AUD_FMT_S16; break; default: dolog (\"Internal logic error: Bad audio format %d\\n\", as->fmt); goto deffmt; } obt_as.endianness = AUDIO_HOST_ENDIANNESS; audio_pcm_init_info (&hw->info, &obt_as); hw->samples = conf.samples; esd->pcm_buf = audio_calloc (AUDIO_FUNC, hw->samples, 1 << hw->info.shift); if (!esd->pcm_buf) { dolog (\"Could not allocate buffer (%d bytes)\\n\", hw->samples << hw->info.shift); return -1; } esd->fd = -1; err = pthread_sigmask (SIG_BLOCK, &set, &old_set); if (err) { qesd_logerr (err, \"pthread_sigmask failed\\n\"); goto fail1; } esd->fd = esd_play_stream (esdfmt, as->freq, conf.dac_host, NULL); if (esd->fd < 0) { qesd_logerr (errno, \"esd_play_stream failed\\n\"); goto fail2; } if (audio_pt_init (&esd->pt, qesd_thread_out, esd, AUDIO_CAP, AUDIO_FUNC)) { goto fail3; } err = pthread_sigmask (SIG_SETMASK, &old_set, NULL); if (err) { qesd_logerr (err, \"pthread_sigmask(restore) failed\\n\"); } return 0; fail3: if (close (esd->fd)) { qesd_logerr (errno, \"%s: close on esd socket(%d) failed\\n\", AUDIO_FUNC, esd->fd); } esd->fd = -1; fail2: err = pthread_sigmask (SIG_SETMASK, &old_set, NULL); if (err) { qesd_logerr (err, \"pthread_sigmask(restore) failed\\n\"); } fail1: qemu_free (esd->pcm_buf); esd->pcm_buf = NULL; return -1; }"} {"target": 1, "idx": 7521, "func": "static void monitor_find_completion(const char *cmdline) { const char *cmdname; char *args[MAX_ARGS]; int nb_args, i, len; const char *ptype, *str; const mon_cmd_t *cmd; const KeyDef *key; parse_cmdline(cmdline, &nb_args, args); #ifdef DEBUG_COMPLETION for(i = 0; i < nb_args; i++) { monitor_printf(cur_mon, \"arg%d = '%s'\\n\", i, (char *)args[i]); } #endif /* if the line ends with a space, it means we want to complete the next arg */ len = strlen(cmdline); if (len > 0 && qemu_isspace(cmdline[len - 1])) { if (nb_args >= MAX_ARGS) return; args[nb_args++] = qemu_strdup(\"\"); } if (nb_args <= 1) { /* command completion */ if (nb_args == 0) cmdname = \"\"; else cmdname = args[0]; readline_set_completion_index(cur_mon->rs, strlen(cmdname)); for(cmd = mon_cmds; cmd->name != NULL; cmd++) { cmd_completion(cmdname, cmd->name); } } else { /* find the command */ for(cmd = mon_cmds; cmd->name != NULL; cmd++) { if (compare_cmd(args[0], cmd->name)) goto found; } return; found: ptype = next_arg_type(cmd->args_type); for(i = 0; i < nb_args - 2; i++) { if (*ptype != '\\0') { ptype = next_arg_type(ptype); while (*ptype == '?') ptype = next_arg_type(ptype); } } str = args[nb_args - 1]; if (*ptype == '-' && ptype[1] != '\\0') { ptype += 2; } switch(*ptype) { case 'F': /* file completion */ readline_set_completion_index(cur_mon->rs, strlen(str)); file_completion(str); break; case 'B': /* block device name completion */ readline_set_completion_index(cur_mon->rs, strlen(str)); bdrv_iterate(block_completion_it, (void *)str); break; case 's': /* XXX: more generic ? 
*/ if (!strcmp(cmd->name, \"info\")) { readline_set_completion_index(cur_mon->rs, strlen(str)); for(cmd = info_cmds; cmd->name != NULL; cmd++) { cmd_completion(str, cmd->name); } } else if (!strcmp(cmd->name, \"sendkey\")) { char *sep = strrchr(str, '-'); if (sep) str = sep + 1; readline_set_completion_index(cur_mon->rs, strlen(str)); for(key = key_defs; key->name != NULL; key++) { cmd_completion(str, key->name); } } else if (!strcmp(cmd->name, \"help|?\")) { readline_set_completion_index(cur_mon->rs, strlen(str)); for (cmd = mon_cmds; cmd->name != NULL; cmd++) { cmd_completion(str, cmd->name); } } break; default: break; } } for(i = 0; i < nb_args; i++) qemu_free(args[i]); }"} {"target": 0, "idx": 7556, "func": "static void sd_response_r1_make(SDState *sd, uint8_t *response, uint32_t last_status) { uint32_t mask = CARD_STATUS_B ^ ILLEGAL_COMMAND; uint32_t status; status = (sd->card_status & ~mask) | (last_status & mask); sd->card_status &= ~CARD_STATUS_C | APP_CMD; response[0] = (status >> 24) & 0xff; response[1] = (status >> 16) & 0xff; response[2] = (status >> 8) & 0xff; response[3] = (status >> 0) & 0xff; }"} {"target": 0, "idx": 7558, "func": "static int mpc8_decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MPCContext *c = avctx->priv_data; GetBitContext gb2, *gb = &gb2; int i, j, k, ch, cnt, res, t; Band *bands = c->bands; int off; int maxband, keyframe; int last[2]; keyframe = c->cur_frame == 0; if(keyframe){ memset(c->Q, 0, sizeof(c->Q)); c->last_bits_used = 0; } init_get_bits(gb, buf, buf_size * 8); skip_bits(gb, c->last_bits_used & 7); if(keyframe) maxband = mpc8_get_mod_golomb(gb, c->maxbands + 1); else{ maxband = c->last_max_band + get_vlc2(gb, band_vlc.table, MPC8_BANDS_BITS, 2); if(maxband > 32) maxband -= 33; } c->last_max_band = maxband; /* read subband indexes */ if(maxband){ last[0] = last[1] = 0; for(i = maxband - 1; i >= 0; i--){ for(ch = 0; ch < 2; ch++){ last[ch] = get_vlc2(gb, res_vlc[last[ch] > 2].table, MPC8_RES_BITS, 2) + last[ch]; if(last[ch] > 15) last[ch] -= 17; bands[i].res[ch] = last[ch]; } } if(c->MSS){ int mask; cnt = 0; for(i = 0; i < maxband; i++) if(bands[i].res[0] || bands[i].res[1]) cnt++; t = mpc8_get_mod_golomb(gb, cnt); mask = mpc8_get_mask(gb, cnt, t); for(i = maxband - 1; i >= 0; i--) if(bands[i].res[0] || bands[i].res[1]){ bands[i].msf = mask & 1; mask >>= 1; } } } for(i = maxband; i < c->maxbands; i++) bands[i].res[0] = bands[i].res[1] = 0; if(keyframe){ for(i = 0; i < 32; i++) c->oldDSCF[0][i] = c->oldDSCF[1][i] = 1; } for(i = 0; i < maxband; i++){ if(bands[i].res[0] || bands[i].res[1]){ cnt = !!bands[i].res[0] + !!bands[i].res[1] - 1; if(cnt >= 0){ t = get_vlc2(gb, scfi_vlc[cnt].table, scfi_vlc[cnt].bits, 1); if(bands[i].res[0]) bands[i].scfi[0] = t >> (2 * cnt); if(bands[i].res[1]) bands[i].scfi[1] = t & 3; } } } for(i = 0; i < maxband; i++){ for(ch = 0; ch < 2; ch++){ if(!bands[i].res[ch]) continue; if(c->oldDSCF[ch][i]){ bands[i].scf_idx[ch][0] = get_bits(gb, 7) - 6; c->oldDSCF[ch][i] = 0; }else{ t = get_vlc2(gb, dscf_vlc[1].table, MPC8_DSCF1_BITS, 2); if(t == 64) t += get_bits(gb, 6); bands[i].scf_idx[ch][0] = ((bands[i].scf_idx[ch][2] + t - 25) & 0x7F) - 6; } for(j = 0; j < 2; j++){ if((bands[i].scfi[ch] << j) & 2) bands[i].scf_idx[ch][j + 1] = bands[i].scf_idx[ch][j]; else{ t = get_vlc2(gb, dscf_vlc[0].table, MPC8_DSCF0_BITS, 2); if(t == 31) t = 64 + get_bits(gb, 6); bands[i].scf_idx[ch][j + 1] = ((bands[i].scf_idx[ch][j] + t - 25) & 0x7F) - 
6; } } } } for(i = 0, off = 0; i < maxband; i++, off += SAMPLES_PER_BAND){ for(ch = 0; ch < 2; ch++){ res = bands[i].res[ch]; switch(res){ case -1: for(j = 0; j < SAMPLES_PER_BAND; j++) c->Q[ch][off + j] = (av_lfg_get(&c->rnd) & 0x3FC) - 510; break; case 0: break; case 1: for(j = 0; j < SAMPLES_PER_BAND; j += SAMPLES_PER_BAND / 2){ cnt = get_vlc2(gb, q1_vlc.table, MPC8_Q1_BITS, 2); t = mpc8_get_mask(gb, 18, cnt); for(k = 0; k < SAMPLES_PER_BAND / 2; k++, t <<= 1) c->Q[ch][off + j + k] = (t & 0x20000) ? (get_bits1(gb) << 1) - 1 : 0; } break; case 2: cnt = 6;//2*mpc8_thres[res] for(j = 0; j < SAMPLES_PER_BAND; j += 3){ t = get_vlc2(gb, q2_vlc[cnt > 3].table, MPC8_Q2_BITS, 2); c->Q[ch][off + j + 0] = mpc8_idx50[t]; c->Q[ch][off + j + 1] = mpc8_idx51[t]; c->Q[ch][off + j + 2] = mpc8_idx52[t]; cnt = (cnt >> 1) + mpc8_huffq2[t]; } break; case 3: case 4: for(j = 0; j < SAMPLES_PER_BAND; j += 2){ t = get_vlc2(gb, q3_vlc[res - 3].table, MPC8_Q3_BITS, 2) + q3_offsets[res - 3]; c->Q[ch][off + j + 1] = t >> 4; c->Q[ch][off + j + 0] = (t & 8) ? (t & 0xF) - 16 : (t & 0xF); } break; case 5: case 6: case 7: case 8: cnt = 2 * mpc8_thres[res]; for(j = 0; j < SAMPLES_PER_BAND; j++){ t = get_vlc2(gb, quant_vlc[res - 5][cnt > mpc8_thres[res]].table, quant_vlc[res - 5][cnt > mpc8_thres[res]].bits, 2) + quant_offsets[res - 5]; c->Q[ch][off + j] = t; cnt = (cnt >> 1) + FFABS(c->Q[ch][off + j]); } break; default: for(j = 0; j < SAMPLES_PER_BAND; j++){ c->Q[ch][off + j] = get_vlc2(gb, q9up_vlc.table, MPC8_Q9UP_BITS, 2); if(res != 9){ c->Q[ch][off + j] <<= res - 9; c->Q[ch][off + j] |= get_bits(gb, res - 9); } c->Q[ch][off + j] -= (1 << (res - 2)) - 1; } } } } ff_mpc_dequantize_and_synth(c, maxband, data, avctx->channels); c->cur_frame++; c->last_bits_used = get_bits_count(gb); if(c->cur_frame >= c->frames) c->cur_frame = 0; *data_size = MPC_FRAME_SIZE * 2 * avctx->channels; return c->cur_frame ? c->last_bits_used >> 3 : buf_size; }"} {"target": 0, "idx": 7579, "func": "DeviceState *qdev_try_create(BusState *bus, const char *name) { DeviceState *dev; if (object_class_by_name(name) == NULL) { return NULL; } dev = DEVICE(object_new(name)); if (!dev) { return NULL; } if (!bus) { bus = sysbus_get_default(); } qdev_set_parent_bus(dev, bus); qdev_prop_set_globals(dev); return dev; }"} {"target": 1, "idx": 7590, "func": "Visitor *qobject_input_visitor_new_keyval(QObject *obj) { QObjectInputVisitor *v = qobject_input_visitor_base_new(obj); v->visitor.type_int64 = qobject_input_type_int64_keyval; v->visitor.type_uint64 = qobject_input_type_uint64_keyval; v->visitor.type_bool = qobject_input_type_bool_keyval; v->visitor.type_str = qobject_input_type_str; v->visitor.type_number = qobject_input_type_number_keyval; v->visitor.type_any = qobject_input_type_any; v->visitor.type_null = qobject_input_type_null; v->visitor.type_size = qobject_input_type_size_keyval; return &v->visitor; }"} {"target": 1, "idx": 7596, "func": "void *pl080_init(uint32_t base, qemu_irq irq, int nchannels) { int iomemtype; pl080_state *s; s = (pl080_state *)qemu_mallocz(sizeof(pl080_state)); iomemtype = cpu_register_io_memory(0, pl080_readfn, pl080_writefn, s); cpu_register_physical_memory(base, 0x00000fff, iomemtype); s->base = base; s->irq = irq; s->nchannels = nchannels; /* ??? Save/restore. 
*/ return s; }"} {"target": 0, "idx": 7598, "func": "static av_cold void init_coef_vlc(VLC *vlc, uint16_t **prun_table, float **plevel_table, uint16_t **pint_table, const CoefVLCTable *vlc_table) { int n = vlc_table->n; const uint8_t *table_bits = vlc_table->huffbits; const uint32_t *table_codes = vlc_table->huffcodes; const uint16_t *levels_table = vlc_table->levels; uint16_t *run_table, *level_table, *int_table; float *flevel_table; int i, l, j, k, level; init_vlc(vlc, VLCBITS, n, table_bits, 1, 1, table_codes, 4, 4, 0); run_table = av_malloc(n * sizeof(uint16_t)); level_table = av_malloc(n * sizeof(uint16_t)); flevel_table = av_malloc(n * sizeof(*flevel_table)); int_table = av_malloc(n * sizeof(uint16_t)); i = 2; level = 1; k = 0; while (i < n) { int_table[k] = i; l = levels_table[k++]; for (j = 0; j < l; j++) { run_table[i] = j; level_table[i] = level; flevel_table[i] = level; i++; } level++; } *prun_table = run_table; *plevel_table = flevel_table; *pint_table = int_table; av_free(level_table); }"} {"target": 0, "idx": 7609, "func": "static inline int parse_nal_units(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t *buf, int buf_size) { H264ParseContext *p = s->priv_data; const uint8_t *buf_end = buf + buf_size; H2645NAL nal = { NULL }; unsigned int pps_id; unsigned int slice_type; int state = -1, got_reset = 0; int field_poc[2]; int ret; /* set some sane default values */ s->pict_type = AV_PICTURE_TYPE_I; s->key_frame = 0; s->picture_structure = AV_PICTURE_STRUCTURE_UNKNOWN; ff_h264_sei_uninit(&p->sei); if (!buf_size) return 0; for (;;) { const SPS *sps; int src_length, consumed; buf = avpriv_find_start_code(buf, buf_end, &state); if (buf >= buf_end) break; --buf; src_length = buf_end - buf; switch (state & 0x1f) { case H264_NAL_SLICE: case H264_NAL_IDR_SLICE: // Do not walk the whole buffer just to decode slice header if ((state & 0x1f) == H264_NAL_IDR_SLICE || ((state >> 5) & 0x3) == 0) { /* IDR or disposable slice * No need to decode many bytes because MMCOs shall not be present. 
*/ if (src_length > 60) src_length = 60; } else { /* To decode up to MMCOs */ if (src_length > 1000) src_length = 1000; } break; } consumed = ff_h2645_extract_rbsp(buf, src_length, &nal); if (consumed < 0) break; ret = init_get_bits(&nal.gb, nal.data, nal.size * 8); if (ret < 0) goto fail; get_bits1(&nal.gb); nal.ref_idc = get_bits(&nal.gb, 2); nal.type = get_bits(&nal.gb, 5); switch (nal.type) { case H264_NAL_SPS: ff_h264_decode_seq_parameter_set(&nal.gb, avctx, &p->ps); break; case H264_NAL_PPS: ff_h264_decode_picture_parameter_set(&nal.gb, avctx, &p->ps, nal.size_bits); break; case H264_NAL_SEI: ff_h264_sei_decode(&p->sei, &nal.gb, &p->ps, avctx); break; case H264_NAL_IDR_SLICE: s->key_frame = 1; p->poc.prev_frame_num = 0; p->poc.prev_frame_num_offset = 0; p->poc.prev_poc_msb = p->poc.prev_poc_lsb = 0; /* fall through */ case H264_NAL_SLICE: get_ue_golomb(&nal.gb); // skip first_mb_in_slice slice_type = get_ue_golomb_31(&nal.gb); s->pict_type = ff_h264_golomb_to_pict_type[slice_type % 5]; if (p->sei.recovery_point.recovery_frame_cnt >= 0) { /* key frame, since recovery_frame_cnt is set */ s->key_frame = 1; } pps_id = get_ue_golomb(&nal.gb); if (pps_id >= MAX_PPS_COUNT) { av_log(avctx, AV_LOG_ERROR, \"pps_id %u out of range\\n\", pps_id); goto fail; } if (!p->ps.pps_list[pps_id]) { av_log(avctx, AV_LOG_ERROR, \"non-existing PPS %u referenced\\n\", pps_id); goto fail; } p->ps.pps = (const PPS*)p->ps.pps_list[pps_id]->data; if (!p->ps.sps_list[p->ps.pps->sps_id]) { av_log(avctx, AV_LOG_ERROR, \"non-existing SPS %u referenced\\n\", p->ps.pps->sps_id); goto fail; } p->ps.sps = (SPS*)p->ps.sps_list[p->ps.pps->sps_id]->data; sps = p->ps.sps; p->poc.frame_num = get_bits(&nal.gb, sps->log2_max_frame_num); s->coded_width = 16 * sps->mb_width; s->coded_height = 16 * sps->mb_height; s->width = s->coded_width - (sps->crop_right + sps->crop_left); s->height = s->coded_height - (sps->crop_top + sps->crop_bottom); if (s->width <= 0 || s->height <= 0) { s->width = s->coded_width; s->height = s->coded_height; } switch (sps->bit_depth_luma) { case 9: if (sps->chroma_format_idc == 3) s->format = AV_PIX_FMT_YUV444P9; else if (sps->chroma_format_idc == 2) s->format = AV_PIX_FMT_YUV422P9; else s->format = AV_PIX_FMT_YUV420P9; break; case 10: if (sps->chroma_format_idc == 3) s->format = AV_PIX_FMT_YUV444P10; else if (sps->chroma_format_idc == 2) s->format = AV_PIX_FMT_YUV422P10; else s->format = AV_PIX_FMT_YUV420P10; break; case 8: if (sps->chroma_format_idc == 3) s->format = AV_PIX_FMT_YUV444P; else if (sps->chroma_format_idc == 2) s->format = AV_PIX_FMT_YUV422P; else s->format = AV_PIX_FMT_YUV420P; break; default: s->format = AV_PIX_FMT_NONE; } avctx->profile = ff_h264_get_profile(sps); avctx->level = sps->level_idc; if (sps->frame_mbs_only_flag) { p->picture_structure = PICT_FRAME; } else { if (get_bits1(&nal.gb)) { // field_pic_flag p->picture_structure = PICT_TOP_FIELD + get_bits1(&nal.gb); // bottom_field_flag } else { p->picture_structure = PICT_FRAME; } } if (nal.type == H264_NAL_IDR_SLICE) get_ue_golomb(&nal.gb); /* idr_pic_id */ if (sps->poc_type == 0) { p->poc.poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb); if (p->ps.pps->pic_order_present == 1 && p->picture_structure == PICT_FRAME) p->poc.delta_poc_bottom = get_se_golomb(&nal.gb); } if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) { p->poc.delta_poc[0] = get_se_golomb(&nal.gb); if (p->ps.pps->pic_order_present == 1 && p->picture_structure == PICT_FRAME) p->poc.delta_poc[1] = get_se_golomb(&nal.gb); } /* Decode POC of this 
picture. * The prev_ values needed for decoding POC of the next picture are not set here. */ field_poc[0] = field_poc[1] = INT_MAX; ff_h264_init_poc(field_poc, &s->output_picture_number, sps, &p->poc, p->picture_structure, nal.ref_idc); /* Continue parsing to check if MMCO_RESET is present. * FIXME: MMCO_RESET could appear in non-first slice. * Maybe, we should parse all undisposable non-IDR slice of this * picture until encountering MMCO_RESET in a slice of it. */ if (nal.ref_idc && nal.type != H264_NAL_IDR_SLICE) { got_reset = scan_mmco_reset(s, &nal.gb, avctx); if (got_reset < 0) goto fail; } /* Set up the prev_ values for decoding POC of the next picture. */ p->poc.prev_frame_num = got_reset ? 0 : p->poc.frame_num; p->poc.prev_frame_num_offset = got_reset ? 0 : p->poc.frame_num_offset; if (nal.ref_idc != 0) { if (!got_reset) { p->poc.prev_poc_msb = p->poc.poc_msb; p->poc.prev_poc_lsb = p->poc.poc_lsb; } else { p->poc.prev_poc_msb = 0; p->poc.prev_poc_lsb = p->picture_structure == PICT_BOTTOM_FIELD ? 0 : field_poc[0]; } } if (sps->pic_struct_present_flag) { switch (p->sei.picture_timing.pic_struct) { case SEI_PIC_STRUCT_TOP_FIELD: case SEI_PIC_STRUCT_BOTTOM_FIELD: s->repeat_pict = 0; break; case SEI_PIC_STRUCT_FRAME: case SEI_PIC_STRUCT_TOP_BOTTOM: case SEI_PIC_STRUCT_BOTTOM_TOP: s->repeat_pict = 1; break; case SEI_PIC_STRUCT_TOP_BOTTOM_TOP: case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM: s->repeat_pict = 2; break; case SEI_PIC_STRUCT_FRAME_DOUBLING: s->repeat_pict = 3; break; case SEI_PIC_STRUCT_FRAME_TRIPLING: s->repeat_pict = 5; break; default: s->repeat_pict = p->picture_structure == PICT_FRAME ? 1 : 0; break; } } else { s->repeat_pict = p->picture_structure == PICT_FRAME ? 1 : 0; } if (p->picture_structure == PICT_FRAME) { s->picture_structure = AV_PICTURE_STRUCTURE_FRAME; if (sps->pic_struct_present_flag) { switch (p->sei.picture_timing.pic_struct) { case SEI_PIC_STRUCT_TOP_BOTTOM: case SEI_PIC_STRUCT_TOP_BOTTOM_TOP: s->field_order = AV_FIELD_TT; break; case SEI_PIC_STRUCT_BOTTOM_TOP: case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM: s->field_order = AV_FIELD_BB; break; default: s->field_order = AV_FIELD_PROGRESSIVE; break; } } else { if (field_poc[0] < field_poc[1]) s->field_order = AV_FIELD_TT; else if (field_poc[0] > field_poc[1]) s->field_order = AV_FIELD_BB; else s->field_order = AV_FIELD_PROGRESSIVE; } } else { if (p->picture_structure == PICT_TOP_FIELD) s->picture_structure = AV_PICTURE_STRUCTURE_TOP_FIELD; else s->picture_structure = AV_PICTURE_STRUCTURE_BOTTOM_FIELD; s->field_order = AV_FIELD_UNKNOWN; } av_freep(&nal.rbsp_buffer); return 0; /* no need to evaluate the rest */ } buf += consumed; } /* didn't find a picture! 
*/ av_log(avctx, AV_LOG_ERROR, \"missing picture in access unit\\n\"); fail: av_freep(&nal.rbsp_buffer); return -1; }"} {"target": 1, "idx": 7622, "func": "static av_always_inline void FUNC(intra_pred)(HEVCContext *s, int x0, int y0, int log2_size, int c_idx) { #define PU(x) \\ ((x) >> s->ps.sps->log2_min_pu_size) #define MVF(x, y) \\ (s->ref->tab_mvf[(x) + (y) * min_pu_width]) #define MVF_PU(x, y) \\ MVF(PU(x0 + ((x) << hshift)), PU(y0 + ((y) << vshift))) #define IS_INTRA(x, y) \\ (MVF_PU(x, y).pred_flag == PF_INTRA) #define MIN_TB_ADDR_ZS(x, y) \\ s->ps.pps->min_tb_addr_zs[(y) * (s->ps.sps->tb_mask+2) + (x)] #define EXTEND(ptr, val, len) \\ do { \\ pixel4 pix = PIXEL_SPLAT_X4(val); \\ for (i = 0; i < (len); i += 4) \\ AV_WN4P(ptr + i, pix); \\ } while (0) #define EXTEND_RIGHT_CIP(ptr, start, length) \\ for (i = start; i < (start) + (length); i += 4) \\ if (!IS_INTRA(i, -1)) \\ AV_WN4P(&ptr[i], a); \\ else \\ a = PIXEL_SPLAT_X4(ptr[i+3]) #define EXTEND_LEFT_CIP(ptr, start, length) \\ for (i = start; i > (start) - (length); i--) \\ if (!IS_INTRA(i - 1, -1)) \\ ptr[i - 1] = ptr[i] #define EXTEND_UP_CIP(ptr, start, length) \\ for (i = (start); i > (start) - (length); i -= 4) \\ if (!IS_INTRA(-1, i - 3)) \\ AV_WN4P(&ptr[i - 3], a); \\ else \\ a = PIXEL_SPLAT_X4(ptr[i - 3]) #define EXTEND_DOWN_CIP(ptr, start, length) \\ for (i = start; i < (start) + (length); i += 4) \\ if (!IS_INTRA(-1, i)) \\ AV_WN4P(&ptr[i], a); \\ else \\ a = PIXEL_SPLAT_X4(ptr[i + 3]) HEVCLocalContext *lc = s->HEVClc; int i; int hshift = s->ps.sps->hshift[c_idx]; int vshift = s->ps.sps->vshift[c_idx]; int size = (1 << log2_size); int size_in_luma_h = size << hshift; int size_in_tbs_h = size_in_luma_h >> s->ps.sps->log2_min_tb_size; int size_in_luma_v = size << vshift; int size_in_tbs_v = size_in_luma_v >> s->ps.sps->log2_min_tb_size; int x = x0 >> hshift; int y = y0 >> vshift; int x_tb = (x0 >> s->ps.sps->log2_min_tb_size) & s->ps.sps->tb_mask; int y_tb = (y0 >> s->ps.sps->log2_min_tb_size) & s->ps.sps->tb_mask; int cur_tb_addr = MIN_TB_ADDR_ZS(x_tb, y_tb); ptrdiff_t stride = s->frame->linesize[c_idx] / sizeof(pixel); pixel *src = (pixel*)s->frame->data[c_idx] + x + y * stride; int min_pu_width = s->ps.sps->min_pu_width; enum IntraPredMode mode = c_idx ? 
lc->tu.intra_pred_mode_c : lc->tu.intra_pred_mode; pixel4 a; pixel left_array[2 * MAX_TB_SIZE + 1]; pixel filtered_left_array[2 * MAX_TB_SIZE + 1]; pixel top_array[2 * MAX_TB_SIZE + 1]; pixel filtered_top_array[2 * MAX_TB_SIZE + 1]; pixel *left = left_array + 1; pixel *top = top_array + 1; pixel *filtered_left = filtered_left_array + 1; pixel *filtered_top = filtered_top_array + 1; int cand_bottom_left = lc->na.cand_bottom_left && cur_tb_addr > MIN_TB_ADDR_ZS( x_tb - 1, (y_tb + size_in_tbs_v) & s->ps.sps->tb_mask); int cand_left = lc->na.cand_left; int cand_up_left = lc->na.cand_up_left; int cand_up = lc->na.cand_up; int cand_up_right = lc->na.cand_up_right && cur_tb_addr > MIN_TB_ADDR_ZS((x_tb + size_in_tbs_h) & s->ps.sps->tb_mask, y_tb - 1); int bottom_left_size = (FFMIN(y0 + 2 * size_in_luma_v, s->ps.sps->height) - (y0 + size_in_luma_v)) >> vshift; int top_right_size = (FFMIN(x0 + 2 * size_in_luma_h, s->ps.sps->width) - (x0 + size_in_luma_h)) >> hshift; if (s->ps.pps->constrained_intra_pred_flag == 1) { int size_in_luma_pu_v = PU(size_in_luma_v); int size_in_luma_pu_h = PU(size_in_luma_h); int on_pu_edge_x = !av_mod_uintp2(x0, s->ps.sps->log2_min_pu_size); int on_pu_edge_y = !av_mod_uintp2(y0, s->ps.sps->log2_min_pu_size); if (!size_in_luma_pu_h) size_in_luma_pu_h++; if (cand_bottom_left == 1 && on_pu_edge_x) { int x_left_pu = PU(x0 - 1); int y_bottom_pu = PU(y0 + size_in_luma_v); int max = FFMIN(size_in_luma_pu_v, s->ps.sps->min_pu_height - y_bottom_pu); cand_bottom_left = 0; for (i = 0; i < max; i += 2) cand_bottom_left |= (MVF(x_left_pu, y_bottom_pu + i).pred_flag == PF_INTRA); } if (cand_left == 1 && on_pu_edge_x) { int x_left_pu = PU(x0 - 1); int y_left_pu = PU(y0); int max = FFMIN(size_in_luma_pu_v, s->ps.sps->min_pu_height - y_left_pu); cand_left = 0; for (i = 0; i < max; i += 2) cand_left |= (MVF(x_left_pu, y_left_pu + i).pred_flag == PF_INTRA); } if (cand_up_left == 1) { int x_left_pu = PU(x0 - 1); int y_top_pu = PU(y0 - 1); cand_up_left = MVF(x_left_pu, y_top_pu).pred_flag == PF_INTRA; } if (cand_up == 1 && on_pu_edge_y) { int x_top_pu = PU(x0); int y_top_pu = PU(y0 - 1); int max = FFMIN(size_in_luma_pu_h, s->ps.sps->min_pu_width - x_top_pu); cand_up = 0; for (i = 0; i < max; i += 2) cand_up |= (MVF(x_top_pu + i, y_top_pu).pred_flag == PF_INTRA); } if (cand_up_right == 1 && on_pu_edge_y) { int y_top_pu = PU(y0 - 1); int x_right_pu = PU(x0 + size_in_luma_h); int max = FFMIN(size_in_luma_pu_h, s->ps.sps->min_pu_width - x_right_pu); cand_up_right = 0; for (i = 0; i < max; i += 2) cand_up_right |= (MVF(x_right_pu + i, y_top_pu).pred_flag == PF_INTRA); } memset(left, 128, 2 * MAX_TB_SIZE*sizeof(pixel)); memset(top , 128, 2 * MAX_TB_SIZE*sizeof(pixel)); top[-1] = 128; } if (cand_up_left) { left[-1] = POS(-1, -1); top[-1] = left[-1]; } if (cand_up) memcpy(top, src - stride, size * sizeof(pixel)); if (cand_up_right) { memcpy(top + size, src - stride + size, size * sizeof(pixel)); EXTEND(top + size + top_right_size, POS(size + top_right_size - 1, -1), size - top_right_size); } if (cand_left) for (i = 0; i < size; i++) left[i] = POS(-1, i); if (cand_bottom_left) { for (i = size; i < size + bottom_left_size; i++) left[i] = POS(-1, i); EXTEND(left + size + bottom_left_size, POS(-1, size + bottom_left_size - 1), size - bottom_left_size); } if (s->ps.pps->constrained_intra_pred_flag == 1) { if (cand_bottom_left || cand_left || cand_up_left || cand_up || cand_up_right) { int size_max_x = x0 + ((2 * size) << hshift) < s->ps.sps->width ? 
2 * size : (s->ps.sps->width - x0) >> hshift; int size_max_y = y0 + ((2 * size) << vshift) < s->ps.sps->height ? 2 * size : (s->ps.sps->height - y0) >> vshift; int j = size + (cand_bottom_left? bottom_left_size: 0) -1; if (!cand_up_right) { size_max_x = x0 + ((size) << hshift) < s->ps.sps->width ? size : (s->ps.sps->width - x0) >> hshift; } if (!cand_bottom_left) { size_max_y = y0 + (( size) << vshift) < s->ps.sps->height ? size : (s->ps.sps->height - y0) >> vshift; } if (cand_bottom_left || cand_left || cand_up_left) { while (j > -1 && !IS_INTRA(-1, j)) j--; if (!IS_INTRA(-1, j)) { j = 0; while (j < size_max_x && !IS_INTRA(j, -1)) j++; EXTEND_LEFT_CIP(top, j, j + 1); left[-1] = top[-1]; } } else { j = 0; while (j < size_max_x && !IS_INTRA(j, -1)) j++; if (j > 0) if (x0 > 0) { EXTEND_LEFT_CIP(top, j, j + 1); } else { EXTEND_LEFT_CIP(top, j, j); top[-1] = top[0]; } left[-1] = top[-1]; } left[-1] = top[-1]; if (cand_bottom_left || cand_left) { a = PIXEL_SPLAT_X4(left[-1]); EXTEND_DOWN_CIP(left, 0, size_max_y); } if (!cand_left) EXTEND(left, left[-1], size); if (!cand_bottom_left) EXTEND(left + size, left[size - 1], size); if (x0 != 0 && y0 != 0) { a = PIXEL_SPLAT_X4(left[size_max_y - 1]); EXTEND_UP_CIP(left, size_max_y - 1, size_max_y); if (!IS_INTRA(-1, - 1)) left[-1] = left[0]; } else if (x0 == 0) { EXTEND(left, 0, size_max_y); } else { a = PIXEL_SPLAT_X4(left[size_max_y - 1]); EXTEND_UP_CIP(left, size_max_y - 1, size_max_y); } top[-1] = left[-1]; if (y0 != 0) { a = PIXEL_SPLAT_X4(left[-1]); EXTEND_RIGHT_CIP(top, 0, size_max_x); } } } // Infer the unavailable samples if (!cand_bottom_left) { if (cand_left) { EXTEND(left + size, left[size - 1], size); } else if (cand_up_left) { EXTEND(left, left[-1], 2 * size); cand_left = 1; } else if (cand_up) { left[-1] = top[0]; EXTEND(left, left[-1], 2 * size); cand_up_left = 1; cand_left = 1; } else if (cand_up_right) { EXTEND(top, top[size], size); left[-1] = top[size]; EXTEND(left, left[-1], 2 * size); cand_up = 1; cand_up_left = 1; cand_left = 1; } else { // No samples available left[-1] = (1 << (BIT_DEPTH - 1)); EXTEND(top, left[-1], 2 * size); EXTEND(left, left[-1], 2 * size); } } if (!cand_left) EXTEND(left, left[size], size); if (!cand_up_left) { left[-1] = left[0]; } if (!cand_up) EXTEND(top, left[-1], size); if (!cand_up_right) EXTEND(top + size, top[size - 1], size); top[-1] = left[-1]; // Filtering process if (!s->ps.sps->intra_smoothing_disabled_flag && (c_idx == 0 || s->ps.sps->chroma_format_idc == 3)) { if (mode != INTRA_DC && size != 4){ int intra_hor_ver_dist_thresh[] = { 7, 1, 0 }; int min_dist_vert_hor = FFMIN(FFABS((int)(mode - 26U)), FFABS((int)(mode - 10U))); if (min_dist_vert_hor > intra_hor_ver_dist_thresh[log2_size - 3]) { int threshold = 1 << (BIT_DEPTH - 5); if (s->ps.sps->sps_strong_intra_smoothing_enable_flag && c_idx == 0 && log2_size == 5 && FFABS(top[-1] + top[63] - 2 * top[31]) < threshold && FFABS(left[-1] + left[63] - 2 * left[31]) < threshold) { // We can't just overwrite values in top because it could be // a pointer into src filtered_top[-1] = top[-1]; filtered_top[63] = top[63]; for (i = 0; i < 63; i++) filtered_top[i] = ((64 - (i + 1)) * top[-1] + (i + 1) * top[63] + 32) >> 6; for (i = 0; i < 63; i++) left[i] = ((64 - (i + 1)) * left[-1] + (i + 1) * left[63] + 32) >> 6; top = filtered_top; } else { filtered_left[2 * size - 1] = left[2 * size - 1]; filtered_top[2 * size - 1] = top[2 * size - 1]; for (i = 2 * size - 2; i >= 0; i--) filtered_left[i] = (left[i + 1] + 2 * left[i] + left[i - 1] + 2) >> 2; 
filtered_top[-1] = filtered_left[-1] = (left[0] + 2 * left[-1] + top[0] + 2) >> 2; for (i = 2 * size - 2; i >= 0; i--) filtered_top[i] = (top[i + 1] + 2 * top[i] + top[i - 1] + 2) >> 2; left = filtered_left; top = filtered_top; } } } } switch (mode) { case INTRA_PLANAR: s->hpc.pred_planar[log2_size - 2]((uint8_t *)src, (uint8_t *)top, (uint8_t *)left, stride); break; case INTRA_DC: s->hpc.pred_dc((uint8_t *)src, (uint8_t *)top, (uint8_t *)left, stride, log2_size, c_idx); break; default: s->hpc.pred_angular[log2_size - 2]((uint8_t *)src, (uint8_t *)top, (uint8_t *)left, stride, c_idx, mode); break; } }"} {"target": 0, "idx": 7649, "func": "static void end_last_frame(AVFilterContext *ctx) { TileContext *tile = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; AVFilterBufferRef *out_buf = outlink->out_buf; outlink->out_buf = NULL; ff_start_frame(outlink, out_buf); while (tile->current < tile->nb_frames) draw_blank_frame(ctx, out_buf); ff_draw_slice(outlink, 0, out_buf->video->h, 1); ff_end_frame(outlink); tile->current = 0; }"} {"target": 0, "idx": 7651, "func": "void av_register_output_format(AVOutputFormat *format) { AVOutputFormat **p = &first_oformat; while (*p != NULL) p = &(*p)->next; *p = format; format->next = NULL; }"} {"target": 1, "idx": 7654, "func": "static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx) { int op1; int32_t r1, r2, r3; int32_t address, const16; int8_t b, const4; int32_t bpos; TCGv temp, temp2, temp3; op1 = MASK_OP_MAJOR(ctx->opcode); /* handle JNZ.T opcode only being 7 bit long */ if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) { op1 = OPCM_32_BRN_JTT; } switch (op1) { /* ABS-format */ case OPCM_32_ABS_LDW: decode_abs_ldw(env, ctx); case OPCM_32_ABS_LDB: decode_abs_ldb(env, ctx); case OPCM_32_ABS_LDMST_SWAP: decode_abs_ldst_swap(env, ctx); case OPCM_32_ABS_LDST_CONTEXT: decode_abs_ldst_context(env, ctx); case OPCM_32_ABS_STORE: decode_abs_store(env, ctx); case OPCM_32_ABS_STOREB_H: decode_abs_storeb_h(env, ctx); case OPC1_32_ABS_STOREQ: address = MASK_OP_ABS_OFF18(ctx->opcode); r1 = MASK_OP_ABS_S1D(ctx->opcode); temp = tcg_const_i32(EA_ABS_FORMAT(address)); temp2 = tcg_temp_new(); tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16); tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW); tcg_temp_free(temp2); tcg_temp_free(temp); case OPC1_32_ABS_LD_Q: address = MASK_OP_ABS_OFF18(ctx->opcode); r1 = MASK_OP_ABS_S1D(ctx->opcode); temp = tcg_const_i32(EA_ABS_FORMAT(address)); tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); tcg_temp_free(temp); case OPC1_32_ABS_LEA: address = MASK_OP_ABS_OFF18(ctx->opcode); r1 = MASK_OP_ABS_S1D(ctx->opcode); tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address)); /* ABSB-format */ case OPC1_32_ABSB_ST_T: address = MASK_OP_ABS_OFF18(ctx->opcode); b = MASK_OP_ABSB_B(ctx->opcode); bpos = MASK_OP_ABSB_BPOS(ctx->opcode); temp = tcg_const_i32(EA_ABS_FORMAT(address)); temp2 = tcg_temp_new(); tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB); tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos)); tcg_gen_ori_tl(temp2, temp2, (b << bpos)); tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB); tcg_temp_free(temp); tcg_temp_free(temp2); /* B-format */ case OPC1_32_B_CALL: case OPC1_32_B_CALLA: case OPC1_32_B_J: case OPC1_32_B_JA: case OPC1_32_B_JL: case OPC1_32_B_JLA: address = MASK_OP_B_DISP24(ctx->opcode); gen_compute_branch(ctx, op1, 0, 0, 0, address); /* Bit-format */ case OPCM_32_BIT_ANDACC: decode_bit_andacc(env, ctx); case OPCM_32_BIT_LOGICAL_T1: 
decode_bit_logical_t(env, ctx); case OPCM_32_BIT_INSERT: decode_bit_insert(env, ctx); case OPCM_32_BIT_LOGICAL_T2: decode_bit_logical_t2(env, ctx); case OPCM_32_BIT_ORAND: decode_bit_orand(env, ctx); case OPCM_32_BIT_SH_LOGIC1: decode_bit_sh_logic1(env, ctx); case OPCM_32_BIT_SH_LOGIC2: decode_bit_sh_logic2(env, ctx); /* BO Format */ case OPCM_32_BO_ADDRMODE_POST_PRE_BASE: decode_bo_addrmode_post_pre_base(env, ctx); case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR: decode_bo_addrmode_bitreverse_circular(env, ctx); case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE: decode_bo_addrmode_ld_post_pre_base(env, ctx); case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR: decode_bo_addrmode_ld_bitreverse_circular(env, ctx); case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE: decode_bo_addrmode_stctx_post_pre_base(env, ctx); case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR: decode_bo_addrmode_ldmst_bitreverse_circular(env, ctx); /* BOL-format */ case OPC1_32_BOL_LD_A_LONGOFF: case OPC1_32_BOL_LD_W_LONGOFF: case OPC1_32_BOL_LEA_LONGOFF: case OPC1_32_BOL_ST_W_LONGOFF: case OPC1_32_BOL_ST_A_LONGOFF: decode_bol_opc(env, ctx, op1); /* BRC Format */ case OPCM_32_BRC_EQ_NEQ: case OPCM_32_BRC_GE: case OPCM_32_BRC_JLT: case OPCM_32_BRC_JNE: const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode); address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode); r1 = MASK_OP_BRC_S1(ctx->opcode); gen_compute_branch(ctx, op1, r1, 0, const4, address); /* BRN Format */ case OPCM_32_BRN_JTT: address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode); r1 = MASK_OP_BRN_S1(ctx->opcode); gen_compute_branch(ctx, op1, r1, 0, 0, address); /* BRR Format */ case OPCM_32_BRR_EQ_NEQ: case OPCM_32_BRR_ADDR_EQ_NEQ: case OPCM_32_BRR_GE: case OPCM_32_BRR_JLT: case OPCM_32_BRR_JNE: case OPCM_32_BRR_JNZ: case OPCM_32_BRR_LOOP: address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode); r2 = MASK_OP_BRR_S2(ctx->opcode); r1 = MASK_OP_BRR_S1(ctx->opcode); gen_compute_branch(ctx, op1, r1, r2, 0, address); /* RC Format */ case OPCM_32_RC_LOGICAL_SHIFT: decode_rc_logical_shift(env, ctx); case OPCM_32_RC_ACCUMULATOR: decode_rc_accumulator(env, ctx); case OPCM_32_RC_SERVICEROUTINE: decode_rc_serviceroutine(env, ctx); case OPCM_32_RC_MUL: decode_rc_mul(env, ctx); /* RCPW Format */ case OPCM_32_RCPW_MASK_INSERT: decode_rcpw_insert(env, ctx); /* RCRR Format */ case OPC1_32_RCRR_INSERT: r1 = MASK_OP_RCRR_S1(ctx->opcode); r2 = MASK_OP_RCRR_S3(ctx->opcode); r3 = MASK_OP_RCRR_D(ctx->opcode); const16 = MASK_OP_RCRR_CONST4(ctx->opcode); temp = tcg_const_i32(const16); temp2 = tcg_temp_new(); /* width*/ temp3 = tcg_temp_new(); /* pos */ tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f); tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f); gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3); tcg_temp_free(temp); tcg_temp_free(temp2); tcg_temp_free(temp3); /* RCRW Format */ case OPCM_32_RCRW_MASK_INSERT: decode_rcrw_insert(env, ctx); /* RCR Format */ case OPCM_32_RCR_COND_SELECT: decode_rcr_cond_select(env, ctx); case OPCM_32_RCR_MADD: decode_rcr_madd(env, ctx); case OPCM_32_RCR_MSUB: decode_rcr_msub(env, ctx); /* RLC Format */ case OPC1_32_RLC_ADDI: case OPC1_32_RLC_ADDIH: case OPC1_32_RLC_ADDIH_A: case OPC1_32_RLC_MFCR: case OPC1_32_RLC_MOV: case OPC1_32_RLC_MOV_64: case OPC1_32_RLC_MOV_U: case OPC1_32_RLC_MOV_H: case OPC1_32_RLC_MOVH_A: case OPC1_32_RLC_MTCR: decode_rlc_opc(env, ctx, op1); } }"} {"target": 0, "idx": 7659, "func": "AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms) { AVFilterBufferRef *picref = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize, 
perms, frame->width, frame->height, frame->format); if (!picref) return NULL; avfilter_copy_frame_props(picref, frame); return picref; }"} {"target": 1, "idx": 7662, "func": "void *g_realloc(void *ptr, size_t size) { size_t old_size, copy; void *new_ptr; if (!ptr) return g_malloc(size); old_size = *(size_t *)((char *)ptr - 16); copy = old_size < size ? old_size : size; new_ptr = g_malloc(size); memcpy(new_ptr, ptr, copy); g_free(ptr); return new_ptr; }"} {"target": 1, "idx": 7666, "func": "static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc) { AVStream *st; OutputStream *ost; AVCodecContext *audio_enc; ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO); st = ost->st; audio_enc = st->codec; audio_enc->codec_type = AVMEDIA_TYPE_AUDIO; if (!ost->stream_copy) { char *sample_fmt = NULL; MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st); MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st); if (sample_fmt && (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) { av_log(NULL, AV_LOG_FATAL, \"Invalid sample format '%s'\\n\", sample_fmt); exit_program(1); } MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st); } return ost; }"} {"target": 1, "idx": 7670, "func": "static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AVFrame *const p = data; int compressed, xmin, ymin, xmax, ymax; unsigned int w, h, bits_per_pixel, bytes_per_line, nplanes, stride, y, x, bytes_per_scanline; uint8_t *ptr; const uint8_t *buf_end = buf + buf_size; const uint8_t *bufstart = buf; uint8_t *scanline; int ret = -1; if (buf[0] != 0x0a || buf[1] > 5) { av_log(avctx, AV_LOG_ERROR, \"this is not PCX encoded data\\n\"); compressed = buf[2]; xmin = AV_RL16(buf + 4); ymin = AV_RL16(buf + 6); xmax = AV_RL16(buf + 8); ymax = AV_RL16(buf + 10); if (xmax < xmin || ymax < ymin) { av_log(avctx, AV_LOG_ERROR, \"invalid image dimensions\\n\"); w = xmax - xmin + 1; h = ymax - ymin + 1; bits_per_pixel = buf[3]; bytes_per_line = AV_RL16(buf + 66); nplanes = buf[65]; bytes_per_scanline = nplanes * bytes_per_line; if (bytes_per_scanline < (w * bits_per_pixel * nplanes + 7) / 8 || (!compressed && bytes_per_scanline > buf_size / h)) { av_log(avctx, AV_LOG_ERROR, \"PCX data is corrupted\\n\"); switch ((nplanes << 8) + bits_per_pixel) { case 0x0308: avctx->pix_fmt = AV_PIX_FMT_RGB24; break; case 0x0108: case 0x0104: case 0x0102: case 0x0101: case 0x0401: case 0x0301: case 0x0201: avctx->pix_fmt = AV_PIX_FMT_PAL8; break; default: av_log(avctx, AV_LOG_ERROR, \"invalid PCX file\\n\"); buf += 128; if ((ret = ff_set_dimensions(avctx, w, h)) < 0) return ret; if ((ret = ff_get_buffer(avctx, p, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; p->pict_type = AV_PICTURE_TYPE_I; ptr = p->data[0]; stride = p->linesize[0]; scanline = av_malloc(bytes_per_scanline + AV_INPUT_BUFFER_PADDING_SIZE); if (!scanline) return AVERROR(ENOMEM); if (nplanes == 3 && bits_per_pixel == 8) { for (y = 0; y < h; y++) { buf = pcx_rle_decode(buf, buf_end, scanline, bytes_per_scanline, compressed); for (x = 0; x < w; x++) { ptr[3 * x] = scanline[x]; ptr[3 * x + 1] = scanline[x + bytes_per_line]; ptr[3 * x + 2] = scanline[x + (bytes_per_line << 1)]; ptr += stride; } else if (nplanes == 1 && bits_per_pixel == 8) { const uint8_t *palstart = bufstart + buf_size - 769; if (buf_size < 769) { av_log(avctx, AV_LOG_ERROR, \"File is too short\\n\"); ret = 
avctx->err_recognition & AV_EF_EXPLODE ? AVERROR_INVALIDDATA : buf_size; goto end; for (y = 0; y < h; y++, ptr += stride) { buf = pcx_rle_decode(buf, buf_end, scanline, bytes_per_scanline, compressed); memcpy(ptr, scanline, w); if (buf != palstart) { av_log(avctx, AV_LOG_WARNING, \"image data possibly corrupted\\n\"); buf = palstart; if (*buf++ != 12) { av_log(avctx, AV_LOG_ERROR, \"expected palette after image data\\n\"); ret = avctx->err_recognition & AV_EF_EXPLODE ? AVERROR_INVALIDDATA : buf_size; goto end; } else if (nplanes == 1) { /* all packed formats, max. 16 colors */ GetBitContext s; for (y = 0; y < h; y++) { init_get_bits(&s, scanline, bytes_per_scanline << 3); buf = pcx_rle_decode(buf, buf_end, scanline, bytes_per_scanline, compressed); for (x = 0; x < w; x++) ptr[x] = get_bits(&s, bits_per_pixel); ptr += stride; } else { /* planar, 4, 8 or 16 colors */ int i; for (y = 0; y < h; y++) { buf = pcx_rle_decode(buf, buf_end, scanline, bytes_per_scanline, compressed); for (x = 0; x < w; x++) { int m = 0x80 >> (x & 7), v = 0; for (i = nplanes - 1; i >= 0; i--) { v <<= 1; v += !!(scanline[i * bytes_per_line + (x >> 3)] & m); ptr[x] = v; ptr += stride; if (nplanes == 1 && bits_per_pixel == 8) { pcx_palette(&buf, (uint32_t *)p->data[1], 256); } else if (bits_per_pixel < 8) { const uint8_t *palette = bufstart + 16; pcx_palette(&palette, (uint32_t *)p->data[1], 16); *got_frame = 1; ret = buf - bufstart; end: av_free(scanline); return ret;"} {"target": 1, "idx": 7696, "func": "static void ioport_write(void *opaque, uint32_t addr, uint32_t val) { PCIQXLDevice *d = opaque; uint32_t io_port = addr - d->io_base; switch (io_port) { case QXL_IO_RESET: case QXL_IO_SET_MODE: case QXL_IO_MEMSLOT_ADD: case QXL_IO_MEMSLOT_DEL: case QXL_IO_CREATE_PRIMARY: break; default: if (d->mode == QXL_MODE_NATIVE || d->mode == QXL_MODE_COMPAT) break; dprint(d, 1, \"%s: unexpected port 0x%x in vga mode\\n\", __FUNCTION__, io_port); return; } switch (io_port) { case QXL_IO_UPDATE_AREA: { QXLRect update = d->ram->update_area; qemu_mutex_unlock_iothread(); d->ssd.worker->update_area(d->ssd.worker, d->ram->update_surface, &update, NULL, 0, 0); qemu_mutex_lock_iothread(); break; } case QXL_IO_NOTIFY_CMD: d->ssd.worker->wakeup(d->ssd.worker); break; case QXL_IO_NOTIFY_CURSOR: d->ssd.worker->wakeup(d->ssd.worker); break; case QXL_IO_UPDATE_IRQ: qxl_set_irq(d); break; case QXL_IO_NOTIFY_OOM: if (!SPICE_RING_IS_EMPTY(&d->ram->release_ring)) { break; } pthread_yield(); if (!SPICE_RING_IS_EMPTY(&d->ram->release_ring)) { break; } d->oom_running = 1; d->ssd.worker->oom(d->ssd.worker); d->oom_running = 0; break; case QXL_IO_SET_MODE: dprint(d, 1, \"QXL_SET_MODE %d\\n\", val); qxl_set_mode(d, val, 0); break; case QXL_IO_LOG: if (d->guestdebug) { fprintf(stderr, \"qxl/guest: %s\", d->ram->log_buf); } break; case QXL_IO_RESET: dprint(d, 1, \"QXL_IO_RESET\\n\"); qxl_hard_reset(d, 0); break; case QXL_IO_MEMSLOT_ADD: PANIC_ON(val >= NUM_MEMSLOTS); PANIC_ON(d->guest_slots[val].active); d->guest_slots[val].slot = d->ram->mem_slot; qxl_add_memslot(d, val, 0); break; case QXL_IO_MEMSLOT_DEL: qxl_del_memslot(d, val); break; case QXL_IO_CREATE_PRIMARY: PANIC_ON(val != 0); dprint(d, 1, \"QXL_IO_CREATE_PRIMARY\\n\"); d->guest_primary.surface = d->ram->create_surface; qxl_create_guest_primary(d, 0); break; case QXL_IO_DESTROY_PRIMARY: PANIC_ON(val != 0); dprint(d, 1, \"QXL_IO_DESTROY_PRIMARY\\n\"); qxl_destroy_primary(d); break; case QXL_IO_DESTROY_SURFACE_WAIT: d->ssd.worker->destroy_surface_wait(d->ssd.worker, val); break; case 
QXL_IO_DESTROY_ALL_SURFACES: d->ssd.worker->destroy_surfaces(d->ssd.worker); break; default: fprintf(stderr, \"%s: ioport=0x%x, abort()\\n\", __FUNCTION__, io_port); abort(); } }"} {"target": 0, "idx": 7716, "func": "static int h264_slice_header_parse(H264Context *h, H264SliceContext *sl) { const SPS *sps; const PPS *pps; unsigned int first_mb_in_slice; unsigned int pps_id; int ret; unsigned int slice_type, tmp, i; int last_pic_structure, last_pic_droppable; int needs_reinit = 0; int field_pic_flag, bottom_field_flag; int frame_num, droppable, picture_structure; int mb_aff_frame = 0; first_mb_in_slice = get_ue_golomb(&sl->gb); if (first_mb_in_slice == 0) { // FIXME better field boundary detection if (h->current_slice && h->cur_pic_ptr && FIELD_PICTURE(h)) { ff_h264_field_end(h, sl, 1); } h->current_slice = 0; if (!h->first_field) { if (h->cur_pic_ptr && !h->droppable) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure == PICT_BOTTOM_FIELD); } h->cur_pic_ptr = NULL; } } slice_type = get_ue_golomb_31(&sl->gb); if (slice_type > 9) { av_log(h->avctx, AV_LOG_ERROR, \"slice type %d too large at %d\\n\", slice_type, first_mb_in_slice); return AVERROR_INVALIDDATA; } if (slice_type > 4) { slice_type -= 5; sl->slice_type_fixed = 1; } else sl->slice_type_fixed = 0; slice_type = ff_h264_golomb_to_pict_type[slice_type]; sl->slice_type = slice_type; sl->slice_type_nos = slice_type & 3; if (h->nal_unit_type == NAL_IDR_SLICE && sl->slice_type_nos != AV_PICTURE_TYPE_I) { av_log(h->avctx, AV_LOG_ERROR, \"A non-intra slice in an IDR NAL unit.\\n\"); return AVERROR_INVALIDDATA; } pps_id = get_ue_golomb(&sl->gb); if (pps_id >= MAX_PPS_COUNT) { av_log(h->avctx, AV_LOG_ERROR, \"pps_id %u out of range\\n\", pps_id); return AVERROR_INVALIDDATA; } if (!h->ps.pps_list[pps_id]) { av_log(h->avctx, AV_LOG_ERROR, \"non-existing PPS %u referenced\\n\", pps_id); return AVERROR_INVALIDDATA; } if (!h->setup_finished) { h->ps.pps = (const PPS*)h->ps.pps_list[pps_id]->data; } else if (h->ps.pps != (const PPS*)h->ps.pps_list[pps_id]->data) { av_log(h->avctx, AV_LOG_ERROR, \"PPS changed between slices\\n\"); return AVERROR_INVALIDDATA; } if (!h->ps.sps_list[h->ps.pps->sps_id]) { av_log(h->avctx, AV_LOG_ERROR, \"non-existing SPS %u referenced\\n\", h->ps.pps->sps_id); return AVERROR_INVALIDDATA; } if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data) { h->ps.sps = (SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data; if (h->bit_depth_luma != h->ps.sps->bit_depth_luma || h->chroma_format_idc != h->ps.sps->chroma_format_idc) needs_reinit = 1; } pps = h->ps.pps; sps = h->ps.sps; if (!h->setup_finished) { h->avctx->profile = ff_h264_get_profile(sps); h->avctx->level = sps->level_idc; h->avctx->refs = sps->ref_frame_count; if (h->mb_width != sps->mb_width || h->mb_height != sps->mb_height * (2 - sps->frame_mbs_only_flag)) needs_reinit = 1; h->mb_width = sps->mb_width; h->mb_height = sps->mb_height * (2 - sps->frame_mbs_only_flag); h->mb_num = h->mb_width * h->mb_height; h->mb_stride = h->mb_width + 1; h->b_stride = h->mb_width * 4; h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p h->width = 16 * h->mb_width; h->height = 16 * h->mb_height; ret = init_dimensions(h); if (ret < 0) return ret; if (sps->video_signal_type_present_flag) { h->avctx->color_range = sps->full_range ? 
AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; if (sps->colour_description_present_flag) { if (h->avctx->colorspace != sps->colorspace) needs_reinit = 1; h->avctx->color_primaries = sps->color_primaries; h->avctx->color_trc = sps->color_trc; h->avctx->colorspace = sps->colorspace; } } } if (h->context_initialized && needs_reinit) { h->context_initialized = 0; if (sl != h->slice_ctx) { av_log(h->avctx, AV_LOG_ERROR, \"changing width %d -> %d / height %d -> %d on \" \"slice %d\\n\", h->width, h->avctx->coded_width, h->height, h->avctx->coded_height, h->current_slice + 1); return AVERROR_INVALIDDATA; } ff_h264_flush_change(h); if ((ret = get_pixel_format(h)) < 0) return ret; h->avctx->pix_fmt = ret; av_log(h->avctx, AV_LOG_INFO, \"Reinit context to %dx%d, \" \"pix_fmt: %d\\n\", h->width, h->height, h->avctx->pix_fmt); if ((ret = h264_slice_header_init(h)) < 0) { av_log(h->avctx, AV_LOG_ERROR, \"h264_slice_header_init() failed\\n\"); return ret; } } if (!h->context_initialized) { if (sl != h->slice_ctx) { av_log(h->avctx, AV_LOG_ERROR, \"Cannot (re-)initialize context during parallel decoding.\\n\"); return AVERROR_PATCHWELCOME; } if ((ret = get_pixel_format(h)) < 0) return ret; h->avctx->pix_fmt = ret; if ((ret = h264_slice_header_init(h)) < 0) { av_log(h->avctx, AV_LOG_ERROR, \"h264_slice_header_init() failed\\n\"); return ret; } } frame_num = get_bits(&sl->gb, sps->log2_max_frame_num); if (!h->setup_finished) h->poc.frame_num = frame_num; sl->mb_mbaff = 0; last_pic_structure = h->picture_structure; last_pic_droppable = h->droppable; droppable = h->nal_ref_idc == 0; if (sps->frame_mbs_only_flag) { picture_structure = PICT_FRAME; } else { field_pic_flag = get_bits1(&sl->gb); if (field_pic_flag) { bottom_field_flag = get_bits1(&sl->gb); picture_structure = PICT_TOP_FIELD + bottom_field_flag; } else { picture_structure = PICT_FRAME; mb_aff_frame = sps->mb_aff; } } if (!h->setup_finished) { h->droppable = droppable; h->picture_structure = picture_structure; h->mb_aff_frame = mb_aff_frame; } sl->mb_field_decoding_flag = h->picture_structure != PICT_FRAME; if (h->current_slice != 0) { if (last_pic_structure != picture_structure || last_pic_droppable != droppable) { av_log(h->avctx, AV_LOG_ERROR, \"Changing field mode (%d -> %d) between slices is not allowed\\n\", last_pic_structure, h->picture_structure); return AVERROR_INVALIDDATA; } else if (!h->cur_pic_ptr) { av_log(h->avctx, AV_LOG_ERROR, \"unset cur_pic_ptr on slice %d\\n\", h->current_slice + 1); return AVERROR_INVALIDDATA; } } else { /* Shorten frame num gaps so we don't have to allocate reference * frames just to throw them away */ if (h->poc.frame_num != h->poc.prev_frame_num) { int unwrap_prev_frame_num = h->poc.prev_frame_num; int max_frame_num = 1 << sps->log2_max_frame_num; if (unwrap_prev_frame_num > h->poc.frame_num) unwrap_prev_frame_num -= max_frame_num; if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) { unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1; if (unwrap_prev_frame_num < 0) unwrap_prev_frame_num += max_frame_num; h->poc.prev_frame_num = unwrap_prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * Here, we're using that to see if we should mark previously * decode frames as \"finished\". * We have to do that before the \"dummy\" in-between frame allocation, * since that can modify s->current_picture_ptr. 
*/ if (h->first_field) { assert(h->cur_pic_ptr); assert(h->cur_pic_ptr->f->buf[0]); assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ if (!last_pic_droppable && last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { if (h->cur_pic_ptr->frame_num != h->poc.frame_num) { /* This and previous field were reference, but had * different frame_nums. Consider this field first in * pair. Throw away previous field except for reference * purposes. */ if (!last_pic_droppable && last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { /* Second field in complementary pair */ if (!((last_pic_structure == PICT_TOP_FIELD && h->picture_structure == PICT_BOTTOM_FIELD) || (last_pic_structure == PICT_BOTTOM_FIELD && h->picture_structure == PICT_TOP_FIELD))) { av_log(h->avctx, AV_LOG_ERROR, \"Invalid field mode combination %d/%d\\n\", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (last_pic_droppable != h->droppable) { avpriv_request_sample(h->avctx, \"Found reference and non-reference fields in the same frame, which\"); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_PATCHWELCOME; } } } } while (h->poc.frame_num != h->poc.prev_frame_num && h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) { H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL; av_log(h->avctx, AV_LOG_DEBUG, \"Frame num gap %d %d\\n\", h->poc.frame_num, h->poc.prev_frame_num); ret = initialize_cur_frame(h); if (ret < 0) { h->first_field = 0; return ret; } h->poc.prev_frame_num++; h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num; h->cur_pic_ptr->frame_num = h->poc.prev_frame_num; ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1); ret = ff_generate_sliding_window_mmcos(h, 1); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; ret = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; /* Error concealment: If a ref is missing, copy the previous ref * in its place. * FIXME: Avoiding a memcpy would be nice, but ref handling makes * many assumptions about there being no actual duplicates. * FIXME: This does not copy padding for out-of-frame motion * vectors. Given we are concealing a lost frame, this probably * is not noticeable by comparison, but it should be fixed. */ if (h->short_ref_count) { if (prev && h->short_ref[0]->f->width == prev->f->width && h->short_ref[0]->f->height == prev->f->height && h->short_ref[0]->f->format == prev->f->format) { av_image_copy(h->short_ref[0]->f->data, h->short_ref[0]->f->linesize, (const uint8_t **)prev->f->data, prev->f->linesize, prev->f->format, h->mb_width * 16, h->mb_height * 16); h->short_ref[0]->poc = prev->poc + 2; } h->short_ref[0]->frame_num = h->poc.prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * We're using that to see whether to continue decoding in that * frame, or to allocate a new one. 
*/ if (h->first_field) { assert(h->cur_pic_ptr); assert(h->cur_pic_ptr->f->buf[0]); assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ h->cur_pic_ptr = NULL; h->first_field = FIELD_PICTURE(h); } else { if (h->cur_pic_ptr->frame_num != h->poc.frame_num) { /* This and the previous field had different frame_nums. * Consider this field first in pair. Throw away previous * one except for reference purposes. */ h->first_field = 1; h->cur_pic_ptr = NULL; } else { /* Second field in complementary pair */ h->first_field = 0; } } } else { /* Frame or first field in a potentially complementary pair */ h->first_field = FIELD_PICTURE(h); } if (!FIELD_PICTURE(h) || h->first_field) { if (h264_frame_start(h) < 0) { h->first_field = 0; return AVERROR_INVALIDDATA; } } else { release_unused_pictures(h, 0); } } assert(h->mb_num == h->mb_width * h->mb_height); if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num || first_mb_in_slice >= h->mb_num) { av_log(h->avctx, AV_LOG_ERROR, \"first_mb_in_slice overflow\\n\"); return AVERROR_INVALIDDATA; } sl->resync_mb_x = sl->mb_x = first_mb_in_slice % h->mb_width; sl->resync_mb_y = sl->mb_y = (first_mb_in_slice / h->mb_width) << FIELD_OR_MBAFF_PICTURE(h); if (h->picture_structure == PICT_BOTTOM_FIELD) sl->resync_mb_y = sl->mb_y = sl->mb_y + 1; assert(sl->mb_y < h->mb_height); if (h->picture_structure == PICT_FRAME) { h->curr_pic_num = h->poc.frame_num; h->max_pic_num = 1 << sps->log2_max_frame_num; } else { h->curr_pic_num = 2 * h->poc.frame_num + 1; h->max_pic_num = 1 << (sps->log2_max_frame_num + 1); } if (h->nal_unit_type == NAL_IDR_SLICE) get_ue_golomb(&sl->gb); /* idr_pic_id */ if (sps->poc_type == 0) { int poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb); if (!h->setup_finished) h->poc.poc_lsb = poc_lsb; if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) { int delta_poc_bottom = get_se_golomb(&sl->gb); if (!h->setup_finished) h->poc.delta_poc_bottom = delta_poc_bottom; } } if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) { int delta_poc = get_se_golomb(&sl->gb); if (!h->setup_finished) h->poc.delta_poc[0] = delta_poc; if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) { delta_poc = get_se_golomb(&sl->gb); if (!h->setup_finished) h->poc.delta_poc[1] = delta_poc; } } if (!h->setup_finished) ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc, sps, &h->poc, h->picture_structure, h->nal_ref_idc); if (pps->redundant_pic_cnt_present) sl->redundant_pic_count = get_ue_golomb(&sl->gb); if (sl->slice_type_nos == AV_PICTURE_TYPE_B) sl->direct_spatial_mv_pred = get_bits1(&sl->gb); ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count, &sl->gb, pps, sl->slice_type_nos, h->picture_structure); if (ret < 0) return ret; if (sl->slice_type_nos != AV_PICTURE_TYPE_I) { ret = ff_h264_decode_ref_pic_list_reordering(h, sl); if (ret < 0) { sl->ref_count[1] = sl->ref_count[0] = 0; return ret; } } sl->pwt.use_weight = 0; for (i = 0; i < 2; i++) { sl->pwt.luma_weight_flag[i] = 0; sl->pwt.chroma_weight_flag[i] = 0; } if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) || (pps->weighted_bipred_idc == 1 && sl->slice_type_nos == AV_PICTURE_TYPE_B)) ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count, sl->slice_type_nos, &sl->pwt); // If frame-mt is enabled, 
only update mmco tables for the first slice // in a field. Subsequent slices can temporarily clobber h->mmco_index // or h->mmco, which will cause ref list mix-ups and decoding errors // further down the line. This may break decoding if the first slice is // corrupt, thus we only do this if frame-mt is enabled. if (h->nal_ref_idc) { ret = ff_h264_decode_ref_pic_marking(h, &sl->gb, !(h->avctx->active_thread_type & FF_THREAD_FRAME) || h->current_slice == 0); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return AVERROR_INVALIDDATA; } if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) { tmp = get_ue_golomb_31(&sl->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, \"cabac_init_idc %u overflow\\n\", tmp); return AVERROR_INVALIDDATA; } sl->cabac_init_idc = tmp; } sl->last_qscale_diff = 0; tmp = pps->init_qp + get_se_golomb(&sl->gb); if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) { av_log(h->avctx, AV_LOG_ERROR, \"QP %u out of range\\n\", tmp); return AVERROR_INVALIDDATA; } sl->qscale = tmp; sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale); sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale); // FIXME qscale / qp ... stuff if (sl->slice_type == AV_PICTURE_TYPE_SP) get_bits1(&sl->gb); /* sp_for_switch_flag */ if (sl->slice_type == AV_PICTURE_TYPE_SP || sl->slice_type == AV_PICTURE_TYPE_SI) get_se_golomb(&sl->gb); /* slice_qs_delta */ sl->deblocking_filter = 1; sl->slice_alpha_c0_offset = 0; sl->slice_beta_offset = 0; if (pps->deblocking_filter_parameters_present) { tmp = get_ue_golomb_31(&sl->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, \"deblocking_filter_idc %u out of range\\n\", tmp); return AVERROR_INVALIDDATA; } sl->deblocking_filter = tmp; if (sl->deblocking_filter < 2) sl->deblocking_filter ^= 1; // 1<->0 if (sl->deblocking_filter) { sl->slice_alpha_c0_offset = get_se_golomb(&sl->gb) * 2; sl->slice_beta_offset = get_se_golomb(&sl->gb) * 2; if (sl->slice_alpha_c0_offset > 12 || sl->slice_alpha_c0_offset < -12 || sl->slice_beta_offset > 12 || sl->slice_beta_offset < -12) { av_log(h->avctx, AV_LOG_ERROR, \"deblocking filter parameters %d %d out of range\\n\", sl->slice_alpha_c0_offset, sl->slice_beta_offset); return AVERROR_INVALIDDATA; } } } return 0; }"} {"target": 0, "idx": 7729, "func": "int ffurl_shutdown(URLContext *h, int flags) { if (!h->prot->url_shutdown) return AVERROR(EINVAL); return h->prot->url_shutdown(h, flags); }"} {"target": 1, "idx": 7732, "func": "static void i82378_init(DeviceState *dev, I82378State *s) { ISABus *isabus = DO_UPCAST(ISABus, qbus, qdev_get_child_bus(dev, \"isa.0\")); ISADevice *pit; qemu_irq *out0_irq; /* This device has: 2 82C59 (irq) 1 82C54 (pit) 2 82C37 (dma) NMI Utility Bus Support Registers All devices accept byte access only, except timer */ qdev_init_gpio_out(dev, s->out, 2); qdev_init_gpio_in(dev, i82378_request_pic_irq, 16); /* Workaround the fact that i8259 is not qdev'ified... 
*/ out0_irq = qemu_allocate_irqs(i82378_request_out0_irq, s, 1); /* 2 82C59 (irq) */ s->i8259 = i8259_init(isabus, *out0_irq); isa_bus_irqs(isabus, s->i8259); /* 1 82C54 (pit) */ pit = pit_init(isabus, 0x40, 0, NULL); /* speaker */ pcspk_init(isabus, pit); /* 2 82C37 (dma) */ DMA_init(1, &s->out[1]); isa_create_simple(isabus, \"i82374\"); /* timer */ isa_create_simple(isabus, \"mc146818rtc\"); }"} {"target": 0, "idx": 7749, "func": "static void realview_init(ram_addr_t ram_size, int vga_ram_size, const char *boot_device, DisplayState *ds, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env; qemu_irq *pic; void *scsi_hba; PCIBus *pci_bus; NICInfo *nd; int n; int done_smc = 0; qemu_irq cpu_irq[4]; int ncpu; int index; if (!cpu_model) cpu_model = \"arm926\"; /* FIXME: obey smp_cpus. */ if (strcmp(cpu_model, \"arm11mpcore\") == 0) { ncpu = 4; } else { ncpu = 1; } for (n = 0; n < ncpu; n++) { env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"Unable to find CPU definition\\n\"); exit(1); } pic = arm_pic_init_cpu(env); cpu_irq[n] = pic[ARM_PIC_CPU_IRQ]; if (n > 0) { /* Set entry point for secondary CPUs. This assumes we're using the init code from arm_boot.c. Real hardware resets all CPUs the same. */ env->regs[15] = 0x80000000; } } /* ??? RAM should repeat to fill physical memory space. */ /* SDRAM at address zero. */ cpu_register_physical_memory(0, ram_size, IO_MEM_RAM); arm_sysctl_init(0x10000000, 0xc1400400); if (ncpu == 1) { /* ??? The documentation says GIC1 is nFIQ and either GIC2 or GIC3 is nIRQ (there are inconsistencies). However Linux 2.6.17 expects GIC1 to be nIRQ and ignores all the others, so do that for now. */ pic = realview_gic_init(0x10040000, cpu_irq[0]); } else { pic = mpcore_irq_init(cpu_irq); } pl050_init(0x10006000, pic[20], 0); pl050_init(0x10007000, pic[21], 1); pl011_init(0x10009000, pic[12], serial_hds[0], PL011_ARM); pl011_init(0x1000a000, pic[13], serial_hds[1], PL011_ARM); pl011_init(0x1000b000, pic[14], serial_hds[2], PL011_ARM); pl011_init(0x1000c000, pic[15], serial_hds[3], PL011_ARM); /* DMA controller is optional, apparently. */ pl080_init(0x10030000, pic[24], 2); sp804_init(0x10011000, pic[4]); sp804_init(0x10012000, pic[5]); pl110_init(ds, 0x10020000, pic[23], 1); index = drive_get_index(IF_SD, 0, 0); if (index == -1) { fprintf(stderr, \"qemu: missing SecureDigital card\\n\"); exit(1); } pl181_init(0x10005000, drives_table[index].bdrv, pic[17], pic[18]); pl031_init(0x10017000, pic[10]); pci_bus = pci_vpb_init(pic, 48, 1); if (usb_enabled) { usb_ohci_init_pci(pci_bus, 3, -1); } if (drive_get_max_bus(IF_SCSI) > 0) { fprintf(stderr, \"qemu: too many SCSI bus\\n\"); exit(1); } scsi_hba = lsi_scsi_init(pci_bus, -1); for (n = 0; n < LSI_MAX_DEVS; n++) { index = drive_get_index(IF_SCSI, 0, n); if (index == -1) continue; lsi_scsi_attach(scsi_hba, drives_table[index].bdrv, n); } for(n = 0; n < nb_nics; n++) { nd = &nd_table[n]; if (!nd->model) nd->model = done_smc ? \"rtl8139\" : \"smc91c111\"; if (strcmp(nd->model, \"smc91c111\") == 0) { smc91c111_init(nd, 0x4e000000, pic[28]); } else { pci_nic_init(pci_bus, nd, -1); } } /* Memory map for RealView Emulation Baseboard: */ /* 0x10000000 System registers. */ /* 0x10001000 System controller. */ /* 0x10002000 Two-Wire Serial Bus. */ /* 0x10003000 Reserved. */ /* 0x10004000 AACI. */ /* 0x10005000 MCI. */ /* 0x10006000 KMI0. */ /* 0x10007000 KMI1. */ /* 0x10008000 Character LCD. */ /* 0x10009000 UART0. */ /* 0x1000a000 UART1. 
*/ /* 0x1000b000 UART2. */ /* 0x1000c000 UART3. */ /* 0x1000d000 SSPI. */ /* 0x1000e000 SCI. */ /* 0x1000f000 Reserved. */ /* 0x10010000 Watchdog. */ /* 0x10011000 Timer 0+1. */ /* 0x10012000 Timer 2+3. */ /* 0x10013000 GPIO 0. */ /* 0x10014000 GPIO 1. */ /* 0x10015000 GPIO 2. */ /* 0x10016000 Reserved. */ /* 0x10017000 RTC. */ /* 0x10018000 DMC. */ /* 0x10019000 PCI controller config. */ /* 0x10020000 CLCD. */ /* 0x10030000 DMA Controller. */ /* 0x10040000 GIC1. */ /* 0x10050000 GIC2. */ /* 0x10060000 GIC3. */ /* 0x10070000 GIC4. */ /* 0x10080000 SMC. */ /* 0x40000000 NOR flash. */ /* 0x44000000 DoC flash. */ /* 0x48000000 SRAM. */ /* 0x4c000000 Configuration flash. */ /* 0x4e000000 Ethernet. */ /* 0x4f000000 USB. */ /* 0x50000000 PISMO. */ /* 0x54000000 PISMO. */ /* 0x58000000 PISMO. */ /* 0x5c000000 PISMO. */ /* 0x60000000 PCI. */ /* 0x61000000 PCI Self Config. */ /* 0x62000000 PCI Config. */ /* 0x63000000 PCI IO. */ /* 0x64000000 PCI mem 0. */ /* 0x68000000 PCI mem 1. */ /* 0x6c000000 PCI mem 2. */ realview_binfo.ram_size = ram_size; realview_binfo.kernel_filename = kernel_filename; realview_binfo.kernel_cmdline = kernel_cmdline; realview_binfo.initrd_filename = initrd_filename; realview_binfo.nb_cpus = ncpu; arm_load_kernel(first_cpu, &realview_binfo); /* ??? Hack to map an additional page of ram for the secondary CPU startup code. I guess this works on real hardware because the BootROM happens to be in ROM/flash or in memory that isn't clobbered until after Linux boots the secondary CPUs. */ cpu_register_physical_memory(0x80000000, 0x1000, IO_MEM_RAM + ram_size); }"} {"target": 0, "idx": 7754, "func": "QString *qstring_from_substr(const char *str, int start, int end) { QString *qstring; qstring = g_malloc(sizeof(*qstring)); qstring->length = end - start + 1; qstring->capacity = qstring->length; qstring->string = g_malloc(qstring->capacity + 1); memcpy(qstring->string, str + start, qstring->length); qstring->string[qstring->length] = 0; QOBJECT_INIT(qstring, &qstring_type); return qstring; }"} {"target": 0, "idx": 7761, "func": "static void test_source_wait_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 1 }; event_notifier_init(&data.e, false); aio_set_event_notifier(ctx, &data.e, event_ready_cb); g_assert(g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 0); g_assert_cmpint(data.active, ==, 1); event_notifier_set(&data.e); g_assert(g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 1); g_assert_cmpint(data.active, ==, 0); while (g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 1); g_assert_cmpint(data.active, ==, 0); aio_set_event_notifier(ctx, &data.e, NULL); while (g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 1); event_notifier_cleanup(&data.e); }"} {"target": 0, "idx": 7762, "func": "static int coroutine_fn is_allocated_base(BlockDriverState *top, BlockDriverState *base, int64_t sector_num, int nb_sectors, int *pnum) { BlockDriverState *intermediate; int ret, n; ret = bdrv_co_is_allocated(top, sector_num, nb_sectors, &n); if (ret) { *pnum = n; return ret; } /* * Is the unallocated chunk [sector_num, n] also * unallocated between base and top? 
*/ intermediate = top->backing_hd; while (intermediate != base) { int pnum_inter; ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors, &pnum_inter); if (ret < 0) { return ret; } else if (ret) { *pnum = pnum_inter; return 0; } /* * [sector_num, nb_sectors] is unallocated on top but intermediate * might have * * [sector_num+x, nr_sectors] allocated. */ if (n > pnum_inter) { n = pnum_inter; } intermediate = intermediate->backing_hd; } *pnum = n; return 1; }"} {"target": 0, "idx": 7766, "func": "static DisplayType select_display(const char *p) { Error *err = NULL; const char *opts; DisplayType display = DT_DEFAULT; if (strstart(p, \"sdl\", &opts)) { #ifdef CONFIG_SDL display = DT_SDL; while (*opts) { const char *nextopt; if (strstart(opts, \",frame=\", &nextopt)) { opts = nextopt; if (strstart(opts, \"on\", &nextopt)) { no_frame = 0; } else if (strstart(opts, \"off\", &nextopt)) { no_frame = 1; } else { goto invalid_sdl_args; } } else if (strstart(opts, \",alt_grab=\", &nextopt)) { opts = nextopt; if (strstart(opts, \"on\", &nextopt)) { alt_grab = 1; } else if (strstart(opts, \"off\", &nextopt)) { alt_grab = 0; } else { goto invalid_sdl_args; } } else if (strstart(opts, \",ctrl_grab=\", &nextopt)) { opts = nextopt; if (strstart(opts, \"on\", &nextopt)) { ctrl_grab = 1; } else if (strstart(opts, \"off\", &nextopt)) { ctrl_grab = 0; } else { goto invalid_sdl_args; } } else if (strstart(opts, \",window_close=\", &nextopt)) { opts = nextopt; if (strstart(opts, \"on\", &nextopt)) { no_quit = 0; } else if (strstart(opts, \"off\", &nextopt)) { no_quit = 1; } else { goto invalid_sdl_args; } } else if (strstart(opts, \",gl=\", &nextopt)) { opts = nextopt; if (strstart(opts, \"on\", &nextopt)) { request_opengl = 1; } else if (strstart(opts, \"off\", &nextopt)) { request_opengl = 0; } else { goto invalid_sdl_args; } } else { invalid_sdl_args: fprintf(stderr, \"Invalid SDL option string: %s\\n\", p); exit(1); } opts = nextopt; } #else fprintf(stderr, \"SDL support is disabled\\n\"); exit(1); #endif } else if (strstart(p, \"vnc\", &opts)) { #ifdef CONFIG_VNC if (*opts == '=') { if (vnc_parse(opts + 1, &err) == NULL) { error_report_err(err); exit(1); } } else { fprintf(stderr, \"VNC requires a display argument vnc=\\n\"); exit(1); } #else fprintf(stderr, \"VNC support is disabled\\n\"); exit(1); #endif } else if (strstart(p, \"curses\", &opts)) { #ifdef CONFIG_CURSES display = DT_CURSES; #else fprintf(stderr, \"Curses support is disabled\\n\"); exit(1); #endif } else if (strstart(p, \"gtk\", &opts)) { #ifdef CONFIG_GTK display = DT_GTK; while (*opts) { const char *nextopt; if (strstart(opts, \",grab_on_hover=\", &nextopt)) { opts = nextopt; if (strstart(opts, \"on\", &nextopt)) { grab_on_hover = true; } else if (strstart(opts, \"off\", &nextopt)) { grab_on_hover = false; } else { goto invalid_gtk_args; } } else if (strstart(opts, \",gl=\", &nextopt)) { opts = nextopt; if (strstart(opts, \"on\", &nextopt)) { request_opengl = 1; } else if (strstart(opts, \"off\", &nextopt)) { request_opengl = 0; } else { goto invalid_gtk_args; } } else { invalid_gtk_args: fprintf(stderr, \"Invalid GTK option string: %s\\n\", p); exit(1); } opts = nextopt; } #else fprintf(stderr, \"GTK support is disabled\\n\"); exit(1); #endif } else if (strstart(p, \"none\", &opts)) { display = DT_NONE; } else { fprintf(stderr, \"Unknown display type: %s\\n\", p); exit(1); } return display; }"} {"target": 0, "idx": 7777, "func": "static int get_packetheader(NUTContext *nut, ByteIOContext *bc, int prefix_length, int 
calculate_checksum) { int64_t start, size, last_size; start= url_ftell(bc) - prefix_length; if(start != nut->packet_start + nut->written_packet_size){ av_log(nut->avf, AV_LOG_ERROR, \"get_packetheader called at weird position\\n\"); return -1; } if(calculate_checksum) init_checksum(bc, update_adler32, 0); size= get_v(bc); last_size= get_v(bc); if(nut->written_packet_size != last_size){ av_log(nut->avf, AV_LOG_ERROR, \"packet size missmatch %d != %lld at %lld\\n\", nut->written_packet_size, last_size, start); return -1; } nut->last_packet_start = nut->packet_start; nut->packet_start = start; nut->written_packet_size= size; return size; }"} {"target": 0, "idx": 7779, "func": "static void megasas_unmap_frame(MegasasState *s, MegasasCmd *cmd) { PCIDevice *p = PCI_DEVICE(s); pci_dma_unmap(p, cmd->frame, cmd->pa_size, 0, 0); cmd->frame = NULL; cmd->pa = 0; clear_bit(cmd->index, s->frame_map); }"} {"target": 0, "idx": 7780, "func": "int v9fs_co_open2(V9fsState *s, V9fsFidState *fidp, char *fullname, gid_t gid, int flags, int mode) { int err; FsCred cred; cred_init(&cred); cred.fc_mode = mode & 07777; cred.fc_uid = fidp->uid; cred.fc_gid = gid; v9fs_co_run_in_worker( { fidp->fs.fd = s->ops->open2(&s->ctx, fullname, flags, &cred); err = 0; if (fidp->fs.fd == -1) { err = -errno; } }); if (!err) { total_open_fd++; if (total_open_fd > open_fd_hw) { v9fs_reclaim_fd(s); } } return err; }"} {"target": 0, "idx": 7782, "func": "static int receive_filter(VirtIONet *n, const uint8_t *buf, int size) { static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; static const uint8_t vlan[] = {0x81, 0x00}; uint8_t *ptr = (uint8_t *)buf; int i; if (n->promisc) return 1; if (!memcmp(&ptr[12], vlan, sizeof(vlan))) { int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff; if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f)))) return 0; } if ((ptr[0] & 1) && n->allmulti) return 1; if (!memcmp(ptr, bcast, sizeof(bcast))) return 1; if (!memcmp(ptr, n->mac, ETH_ALEN)) return 1; for (i = 0; i < n->mac_table.in_use; i++) { if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) return 1; } return 0; }"} {"target": 0, "idx": 7808, "func": "static int output_packet(InputStream *ist, int ist_index, OutputStream *ost_table, int nb_ostreams, const AVPacket *pkt) { AVFormatContext *os; OutputStream *ost; int ret, i; int got_output; void *buffer_to_free = NULL; static unsigned int samples_size= 0; AVSubtitle subtitle, *subtitle_to_free; int64_t pkt_pts = AV_NOPTS_VALUE; #if CONFIG_AVFILTER int frame_available; #endif float quality; AVPacket avpkt; int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt); if(ist->next_pts == AV_NOPTS_VALUE) ist->next_pts= ist->pts; if (pkt == NULL) { /* EOF handling */ av_init_packet(&avpkt); avpkt.data = NULL; avpkt.size = 0; goto handle_eof; } else { avpkt = *pkt; } if(pkt->dts != AV_NOPTS_VALUE) ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); if(pkt->pts != AV_NOPTS_VALUE) pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q); //while we have more to decode or while the decoder did output something on EOF while (avpkt.size > 0 || (!pkt && got_output)) { uint8_t *data_buf, *decoded_data_buf; int data_size, decoded_data_size; AVFrame *decoded_frame, *filtered_frame; handle_eof: ist->pts= ist->next_pts; if(avpkt.size && avpkt.size != pkt->size) av_log(NULL, ist->showed_multi_packet_warning ? 
AV_LOG_VERBOSE : AV_LOG_WARNING, \"Multiple frames in a packet from stream %d\\n\", pkt->stream_index); ist->showed_multi_packet_warning=1; /* decode the packet if needed */ decoded_frame = filtered_frame = NULL; decoded_data_buf = NULL; /* fail safe */ decoded_data_size= 0; data_buf = avpkt.data; data_size = avpkt.size; subtitle_to_free = NULL; if (ist->decoding_needed) { switch(ist->st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO:{ if(pkt && samples_size < FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE)) { samples_size = FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE); av_free(samples); samples= av_malloc(samples_size); } decoded_data_size= samples_size; /* XXX: could avoid copy if PCM 16 bits with same endianness as CPU */ ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size, &avpkt); if (ret < 0) return ret; avpkt.data += ret; avpkt.size -= ret; data_size = ret; got_output = decoded_data_size > 0; /* Some bug in mpeg audio decoder gives */ /* decoded_data_size < 0, it seems they are overflows */ if (!got_output) { /* no audio frame */ continue; } decoded_data_buf = (uint8_t *)samples; ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) / (ist->st->codec->sample_rate * ist->st->codec->channels); break;} case AVMEDIA_TYPE_VIDEO: decoded_data_size = (ist->st->codec->width * ist->st->codec->height * 3) / 2; if (!(decoded_frame = avcodec_alloc_frame())) return AVERROR(ENOMEM); avpkt.pts = pkt_pts; avpkt.dts = ist->pts; pkt_pts = AV_NOPTS_VALUE; ret = avcodec_decode_video2(ist->st->codec, decoded_frame, &got_output, &avpkt); quality = same_quant ? decoded_frame->quality : 0; if (ret < 0) goto fail; if (!got_output) { /* no picture yet */ av_freep(&decoded_frame); goto discard_packet; } ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts, decoded_frame->pkt_dts); if (ist->st->codec->time_base.num != 0) { int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame; ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->time_base.num * ticks) / ist->st->codec->time_base.den; } avpkt.size = 0; buffer_to_free = NULL; pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free); break; case AVMEDIA_TYPE_SUBTITLE: ret = avcodec_decode_subtitle2(ist->st->codec, &subtitle, &got_output, &avpkt); if (ret < 0) return ret; if (!got_output) { goto discard_packet; } subtitle_to_free = &subtitle; avpkt.size = 0; break; default: return -1; } } else { switch(ist->st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) / ist->st->codec->sample_rate; break; case AVMEDIA_TYPE_VIDEO: if (ist->st->codec->time_base.num != 0) { int ticks= ist->st->parser ? 
ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame; ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->time_base.num * ticks) / ist->st->codec->time_base.den; } break; } avpkt.size = 0; } // preprocess audio (volume) if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if (audio_volume != 256) { switch (ist->st->codec->sample_fmt) { case AV_SAMPLE_FMT_U8: { uint8_t *volp = samples; for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) { int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128; *volp++ = av_clip_uint8(v); } break; } case AV_SAMPLE_FMT_S16: { short *volp; volp = samples; for(i=0;i<(decoded_data_size / sizeof(short));i++) { int v = ((*volp) * audio_volume + 128) >> 8; *volp++ = av_clip_int16(v); } break; } case AV_SAMPLE_FMT_S32: { int32_t *volp = samples; for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) { int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8); *volp++ = av_clipl_int32(v); } break; } case AV_SAMPLE_FMT_FLT: { float *volp = samples; float scale = audio_volume / 256.f; for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) { *volp++ *= scale; } break; } case AV_SAMPLE_FMT_DBL: { double *volp = samples; double scale = audio_volume / 256.; for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) { *volp++ *= scale; } break; } default: av_log(NULL, AV_LOG_FATAL, \"Audio volume adjustment on sample format %s is not supported.\\n\", av_get_sample_fmt_name(ist->st->codec->sample_fmt)); exit_program(1); } } } /* frame rate emulation */ if (input_files[ist->file_index].rate_emu) { int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE); int64_t now = av_gettime() - ist->start; if (pts > now) usleep(pts - now); } /* if output time reached then transcode raw format, encode packets and output them */ for (i = 0; i < nb_ostreams; i++) { OutputFile *of = &output_files[ost_table[i].file_index]; int frame_size; ost = &ost_table[i]; if (ost->source_index != ist_index) continue; if (of->start_time && ist->pts < of->start_time) continue; if (of->recording_time != INT64_MAX && av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time, (AVRational){1, 1000000}) >= 0) { ost->is_past_recording_time = 1; continue; } #if CONFIG_AVFILTER if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->input_video_filter) { AVRational sar; if (ist->st->sample_aspect_ratio.num) sar = ist->st->sample_aspect_ratio; else sar = ist->st->codec->sample_aspect_ratio; av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, ist->pts, sar); if (!(filtered_frame = avcodec_alloc_frame())) { ret = AVERROR(ENOMEM); goto fail; } } frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]); while (frame_available) { AVRational ist_pts_tb; if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter) get_filtered_video_frame(ost->output_video_filter, filtered_frame, &ost->picref, &ist_pts_tb); if (ost->picref) ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q); #else filtered_frame = decoded_frame; #endif os = output_files[ost->file_index].ctx; /* set the input output pts pairs */ //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE; if (ost->encoding_needed) { av_assert0(ist->decoding_needed); switch(ost->st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size); break; case AVMEDIA_TYPE_VIDEO: 
#if CONFIG_AVFILTER if (ost->picref->video && !ost->frame_aspect_ratio) ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect; #endif do_video_out(os, ost, ist, filtered_frame, &frame_size, same_quant ? quality : ost->st->codec->global_quality); if (vstats_filename && frame_size) do_video_stats(os, ost, frame_size); break; case AVMEDIA_TYPE_SUBTITLE: do_subtitle_out(os, ost, ist, &subtitle, pkt->pts); break; default: abort(); } } else { AVPacket opkt; int64_t ost_tb_start_time= av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base); av_init_packet(&opkt); if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes) #if !CONFIG_AVFILTER continue; #else goto cont; #endif /* no reencoding needed : output the packet directly */ /* force the input stream PTS */ if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) audio_size += data_size; else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { video_size += data_size; ost->sync_opts++; } opkt.stream_index= ost->index; if(pkt->pts != AV_NOPTS_VALUE) opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time; else opkt.pts= AV_NOPTS_VALUE; if (pkt->dts == AV_NOPTS_VALUE) opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base); else opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base); opkt.dts -= ost_tb_start_time; opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base); opkt.flags= pkt->flags; //FIXME remove the following 2 lines they shall be replaced by the bitstream filters if( ost->st->codec->codec_id != CODEC_ID_H264 && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO ) { if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY)) opkt.destruct= av_destruct_packet; } else { opkt.data = data_buf; opkt.size = data_size; } write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters); ost->st->codec->frame_number++; ost->frame_number++; av_free_packet(&opkt); } #if CONFIG_AVFILTER cont: frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) && ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]); if (ost->picref) avfilter_unref_buffer(ost->picref); } av_freep(&filtered_frame); #endif } fail: av_free(buffer_to_free); /* XXX: allocate the subtitles in the codec ? 
*/ if (subtitle_to_free) { avsubtitle_free(subtitle_to_free); subtitle_to_free = NULL; } av_freep(&decoded_frame); if (ret < 0) return ret; } discard_packet: return 0; }"} {"target": 0, "idx": 7814, "func": "static void gen_tlbsx_440(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); #else TCGv t0; if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free(t0); if (Rc(ctx->opcode)) { int l1 = gen_new_label(); tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1); tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02); gen_set_label(l1); } #endif }"} {"target": 0, "idx": 7828, "func": "GSource *iohandler_get_g_source(void) { iohandler_init(); return aio_get_g_source(iohandler_ctx); }"} {"target": 0, "idx": 7834, "func": "int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size) { int i; uint32_t state= pc->state; /* EOF considered as end of frame */ if (buf_size == 0) return 0; /* 0 frame start -> 1/4 1 first_SEQEXT -> 0/2 2 first field start -> 3/0 3 second_SEQEXT -> 2/0 4 searching end */ for(i=0; i<buf_size; i++){ assert(pc->frame_start_found>=0 && pc->frame_start_found<=4); if(pc->frame_start_found&1){ if(state == EXT_START_CODE && (buf[i]&0xF0) != 0x80) pc->frame_start_found--; else if(state == EXT_START_CODE+2){ if((buf[i]&3) == 3) pc->frame_start_found= 0; else pc->frame_start_found= (pc->frame_start_found+1)&3; } state++; }else{ i= ff_find_start_code(buf+i, buf+buf_size, &state) - buf - 1; if(pc->frame_start_found==0 && state >= SLICE_MIN_START_CODE && state <= SLICE_MAX_START_CODE){ i++; pc->frame_start_found=4; } if(state == SEQ_END_CODE){ pc->state=-1; return i+1; } if(pc->frame_start_found==2 && state == SEQ_START_CODE) pc->frame_start_found= 0; if(pc->frame_start_found<4 && state == EXT_START_CODE) pc->frame_start_found++; if(pc->frame_start_found == 4 && (state&0xFFFFFF00) == 0x100){ if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){ pc->frame_start_found=0; pc->state=-1; return i-3; } } } } pc->state= state; return END_NOT_FOUND; }"} {"target": 0, "idx": 7852, "func": "static av_cold int encode_init(AVCodecContext *avctx) { FFV1Context *s = avctx->priv_data; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt); int i, j, k, m, ret; if ((ret = ff_ffv1_common_init(avctx)) < 0) return ret; s->version = 0; if ((avctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) || avctx->slices > 1) s->version = FFMAX(s->version, 2); // Unspecified level & slices, we choose version 1.2+ to ensure multithreaded decodability if (avctx->slices == 0 && avctx->level < 0 && avctx->width * avctx->height > 720*576) s->version = FFMAX(s->version, 2); if (avctx->level <= 0 && s->version == 2) { s->version = 3; } if (avctx->level >= 0 && avctx->level <= 4) { if (avctx->level < s->version) { av_log(avctx, AV_LOG_ERROR, \"Version %d needed for requested features but %d requested\\n\", s->version, avctx->level); return AVERROR(EINVAL); } s->version = avctx->level; } if (s->ec < 0) { s->ec = (s->version >= 3); } if ((s->version == 2 || s->version>3) && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { av_log(avctx, AV_LOG_ERROR, \"Version 2 needed for requested features but version 2 is experimental and not enabled\\n\"); return AVERROR_INVALIDDATA; } #if FF_API_CODER_TYPE FF_DISABLE_DEPRECATION_WARNINGS if (avctx->coder_type 
!= -1) s->ac = avctx->coder_type > 0 ? AC_RANGE_CUSTOM_TAB : AC_GOLOMB_RICE; else FF_ENABLE_DEPRECATION_WARNINGS #endif if (s->ac == 1) // Compatbility with common command line usage s->ac = AC_RANGE_CUSTOM_TAB; else if (s->ac == AC_RANGE_DEFAULT_TAB_FORCE) s->ac = AC_RANGE_DEFAULT_TAB; s->plane_count = 3; switch(avctx->pix_fmt) { case AV_PIX_FMT_YUV444P9: case AV_PIX_FMT_YUV422P9: case AV_PIX_FMT_YUV420P9: case AV_PIX_FMT_YUVA444P9: case AV_PIX_FMT_YUVA422P9: case AV_PIX_FMT_YUVA420P9: if (!avctx->bits_per_raw_sample) s->bits_per_raw_sample = 9; case AV_PIX_FMT_GRAY10: case AV_PIX_FMT_YUV444P10: case AV_PIX_FMT_YUV420P10: case AV_PIX_FMT_YUV422P10: case AV_PIX_FMT_YUVA444P10: case AV_PIX_FMT_YUVA422P10: case AV_PIX_FMT_YUVA420P10: if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) s->bits_per_raw_sample = 10; case AV_PIX_FMT_GRAY12: case AV_PIX_FMT_YUV444P12: case AV_PIX_FMT_YUV420P12: case AV_PIX_FMT_YUV422P12: s->packed_at_lsb = 1; if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) s->bits_per_raw_sample = 12; case AV_PIX_FMT_GRAY16: case AV_PIX_FMT_YUV444P16: case AV_PIX_FMT_YUV422P16: case AV_PIX_FMT_YUV420P16: case AV_PIX_FMT_YUVA444P16: case AV_PIX_FMT_YUVA422P16: case AV_PIX_FMT_YUVA420P16: if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) { s->bits_per_raw_sample = 16; } else if (!s->bits_per_raw_sample) { s->bits_per_raw_sample = avctx->bits_per_raw_sample; } if (s->bits_per_raw_sample <= 8) { av_log(avctx, AV_LOG_ERROR, \"bits_per_raw_sample invalid\\n\"); return AVERROR_INVALIDDATA; } s->version = FFMAX(s->version, 1); case AV_PIX_FMT_GRAY8: case AV_PIX_FMT_YA8: case AV_PIX_FMT_YUV444P: case AV_PIX_FMT_YUV440P: case AV_PIX_FMT_YUV422P: case AV_PIX_FMT_YUV420P: case AV_PIX_FMT_YUV411P: case AV_PIX_FMT_YUV410P: case AV_PIX_FMT_YUVA444P: case AV_PIX_FMT_YUVA422P: case AV_PIX_FMT_YUVA420P: s->chroma_planes = desc->nb_components < 3 ? 
0 : 1; s->colorspace = 0; s->transparency = desc->nb_components == 4 || desc->nb_components == 2; if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) s->bits_per_raw_sample = 8; else if (!s->bits_per_raw_sample) s->bits_per_raw_sample = 8; break; case AV_PIX_FMT_RGB32: s->colorspace = 1; s->transparency = 1; s->chroma_planes = 1; s->bits_per_raw_sample = 8; break; case AV_PIX_FMT_RGB48: s->colorspace = 1; s->chroma_planes = 1; s->bits_per_raw_sample = 16; s->use32bit = 1; s->version = FFMAX(s->version, 1); if (avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { av_log(avctx, AV_LOG_ERROR, \"16bit RGB is experimental and under development, only use it for experiments\\n\"); return AVERROR_INVALIDDATA; } break; case AV_PIX_FMT_0RGB32: s->colorspace = 1; s->chroma_planes = 1; s->bits_per_raw_sample = 8; break; case AV_PIX_FMT_GBRP9: if (!avctx->bits_per_raw_sample) s->bits_per_raw_sample = 9; case AV_PIX_FMT_GBRP10: if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) s->bits_per_raw_sample = 10; case AV_PIX_FMT_GBRP12: if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) s->bits_per_raw_sample = 12; case AV_PIX_FMT_GBRP14: if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) s->bits_per_raw_sample = 14; case AV_PIX_FMT_GBRP16: if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) s->bits_per_raw_sample = 16; else if (!s->bits_per_raw_sample) s->bits_per_raw_sample = avctx->bits_per_raw_sample; s->colorspace = 1; s->chroma_planes = 1; if (s->bits_per_raw_sample >= 16) { s->use32bit = 1; if (avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { av_log(avctx, AV_LOG_ERROR, \"16bit RGB is experimental and under development, only use it for experiments\\n\"); return AVERROR_INVALIDDATA; } } s->version = FFMAX(s->version, 1); break; default: av_log(avctx, AV_LOG_ERROR, \"format not supported\\n\"); return AVERROR(ENOSYS); } av_assert0(s->bits_per_raw_sample >= 8); if (s->bits_per_raw_sample > 8) { if (s->ac == AC_GOLOMB_RICE) { av_log(avctx, AV_LOG_INFO, \"bits_per_raw_sample > 8, forcing range coder\\n\"); s->ac = AC_RANGE_CUSTOM_TAB; } } if (s->transparency) { av_log(avctx, AV_LOG_WARNING, \"Storing alpha plane, this will require a recent FFV1 decoder to playback!\\n\"); } #if FF_API_PRIVATE_OPT FF_DISABLE_DEPRECATION_WARNINGS if (avctx->context_model) s->context_model = avctx->context_model; if (avctx->context_model > 1U) { av_log(avctx, AV_LOG_ERROR, \"Invalid context model %d, valid values are 0 and 1\\n\", avctx->context_model); return AVERROR(EINVAL); } FF_ENABLE_DEPRECATION_WARNINGS #endif if (s->ac == AC_RANGE_CUSTOM_TAB) { for (i = 1; i < 256; i++) s->state_transition[i] = ver2_state[i]; } else { RangeCoder c; ff_build_rac_states(&c, 0.05 * (1LL << 32), 256 - 8); for (i = 1; i < 256; i++) s->state_transition[i] = c.one_state[i]; } for (i = 0; i < 256; i++) { s->quant_table_count = 2; if (s->bits_per_raw_sample <= 8) { s->quant_tables[0][0][i]= quant11[i]; s->quant_tables[0][1][i]= 11*quant11[i]; s->quant_tables[0][2][i]= 11*11*quant11[i]; s->quant_tables[1][0][i]= quant11[i]; s->quant_tables[1][1][i]= 11*quant11[i]; s->quant_tables[1][2][i]= 11*11*quant5 [i]; s->quant_tables[1][3][i]= 5*11*11*quant5 [i]; s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i]; } else { s->quant_tables[0][0][i]= quant9_10bit[i]; s->quant_tables[0][1][i]= 11*quant9_10bit[i]; s->quant_tables[0][2][i]= 11*11*quant9_10bit[i]; s->quant_tables[1][0][i]= quant9_10bit[i]; s->quant_tables[1][1][i]= 11*quant9_10bit[i]; s->quant_tables[1][2][i]= 11*11*quant5_10bit[i]; 
s->quant_tables[1][3][i]= 5*11*11*quant5_10bit[i]; s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i]; } } s->context_count[0] = (11 * 11 * 11 + 1) / 2; s->context_count[1] = (11 * 11 * 5 * 5 * 5 + 1) / 2; memcpy(s->quant_table, s->quant_tables[s->context_model], sizeof(s->quant_table)); for (i = 0; i < s->plane_count; i++) { PlaneContext *const p = &s->plane[i]; memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table)); p->quant_table_index = s->context_model; p->context_count = s->context_count[p->quant_table_index]; } if ((ret = ff_ffv1_allocate_initial_states(s)) < 0) return ret; #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; FF_ENABLE_DEPRECATION_WARNINGS #endif if (!s->transparency) s->plane_count = 2; if (!s->chroma_planes && s->version > 3) s->plane_count--; avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift); s->picture_number = 0; if (avctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) { for (i = 0; i < s->quant_table_count; i++) { s->rc_stat2[i] = av_mallocz(s->context_count[i] * sizeof(*s->rc_stat2[i])); if (!s->rc_stat2[i]) return AVERROR(ENOMEM); } } if (avctx->stats_in) { char *p = avctx->stats_in; uint8_t (*best_state)[256] = av_malloc_array(256, 256); int gob_count = 0; char *next; if (!best_state) return AVERROR(ENOMEM); av_assert0(s->version >= 2); for (;;) { for (j = 0; j < 256; j++) for (i = 0; i < 2; i++) { s->rc_stat[j][i] = strtol(p, &next, 0); if (next == p) { av_log(avctx, AV_LOG_ERROR, \"2Pass file invalid at %d %d [%s]\\n\", j, i, p); av_freep(&best_state); return AVERROR_INVALIDDATA; } p = next; } for (i = 0; i < s->quant_table_count; i++) for (j = 0; j < s->context_count[i]; j++) { for (k = 0; k < 32; k++) for (m = 0; m < 2; m++) { s->rc_stat2[i][j][k][m] = strtol(p, &next, 0); if (next == p) { av_log(avctx, AV_LOG_ERROR, \"2Pass file invalid at %d %d %d %d [%s]\\n\", i, j, k, m, p); av_freep(&best_state); return AVERROR_INVALIDDATA; } p = next; } } gob_count = strtol(p, &next, 0); if (next == p || gob_count <= 0) { av_log(avctx, AV_LOG_ERROR, \"2Pass file invalid\\n\"); av_freep(&best_state); return AVERROR_INVALIDDATA; } p = next; while (*p == '\\n' || *p == ' ') p++; if (p[0] == 0) break; } if (s->ac == AC_RANGE_CUSTOM_TAB) sort_stt(s, s->state_transition); find_best_state(best_state, s->state_transition); for (i = 0; i < s->quant_table_count; i++) { for (k = 0; k < 32; k++) { double a=0, b=0; int jp = 0; for (j = 0; j < s->context_count[i]; j++) { double p = 128; if (s->rc_stat2[i][j][k][0] + s->rc_stat2[i][j][k][1] > 200 && j || a+b > 200) { if (a+b) p = 256.0 * b / (a + b); s->initial_states[i][jp][k] = best_state[av_clip(round(p), 1, 255)][av_clip_uint8((a + b) / gob_count)]; for(jp++; jp<j; jp++) s->initial_states[i][jp][k] = s->initial_states[i][jp-1][k]; a=b=0; } a += s->rc_stat2[i][j][k][0]; b += s->rc_stat2[i][j][k][1]; if (a+b) { p = 256.0 * b / (a + b); } s->initial_states[i][j][k] = best_state[av_clip(round(p), 1, 255)][av_clip_uint8((a + b) / gob_count)]; } } } av_freep(&best_state); } if (s->version > 1) { int plane_count = 1 + 2*s->chroma_planes + s->transparency; s->num_v_slices = (avctx->width > 352 || avctx->height > 288 || !avctx->slices) ? 
2 : 1; if (avctx->height < 5) s->num_v_slices = 1; for (; s->num_v_slices < 32; s->num_v_slices++) { for (s->num_h_slices = s->num_v_slices; s->num_h_slices < 2*s->num_v_slices; s->num_h_slices++) { int maxw = (avctx->width + s->num_h_slices - 1) / s->num_h_slices; int maxh = (avctx->height + s->num_v_slices - 1) / s->num_v_slices; if (s->num_h_slices > avctx->width || s->num_v_slices > avctx->height) continue; if (maxw * maxh * (int64_t)(s->bits_per_raw_sample+1) * plane_count > 8<<24) continue; if (avctx->slices == s->num_h_slices * s->num_v_slices && avctx->slices <= MAX_SLICES || !avctx->slices) goto slices_ok; } } av_log(avctx, AV_LOG_ERROR, \"Unsupported number %d of slices requested, please specify a \" \"supported number with -slices (ex:4,6,9,12,16, ...)\\n\", avctx->slices); return AVERROR(ENOSYS); slices_ok: if ((ret = write_extradata(s)) < 0) return ret; } if ((ret = ff_ffv1_init_slice_contexts(s)) < 0) return ret; s->slice_count = s->max_slice_count; if ((ret = ff_ffv1_init_slices_state(s)) < 0) return ret; #define STATS_OUT_SIZE 1024 * 1024 * 6 if (avctx->flags & AV_CODEC_FLAG_PASS1) { avctx->stats_out = av_mallocz(STATS_OUT_SIZE); if (!avctx->stats_out) return AVERROR(ENOMEM); for (i = 0; i < s->quant_table_count; i++) for (j = 0; j < s->max_slice_count; j++) { FFV1Context *sf = s->slice_context[j]; av_assert0(!sf->rc_stat2[i]); sf->rc_stat2[i] = av_mallocz(s->context_count[i] * sizeof(*sf->rc_stat2[i])); if (!sf->rc_stat2[i]) return AVERROR(ENOMEM); } } return 0; }"} {"target": 0, "idx": 7854, "func": "static int hls_slice_data_wpp(HEVCContext *s, const HEVCNAL *nal) { const uint8_t *data = nal->data; int length = nal->size; HEVCLocalContext *lc = s->HEVClc; int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int)); int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int)); int64_t offset; int startheader, cmpt = 0; int i, j, res = 0; if (!ret || !arg) { av_free(ret); av_free(arg); return AVERROR(ENOMEM); } if (!s->sList[1]) { ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1); for (i = 1; i < s->threads_number; i++) { s->sList[i] = av_malloc(sizeof(HEVCContext)); memcpy(s->sList[i], s, sizeof(HEVCContext)); s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext)); s->sList[i]->HEVClc = s->HEVClcList[i]; } } offset = (lc->gb.index >> 3); for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) { if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) { startheader--; cmpt++; } } for (i = 1; i < s->sh.num_entry_point_offsets; i++) { offset += (s->sh.entry_point_offset[i - 1] - cmpt); for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) { if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) { startheader--; cmpt++; } } s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt; s->sh.offset[i - 1] = offset; } if (s->sh.num_entry_point_offsets != 0) { offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt; if (length < offset) { av_log(s->avctx, AV_LOG_ERROR, \"entry_point_offset table is corrupted\\n\"); res = AVERROR_INVALIDDATA; goto error; } s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset; s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset; } s->data = data; for (i = 1; i < s->threads_number; i++) { s->sList[i]->HEVClc->first_qp_group = 1; s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y; memcpy(s->sList[i], s, sizeof(HEVCContext)); 
s->sList[i]->HEVClc = s->HEVClcList[i]; } avpriv_atomic_int_set(&s->wpp_err, 0); ff_reset_entries(s->avctx); for (i = 0; i <= s->sh.num_entry_point_offsets; i++) { arg[i] = i; ret[i] = 0; } if (s->ps.pps->entropy_coding_sync_enabled_flag) s->avctx->execute2(s->avctx, hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1); for (i = 0; i <= s->sh.num_entry_point_offsets; i++) res += ret[i]; error: av_free(ret); av_free(arg); return res; }"} {"target": 0, "idx": 7866, "func": "static void pxa2xx_lcdc_dma0_redraw_rot0(PXA2xxLCDState *s, hwaddr addr, int *miny, int *maxy) { DisplaySurface *surface = qemu_console_surface(s->con); int src_width, dest_width; drawfn fn = NULL; if (s->dest_width) fn = s->line_fn[s->transp][s->bpp]; if (!fn) return; src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */ if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) src_width *= 3; else if (s->bpp > pxa_lcdc_16bpp) src_width *= 4; else if (s->bpp > pxa_lcdc_8bpp) src_width *= 2; dest_width = s->xres * s->dest_width; *miny = 0; framebuffer_update_display(surface, s->sysmem, addr, s->xres, s->yres, src_width, dest_width, s->dest_width, s->invalidated, fn, s->dma_ch[0].palette, miny, maxy); }"} {"target": 0, "idx": 7869, "func": "build_dmar_q35(GArray *table_data, GArray *linker) { int dmar_start = table_data->len; AcpiTableDmar *dmar; AcpiDmarHardwareUnit *drhd; dmar = acpi_data_push(table_data, sizeof(*dmar)); dmar->host_address_width = VTD_HOST_ADDRESS_WIDTH - 1; dmar->flags = 0; /* No intr_remap for now */ /* DMAR Remapping Hardware Unit Definition structure */ drhd = acpi_data_push(table_data, sizeof(*drhd)); drhd->type = cpu_to_le16(ACPI_DMAR_TYPE_HARDWARE_UNIT); drhd->length = cpu_to_le16(sizeof(*drhd)); /* No device scope now */ drhd->flags = ACPI_DMAR_INCLUDE_PCI_ALL; drhd->pci_segment = cpu_to_le16(0); drhd->address = cpu_to_le64(Q35_HOST_BRIDGE_IOMMU_ADDR); build_header(linker, table_data, (void *)(table_data->data + dmar_start), \"DMAR\", table_data->len - dmar_start, 1, NULL); }"} {"target": 0, "idx": 7873, "func": "static GtkWidget *gd_create_menu_machine(GtkDisplayState *s, GtkAccelGroup *accel_group) { GtkWidget *machine_menu; GtkWidget *separator; machine_menu = gtk_menu_new(); gtk_menu_set_accel_group(GTK_MENU(machine_menu), accel_group); s->pause_item = gtk_check_menu_item_new_with_mnemonic(_(\"_Pause\")); gtk_menu_shell_append(GTK_MENU_SHELL(machine_menu), s->pause_item); separator = gtk_separator_menu_item_new(); gtk_menu_shell_append(GTK_MENU_SHELL(machine_menu), separator); s->reset_item = gtk_menu_item_new_with_mnemonic(_(\"_Reset\")); gtk_menu_shell_append(GTK_MENU_SHELL(machine_menu), s->reset_item); s->powerdown_item = gtk_menu_item_new_with_mnemonic(_(\"Power _Down\")); gtk_menu_shell_append(GTK_MENU_SHELL(machine_menu), s->powerdown_item); separator = gtk_separator_menu_item_new(); gtk_menu_shell_append(GTK_MENU_SHELL(machine_menu), separator); s->quit_item = gtk_menu_item_new_with_mnemonic(_(\"_Quit\")); gtk_menu_item_set_accel_path(GTK_MENU_ITEM(s->quit_item), \"/Machine/Quit\"); gtk_accel_map_add_entry(\"/Machine/Quit\", GDK_KEY_q, GDK_CONTROL_MASK); gtk_menu_shell_append(GTK_MENU_SHELL(machine_menu), s->quit_item); return machine_menu; }"} {"target": 0, "idx": 7874, "func": "static void mv88w8618_flashcfg_write(void *opaque, target_phys_addr_t offset, uint64_t value, unsigned size) { mv88w8618_flashcfg_state *s = opaque; switch (offset) { case MP_FLASHCFG_CFGR0: s->cfgr0 = value; break; } }"} {"target": 1, "idx": 7881, "func": "static int 
qemu_rdma_get_fd(void *opaque) { QEMUFileRDMA *rfile = opaque; RDMAContext *rdma = rfile->rdma; return rdma->comp_channel->fd; }"} {"target": 1, "idx": 7882, "func": "static void dump_ppc_insns (CPUPPCState *env) { opc_handler_t **table, *handler; const char *p, *q; uint8_t opc1, opc2, opc3; printf(\"Instructions set:\\n\"); /* opc1 is 6 bits long */ for (opc1 = 0x00; opc1 < PPC_CPU_OPCODES_LEN; opc1++) { table = env->opcodes; handler = table[opc1]; if (is_indirect_opcode(handler)) { /* opc2 is 5 bits long */ for (opc2 = 0; opc2 < PPC_CPU_INDIRECT_OPCODES_LEN; opc2++) { table = env->opcodes; handler = env->opcodes[opc1]; table = ind_table(handler); handler = table[opc2]; if (is_indirect_opcode(handler)) { table = ind_table(handler); /* opc3 is 5 bits long */ for (opc3 = 0; opc3 < PPC_CPU_INDIRECT_OPCODES_LEN; opc3++) { handler = table[opc3]; if (handler->handler != &gen_invalid) { /* Special hack to properly dump SPE insns */ p = strchr(handler->oname, '_'); if (p == NULL) { printf(\"INSN: %02x %02x %02x (%02d %04d) : \" \"%s\\n\", opc1, opc2, opc3, opc1, (opc3 << 5) | opc2, handler->oname); } else { q = \"speundef\"; if ((p - handler->oname) != strlen(q) || memcmp(handler->oname, q, strlen(q)) != 0) { /* First instruction */ printf(\"INSN: %02x %02x %02x (%02d %04d) : \" \"%.*s\\n\", opc1, opc2 << 1, opc3, opc1, (opc3 << 6) | (opc2 << 1), (int)(p - handler->oname), handler->oname); } if (strcmp(p + 1, q) != 0) { /* Second instruction */ printf(\"INSN: %02x %02x %02x (%02d %04d) : \" \"%s\\n\", opc1, (opc2 << 1) | 1, opc3, opc1, (opc3 << 6) | (opc2 << 1) | 1, p + 1); } } } } } else { if (handler->handler != &gen_invalid) { printf(\"INSN: %02x %02x -- (%02d %04d) : %s\\n\", opc1, opc2, opc1, opc2, handler->oname); } } } } else { if (handler->handler != &gen_invalid) { printf(\"INSN: %02x -- -- (%02d ----) : %s\\n\", opc1, opc1, handler->oname); } } } }"} {"target": 1, "idx": 7885, "func": "yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum PixelFormat target, int hasAlpha) { const int16_t *buf0 = buf[0], *buf1 = buf[1], *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], *vbuf0 = vbuf[0], *vbuf1 = vbuf[1], *abuf0 = abuf[0], *abuf1 = abuf[1]; int yalpha1 = 4095 - yalpha; int uvalpha1 = 4095 - uvalpha; int i; for (i = 0; i < (dstW >> 1); i++) { int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19; int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19; int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19; int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19; int A1, A2; const void *r = c->table_rV[V], *g = (c->table_gU[U] + c->table_gV[V]), *b = c->table_bU[U]; if (hasAlpha) { A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19; A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19; } yuv2rgb_write(dest, i, Y1, Y2, U, V, hasAlpha ? A1 : 0, hasAlpha ? 
A2 : 0, r, g, b, y, target, hasAlpha); } }"} {"target": 1, "idx": 7892, "func": "static int minimum_frame_bits(VC2EncContext *s) { int slice_x, slice_y, bits = 0; s->size_scaler = 64; for (slice_y = 0; slice_y < s->num_y; slice_y++) { for (slice_x = 0; slice_x < s->num_x; slice_x++) { bits += count_hq_slice(s, NULL, slice_x, slice_y, s->q_ceil); } } return bits; }"} {"target": 1, "idx": 7893, "func": "static void fd_coroutine_enter(void *opaque) { FDYieldUntilData *data = opaque; qemu_set_fd_handler(data->fd, NULL, NULL, NULL); qemu_coroutine_enter(data->co, NULL); }"} {"target": 1, "idx": 7916, "func": "static void qcow_aio_write_cb(void *opaque, int ret) { QCowAIOCB *acb = opaque; BlockDriverState *bs = acb->common.bs; BDRVQcowState *s = bs->opaque; int index_in_cluster; uint64_t cluster_offset; const uint8_t *src_buf; int n_end; acb->hd_aiocb = NULL; if (ret < 0) { fail: acb->common.cb(acb->common.opaque, ret); qemu_aio_release(acb); return; } acb->nb_sectors -= acb->n; acb->sector_num += acb->n; acb->buf += acb->n * 512; if (acb->nb_sectors == 0) { /* request completed */ acb->common.cb(acb->common.opaque, 0); qemu_aio_release(acb); return; } index_in_cluster = acb->sector_num & (s->cluster_sectors - 1); n_end = index_in_cluster + acb->nb_sectors; if (s->crypt_method && n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors) n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors; cluster_offset = alloc_cluster_offset(bs, acb->sector_num << 9, index_in_cluster, n_end, &acb->n); if (!cluster_offset || (cluster_offset & 511) != 0) { ret = -EIO; goto fail; } if (s->crypt_method) { if (!acb->cluster_data) { acb->cluster_data = qemu_mallocz(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); if (!acb->cluster_data) { ret = -ENOMEM; goto fail; } } encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf, acb->n, 1, &s->aes_encrypt_key); src_buf = acb->cluster_data; } else { src_buf = acb->buf; } acb->hd_aiocb = bdrv_aio_write(s->hd, (cluster_offset >> 9) + index_in_cluster, src_buf, acb->n, qcow_aio_write_cb, acb); if (acb->hd_aiocb == NULL) goto fail; }"} {"target": 0, "idx": 7925, "func": "static int opt_input_file(const char *opt, const char *filename) { AVFormatContext *ic; AVInputFormat *file_iformat = NULL; int err, i, ret, rfps, rfps_base; int64_t timestamp; uint8_t buf[128]; AVDictionary **opts; int orig_nb_streams; // number of streams before avformat_find_stream_info if (last_asked_format) { if (!(file_iformat = av_find_input_format(last_asked_format))) { fprintf(stderr, \"Unknown input format: '%s'\\n\", last_asked_format); ffmpeg_exit(1); } last_asked_format = NULL; } if (!strcmp(filename, \"-\")) filename = \"pipe:\"; using_stdin |= !strncmp(filename, \"pipe:\", 5) || !strcmp(filename, \"/dev/stdin\"); /* get default parameters from command line */ ic = avformat_alloc_context(); if (!ic) { print_error(filename, AVERROR(ENOMEM)); ffmpeg_exit(1); } if (audio_sample_rate) { snprintf(buf, sizeof(buf), \"%d\", audio_sample_rate); av_dict_set(&format_opts, \"sample_rate\", buf, 0); } if (audio_channels) { snprintf(buf, sizeof(buf), \"%d\", audio_channels); av_dict_set(&format_opts, \"channels\", buf, 0); } if (frame_rate.num) { snprintf(buf, sizeof(buf), \"%d/%d\", frame_rate.num, frame_rate.den); av_dict_set(&format_opts, \"framerate\", buf, 0); } if (frame_width && frame_height) { snprintf(buf, sizeof(buf), \"%dx%d\", frame_width, frame_height); av_dict_set(&format_opts, \"video_size\", buf, 0); } if (frame_pix_fmt != PIX_FMT_NONE) av_dict_set(&format_opts, \"pixel_format\", 
av_get_pix_fmt_name(frame_pix_fmt), 0); ic->video_codec_id = find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0); ic->audio_codec_id = find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0); ic->subtitle_codec_id= find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0); ic->flags |= AVFMT_FLAG_NONBLOCK; /* open the input file with generic libav function */ err = avformat_open_input(&ic, filename, file_iformat, &format_opts); if (err < 0) { print_error(filename, err); ffmpeg_exit(1); } assert_avoptions(format_opts); if(opt_programid) { int i, j; int found=0; for(i=0; inb_streams; i++){ ic->streams[i]->discard= AVDISCARD_ALL; } for(i=0; inb_programs; i++){ AVProgram *p= ic->programs[i]; if(p->id != opt_programid){ p->discard = AVDISCARD_ALL; }else{ found=1; for(j=0; jnb_stream_indexes; j++){ ic->streams[p->stream_index[j]]->discard= AVDISCARD_DEFAULT; } } } if(!found){ fprintf(stderr, \"Specified program id not found\\n\"); ffmpeg_exit(1); } opt_programid=0; } if (loop_input) { av_log(NULL, AV_LOG_WARNING, \"-loop_input is deprecated, use -loop 1\\n\"); ic->loop_input = loop_input; } /* Set AVCodecContext options for avformat_find_stream_info */ opts = setup_find_stream_info_opts(ic, codec_opts); orig_nb_streams = ic->nb_streams; /* If not enough info to get the stream parameters, we decode the first frames to get it. (used in mpeg case for example) */ ret = avformat_find_stream_info(ic, opts); if (ret < 0 && verbose >= 0) { fprintf(stderr, \"%s: could not find codec parameters\\n\", filename); av_close_input_file(ic); ffmpeg_exit(1); } timestamp = start_time; /* add the stream start time */ if (ic->start_time != AV_NOPTS_VALUE) timestamp += ic->start_time; /* if seeking requested, we execute it */ if (start_time != 0) { ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD); if (ret < 0) { fprintf(stderr, \"%s: could not seek to position %0.3f\\n\", filename, (double)timestamp / AV_TIME_BASE); } /* reset seek info */ start_time = 0; } /* update the current parameters so that they match the one of the input stream */ for(i=0;inb_streams;i++) { AVStream *st = ic->streams[i]; AVCodecContext *dec = st->codec; InputStream *ist; dec->thread_count = thread_count; input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1); ist = &input_streams[nb_input_streams - 1]; ist->st = st; ist->file_index = nb_input_files; ist->discard = 1; ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, 0); if (i < nb_ts_scale) ist->ts_scale = ts_scale[i]; switch (dec->codec_type) { case AVMEDIA_TYPE_AUDIO: ist->dec = avcodec_find_decoder_by_name(audio_codec_name); if(!ist->dec) ist->dec = avcodec_find_decoder(dec->codec_id); if(audio_disable) st->discard= AVDISCARD_ALL; break; case AVMEDIA_TYPE_VIDEO: ist->dec= avcodec_find_decoder_by_name(video_codec_name); if(!ist->dec) ist->dec = avcodec_find_decoder(dec->codec_id); rfps = ic->streams[i]->r_frame_rate.num; rfps_base = ic->streams[i]->r_frame_rate.den; if (dec->lowres) { dec->flags |= CODEC_FLAG_EMU_EDGE; } if(me_threshold) dec->debug |= FF_DEBUG_MV; if (dec->time_base.den != rfps*dec->ticks_per_frame || dec->time_base.num != rfps_base) { if (verbose >= 0) fprintf(stderr,\"\\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\\n\", i, (float)dec->time_base.den / dec->time_base.num, dec->time_base.den, dec->time_base.num, (float)rfps / rfps_base, rfps, rfps_base); } if(video_disable) st->discard= AVDISCARD_ALL; else 
if(video_discard) st->discard= video_discard; break; case AVMEDIA_TYPE_DATA: break; case AVMEDIA_TYPE_SUBTITLE: ist->dec = avcodec_find_decoder_by_name(subtitle_codec_name); if(!ist->dec) ist->dec = avcodec_find_decoder(dec->codec_id); if(subtitle_disable) st->discard = AVDISCARD_ALL; break; case AVMEDIA_TYPE_ATTACHMENT: case AVMEDIA_TYPE_UNKNOWN: break; default: abort(); } } /* dump the file content */ if (verbose >= 0) av_dump_format(ic, nb_input_files, filename, 0); input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1); input_files[nb_input_files - 1].ctx = ic; input_files[nb_input_files - 1].ist_index = nb_input_streams - ic->nb_streams; input_files[nb_input_files - 1].ts_offset = input_ts_offset - (copy_ts ? 0 : timestamp); top_field_first = -1; frame_rate = (AVRational){0, 0}; frame_pix_fmt = PIX_FMT_NONE; frame_height = 0; frame_width = 0; audio_sample_rate = 0; audio_channels = 0; audio_sample_fmt = AV_SAMPLE_FMT_NONE; av_freep(&ts_scale); nb_ts_scale = 0; for (i = 0; i < orig_nb_streams; i++) av_dict_free(&opts[i]); av_freep(&opts); av_freep(&video_codec_name); av_freep(&audio_codec_name); av_freep(&subtitle_codec_name); uninit_opts(); init_opts(); return 0; }"} {"target": 0, "idx": 7936, "func": "static int usb_hub_handle_control(USBDevice *dev, USBPacket *p, int request, int value, int index, int length, uint8_t *data) { USBHubState *s = (USBHubState *)dev; int ret; ret = usb_desc_handle_control(dev, p, request, value, index, length, data); if (ret >= 0) { return ret; } switch(request) { case EndpointOutRequest | USB_REQ_CLEAR_FEATURE: if (value == 0 && index != 0x81) { /* clear ep halt */ goto fail; } ret = 0; break; case DeviceRequest | USB_REQ_GET_INTERFACE: data[0] = 0; ret = 1; break; case DeviceOutRequest | USB_REQ_SET_INTERFACE: ret = 0; break; /* usb specific requests */ case GetHubStatus: data[0] = 0; data[1] = 0; data[2] = 0; data[3] = 0; ret = 4; break; case GetPortStatus: { unsigned int n = index - 1; USBHubPort *port; if (n >= NUM_PORTS) { goto fail; } port = &s->ports[n]; data[0] = port->wPortStatus; data[1] = port->wPortStatus >> 8; data[2] = port->wPortChange; data[3] = port->wPortChange >> 8; ret = 4; } break; case SetHubFeature: case ClearHubFeature: if (value == 0 || value == 1) { } else { goto fail; } ret = 0; break; case SetPortFeature: { unsigned int n = index - 1; USBHubPort *port; USBDevice *dev; if (n >= NUM_PORTS) { goto fail; } port = &s->ports[n]; dev = port->port.dev; switch(value) { case PORT_SUSPEND: port->wPortStatus |= PORT_STAT_SUSPEND; break; case PORT_RESET: if (dev) { usb_send_msg(dev, USB_MSG_RESET); port->wPortChange |= PORT_STAT_C_RESET; /* set enable bit */ port->wPortStatus |= PORT_STAT_ENABLE; } break; case PORT_POWER: break; default: goto fail; } ret = 0; } break; case ClearPortFeature: { unsigned int n = index - 1; USBHubPort *port; if (n >= NUM_PORTS) { goto fail; } port = &s->ports[n]; switch(value) { case PORT_ENABLE: port->wPortStatus &= ~PORT_STAT_ENABLE; break; case PORT_C_ENABLE: port->wPortChange &= ~PORT_STAT_C_ENABLE; break; case PORT_SUSPEND: port->wPortStatus &= ~PORT_STAT_SUSPEND; break; case PORT_C_SUSPEND: port->wPortChange &= ~PORT_STAT_C_SUSPEND; break; case PORT_C_CONNECTION: port->wPortChange &= ~PORT_STAT_C_CONNECTION; break; case PORT_C_OVERCURRENT: port->wPortChange &= ~PORT_STAT_C_OVERCURRENT; break; case PORT_C_RESET: port->wPortChange &= ~PORT_STAT_C_RESET; break; default: goto fail; } ret = 0; } break; case GetHubDescriptor: { unsigned int n, limit, var_hub_size = 0; 
memcpy(data, qemu_hub_hub_descriptor, sizeof(qemu_hub_hub_descriptor)); data[2] = NUM_PORTS; /* fill DeviceRemovable bits */ limit = ((NUM_PORTS + 1 + 7) / 8) + 7; for (n = 7; n < limit; n++) { data[n] = 0x00; var_hub_size++; } /* fill PortPwrCtrlMask bits */ limit = limit + ((NUM_PORTS + 7) / 8); for (;n < limit; n++) { data[n] = 0xff; var_hub_size++; } ret = sizeof(qemu_hub_hub_descriptor) + var_hub_size; data[0] = ret; break; } default: fail: ret = USB_RET_STALL; break; } return ret; }"} {"target": 0, "idx": 7967, "func": "static int mxf_write_packet(AVFormatContext *s, AVPacket *pkt) { MXFContext *mxf = s->priv_data; AVIOContext *pb = s->pb; AVStream *st = s->streams[pkt->stream_index]; MXFStreamContext *sc = st->priv_data; MXFIndexEntry ie = {0}; if (!mxf->edit_unit_byte_count && !(mxf->edit_units_count % EDIT_UNITS_PER_BODY)) { mxf->index_entries = av_realloc(mxf->index_entries, (mxf->edit_units_count + EDIT_UNITS_PER_BODY)*sizeof(*mxf->index_entries)); if (!mxf->index_entries) { av_log(s, AV_LOG_ERROR, \"could not allocate index entries\\n\"); return -1; } } if (st->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO) { if (!mxf_parse_mpeg2_frame(s, st, pkt, &ie)) { av_log(s, AV_LOG_ERROR, \"could not get mpeg2 profile and level\\n\"); return -1; } } if (!mxf->header_written) { if (mxf->edit_unit_byte_count) { mxf_write_partition(s, 1, 2, header_open_partition_key, 1); mxf_write_klv_fill(s); mxf_write_index_table_segment(s); } else { mxf_write_partition(s, 0, 0, header_open_partition_key, 1); } mxf->header_written = 1; } if (st->index == 0) { if (!mxf->edit_unit_byte_count && (!mxf->edit_units_count || mxf->edit_units_count > EDIT_UNITS_PER_BODY) && !(ie.flags & 0x33)) { // I frame, Gop start mxf_write_klv_fill(s); mxf_write_partition(s, 1, 2, body_partition_key, 0); mxf_write_klv_fill(s); mxf_write_index_table_segment(s); } mxf_write_klv_fill(s); mxf_write_system_item(s); if (!mxf->edit_unit_byte_count) { mxf->index_entries[mxf->edit_units_count].offset = mxf->body_offset; mxf->index_entries[mxf->edit_units_count].flags = ie.flags; mxf->index_entries[mxf->edit_units_count].temporal_ref = ie.temporal_ref; mxf->body_offset += KAG_SIZE; // size of system element } mxf->edit_units_count++; } else if (!mxf->edit_unit_byte_count && st->index == 1) { mxf->index_entries[mxf->edit_units_count-1].slice_offset = mxf->body_offset - mxf->index_entries[mxf->edit_units_count-1].offset; } mxf_write_klv_fill(s); avio_write(pb, sc->track_essence_element_key, 16); // write key if (s->oformat == &ff_mxf_d10_muxer) { if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) mxf_write_d10_video_packet(s, st, pkt); else mxf_write_d10_audio_packet(s, st, pkt); } else { klv_encode_ber4_length(pb, pkt->size); // write length avio_write(pb, pkt->data, pkt->size); mxf->body_offset += 16+4+pkt->size + klv_fill_size(16+4+pkt->size); } avio_flush(pb); return 0; }"} {"target": 0, "idx": 7972, "func": "int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int access_type, int mmu_idx, int is_softmmu) { uint32_t phys_addr; target_ulong page_size; int prot; int ret, is_user; is_user = mmu_idx == MMU_USER_IDX; ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot, &page_size); if (ret == 0) { /* Map a single [sub]page. 
*/ phys_addr &= ~(uint32_t)0x3ff; address &= ~(uint32_t)0x3ff; tlb_set_page (env, address, phys_addr, prot | PAGE_EXEC, mmu_idx, page_size); return 0; } if (access_type == 2) { env->cp15.c5_insn = ret; env->cp15.c6_insn = address; env->exception_index = EXCP_PREFETCH_ABORT; } else { env->cp15.c5_data = ret; if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) env->cp15.c5_data |= (1 << 11); env->cp15.c6_data = address; env->exception_index = EXCP_DATA_ABORT; } return 1; }"} {"target": 0, "idx": 7977, "func": "static inline int get_chroma_qp(H264Context *h, int t, int qscale){ return h->pps.chroma_qp_table[t][qscale]; }"} {"target": 0, "idx": 7980, "func": "static int binkb_decode_plane(BinkContext *c, AVFrame *frame, BitstreamContext *bc, int plane_idx, int is_key, int is_chroma) { int blk, ret; int i, j, bx, by; uint8_t *dst, *ref, *ref_start, *ref_end; int v, col[2]; const uint8_t *scan; int xoff, yoff; LOCAL_ALIGNED_16(int16_t, block, [64]); LOCAL_ALIGNED_16(int32_t, dctblock, [64]); int coordmap[64]; int ybias = is_key ? -15 : 0; int qp; const int stride = frame->linesize[plane_idx]; int bw = is_chroma ? (c->avctx->width + 15) >> 4 : (c->avctx->width + 7) >> 3; int bh = is_chroma ? (c->avctx->height + 15) >> 4 : (c->avctx->height + 7) >> 3; binkb_init_bundles(c); ref_start = frame->data[plane_idx]; ref_end = frame->data[plane_idx] + (bh * frame->linesize[plane_idx] + bw) * 8; for (i = 0; i < 64; i++) coordmap[i] = (i & 7) + (i >> 3) * stride; for (by = 0; by < bh; by++) { for (i = 0; i < BINKB_NB_SRC; i++) { if ((ret = binkb_read_bundle(c, bc, i)) < 0) return ret; } dst = frame->data[plane_idx] + 8*by*stride; for (bx = 0; bx < bw; bx++, dst += 8) { blk = binkb_get_value(c, BINKB_SRC_BLOCK_TYPES); switch (blk) { case 0: break; case 1: scan = bink_patterns[bitstream_read(bc, 4)]; i = 0; do { int mode = bitstream_read_bit(bc); int run = bitstream_read(bc, binkb_runbits[i]) + 1; i += run; if (i > 64) { av_log(c->avctx, AV_LOG_ERROR, \"Run went out of bounds\\n\"); return AVERROR_INVALIDDATA; } if (mode) { v = binkb_get_value(c, BINKB_SRC_COLORS); for (j = 0; j < run; j++) dst[coordmap[*scan++]] = v; } else { for (j = 0; j < run; j++) dst[coordmap[*scan++]] = binkb_get_value(c, BINKB_SRC_COLORS); } } while (i < 63); if (i == 63) dst[coordmap[*scan++]] = binkb_get_value(c, BINKB_SRC_COLORS); break; case 2: memset(dctblock, 0, sizeof(*dctblock) * 64); dctblock[0] = binkb_get_value(c, BINKB_SRC_INTRA_DC); qp = binkb_get_value(c, BINKB_SRC_INTRA_Q); read_dct_coeffs(bc, dctblock, bink_scan, binkb_intra_quant, qp); c->binkdsp.idct_put(dst, stride, dctblock); break; case 3: xoff = binkb_get_value(c, BINKB_SRC_X_OFF); yoff = binkb_get_value(c, BINKB_SRC_Y_OFF) + ybias; ref = dst + xoff + yoff * stride; if (ref < ref_start || ref + 8*stride > ref_end) { av_log(c->avctx, AV_LOG_WARNING, \"Reference block is out of bounds\\n\"); } else if (ref + 8*stride < dst || ref >= dst + 8*stride) { c->hdsp.put_pixels_tab[1][0](dst, ref, stride, 8); } else { put_pixels8x8_overlapped(dst, ref, stride); } c->bdsp.clear_block(block); v = binkb_get_value(c, BINKB_SRC_INTER_COEFS); read_residue(bc, block, v); c->binkdsp.add_pixels8(dst, block, stride); break; case 4: xoff = binkb_get_value(c, BINKB_SRC_X_OFF); yoff = binkb_get_value(c, BINKB_SRC_Y_OFF) + ybias; ref = dst + xoff + yoff * stride; if (ref < ref_start || ref + 8 * stride > ref_end) { av_log(c->avctx, AV_LOG_WARNING, \"Reference block is out of bounds\\n\"); } else if (ref + 8*stride < dst || ref >= dst + 8*stride) { c->hdsp.put_pixels_tab[1][0](dst, 
ref, stride, 8); } else { put_pixels8x8_overlapped(dst, ref, stride); } memset(dctblock, 0, sizeof(*dctblock) * 64); dctblock[0] = binkb_get_value(c, BINKB_SRC_INTER_DC); qp = binkb_get_value(c, BINKB_SRC_INTER_Q); read_dct_coeffs(bc, dctblock, bink_scan, binkb_inter_quant, qp); c->binkdsp.idct_add(dst, stride, dctblock); break; case 5: v = binkb_get_value(c, BINKB_SRC_COLORS); c->bdsp.fill_block_tab[1](dst, v, stride, 8); break; case 6: for (i = 0; i < 2; i++) col[i] = binkb_get_value(c, BINKB_SRC_COLORS); for (i = 0; i < 8; i++) { v = binkb_get_value(c, BINKB_SRC_PATTERN); for (j = 0; j < 8; j++, v >>= 1) dst[i*stride + j] = col[v & 1]; } break; case 7: xoff = binkb_get_value(c, BINKB_SRC_X_OFF); yoff = binkb_get_value(c, BINKB_SRC_Y_OFF) + ybias; ref = dst + xoff + yoff * stride; if (ref < ref_start || ref + 8 * stride > ref_end) { av_log(c->avctx, AV_LOG_WARNING, \"Reference block is out of bounds\\n\"); } else if (ref + 8*stride < dst || ref >= dst + 8*stride) { c->hdsp.put_pixels_tab[1][0](dst, ref, stride, 8); } else { put_pixels8x8_overlapped(dst, ref, stride); } break; case 8: for (i = 0; i < 8; i++) memcpy(dst + i*stride, c->bundle[BINKB_SRC_COLORS].cur_ptr + i*8, 8); c->bundle[BINKB_SRC_COLORS].cur_ptr += 64; break; default: av_log(c->avctx, AV_LOG_ERROR, \"Unknown block type %d\\n\", blk); return AVERROR_INVALIDDATA; } } } if (bitstream_tell(bc) & 0x1F) // next plane data starts at 32-bit boundary bitstream_skip(bc, 32 - (bitstream_tell(bc) & 0x1F)); return 0; }"} {"target": 1, "idx": 7982, "func": "static int get_cpsr(QEMUFile *f, void *opaque, size_t size) { ARMCPU *cpu = opaque; CPUARMState *env = &cpu->env; uint32_t val = qemu_get_be32(f); env->aarch64 = ((val & PSTATE_nRW) == 0); if (is_a64(env)) { pstate_write(env, val); return 0; } /* Avoid mode switch when restoring CPSR */ env->uncached_cpsr = val & CPSR_M; cpsr_write(env, val, 0xffffffff, CPSRWriteRaw); return 0; }"} {"target": 0, "idx": 7992, "func": "static void term_handle_byte(int ch) { switch(term_esc_state) { case IS_NORM: switch(ch) { case 1: term_bol(); break; case 4: term_delete_char(); break; case 5: term_eol(); break; case 9: term_completion(); break; case 10: case 13: term_cmd_buf[term_cmd_buf_size] = '\\0'; term_hist_add(term_cmd_buf); term_printf(\"\\n\"); term_handle_command(term_cmd_buf); term_show_prompt(); break; case 27: term_esc_state = IS_ESC; break; case 127: case 8: term_backspace(); break; case 155: term_esc_state = IS_CSI; break; default: if (ch >= 32) { term_insert_char(ch); } break; } break; case IS_ESC: if (ch == '[') { term_esc_state = IS_CSI; term_esc_param = 0; } else { term_esc_state = IS_NORM; } break; case IS_CSI: switch(ch) { case 'A': case 'F': term_up_char(); break; case 'B': case 'E': term_down_char(); break; case 'D': term_backward_char(); break; case 'C': term_forward_char(); break; case '0' ... 
'9': term_esc_param = term_esc_param * 10 + (ch - '0'); goto the_end; case '~': switch(term_esc_param) { case 1: term_bol(); break; case 3: term_delete_char(); break; case 4: term_eol(); break; } break; default: break; } term_esc_state = IS_NORM; the_end: break; } term_update(); }"} {"target": 0, "idx": 8010, "func": "struct omap_l4_s *omap_l4_init(target_phys_addr_t base, int ta_num) { struct omap_l4_s *bus = g_malloc0( sizeof(*bus) + ta_num * sizeof(*bus->ta)); bus->ta_num = ta_num; bus->base = base; #ifdef L4_MUX_HACK omap_l4_io_entries = 1; omap_l4_io_entry = g_malloc0(125 * sizeof(*omap_l4_io_entry)); omap_cpu_io_entry = cpu_register_io_memory(omap_l4_io_readfn, omap_l4_io_writefn, bus, DEVICE_NATIVE_ENDIAN); # define L4_PAGES (0xb4000 / TARGET_PAGE_SIZE) omap_l4_io_readb_fn = g_malloc0(sizeof(void *) * L4_PAGES); omap_l4_io_readh_fn = g_malloc0(sizeof(void *) * L4_PAGES); omap_l4_io_readw_fn = g_malloc0(sizeof(void *) * L4_PAGES); omap_l4_io_writeb_fn = g_malloc0(sizeof(void *) * L4_PAGES); omap_l4_io_writeh_fn = g_malloc0(sizeof(void *) * L4_PAGES); omap_l4_io_writew_fn = g_malloc0(sizeof(void *) * L4_PAGES); omap_l4_io_opaque = g_malloc0(sizeof(void *) * L4_PAGES); #endif return bus; }"} {"target": 0, "idx": 8018, "func": "static void qbus_list_bus(DeviceState *dev, char *dest, int len) { BusState *child; const char *sep = \" \"; int pos = 0; pos += snprintf(dest+pos, len-pos,\"child busses at \\\"%s\\\":\", dev->id ? dev->id : dev->info->name); LIST_FOREACH(child, &dev->child_bus, sibling) { pos += snprintf(dest+pos, len-pos, \"%s\\\"%s\\\"\", sep, child->name); sep = \", \"; } }"} {"target": 1, "idx": 8021, "func": "static int read_directory(BDRVVVFATState* s, int mapping_index) { mapping_t* mapping = array_get(&(s->mapping), mapping_index); direntry_t* direntry; const char* dirname = mapping->path; int first_cluster = mapping->begin; int parent_index = mapping->info.dir.parent_mapping_index; mapping_t* parent_mapping = (mapping_t*) (parent_index >= 0 ? array_get(&(s->mapping), parent_index) : NULL); int first_cluster_of_parent = parent_mapping ? parent_mapping->begin : -1; DIR* dir=opendir(dirname); struct dirent* entry; int i; assert(mapping->mode & MODE_DIRECTORY); if(!dir) { mapping->end = mapping->begin; return -1; } i = mapping->info.dir.first_dir_index = first_cluster == 0 ? 0 : s->directory.next; if (first_cluster != 0) { /* create the top entries of a subdirectory */ (void)create_short_and_long_name(s, i, \".\", 1); (void)create_short_and_long_name(s, i, \"..\", 1); } /* actually read the directory, and allocate the mappings */ while((entry=readdir(dir))) { unsigned int length=strlen(dirname)+2+strlen(entry->d_name); char* buffer; direntry_t* direntry; struct stat st; int is_dot=!strcmp(entry->d_name,\".\"); int is_dotdot=!strcmp(entry->d_name,\"..\"); if(first_cluster == 0 && (is_dotdot || is_dot)) continue; buffer = g_malloc(length); snprintf(buffer,length,\"%s/%s\",dirname,entry->d_name); if(stat(buffer,&st)<0) { g_free(buffer); continue; } /* create directory entry for this file */ if (!is_dot && !is_dotdot) { direntry = create_short_and_long_name(s, i, entry->d_name, 0); } else { direntry = array_get(&(s->directory), is_dot ? 
i : i + 1); } direntry->attributes=(S_ISDIR(st.st_mode)?0x10:0x20); direntry->reserved[0]=direntry->reserved[1]=0; direntry->ctime=fat_datetime(st.st_ctime,1); direntry->cdate=fat_datetime(st.st_ctime,0); direntry->adate=fat_datetime(st.st_atime,0); direntry->begin_hi=0; direntry->mtime=fat_datetime(st.st_mtime,1); direntry->mdate=fat_datetime(st.st_mtime,0); if(is_dotdot) set_begin_of_direntry(direntry, first_cluster_of_parent); else if(is_dot) set_begin_of_direntry(direntry, first_cluster); else direntry->begin=0; /* do that later */ if (st.st_size > 0x7fffffff) { fprintf(stderr, \"File %s is larger than 2GB\\n\", buffer); g_free(buffer); closedir(dir); return -2; } direntry->size=cpu_to_le32(S_ISDIR(st.st_mode)?0:st.st_size); /* create mapping for this file */ if(!is_dot && !is_dotdot && (S_ISDIR(st.st_mode) || st.st_size)) { s->current_mapping = array_get_next(&(s->mapping)); s->current_mapping->begin=0; s->current_mapping->end=st.st_size; /* * we get the direntry of the most recent direntry, which * contains the short name and all the relevant information. */ s->current_mapping->dir_index=s->directory.next-1; s->current_mapping->first_mapping_index = -1; if (S_ISDIR(st.st_mode)) { s->current_mapping->mode = MODE_DIRECTORY; s->current_mapping->info.dir.parent_mapping_index = mapping_index; } else { s->current_mapping->mode = MODE_UNDEFINED; s->current_mapping->info.file.offset = 0; } s->current_mapping->path=buffer; s->current_mapping->read_only = (st.st_mode & (S_IWUSR | S_IWGRP | S_IWOTH)) == 0; } else { g_free(buffer); } } closedir(dir); /* fill with zeroes up to the end of the cluster */ while(s->directory.next%(0x10*s->sectors_per_cluster)) { direntry_t* direntry=array_get_next(&(s->directory)); memset(direntry,0,sizeof(direntry_t)); } /* TODO: if there are more entries, bootsector has to be adjusted! */ #define ROOT_ENTRIES (0x02 * 0x10 * s->sectors_per_cluster) if (mapping_index == 0 && s->directory.next < ROOT_ENTRIES) { /* root directory */ int cur = s->directory.next; array_ensure_allocated(&(s->directory), ROOT_ENTRIES - 1); s->directory.next = ROOT_ENTRIES; memset(array_get(&(s->directory), cur), 0, (ROOT_ENTRIES - cur) * sizeof(direntry_t)); } /* re-get the mapping, since s->mapping was possibly realloc()ed */ mapping = array_get(&(s->mapping), mapping_index); first_cluster += (s->directory.next - mapping->info.dir.first_dir_index) * 0x20 / s->cluster_size; mapping->end = first_cluster; direntry = array_get(&(s->directory), mapping->dir_index); set_begin_of_direntry(direntry, mapping->begin); return 0; }"} {"target": 0, "idx": 8046, "func": "static int replace_int_data_in_filename(char *buf, int buf_size, const char *filename, char placeholder, int64_t number) { const char *p; char *q, buf1[20], c; int nd, len, addchar_count; int found_count = 0; q = buf; p = filename; for (;;) { c = *p; if (c == '\\0') break; if (c == '%' && *(p+1) == '%') // %% addchar_count = 2; else if (c == '%' && (av_isdigit(*(p+1)) || *(p+1) == placeholder)) { nd = 0; addchar_count = 1; while (av_isdigit(*(p + addchar_count))) { nd = nd * 10 + *(p + addchar_count) - '0'; addchar_count++; } if (*(p + addchar_count) == placeholder) { len = snprintf(buf1, sizeof(buf1), \"%0*\"PRId64, (number < 0) ? 
nd : nd++, number); if (len < 1) // returned error or empty buf1 goto fail; if ((q - buf + len) > buf_size - 1) goto fail; memcpy(q, buf1, len); q += len; p += (addchar_count + 1); addchar_count = 0; found_count++; } } else addchar_count = 1; while (addchar_count--) if ((q - buf) < buf_size - 1) *q++ = *p++; else goto fail; } *q = '\\0'; return found_count; fail: *q = '\\0'; return -1; }"} {"target": 0, "idx": 8049, "func": "static void cubieboard_init(QEMUMachineInitArgs *args) { CubieBoardState *s = g_new(CubieBoardState, 1); Error *err = NULL; s->a10 = AW_A10(object_new(TYPE_AW_A10)); object_property_set_bool(OBJECT(s->a10), true, \"realized\", &err); if (err != NULL) { error_report(\"Couldn't realize Allwinner A10: %s\\n\", error_get_pretty(err)); exit(1); } memory_region_init_ram(&s->sdram, NULL, \"cubieboard.ram\", args->ram_size); vmstate_register_ram_global(&s->sdram); memory_region_add_subregion(get_system_memory(), AW_A10_SDRAM_BASE, &s->sdram); cubieboard_binfo.ram_size = args->ram_size; cubieboard_binfo.kernel_filename = args->kernel_filename; cubieboard_binfo.kernel_cmdline = args->kernel_cmdline; arm_load_kernel(&s->a10->cpu, &cubieboard_binfo); }"} {"target": 0, "idx": 8071, "func": "void esp_init(target_phys_addr_t espaddr, int it_shift, ESPDMAMemoryReadWriteFunc dma_memory_read, ESPDMAMemoryReadWriteFunc dma_memory_write, void *dma_opaque, qemu_irq irq, qemu_irq *reset, qemu_irq *dma_enable) { DeviceState *dev; SysBusDevice *s; SysBusESPState *sysbus; ESPState *esp; dev = qdev_create(NULL, \"esp\"); sysbus = DO_UPCAST(SysBusESPState, busdev.qdev, dev); esp = &sysbus->esp; esp->dma_memory_read = dma_memory_read; esp->dma_memory_write = dma_memory_write; esp->dma_opaque = dma_opaque; sysbus->it_shift = it_shift; /* XXX for now until rc4030 has been changed to use DMA enable signal */ esp->dma_enabled = 1; qdev_init_nofail(dev); s = sysbus_from_qdev(dev); sysbus_connect_irq(s, 0, irq); sysbus_mmio_map(s, 0, espaddr); *reset = qdev_get_gpio_in(dev, 0); *dma_enable = qdev_get_gpio_in(dev, 1); }"} {"target": 0, "idx": 8085, "func": "static void apply_window_and_mdct(AVCodecContext *avctx, const AVFrame *frame) { WMACodecContext *s = avctx->priv_data; float **audio = (float **) frame->extended_data; int len = frame->nb_samples; int window_index = s->frame_len_bits - s->block_len_bits; FFTContext *mdct = &s->mdct_ctx[window_index]; int ch; const float *win = s->windows[window_index]; int window_len = 1 << s->block_len_bits; float n = 2.0 * 32768.0 / window_len; for (ch = 0; ch < avctx->channels; ch++) { memcpy(s->output, s->frame_out[ch], window_len * sizeof(*s->output)); s->fdsp->vector_fmul_scalar(s->frame_out[ch], audio[ch], n, len); s->fdsp->vector_fmul_reverse(&s->output[window_len], s->frame_out[ch], win, len); s->fdsp->vector_fmul(s->frame_out[ch], s->frame_out[ch], win, len); mdct->mdct_calc(mdct, s->coefs[ch], s->output); } }"} {"target": 1, "idx": 8103, "func": "static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1, const uint8_t *s2, int stride) { int i; vec_u8 perm1 = vec_lvsl(0, s1); vec_u8 perm2 = vec_lvsl(0, s2); const vec_u8 zero = (const vec_u8)vec_splat_u8(0); vec_s16 shorts1, shorts2; for (i = 0; i < 4; i++) { /* Read potentially unaligned pixels. * We're reading 16 pixels, and actually only want 8, * but we simply ignore the extras. */ vec_u8 pixl = vec_ld(0, s1); vec_u8 pixr = vec_ld(15, s1); vec_u8 bytes = vec_perm(pixl, pixr, perm1); // Convert the bytes into shorts. 
shorts1 = (vec_s16)vec_mergeh(zero, bytes); // Do the same for the second block of pixels. pixl = vec_ld(0, s2); pixr = vec_ld(15, s2); bytes = vec_perm(pixl, pixr, perm2); // Convert the bytes into shorts. shorts2 = (vec_s16)vec_mergeh(zero, bytes); // Do the subtraction. shorts1 = vec_sub(shorts1, shorts2); // Save the data to the block, we assume the block is 16-byte aligned. vec_st(shorts1, 0, (vec_s16 *)block); s1 += stride; s2 += stride; block += 8; /* The code below is a copy of the code above... * This is a manual unroll. */ /* Read potentially unaligned pixels. * We're reading 16 pixels, and actually only want 8, * but we simply ignore the extras. */ pixl = vec_ld(0, s1); pixr = vec_ld(15, s1); bytes = vec_perm(pixl, pixr, perm1); // Convert the bytes into shorts. shorts1 = (vec_s16)vec_mergeh(zero, bytes); // Do the same for the second block of pixels. pixl = vec_ld(0, s2); pixr = vec_ld(15, s2); bytes = vec_perm(pixl, pixr, perm2); // Convert the bytes into shorts. shorts2 = (vec_s16)vec_mergeh(zero, bytes); // Do the subtraction. shorts1 = vec_sub(shorts1, shorts2); // Save the data to the block, we assume the block is 16-byte aligned. vec_st(shorts1, 0, (vec_s16 *)block); s1 += stride; s2 += stride; block += 8; } }"} {"target": 1, "idx": 8105, "func": "int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f) { PerThreadContext *p = avctx->thread_opaque; int *progress, err; f->owner = avctx; ff_init_buffer_info(avctx, f); if (!(avctx->active_thread_type&FF_THREAD_FRAME)) { f->thread_opaque = NULL; return avctx->get_buffer(avctx, f); } if (p->state != STATE_SETTING_UP && (avctx->codec->update_thread_context || (!avctx->thread_safe_callbacks && avctx->get_buffer != avcodec_default_get_buffer))) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() cannot be called after ff_thread_finish_setup()\\n\"); return -1; } pthread_mutex_lock(&p->parent->buffer_mutex); f->thread_opaque = progress = allocate_progress(p); if (!progress) { pthread_mutex_unlock(&p->parent->buffer_mutex); return -1; } progress[0] = progress[1] = -1; if (avctx->thread_safe_callbacks || avctx->get_buffer == avcodec_default_get_buffer) { err = avctx->get_buffer(avctx, f); } else { p->requested_frame = f; p->state = STATE_GET_BUFFER; pthread_mutex_lock(&p->progress_mutex); pthread_cond_signal(&p->progress_cond); while (p->state != STATE_SETTING_UP) pthread_cond_wait(&p->progress_cond, &p->progress_mutex); err = p->result; pthread_mutex_unlock(&p->progress_mutex); if (!avctx->codec->update_thread_context) ff_thread_finish_setup(avctx); } pthread_mutex_unlock(&p->parent->buffer_mutex); return err; }"} {"target": 1, "idx": 8111, "func": "bool qio_task_propagate_error(QIOTask *task, Error **errp) { if (task->err) { error_propagate(errp, task->err); return true; } return false; }"} {"target": 0, "idx": 8112, "func": "static void hds_free(AVFormatContext *s) { HDSContext *c = s->priv_data; int i, j; if (!c->streams) return; for (i = 0; i < s->nb_streams; i++) { OutputStream *os = &c->streams[i]; if (os->out) avio_close(os->out); os->out = NULL; if (os->ctx && os->ctx_inited) av_write_trailer(os->ctx); if (os->ctx && os->ctx->pb) av_free(os->ctx->pb); if (os->ctx) avformat_free_context(os->ctx); av_free(os->metadata); for (j = 0; j < os->nb_extra_packets; j++) av_free(os->extra_packets[j]); for (j = 0; j < os->nb_fragments; j++) av_free(os->fragments[j]); av_free(os->fragments); } av_freep(&c->streams); }"} {"target": 1, "idx": 8124, "func": "yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const 
int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) { int hasAlpha = abuf[0] && abuf[1]; const int16_t *buf0 = buf[0], *buf1 = buf[1], *abuf0 = hasAlpha ? abuf[0] : NULL, *abuf1 = hasAlpha ? abuf[1] : NULL; int yalpha1 = 4096 - yalpha; int i; for (i = 0; i < dstW; i++) { int Y = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19; int A; Y = av_clip_uint8(Y); if (hasAlpha) { A = (abuf0[i * 2] * yalpha1 + abuf1[i * 2] * yalpha) >> 19; A = av_clip_uint8(A); } dest[i * 2 ] = Y; dest[i * 2 + 1] = hasAlpha ? A : 255; } }"} {"target": 1, "idx": 8130, "func": "static void device_unparent(Object *obj) { DeviceState *dev = DEVICE(obj); BusState *bus; if (dev->realized) { object_property_set_bool(obj, false, \"realized\", NULL); } while (dev->num_child_bus) { bus = QLIST_FIRST(&dev->child_bus); object_unparent(OBJECT(bus)); } if (dev->parent_bus) { bus_remove_child(dev->parent_bus, dev); object_unref(OBJECT(dev->parent_bus)); dev->parent_bus = NULL; } /* Only send event if the device had been completely realized */ if (dev->pending_deleted_event) { g_assert(dev->canonical_path); qapi_event_send_device_deleted(!!dev->id, dev->id, dev->canonical_path, &error_abort); g_free(dev->canonical_path); dev->canonical_path = NULL; } }"} {"target": 0, "idx": 8134, "func": "QBool *qbool_from_bool(bool value) { QBool *qb; qb = g_malloc(sizeof(*qb)); qb->value = value; QOBJECT_INIT(qb, &qbool_type); return qb; }"} {"target": 0, "idx": 8145, "func": "static void setup_rt_frame(int usig, struct emulated_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUState *env) { struct rt_sigframe *frame = get_sigframe(ka, env, sizeof(*frame)); int err = 0; #if 0 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) return 1; #endif __put_user_error(&frame->info, (target_ulong *)&frame->pinfo, err); __put_user_error(&frame->uc, (target_ulong *)&frame->puc, err); err |= copy_siginfo_to_user(&frame->info, info); /* Clear all the bits of the ucontext we don't use. */ err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); err |= setup_sigcontext(&frame->uc.uc_mcontext, /*&frame->fpstate,*/ env, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err == 0) err = setup_return(env, ka, &frame->retcode, frame, usig); if (err == 0) { /* * For realtime signals we must also set the second and third * arguments for the signal handler. * -- Peter Maydell 2000-12-06 */ env->regs[1] = (target_ulong)frame->pinfo; env->regs[2] = (target_ulong)frame->puc; } // return err; }"} {"target": 1, "idx": 8154, "func": "static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index, uint64_t *buid, hwaddr *pio, hwaddr *mmio32, hwaddr *mmio64, unsigned n_dma, uint32_t *liobns, Error **errp) { /* * New-style PHB window placement. * * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO * windows. * * Some guest kernels can't work with MMIO windows above 1<<46 * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB * * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the * 1TiB 64-bit MMIO windows for each PHB. 
*/ const uint64_t base_buid = 0x800000020000000ULL; const int max_phbs = (SPAPR_PCI_LIMIT - SPAPR_PCI_BASE) / SPAPR_PCI_MEM64_WIN_SIZE - 1; int i; /* Sanity check natural alignments */ QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0); QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0); QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0); QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0); /* Sanity check bounds */ QEMU_BUILD_BUG_ON((max_phbs * SPAPR_PCI_IO_WIN_SIZE) > SPAPR_PCI_MEM32_WIN_SIZE); QEMU_BUILD_BUG_ON((max_phbs * SPAPR_PCI_MEM32_WIN_SIZE) > SPAPR_PCI_MEM64_WIN_SIZE); if (index >= max_phbs) { error_setg(errp, \"\\\"index\\\" for PAPR PHB is too large (max %u)\", max_phbs - 1); return; } *buid = base_buid + index; for (i = 0; i < n_dma; ++i) { liobns[i] = SPAPR_PCI_LIOBN(index, i); } *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE; *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE; *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE; }"} {"target": 1, "idx": 8155, "func": "static void ppc405ep_compute_clocks (ppc405ep_cpc_t *cpc) { uint32_t CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk; uint32_t UART0_clk, UART1_clk; uint64_t VCO_out, PLL_out; int M, D; VCO_out = 0; if ((cpc->pllmr[1] & 0x80000000) && !(cpc->pllmr[1] & 0x40000000)) { M = (((cpc->pllmr[1] >> 20) - 1) & 0xF) + 1; /* FBMUL */ #ifdef DEBUG_CLOCKS_LL printf(\"FBMUL %01\" PRIx32 \" %d\\n\", (cpc->pllmr[1] >> 20) & 0xF, M); #endif D = 8 - ((cpc->pllmr[1] >> 16) & 0x7); /* FWDA */ #ifdef DEBUG_CLOCKS_LL printf(\"FWDA %01\" PRIx32 \" %d\\n\", (cpc->pllmr[1] >> 16) & 0x7, D); #endif VCO_out = cpc->sysclk * M * D; if (VCO_out < 500000000UL || VCO_out > 1000000000UL) { /* Error - unlock the PLL */ printf(\"VCO out of range %\" PRIu64 \"\\n\", VCO_out); #if 0 cpc->pllmr[1] &= ~0x80000000; goto pll_bypass; #endif } PLL_out = VCO_out / D; /* Pretend the PLL is locked */ cpc->boot |= 0x00000001; } else { #if 0 pll_bypass: #endif PLL_out = cpc->sysclk; if (cpc->pllmr[1] & 0x40000000) { /* Pretend the PLL is not locked */ cpc->boot &= ~0x00000001; } } /* Now, compute all other clocks */ D = ((cpc->pllmr[0] >> 20) & 0x3) + 1; /* CCDV */ #ifdef DEBUG_CLOCKS_LL printf(\"CCDV %01\" PRIx32 \" %d\\n\", (cpc->pllmr[0] >> 20) & 0x3, D); #endif CPU_clk = PLL_out / D; D = ((cpc->pllmr[0] >> 16) & 0x3) + 1; /* CBDV */ #ifdef DEBUG_CLOCKS_LL printf(\"CBDV %01\" PRIx32 \" %d\\n\", (cpc->pllmr[0] >> 16) & 0x3, D); #endif PLB_clk = CPU_clk / D; D = ((cpc->pllmr[0] >> 12) & 0x3) + 1; /* OPDV */ #ifdef DEBUG_CLOCKS_LL printf(\"OPDV %01\" PRIx32 \" %d\\n\", (cpc->pllmr[0] >> 12) & 0x3, D); #endif OPB_clk = PLB_clk / D; D = ((cpc->pllmr[0] >> 8) & 0x3) + 2; /* EPDV */ #ifdef DEBUG_CLOCKS_LL printf(\"EPDV %01\" PRIx32 \" %d\\n\", (cpc->pllmr[0] >> 8) & 0x3, D); #endif EBC_clk = PLB_clk / D; D = ((cpc->pllmr[0] >> 4) & 0x3) + 1; /* MPDV */ #ifdef DEBUG_CLOCKS_LL printf(\"MPDV %01\" PRIx32 \" %d\\n\", (cpc->pllmr[0] >> 4) & 0x3, D); #endif MAL_clk = PLB_clk / D; D = (cpc->pllmr[0] & 0x3) + 1; /* PPDV */ #ifdef DEBUG_CLOCKS_LL printf(\"PPDV %01\" PRIx32 \" %d\\n\", cpc->pllmr[0] & 0x3, D); #endif PCI_clk = PLB_clk / D; D = ((cpc->ucr - 1) & 0x7F) + 1; /* U0DIV */ #ifdef DEBUG_CLOCKS_LL printf(\"U0DIV %01\" PRIx32 \" %d\\n\", cpc->ucr & 0x7F, D); #endif UART0_clk = PLL_out / D; D = (((cpc->ucr >> 8) - 1) & 0x7F) + 1; /* U1DIV */ #ifdef DEBUG_CLOCKS_LL printf(\"U1DIV %01\" PRIx32 \" %d\\n\", (cpc->ucr >> 8) & 0x7F, D); #endif 
UART1_clk = PLL_out / D; #ifdef DEBUG_CLOCKS printf(\"Setup PPC405EP clocks - sysclk %\" PRIu32 \" VCO %\" PRIu64 \" PLL out %\" PRIu64 \" Hz\\n\", cpc->sysclk, VCO_out, PLL_out); printf(\"CPU %\" PRIu32 \" PLB %\" PRIu32 \" OPB %\" PRIu32 \" EBC %\" PRIu32 \" MAL %\" PRIu32 \" PCI %\" PRIu32 \" UART0 %\" PRIu32 \" UART1 %\" PRIu32 \"\\n\", CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk, UART0_clk, UART1_clk); #endif /* Setup CPU clocks */ clk_setup(&cpc->clk_setup[PPC405EP_CPU_CLK], CPU_clk); /* Setup PLB clock */ clk_setup(&cpc->clk_setup[PPC405EP_PLB_CLK], PLB_clk); /* Setup OPB clock */ clk_setup(&cpc->clk_setup[PPC405EP_OPB_CLK], OPB_clk); /* Setup external clock */ clk_setup(&cpc->clk_setup[PPC405EP_EBC_CLK], EBC_clk); /* Setup MAL clock */ clk_setup(&cpc->clk_setup[PPC405EP_MAL_CLK], MAL_clk); /* Setup PCI clock */ clk_setup(&cpc->clk_setup[PPC405EP_PCI_CLK], PCI_clk); /* Setup UART0 clock */ clk_setup(&cpc->clk_setup[PPC405EP_UART0_CLK], UART0_clk); /* Setup UART1 clock */ clk_setup(&cpc->clk_setup[PPC405EP_UART1_CLK], UART1_clk); }"} {"target": 1, "idx": 8157, "func": "av_cold void ff_wmv2_common_init(Wmv2Context * w){ MpegEncContext * const s= &w->s; ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[0], ff_wmv2_scantableA); ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[1], ff_wmv2_scantableB); }"} {"target": 1, "idx": 8158, "func": "void tap_fd_set_offload(int fd, int csum, int tso4, int tso6, int ecn, int ufo) { unsigned int offload = 0; if (csum) { offload |= TUN_F_CSUM; if (tso4) offload |= TUN_F_TSO4; if (tso6) offload |= TUN_F_TSO6; if ((tso4 || tso6) && ecn) offload |= TUN_F_TSO_ECN; if (ufo) offload |= TUN_F_UFO; if (ioctl(fd, TUNSETOFFLOAD, offload) != 0) { offload &= ~TUN_F_UFO; if (ioctl(fd, TUNSETOFFLOAD, offload) != 0) { fprintf(stderr, \"TUNSETOFFLOAD ioctl() failed: %s\\n\", strerror(errno));"} {"target": 1, "idx": 8169, "func": "void ff_mpeg1_encode_init(MpegEncContext *s) { static int done=0; common_init(s); if(!done){ int f_code; int mv; int i; done=1; init_rl(&rl_mpeg1); for(i=0; i<64; i++) { mpeg1_max_level[0][i]= rl_mpeg1.max_level[0][i]; mpeg1_index_run[0][i]= rl_mpeg1.index_run[0][i]; } init_uni_ac_vlc(&rl_mpeg1, uni_mpeg1_ac_vlc_bits, uni_mpeg1_ac_vlc_len); /* build unified dc encoding tables */ for(i=-255; i<256; i++) { int adiff, index; int bits, code; int diff=i; adiff = ABS(diff); if(diff<0) diff--; index = av_log2(2*adiff); bits= vlc_dc_lum_bits[index] + index; code= (vlc_dc_lum_code[index]<> bit_size) + 1; if(code<17){ len= mbMotionVectorTable[code][1] + 1 + bit_size; }else{ len= mbMotionVectorTable[16][1] + 2 + bit_size; } } mv_penalty[f_code][mv+MAX_MV]= len; } } for(f_code=MAX_FCODE; f_code>0; f_code--){ for(mv=-(8<me.mv_penalty= mv_penalty; s->fcode_tab= fcode_tab; if(s->codec_id == CODEC_ID_MPEG1VIDEO){ s->min_qcoeff=-255; s->max_qcoeff= 255; }else{ s->min_qcoeff=-2047; s->max_qcoeff= 2047; } s->intra_ac_vlc_length= s->inter_ac_vlc_length= s->intra_ac_vlc_last_length= s->inter_ac_vlc_last_length= uni_mpeg1_ac_vlc_len; }"} {"target": 1, "idx": 8171, "func": "static av_cold int ac3_decode_init(AVCodecContext *avctx) { AC3DecodeContext *s = avctx->priv_data; s->avctx = avctx; ff_ac3_common_init(); ac3_tables_init(); ff_mdct_init(&s->imdct_256, 8, 1, 1.0); ff_mdct_init(&s->imdct_512, 9, 1, 1.0); ff_kbd_window_init(s->window, 5.0, 256); dsputil_init(&s->dsp, avctx); ff_fmt_convert_init(&s->fmt_conv, avctx); av_lfg_init(&s->dith_state, 0); /* set scale value for float to int16 conversion */ s->mul_bias = 32767.0f; /* 
allow downmixing to stereo or mono */ if (avctx->channels > 0 && avctx->request_channels > 0 && avctx->request_channels < avctx->channels && avctx->request_channels <= 2) { avctx->channels = avctx->request_channels; } s->downmixed = 1; /* allocate context input buffer */ if (avctx->error_recognition >= FF_ER_CAREFUL) { s->input_buffer = av_mallocz(AC3_FRAME_BUFFER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE); if (!s->input_buffer) return AVERROR(ENOMEM); } avctx->sample_fmt = AV_SAMPLE_FMT_S16; return 0; }"} {"target": 0, "idx": 8181, "func": "static void count_frame_bits_fixed(AC3EncodeContext *s) { static const int frame_bits_inc[8] = { 0, 0, 2, 2, 2, 4, 2, 4 }; int blk; int frame_bits; /* assumptions: * no dynamic range codes * bit allocation parameters do not change between blocks * no delta bit allocation * no skipped data * no auxilliary data * no E-AC-3 metadata */ /* header */ frame_bits = 16; /* sync info */ if (s->eac3) { /* bitstream info header */ frame_bits += 35; frame_bits += 1 + 1 + 1; /* audio frame header */ frame_bits += 2; frame_bits += 10; /* exponent strategy */ for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) frame_bits += 2 * s->fbw_channels + s->lfe_on; /* converter exponent strategy */ frame_bits += s->fbw_channels * 5; /* snr offsets */ frame_bits += 10; /* block start info */ frame_bits++; } else { frame_bits += 49; frame_bits += frame_bits_inc[s->channel_mode]; } /* audio blocks */ for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { if (!s->eac3) { /* block switch flags */ frame_bits += s->fbw_channels; /* dither flags */ frame_bits += s->fbw_channels; } /* dynamic range */ frame_bits++; /* spectral extension */ if (s->eac3) frame_bits++; if (!s->eac3) { /* exponent strategy */ frame_bits += 2 * s->fbw_channels; if (s->lfe_on) frame_bits++; /* bit allocation params */ frame_bits++; if (!blk) frame_bits += 2 + 2 + 2 + 2 + 3; } /* converter snr offset */ if (s->eac3) frame_bits++; if (!s->eac3) { /* delta bit allocation */ frame_bits++; /* skipped data */ frame_bits++; } } /* auxiliary data */ frame_bits++; /* CRC */ frame_bits += 1 + 16; s->frame_bits_fixed = frame_bits; }"} {"target": 0, "idx": 8197, "func": "static void av_noinline filter_mb_edgev( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h) { const unsigned int index_a = 52 + qp + h->slice_alpha_c0_offset; const int alpha = alpha_table[index_a]; const int beta = (beta_table+52)[qp + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]; tc[1] = tc0_table[index_a][bS[1]]; tc[2] = tc0_table[index_a][bS[2]]; tc[3] = tc0_table[index_a][bS[3]]; h->s.dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc); } else { h->s.dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta); } }"} {"target": 0, "idx": 8199, "func": "int avcodec_decode_audio(AVCodecContext *avctx, int16_t *samples, int *frame_size_ptr, uint8_t *buf, int buf_size) { int ret; *frame_size_ptr= 0; if((avctx->codec->capabilities & CODEC_CAP_DELAY) || buf_size){ ret = avctx->codec->decode(avctx, samples, frame_size_ptr, buf, buf_size); avctx->frame_number++; }else ret= 0; return ret; }"} {"target": 1, "idx": 8208, "func": "void vp8_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout) { VP8Macroblock *mb_edge[3] = { 0 /* top */, mb - 1 /* left */, 0 /* top-left */ }; enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV }; enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT }; int idx = CNT_ZERO; int cur_sign_bias = s->sign_bias[mb->ref_frame]; 
int8_t *sign_bias = s->sign_bias; VP56mv near_mv[4]; uint8_t cnt[4] = { 0 }; VP56RangeCoder *c = &s->c; if (!layout) { // layout is inlined (s->mb_layout is not) mb_edge[0] = mb + 2; mb_edge[2] = mb + 1; } else { mb_edge[0] = mb - s->mb_width - 1; mb_edge[2] = mb - s->mb_width - 2; } AV_ZERO32(&near_mv[0]); AV_ZERO32(&near_mv[1]); AV_ZERO32(&near_mv[2]); /* Process MB on top, left and top-left */ #define MV_EDGE_CHECK(n) \\ { \\ VP8Macroblock *edge = mb_edge[n]; \\ int edge_ref = edge->ref_frame; \\ if (edge_ref != VP56_FRAME_CURRENT) { \\ uint32_t mv = AV_RN32A(&edge->mv); \\ if (mv) { \\ if (cur_sign_bias != sign_bias[edge_ref]) { \\ /* SWAR negate of the values in mv. */ \\ mv = ~mv; \\ mv = ((mv & 0x7fff7fff) + \\ 0x00010001) ^ (mv & 0x80008000); \\ } \\ if (!n || mv != AV_RN32A(&near_mv[idx])) \\ AV_WN32A(&near_mv[++idx], mv); \\ cnt[idx] += 1 + (n != 2); \\ } else \\ cnt[CNT_ZERO] += 1 + (n != 2); \\ } \\ } MV_EDGE_CHECK(0) MV_EDGE_CHECK(1) MV_EDGE_CHECK(2) mb->partitioning = VP8_SPLITMVMODE_NONE; if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) { mb->mode = VP8_MVMODE_MV; /* If we have three distinct MVs, merge first and last if they're the same */ if (cnt[CNT_SPLITMV] && AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT])) cnt[CNT_NEAREST] += 1; /* Swap near and nearest if necessary */ if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) { FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]); FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]); } if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) { if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) { /* Choose the best mv out of 0,0 and the nearest mv */ clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]); cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) + (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 + (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT); if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) { mb->mode = VP8_MVMODE_SPLIT; mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1]; } else { mb->mv.y += vp8_read_mv_component(c, s->prob->mvc[0]); mb->mv.x += vp8_read_mv_component(c, s->prob->mvc[1]); mb->bmv[0] = mb->mv; } } else { clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]); mb->bmv[0] = mb->mv; } } else { clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]); mb->bmv[0] = mb->mv; } } else { mb->mode = VP8_MVMODE_ZERO; AV_ZERO32(&mb->mv); mb->bmv[0] = mb->mv; } }"} {"target": 1, "idx": 8209, "func": "int main() { int rd, rt, dsp; int result, resultdsp; rt = 0x12345678; result = 0xA000C000; resultdsp = 1; __asm (\"shll.ph %0, %2, 0x0B\\n\\t\" \"rddsp %1\\n\\t\" : \"=r\"(rd), \"=r\"(dsp) : \"r\"(rt) ); dsp = (dsp >> 22) & 0x01; assert(dsp == resultdsp); assert(rd == result); return 0; }"} {"target": 0, "idx": 8211, "func": "int ff_rle_encode(uint8_t *outbuf, int out_size, const uint8_t *ptr , int bpp, int w, int8_t add, uint8_t xor) { int count, x; uint8_t *out; out = outbuf; for(x = 0; x < w; x += count) { /* see if we can encode the next set of pixels with RLE */ if((count = count_pixels(ptr, w-x, bpp, 1)) > 1) { if(out + bpp + 1 > outbuf + out_size) return -1; *out++ = (count ^ xor) + add; memcpy(out, ptr, bpp); out += bpp; } else { /* fall back on uncompressed */ count = count_pixels(ptr, w-x, bpp, 0); *out++ = count - 1; if(out + bpp*count > outbuf + out_size) return -1; memcpy(out, ptr, bpp * count); out += bpp * count; } ptr += count * bpp; } return out - outbuf; 
}"} {"target": 1, "idx": 8217, "func": "static int vdpau_frames_init(AVHWFramesContext *ctx) { VDPAUDeviceContext *device_priv = ctx->device_ctx->internal->priv; VDPAUFramesContext *priv = ctx->internal->priv; int i; switch (ctx->sw_format) { case AV_PIX_FMT_YUV420P: priv->chroma_type = VDP_CHROMA_TYPE_420; break; case AV_PIX_FMT_YUV422P: priv->chroma_type = VDP_CHROMA_TYPE_422; break; case AV_PIX_FMT_YUV444P: priv->chroma_type = VDP_CHROMA_TYPE_444; break; default: av_log(ctx, AV_LOG_ERROR, \"Unsupported data layout: %s\\n\", av_get_pix_fmt_name(ctx->sw_format)); return AVERROR(ENOSYS); } for (i = 0; i < FF_ARRAY_ELEMS(vdpau_pix_fmts); i++) { if (vdpau_pix_fmts[i].chroma_type == priv->chroma_type) { priv->chroma_idx = i; priv->pix_fmts = device_priv->pix_fmts[i]; priv->nb_pix_fmts = device_priv->nb_pix_fmts[i]; break; } } if (!priv->pix_fmts) { av_log(ctx, AV_LOG_ERROR, \"Unsupported chroma type: %d\\n\", priv->chroma_type); return AVERROR(ENOSYS); } if (!ctx->pool) { ctx->internal->pool_internal = av_buffer_pool_init2(sizeof(VdpVideoSurface), ctx, vdpau_pool_alloc, NULL); if (!ctx->internal->pool_internal) return AVERROR(ENOMEM); } priv->get_data = device_priv->get_data; priv->put_data = device_priv->put_data; return 0; }"} {"target": 1, "idx": 8221, "func": "int vm_stop(RunState state) { if (qemu_in_vcpu_thread()) { qemu_system_vmstop_request(state); /* * FIXME: should not return to device code in case * vm_stop() has been requested. */ cpu_stop_current(); return 0; } return do_vm_stop(state); }"} {"target": 0, "idx": 8241, "func": "static int dca_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { AVFrame *frame = data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; int lfe_samples; int num_core_channels = 0; int i, ret; float **samples_flt; DCAContext *s = avctx->priv_data; int channels, full_channels; int core_ss_end; s->xch_present = 0; s->dca_buffer_size = ff_dca_convert_bitstream(buf, buf_size, s->dca_buffer, DCA_MAX_FRAME_SIZE + DCA_MAX_EXSS_HEADER_SIZE); if (s->dca_buffer_size == AVERROR_INVALIDDATA) { av_log(avctx, AV_LOG_ERROR, \"Not a valid DCA frame\\n\"); return AVERROR_INVALIDDATA; } init_get_bits(&s->gb, s->dca_buffer, s->dca_buffer_size * 8); if ((ret = dca_parse_frame_header(s)) < 0) { //seems like the frame is corrupt, try with the next one return ret; } //set AVCodec values with parsed data avctx->sample_rate = s->sample_rate; avctx->bit_rate = s->bit_rate; s->profile = FF_PROFILE_DTS; for (i = 0; i < (s->sample_blocks / 8); i++) { if ((ret = dca_decode_block(s, 0, i))) { av_log(avctx, AV_LOG_ERROR, \"error decoding block\\n\"); return ret; } } /* record number of core channels incase less than max channels are requested */ num_core_channels = s->prim_channels; if (s->ext_coding) s->core_ext_mask = dca_ext_audio_descr_mask[s->ext_descr]; else s->core_ext_mask = 0; core_ss_end = FFMIN(s->frame_size, s->dca_buffer_size) * 8; /* only scan for extensions if ext_descr was unknown or indicated a * supported XCh extension */ if (s->core_ext_mask < 0 || s->core_ext_mask & DCA_EXT_XCH) { /* if ext_descr was unknown, clear s->core_ext_mask so that the * extensions scan can fill it up */ s->core_ext_mask = FFMAX(s->core_ext_mask, 0); /* extensions start at 32-bit boundaries into bitstream */ skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31); while (core_ss_end - get_bits_count(&s->gb) >= 32) { uint32_t bits = get_bits_long(&s->gb, 32); switch (bits) { case 0x5a5a5a5a: { int ext_amode, xch_fsize; s->xch_base_channel = 
s->prim_channels; /* validate sync word using XCHFSIZE field */ xch_fsize = show_bits(&s->gb, 10); if ((s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize) && (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize + 1)) continue; /* skip length-to-end-of-frame field for the moment */ skip_bits(&s->gb, 10); s->core_ext_mask |= DCA_EXT_XCH; /* extension amode(number of channels in extension) should be 1 */ /* AFAIK XCh is not used for more channels */ if ((ext_amode = get_bits(&s->gb, 4)) != 1) { av_log(avctx, AV_LOG_ERROR, \"XCh extension amode %d not\" \" supported!\\n\", ext_amode); continue; } /* much like core primary audio coding header */ dca_parse_audio_coding_header(s, s->xch_base_channel); for (i = 0; i < (s->sample_blocks / 8); i++) if ((ret = dca_decode_block(s, s->xch_base_channel, i))) { av_log(avctx, AV_LOG_ERROR, \"error decoding XCh extension\\n\"); continue; } s->xch_present = 1; break; } case 0x47004a03: /* XXCh: extended channels */ /* usually found either in core or HD part in DTS-HD HRA streams, * but not in DTS-ES which contains XCh extensions instead */ s->core_ext_mask |= DCA_EXT_XXCH; break; case 0x1d95f262: { int fsize96 = show_bits(&s->gb, 12) + 1; if (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + fsize96) continue; av_log(avctx, AV_LOG_DEBUG, \"X96 extension found at %d bits\\n\", get_bits_count(&s->gb)); skip_bits(&s->gb, 12); av_log(avctx, AV_LOG_DEBUG, \"FSIZE96 = %d bytes\\n\", fsize96); av_log(avctx, AV_LOG_DEBUG, \"REVNO = %d\\n\", get_bits(&s->gb, 4)); s->core_ext_mask |= DCA_EXT_X96; break; } } skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31); } } else { /* no supported extensions, skip the rest of the core substream */ skip_bits_long(&s->gb, core_ss_end - get_bits_count(&s->gb)); } if (s->core_ext_mask & DCA_EXT_X96) s->profile = FF_PROFILE_DTS_96_24; else if (s->core_ext_mask & (DCA_EXT_XCH | DCA_EXT_XXCH)) s->profile = FF_PROFILE_DTS_ES; /* check for ExSS (HD part) */ if (s->dca_buffer_size - s->frame_size > 32 && get_bits_long(&s->gb, 32) == DCA_HD_MARKER) dca_exss_parse_header(s); avctx->profile = s->profile; full_channels = channels = s->prim_channels + !!s->lfe; if (s->amode < 16) { avctx->channel_layout = dca_core_channel_layout[s->amode]; if (s->prim_channels + !!s->lfe > 2 && avctx->request_channel_layout == AV_CH_LAYOUT_STEREO) { /* * Neither the core's auxiliary data nor our default tables contain * downmix coefficients for the additional channel coded in the XCh * extension, so when we're doing a Stereo downmix, don't decode it. 
*/ s->xch_disable = 1; } #if FF_API_REQUEST_CHANNELS FF_DISABLE_DEPRECATION_WARNINGS if (s->xch_present && !s->xch_disable && (!avctx->request_channels || avctx->request_channels > num_core_channels + !!s->lfe)) { FF_ENABLE_DEPRECATION_WARNINGS #else if (s->xch_present && !s->xch_disable) { #endif avctx->channel_layout |= AV_CH_BACK_CENTER; if (s->lfe) { avctx->channel_layout |= AV_CH_LOW_FREQUENCY; s->channel_order_tab = dca_channel_reorder_lfe_xch[s->amode]; } else { s->channel_order_tab = dca_channel_reorder_nolfe_xch[s->amode]; } } else { channels = num_core_channels + !!s->lfe; s->xch_present = 0; /* disable further xch processing */ if (s->lfe) { avctx->channel_layout |= AV_CH_LOW_FREQUENCY; s->channel_order_tab = dca_channel_reorder_lfe[s->amode]; } else s->channel_order_tab = dca_channel_reorder_nolfe[s->amode]; } if (channels > !!s->lfe && s->channel_order_tab[channels - 1 - !!s->lfe] < 0) return AVERROR_INVALIDDATA; if (s->prim_channels + !!s->lfe > 2 && avctx->request_channel_layout == AV_CH_LAYOUT_STEREO) { channels = 2; s->output = s->prim_channels == 2 ? s->amode : DCA_STEREO; avctx->channel_layout = AV_CH_LAYOUT_STEREO; /* Stereo downmix coefficients * * The decoder can only downmix to 2-channel, so we need to ensure * embedded downmix coefficients are actually targeting 2-channel. */ if (s->core_downmix && (s->core_downmix_amode == DCA_STEREO || s->core_downmix_amode == DCA_STEREO_TOTAL)) { int sign, code; for (i = 0; i < s->prim_channels + !!s->lfe; i++) { sign = s->core_downmix_codes[i][0] & 0x100 ? 1 : -1; code = s->core_downmix_codes[i][0] & 0x0FF; s->downmix_coef[i][0] = (!code ? 0.0f : sign * dca_dmixtable[code - 1]); sign = s->core_downmix_codes[i][1] & 0x100 ? 1 : -1; code = s->core_downmix_codes[i][1] & 0x0FF; s->downmix_coef[i][1] = (!code ? 
0.0f : sign * dca_dmixtable[code - 1]); } s->output = s->core_downmix_amode; } else { int am = s->amode & DCA_CHANNEL_MASK; if (am >= FF_ARRAY_ELEMS(dca_default_coeffs)) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid channel mode %d\\n\", am); return AVERROR_INVALIDDATA; } if (s->prim_channels + !!s->lfe > FF_ARRAY_ELEMS(dca_default_coeffs[0])) { avpriv_request_sample(s->avctx, \"Downmixing %d channels\", s->prim_channels + !!s->lfe); return AVERROR_PATCHWELCOME; } for (i = 0; i < s->prim_channels + !!s->lfe; i++) { s->downmix_coef[i][0] = dca_default_coeffs[am][i][0]; s->downmix_coef[i][1] = dca_default_coeffs[am][i][1]; } } av_dlog(s->avctx, \"Stereo downmix coeffs:\\n\"); for (i = 0; i < s->prim_channels + !!s->lfe; i++) { av_dlog(s->avctx, \"L, input channel %d = %f\\n\", i, s->downmix_coef[i][0]); av_dlog(s->avctx, \"R, input channel %d = %f\\n\", i, s->downmix_coef[i][1]); } av_dlog(s->avctx, \"\\n\"); } } else { av_log(avctx, AV_LOG_ERROR, \"Non standard configuration %d !\\n\", s->amode); return AVERROR_INVALIDDATA; } avctx->channels = channels; /* get output buffer */ frame->nb_samples = 256 * (s->sample_blocks / 8); if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; } samples_flt = (float **)frame->extended_data; /* allocate buffer for extra channels if downmixing */ if (avctx->channels < full_channels) { ret = av_samples_get_buffer_size(NULL, full_channels - channels, frame->nb_samples, avctx->sample_fmt, 0); if (ret < 0) return ret; av_fast_malloc(&s->extra_channels_buffer, &s->extra_channels_buffer_size, ret); if (!s->extra_channels_buffer) return AVERROR(ENOMEM); ret = av_samples_fill_arrays((uint8_t **)s->extra_channels, NULL, s->extra_channels_buffer, full_channels - channels, frame->nb_samples, avctx->sample_fmt, 0); if (ret < 0) return ret; } /* filter to get final output */ for (i = 0; i < (s->sample_blocks / 8); i++) { int ch; for (ch = 0; ch < channels; ch++) s->samples_chanptr[ch] = samples_flt[ch] + i * 256; for (; ch < full_channels; ch++) s->samples_chanptr[ch] = s->extra_channels[ch - channels] + i * 256; dca_filter_channels(s, i); /* If this was marked as a DTS-ES stream we need to subtract back- */ /* channel from SL & SR to remove matrixed back-channel signal */ if ((s->source_pcm_res & 1) && s->xch_present) { float *back_chan = s->samples_chanptr[s->channel_order_tab[s->xch_base_channel]]; float *lt_chan = s->samples_chanptr[s->channel_order_tab[s->xch_base_channel - 2]]; float *rt_chan = s->samples_chanptr[s->channel_order_tab[s->xch_base_channel - 1]]; s->fdsp.vector_fmac_scalar(lt_chan, back_chan, -M_SQRT1_2, 256); s->fdsp.vector_fmac_scalar(rt_chan, back_chan, -M_SQRT1_2, 256); } } /* update lfe history */ lfe_samples = 2 * s->lfe * (s->sample_blocks / 8); for (i = 0; i < 2 * s->lfe * 4; i++) s->lfe_data[i] = s->lfe_data[i + lfe_samples]; /* AVMatrixEncoding * * DCA_STEREO_TOTAL (Lt/Rt) is equivalent to Dolby Surround */ ret = ff_side_data_update_matrix_encoding(frame, (s->output & ~DCA_LFE) == DCA_STEREO_TOTAL ? 
AV_MATRIX_ENCODING_DOLBY : AV_MATRIX_ENCODING_NONE); if (ret < 0) return ret; *got_frame_ptr = 1; return buf_size; }"} {"target": 0, "idx": 8247, "func": "static unsigned do_stfle(CPUS390XState *env, uint64_t words[MAX_STFL_WORDS]) { S390CPU *cpu = s390_env_get_cpu(env); const unsigned long *features = cpu->model->features; unsigned max_bit = 0; S390Feat feat; memset(words, 0, sizeof(uint64_t) * MAX_STFL_WORDS); if (test_bit(S390_FEAT_ZARCH, features)) { /* z/Architecture is always active if around */ words[0] = 1ull << (63 - 2); } for (feat = find_first_bit(features, S390_FEAT_MAX); feat < S390_FEAT_MAX; feat = find_next_bit(features, S390_FEAT_MAX, feat + 1)) { const S390FeatDef *def = s390_feat_def(feat); if (def->type == S390_FEAT_TYPE_STFL) { unsigned bit = def->bit; if (bit > max_bit) { max_bit = bit; } assert(bit / 64 < MAX_STFL_WORDS); words[bit / 64] |= 1ULL << (63 - bit % 64); } } return max_bit / 64; }"} {"target": 0, "idx": 8254, "func": "static int nbd_establish_connection(BlockDriverState *bs) { BDRVNBDState *s = bs->opaque; int sock; int ret; off_t size; size_t blocksize; if (s->host_spec[0] == '/') { sock = unix_socket_outgoing(s->host_spec); } else { sock = tcp_socket_outgoing_spec(s->host_spec); } /* Failed to establish connection */ if (sock < 0) { logout(\"Failed to establish connection to NBD server\\n\"); return -errno; } /* NBD handshake */ ret = nbd_receive_negotiate(sock, s->export_name, &s->nbdflags, &size, &blocksize); if (ret < 0) { logout(\"Failed to negotiate with the NBD server\\n\"); closesocket(sock); return -errno; } /* Now that we're connected, set the socket to be non-blocking and * kick the reply mechanism. */ socket_set_nonblock(sock); qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, nbd_have_request, NULL, s); s->sock = sock; s->size = size; s->blocksize = blocksize; logout(\"Established connection with NBD server\\n\"); return 0; }"} {"target": 0, "idx": 8271, "func": "static av_cold int vc2_encode_init(AVCodecContext *avctx) { Plane *p; SubBand *b; int i, j, level, o, shift; const AVPixFmtDescriptor *fmt = av_pix_fmt_desc_get(avctx->pix_fmt); const int depth = fmt->comp[0].depth; VC2EncContext *s = avctx->priv_data; s->picture_number = 0; /* Total allowed quantization range */ s->q_ceil = DIRAC_MAX_QUANT_INDEX; s->ver.major = 2; s->ver.minor = 0; s->profile = 3; s->level = 3; s->base_vf = -1; s->strict_compliance = 1; s->q_avg = 0; s->slice_max_bytes = 0; s->slice_min_bytes = 0; /* Mark unknown as progressive */ s->interlaced = !((avctx->field_order == AV_FIELD_UNKNOWN) || (avctx->field_order == AV_FIELD_PROGRESSIVE)); for (i = 0; i < base_video_fmts_len; i++) { const VC2BaseVideoFormat *fmt = &base_video_fmts[i]; if (avctx->pix_fmt != fmt->pix_fmt) continue; if (avctx->time_base.num != fmt->time_base.num) continue; if (avctx->time_base.den != fmt->time_base.den) continue; if (avctx->width != fmt->width) continue; if (avctx->height != fmt->height) continue; if (s->interlaced != fmt->interlaced) continue; s->base_vf = i; s->level = base_video_fmts[i].level; break; } if (s->interlaced) av_log(avctx, AV_LOG_WARNING, \"Interlacing enabled!\\n\"); if ((s->slice_width & (s->slice_width - 1)) || (s->slice_height & (s->slice_height - 1))) { av_log(avctx, AV_LOG_ERROR, \"Slice size is not a power of two!\\n\"); return AVERROR_UNKNOWN; } if ((s->slice_width > avctx->width) || (s->slice_height > avctx->height)) { av_log(avctx, AV_LOG_ERROR, \"Slice size is bigger than the image!\\n\"); return AVERROR_UNKNOWN; } if (s->base_vf <= 0) { if 
(avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) { s->strict_compliance = s->base_vf = 0; av_log(avctx, AV_LOG_WARNING, \"Disabling strict compliance\\n\"); } else { av_log(avctx, AV_LOG_WARNING, \"Given format does not strictly comply with \" \"the specifications, please add a -strict -1 flag to use it\\n\"); return AVERROR_UNKNOWN; } } else { av_log(avctx, AV_LOG_INFO, \"Selected base video format = %i (%s)\\n\", s->base_vf, base_video_fmts[s->base_vf].name); } /* Chroma subsampling */ avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift); /* Bit depth and color range index */ if (depth == 8 && avctx->color_range == AVCOL_RANGE_JPEG) { s->bpp = 1; s->bpp_idx = 1; s->diff_offset = 128; } else if (depth == 8 && (avctx->color_range == AVCOL_RANGE_MPEG || avctx->color_range == AVCOL_RANGE_UNSPECIFIED)) { s->bpp = 1; s->bpp_idx = 2; s->diff_offset = 128; } else if (depth == 10) { s->bpp = 2; s->bpp_idx = 3; s->diff_offset = 512; } else { s->bpp = 2; s->bpp_idx = 4; s->diff_offset = 2048; } /* Planes initialization */ for (i = 0; i < 3; i++) { int w, h; p = &s->plane[i]; p->width = avctx->width >> (i ? s->chroma_x_shift : 0); p->height = avctx->height >> (i ? s->chroma_y_shift : 0); if (s->interlaced) p->height >>= 1; p->dwt_width = w = FFALIGN(p->width, (1 << s->wavelet_depth)); p->dwt_height = h = FFALIGN(p->height, (1 << s->wavelet_depth)); p->coef_stride = FFALIGN(p->dwt_width, 32); p->coef_buf = av_malloc(p->coef_stride*p->dwt_height*sizeof(dwtcoef)); if (!p->coef_buf) goto alloc_fail; for (level = s->wavelet_depth-1; level >= 0; level--) { w = w >> 1; h = h >> 1; for (o = 0; o < 4; o++) { b = &p->band[level][o]; b->width = w; b->height = h; b->stride = p->coef_stride; shift = (o > 1)*b->height*b->stride + (o & 1)*b->width; b->buf = p->coef_buf + shift; } } /* DWT init */ if (ff_vc2enc_init_transforms(&s->transform_args[i].t, s->plane[i].coef_stride, s->plane[i].dwt_height)) goto alloc_fail; } /* Slices */ s->num_x = s->plane[0].dwt_width/s->slice_width; s->num_y = s->plane[0].dwt_height/s->slice_height; s->slice_args = av_calloc(s->num_x*s->num_y, sizeof(SliceArgs)); if (!s->slice_args) goto alloc_fail; /* Lookup tables */ s->coef_lut_len = av_malloc(COEF_LUT_TAB*(s->q_ceil+1)*sizeof(*s->coef_lut_len)); if (!s->coef_lut_len) goto alloc_fail; s->coef_lut_val = av_malloc(COEF_LUT_TAB*(s->q_ceil+1)*sizeof(*s->coef_lut_val)); if (!s->coef_lut_val) goto alloc_fail; for (i = 0; i < s->q_ceil; i++) { uint8_t *len_lut = &s->coef_lut_len[i*COEF_LUT_TAB]; uint32_t *val_lut = &s->coef_lut_val[i*COEF_LUT_TAB]; for (j = 0; j < COEF_LUT_TAB; j++) { get_vc2_ue_uint(QUANT(j, ff_dirac_qscale_tab[i]), &len_lut[j], &val_lut[j]); if (len_lut[j] != 1) { len_lut[j] += 1; val_lut[j] <<= 1; } else { val_lut[j] = 1; } } } return 0; alloc_fail: vc2_encode_end(avctx); av_log(avctx, AV_LOG_ERROR, \"Unable to allocate memory!\\n\"); return AVERROR(ENOMEM); }"} {"target": 0, "idx": 8273, "func": "void ff_fix_long_b_mvs(MpegEncContext * s, int16_t (*mv_table)[2], int f_code, int type) { int y; uint8_t * fcode_tab= s->fcode_tab; // RAL: 8 in MPEG-1, 16 in MPEG-4 int range = (((s->codec_id == CODEC_ID_MPEG1VIDEO) ? 8 : 16) << f_code); /* clip / convert to intra 16x16 type MVs */ for(y=0; y<s->mb_height; y++){ int x; int xy= (y+1)* (s->mb_width+2)+1; int i= y*s->mb_width; for(x=0; x<s->mb_width; x++) { if (s->mb_type[i] & type) // RAL: \"type\" test added...
{ if (fcode_tab[mv_table[xy][0] + MAX_MV] > f_code || fcode_tab[mv_table[xy][0] + MAX_MV] == 0) { if(mv_table[xy][0]>0) mv_table[xy][0]= range-1; else mv_table[xy][0]= -range; } if (fcode_tab[mv_table[xy][1] + MAX_MV] > f_code || fcode_tab[mv_table[xy][1] + MAX_MV] == 0) { if(mv_table[xy][1]>0) mv_table[xy][1]= range-1; else mv_table[xy][1]= -range; } } xy++; i++; } } }"} {"target": 0, "idx": 8291, "func": "S390CPU *s390x_new_cpu(const char *cpu_model, uint32_t core_id, Error **errp) { S390CPU *cpu; Error *err = NULL; cpu = cpu_s390x_create(cpu_model, &err); if (err != NULL) { goto out; } object_property_set_int(OBJECT(cpu), core_id, \"core-id\", &err); if (err != NULL) { goto out; } object_property_set_bool(OBJECT(cpu), true, \"realized\", &err); out: if (err) { error_propagate(errp, err); object_unref(OBJECT(cpu)); cpu = NULL; } return cpu; }"} {"target": 0, "idx": 8296, "func": "aio_compute_timeout(AioContext *ctx) { int64_t deadline; int timeout = -1; QEMUBH *bh; for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = atomic_rcu_read(&bh->next)) { if (bh->scheduled) { if (bh->idle) { /* idle bottom halves will be polled at least * every 10ms */ timeout = 10000000; } else { /* non-idle bottom halves will be executed * immediately */ return 0; } } } deadline = timerlistgroup_deadline_ns(&ctx->tlg); if (deadline == 0) { return 0; } else { return qemu_soonest_timeout(timeout, deadline); } }"} {"target": 0, "idx": 8313, "func": "static void create_cel_evals(RoqContext *enc, RoqTempdata *tempData) { int n=0, x, y, i; tempData->cel_evals = av_malloc(enc->width*enc->height/64 * sizeof(CelEvaluation)); /* Map to the ROQ quadtree order */ for (y=0; y<enc->height; y+=16) for (x=0; x<enc->width; x+=16) for(i=0; i<4; i++) { tempData->cel_evals[n ].sourceX = x + (i&1)*8; tempData->cel_evals[n++].sourceY = y + (i&2)*4; } }"} {"target": 0, "idx": 8340, "func": "static int ea_read_packet(AVFormatContext *s, AVPacket *pkt) { EaDemuxContext *ea = s->priv_data; AVIOContext *pb = s->pb; int ret = 0; int packet_read = 0; unsigned int chunk_type, chunk_size; int key = 0; int av_uninit(num_samples); while (!packet_read) { chunk_type = avio_rl32(pb); chunk_size = (ea->big_endian ?
avio_rb32(pb) : avio_rl32(pb)) - 8; switch (chunk_type) { /* audio data */ case ISNh_TAG: /* header chunk also contains data; skip over the header portion*/ avio_skip(pb, 32); chunk_size -= 32; case ISNd_TAG: case SCDl_TAG: case SNDC_TAG: case SDEN_TAG: if (!ea->audio_codec) { avio_skip(pb, chunk_size); break; } else if (ea->audio_codec == CODEC_ID_PCM_S16LE_PLANAR || ea->audio_codec == CODEC_ID_MP3) { num_samples = avio_rl32(pb); avio_skip(pb, 8); chunk_size -= 12; } ret = av_get_packet(pb, pkt, chunk_size); if (ret < 0) return ret; pkt->stream_index = ea->audio_stream_index; switch (ea->audio_codec) { case CODEC_ID_ADPCM_EA: case CODEC_ID_ADPCM_EA_R1: case CODEC_ID_ADPCM_EA_R2: case CODEC_ID_ADPCM_IMA_EA_EACS: pkt->duration = AV_RL32(pkt->data); break; case CODEC_ID_ADPCM_EA_R3: pkt->duration = AV_RB32(pkt->data); break; case CODEC_ID_ADPCM_IMA_EA_SEAD: pkt->duration = ret * 2 / ea->num_channels; break; case CODEC_ID_PCM_S16LE_PLANAR: case CODEC_ID_MP3: pkt->duration = num_samples; break; default: pkt->duration = chunk_size / (ea->bytes * ea->num_channels); } packet_read = 1; break; /* ending tag */ case 0: case ISNe_TAG: case SCEl_TAG: case SEND_TAG: case SEEN_TAG: ret = AVERROR(EIO); packet_read = 1; break; case MVIh_TAG: case kVGT_TAG: case pQGT_TAG: case TGQs_TAG: case MADk_TAG: key = AV_PKT_FLAG_KEY; case MVIf_TAG: case fVGT_TAG: case MADm_TAG: case MADe_TAG: avio_seek(pb, -8, SEEK_CUR); // include chunk preamble chunk_size += 8; goto get_video_packet; case mTCD_TAG: avio_skip(pb, 8); // skip ea dct header chunk_size -= 8; goto get_video_packet; case MV0K_TAG: case MPCh_TAG: case pIQT_TAG: key = AV_PKT_FLAG_KEY; case MV0F_TAG: get_video_packet: ret = av_get_packet(pb, pkt, chunk_size); if (ret < 0) return ret; pkt->stream_index = ea->video_stream_index; pkt->flags |= key; packet_read = 1; break; default: avio_skip(pb, chunk_size); break; } } return ret; }"} {"target": 1, "idx": 8357, "func": "static void type_initialize_interface(TypeImpl *ti, const char *parent) { InterfaceClass *new_iface; TypeInfo info = { }; TypeImpl *iface_impl; info.parent = parent; info.name = g_strdup_printf(\"%s::%s\", ti->name, info.parent); info.abstract = true; iface_impl = type_register(&info); type_initialize(iface_impl); g_free((char *)info.name); new_iface = (InterfaceClass *)iface_impl->class; new_iface->concrete_class = ti->class; ti->class->interfaces = g_slist_append(ti->class->interfaces, iface_impl->class); }"} {"target": 0, "idx": 8370, "func": "static int video_open(VideoState *is){ int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL; int w,h; if(is_full_screen) flags |= SDL_FULLSCREEN; else flags |= SDL_RESIZABLE; if (is_full_screen && fs_screen_width) { w = fs_screen_width; h = fs_screen_height; } else if(!is_full_screen && screen_width){ w = screen_width; h = screen_height; }else if (is->video_st && is->video_st->codec->width){ w = is->video_st->codec->width; h = is->video_st->codec->height; } else { w = 640; h = 480; } #ifndef SYS_DARWIN screen = SDL_SetVideoMode(w, h, 0, flags); #else /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */ screen = SDL_SetVideoMode(w, h, 24, flags); #endif if (!screen) { fprintf(stderr, \"SDL: could not set video mode - exiting\\n\"); return -1; } SDL_WM_SetCaption(\"FFplay\", \"FFplay\"); is->width = screen->w; is->height = screen->h; return 0; }"} {"target": 0, "idx": 8380, "func": "static uint64_t nand_read(void *opaque, target_phys_addr_t addr, unsigned size) { struct nand_state_t *s = opaque; uint32_t r; int rdy; r = nand_getio(s->nand); 
nand_getpins(s->nand, &rdy); s->rdy = rdy; DNAND(printf(\"%s addr=%x r=%x\\n\", __func__, addr, r)); return r; }"} {"target": 0, "idx": 8409, "func": "void qmp_migrate_set_speed(int64_t value, Error **errp) { MigrationState *s; if (value < 0) { value = 0; } s = migrate_get_current(); s->bandwidth_limit = value; qemu_file_set_rate_limit(s->file, s->bandwidth_limit); }"} {"target": 1, "idx": 8412, "func": "void block_job_resume_all(void) { BlockJob *job = NULL; while ((job = block_job_next(job))) { AioContext *aio_context = blk_get_aio_context(job->blk); aio_context_acquire(aio_context); block_job_resume(job); aio_context_release(aio_context); } }"} {"target": 0, "idx": 8425, "func": "static int write_option(void *optctx, const OptionDef *po, const char *opt, const char *arg) { /* new-style options contain an offset into optctx, old-style address of * a global var*/ void *dst = po->flags & (OPT_OFFSET | OPT_SPEC) ? (uint8_t *)optctx + po->u.off : po->u.dst_ptr; int *dstcount; if (po->flags & OPT_SPEC) { SpecifierOpt **so = dst; char *p = strchr(opt, ':'); dstcount = (int *)(so + 1); *so = grow_array(*so, sizeof(**so), dstcount, *dstcount + 1); (*so)[*dstcount - 1].specifier = av_strdup(p ? p + 1 : \"\"); dst = &(*so)[*dstcount - 1].u; } if (po->flags & OPT_STRING) { char *str; str = av_strdup(arg); av_freep(dst); *(char **)dst = str; } else if (po->flags & OPT_BOOL || po->flags & OPT_INT) { *(int *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX); } else if (po->flags & OPT_INT64) { *(int64_t *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, INT64_MAX); } else if (po->flags & OPT_TIME) { *(int64_t *)dst = parse_time_or_die(opt, arg, 1); } else if (po->flags & OPT_FLOAT) { *(float *)dst = parse_number_or_die(opt, arg, OPT_FLOAT, -INFINITY, INFINITY); } else if (po->flags & OPT_DOUBLE) { *(double *)dst = parse_number_or_die(opt, arg, OPT_DOUBLE, -INFINITY, INFINITY); } else if (po->u.func_arg) { int ret = po->u.func_arg(optctx, opt, arg); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, \"Failed to set value '%s' for option '%s'\\n\", arg, opt); return ret; } } if (po->flags & OPT_EXIT) exit_program(0); return 0; }"} {"target": 1, "idx": 8431, "func": "static void put_uint8(QEMUFile *f, void *pv, size_t size) { uint8_t *v = pv; qemu_put_8s(f, v); }"} {"target": 1, "idx": 8435, "func": "static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex, target_ulong avpn, target_ulong flags, target_ulong *vp, target_ulong *rp) { CPUPPCState *env = &cpu->env; uint64_t token; target_ulong v, r, rb; if (!valid_pte_index(env, ptex)) { return REMOVE_PARM; } token = ppc_hash64_start_access(cpu, ptex); v = ppc_hash64_load_hpte0(cpu, token, 0); r = ppc_hash64_load_hpte1(cpu, token, 0); ppc_hash64_stop_access(token); if ((v & HPTE64_V_VALID) == 0 || ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) || ((flags & H_ANDCOND) && (v & avpn) != 0)) { return REMOVE_NOT_FOUND; } *vp = v; *rp = r; ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0); rb = compute_tlbie_rb(v, r, ptex); ppc_tlb_invalidate_one(env, rb); return REMOVE_SUCCESS; }"} {"target": 1, "idx": 8447, "func": "int pcie_aer_init(PCIDevice *dev, uint16_t offset, uint16_t size) { PCIExpressDevice *exp; pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, PCI_ERR_VER, offset, size); exp = &dev->exp; exp->aer_cap = offset; /* log_max is property */ if (dev->exp.aer_log.log_max == PCIE_AER_LOG_MAX_UNSET) { dev->exp.aer_log.log_max = PCIE_AER_LOG_MAX_DEFAULT; } /* clip down the value to avoid unreasobale memory usage */ if 
(dev->exp.aer_log.log_max > PCIE_AER_LOG_MAX_LIMIT) { return -EINVAL; } dev->exp.aer_log.log = g_malloc0(sizeof dev->exp.aer_log.log[0] * dev->exp.aer_log.log_max); pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS, PCI_ERR_UNC_SUPPORTED); pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER, PCI_ERR_UNC_SEVERITY_DEFAULT); pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_SEVER, PCI_ERR_UNC_SUPPORTED); pci_long_test_and_set_mask(dev->w1cmask + offset + PCI_ERR_COR_STATUS, PCI_ERR_COR_SUPPORTED); pci_set_long(dev->config + offset + PCI_ERR_COR_MASK, PCI_ERR_COR_MASK_DEFAULT); pci_set_long(dev->wmask + offset + PCI_ERR_COR_MASK, PCI_ERR_COR_SUPPORTED); /* capabilities and control. multiple header logging is supported */ if (dev->exp.aer_log.log_max > 0) { pci_set_long(dev->config + offset + PCI_ERR_CAP, PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC | PCI_ERR_CAP_MHRC); pci_set_long(dev->wmask + offset + PCI_ERR_CAP, PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE | PCI_ERR_CAP_MHRE); } else { pci_set_long(dev->config + offset + PCI_ERR_CAP, PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC); pci_set_long(dev->wmask + offset + PCI_ERR_CAP, PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE); } switch (pcie_cap_get_type(dev)) { case PCI_EXP_TYPE_ROOT_PORT: /* this case will be set by pcie_aer_root_init() */ /* fallthrough */ case PCI_EXP_TYPE_DOWNSTREAM: case PCI_EXP_TYPE_UPSTREAM: pci_word_test_and_set_mask(dev->wmask + PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_SERR); pci_long_test_and_set_mask(dev->w1cmask + PCI_STATUS, PCI_SEC_STATUS_RCV_SYSTEM_ERROR); break; default: /* nothing */ break; } return 0; }"} {"target": 1, "idx": 8457, "func": "static void qemu_tcg_init_vcpu(CPUState *cpu) { char thread_name[VCPU_THREAD_NAME_SIZE]; static QemuCond *tcg_halt_cond; static QemuThread *tcg_cpu_thread; /* share a single thread for all cpus with TCG */ if (!tcg_cpu_thread) { cpu->thread = g_malloc0(sizeof(QemuThread)); cpu->halt_cond = g_malloc0(sizeof(QemuCond)); qemu_cond_init(cpu->halt_cond); tcg_halt_cond = cpu->halt_cond; snprintf(thread_name, VCPU_THREAD_NAME_SIZE, \"CPU %d/TCG\", cpu->cpu_index); qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn, cpu, QEMU_THREAD_JOINABLE); #ifdef _WIN32 cpu->hThread = qemu_thread_get_handle(cpu->thread); #endif while (!cpu->created) { qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); } tcg_cpu_thread = cpu->thread; } else { cpu->thread = tcg_cpu_thread; cpu->halt_cond = tcg_halt_cond; } }"} {"target": 1, "idx": 8466, "func": "ssize_t vnc_client_write_buf(VncState *vs, const uint8_t *data, size_t datalen) { ssize_t ret; #ifdef CONFIG_VNC_TLS if (vs->tls.session) { ret = vnc_client_write_tls(&vs->tls.session, data, datalen); } else { #endif /* CONFIG_VNC_TLS */ ret = send(vs->csock, (const void *)data, datalen, 0); #ifdef CONFIG_VNC_TLS } #endif /* CONFIG_VNC_TLS */ VNC_DEBUG(\"Wrote wire %p %zd -> %ld\\n\", data, datalen, ret); return vnc_client_io_error(vs, ret, socket_error()); }"} {"target": 0, "idx": 8482, "func": "static av_cold int decode_init(AVCodecContext * avctx) { MPADecodeContext *s = avctx->priv_data; static int init=0; int i, j, k; s->avctx = avctx; ff_mpadsp_init(&s->mpadsp); avctx->sample_fmt= OUT_FMT; s->error_recognition= avctx->error_recognition; if (!init && !avctx->parse_only) { int offset; /* scale factors table for layer 1/2 */ for(i=0;i<64;i++) { int shift, mod; /* 1.0 (i = 3) is normalized to 2 ^ FRAC_BITS */ shift = (i / 3); mod = i % 3; scale_factor_modshift[i] = mod | (shift << 2); } /* scale factor multiply for layer 1 
*/ for(i=0;i<15;i++) { int n, norm; n = i + 2; norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1); scale_factor_mult[i][0] = MULLx(norm, FIXR(1.0 * 2.0), FRAC_BITS); scale_factor_mult[i][1] = MULLx(norm, FIXR(0.7937005259 * 2.0), FRAC_BITS); scale_factor_mult[i][2] = MULLx(norm, FIXR(0.6299605249 * 2.0), FRAC_BITS); av_dlog(avctx, \"%d: norm=%x s=%x %x %x\\n\", i, norm, scale_factor_mult[i][0], scale_factor_mult[i][1], scale_factor_mult[i][2]); } RENAME(ff_mpa_synth_init)(RENAME(ff_mpa_synth_window)); /* huffman decode tables */ offset = 0; for(i=1;i<16;i++) { const HuffTable *h = &mpa_huff_tables[i]; int xsize, x, y; uint8_t tmp_bits [512]; uint16_t tmp_codes[512]; memset(tmp_bits , 0, sizeof(tmp_bits )); memset(tmp_codes, 0, sizeof(tmp_codes)); xsize = h->xsize; j = 0; for(x=0;xbits [j ]; tmp_codes[(x << 5) | y | ((x&&y)<<4)]= h->codes[j++]; } } /* XXX: fail test */ huff_vlc[i].table = huff_vlc_tables+offset; huff_vlc[i].table_allocated = huff_vlc_tables_sizes[i]; init_vlc(&huff_vlc[i], 7, 512, tmp_bits, 1, 1, tmp_codes, 2, 2, INIT_VLC_USE_NEW_STATIC); offset += huff_vlc_tables_sizes[i]; } assert(offset == FF_ARRAY_ELEMS(huff_vlc_tables)); offset = 0; for(i=0;i<2;i++) { huff_quad_vlc[i].table = huff_quad_vlc_tables+offset; huff_quad_vlc[i].table_allocated = huff_quad_vlc_tables_sizes[i]; init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16, mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); offset += huff_quad_vlc_tables_sizes[i]; } assert(offset == FF_ARRAY_ELEMS(huff_quad_vlc_tables)); for(i=0;i<9;i++) { k = 0; for(j=0;j<22;j++) { band_index_long[i][j] = k; k += band_size_long[i][j]; } band_index_long[i][22] = k; } /* compute n ^ (4/3) and store it in mantissa/exp format */ int_pow_init(); mpegaudio_tableinit(); for (i = 0; i < 4; i++) if (ff_mpa_quant_bits[i] < 0) for (j = 0; j < (1<<(-ff_mpa_quant_bits[i]+1)); j++) { int val1, val2, val3, steps; int val = j; steps = ff_mpa_quant_steps[i]; val1 = val % steps; val /= steps; val2 = val % steps; val3 = val / steps; division_tabs[i][j] = val1 + (val2 << 4) + (val3 << 8); } for(i=0;i<7;i++) { float f; INTFLOAT v; if (i != 6) { f = tan((double)i * M_PI / 12.0); v = FIXR(f / (1.0 + f)); } else { v = FIXR(1.0); } is_table[0][i] = v; is_table[1][6 - i] = v; } /* invalid values */ for(i=7;i<16;i++) is_table[0][i] = is_table[1][i] = 0.0; for(i=0;i<16;i++) { double f; int e, k; for(j=0;j<2;j++) { e = -(j + 1) * ((i + 1) >> 1); f = pow(2.0, e / 4.0); k = i & 1; is_table_lsf[j][k ^ 1][i] = FIXR(f); is_table_lsf[j][k][i] = FIXR(1.0); av_dlog(avctx, \"is_table_lsf %d %d: %x %x\\n\", i, j, is_table_lsf[j][0][i], is_table_lsf[j][1][i]); } } for(i=0;i<8;i++) { float ci, cs, ca; ci = ci_table[i]; cs = 1.0 / sqrt(1.0 + ci * ci); ca = cs * ci; csa_table[i][0] = FIXHR(cs/4); csa_table[i][1] = FIXHR(ca/4); csa_table[i][2] = FIXHR(ca/4) + FIXHR(cs/4); csa_table[i][3] = FIXHR(ca/4) - FIXHR(cs/4); csa_table_float[i][0] = cs; csa_table_float[i][1] = ca; csa_table_float[i][2] = ca + cs; csa_table_float[i][3] = ca - cs; } /* compute mdct windows */ for(i=0;i<36;i++) { for(j=0; j<4; j++){ double d; if(j==2 && i%3 != 1) continue; d= sin(M_PI * (i + 0.5) / 36.0); if(j==1){ if (i>=30) d= 0; else if(i>=24) d= sin(M_PI * (i - 18 + 0.5) / 12.0); else if(i>=18) d= 1; }else if(j==3){ if (i< 6) d= 0; else if(i< 12) d= sin(M_PI * (i - 6 + 0.5) / 12.0); else if(i< 18) d= 1; } //merge last stage of imdct into the window coefficients d*= 0.5 / cos(M_PI*(2*i + 19)/72); if(j==2) mdct_win[j][i/3] = FIXHR((d / (1<<5))); else mdct_win[j][i ] = FIXHR((d / 
(1<<5))); } } /* NOTE: we do frequency inversion adter the MDCT by changing the sign of the right window coefs */ for(j=0;j<4;j++) { for(i=0;i<36;i+=2) { mdct_win[j + 4][i] = mdct_win[j][i]; mdct_win[j + 4][i + 1] = -mdct_win[j][i + 1]; } } init = 1; } if (avctx->codec_id == CODEC_ID_MP3ADU) s->adu_mode = 1; return 0; }"} {"target": 0, "idx": 8487, "func": "static int libgsm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { uint8_t *buf = avpkt->data; int buf_size = avpkt->size; int out_size = avctx->frame_size * av_get_bytes_per_sample(avctx->sample_fmt); if (*data_size < out_size) { av_log(avctx, AV_LOG_ERROR, \"Output buffer is too small\\n\"); return AVERROR(EINVAL); } if (buf_size < avctx->block_align) { av_log(avctx, AV_LOG_ERROR, \"Packet is too small\\n\"); return AVERROR_INVALIDDATA; } switch(avctx->codec_id) { case CODEC_ID_GSM: if(gsm_decode(avctx->priv_data,buf,data)) return -1; break; case CODEC_ID_GSM_MS: if(gsm_decode(avctx->priv_data,buf,data) || gsm_decode(avctx->priv_data,buf+33,((int16_t*)data)+GSM_FRAME_SIZE)) return -1; } *data_size = out_size; return avctx->block_align; }"} {"target": 1, "idx": 8518, "func": "static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) { unsigned access_size_min = mr->ops->impl.min_access_size; unsigned access_size_max = mr->ops->impl.max_access_size; /* Regions are assumed to support 1-4 byte accesses unless otherwise specified. */ if (access_size_min == 0) { access_size_min = 1; } if (access_size_max == 0) { access_size_max = 4; } /* Bound the maximum access by the alignment of the address. */ if (!mr->ops->impl.unaligned) { unsigned align_size_max = addr & -addr; if (align_size_max != 0 && align_size_max < access_size_max) { access_size_max = align_size_max; } } /* Don't attempt accesses larger than the maximum. */ if (l > access_size_max) { l = access_size_max; } /* ??? The users of this function are wrong, not supporting minimums larger than the remaining length. C.f. memory.c:access_with_adjusted_size. */ assert(l >= access_size_min); return l; }"} {"target": 1, "idx": 8522, "func": "void fw_cfg_add_file_callback(FWCfgState *s, const char *filename, FWCfgCallback select_cb, FWCfgWriteCallback write_cb, void *callback_opaque, void *data, size_t len, bool read_only) { int i, index, count; size_t dsize; MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); int order = 0; if (!s->files) { dsize = sizeof(uint32_t) + sizeof(FWCfgFile) * fw_cfg_file_slots(s); s->files = g_malloc0(dsize); fw_cfg_add_bytes(s, FW_CFG_FILE_DIR, s->files, dsize); } count = be32_to_cpu(s->files->count); assert(count < fw_cfg_file_slots(s)); /* Find the insertion point. */ if (mc->legacy_fw_cfg_order) { /* * Sort by order. For files with the same order, we keep them * in the sequence in which they were added. */ order = get_fw_cfg_order(s, filename); for (index = count; index > 0 && order < s->entry_order[index - 1]; index--); } else { /* Sort by file name. */ for (index = count; index > 0 && strcmp(filename, s->files->f[index - 1].name) < 0; index--); } /* * Move all the entries from the index point and after down one * to create a slot for the new entry. Because calculations are * being done with the index, make it so that \"i\" is the current * index and \"i - 1\" is the one being copied from, thus the * unusual start and end in the for statement. 
*/ for (i = count + 1; i > index; i--) { s->files->f[i] = s->files->f[i - 1]; s->files->f[i].select = cpu_to_be16(FW_CFG_FILE_FIRST + i); s->entries[0][FW_CFG_FILE_FIRST + i] = s->entries[0][FW_CFG_FILE_FIRST + i - 1]; s->entry_order[i] = s->entry_order[i - 1]; } memset(&s->files->f[index], 0, sizeof(FWCfgFile)); memset(&s->entries[0][FW_CFG_FILE_FIRST + index], 0, sizeof(FWCfgEntry)); pstrcpy(s->files->f[index].name, sizeof(s->files->f[index].name), filename); for (i = 0; i <= count; i++) { if (i != index && strcmp(s->files->f[index].name, s->files->f[i].name) == 0) { error_report(\"duplicate fw_cfg file name: %s\", s->files->f[index].name); exit(1); } } fw_cfg_add_bytes_callback(s, FW_CFG_FILE_FIRST + index, select_cb, write_cb, callback_opaque, data, len, read_only); s->files->f[index].size = cpu_to_be32(len); s->files->f[index].select = cpu_to_be16(FW_CFG_FILE_FIRST + index); s->entry_order[index] = order; trace_fw_cfg_add_file(s, index, s->files->f[index].name, len); s->files->count = cpu_to_be32(count+1); }"} {"target": 1, "idx": 8526, "func": "static void stellaris_enet_save(QEMUFile *f, void *opaque) { stellaris_enet_state *s = (stellaris_enet_state *)opaque; int i; qemu_put_be32(f, s->ris); qemu_put_be32(f, s->im); qemu_put_be32(f, s->rctl); qemu_put_be32(f, s->tctl); qemu_put_be32(f, s->thr); qemu_put_be32(f, s->mctl); qemu_put_be32(f, s->mdv); qemu_put_be32(f, s->mtxd); qemu_put_be32(f, s->mrxd); qemu_put_be32(f, s->np); qemu_put_be32(f, s->tx_fifo_len); qemu_put_buffer(f, s->tx_fifo, sizeof(s->tx_fifo)); for (i = 0; i < 31; i++) { qemu_put_be32(f, s->rx[i].len); qemu_put_buffer(f, s->rx[i].data, sizeof(s->rx[i].data)); } qemu_put_be32(f, s->next_packet); qemu_put_be32(f, s->rx_fifo_offset); }"} {"target": 0, "idx": 8534, "func": "static void exit_program(void) { int i, j; for (i = 0; i < nb_filtergraphs; i++) { avfilter_graph_free(&filtergraphs[i]->graph); for (j = 0; j < filtergraphs[i]->nb_inputs; j++) { av_freep(&filtergraphs[i]->inputs[j]->name); av_freep(&filtergraphs[i]->inputs[j]); } av_freep(&filtergraphs[i]->inputs); for (j = 0; j < filtergraphs[i]->nb_outputs; j++) { av_freep(&filtergraphs[i]->outputs[j]->name); av_freep(&filtergraphs[i]->outputs[j]); } av_freep(&filtergraphs[i]->outputs); av_freep(&filtergraphs[i]->graph_desc); av_freep(&filtergraphs[i]); } av_freep(&filtergraphs); /* close files */ for (i = 0; i < nb_output_files; i++) { AVFormatContext *s = output_files[i]->ctx; if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb) avio_close(s->pb); avformat_free_context(s); av_dict_free(&output_files[i]->opts); av_freep(&output_files[i]); } for (i = 0; i < nb_output_streams; i++) { AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters; while (bsfc) { AVBitStreamFilterContext *next = bsfc->next; av_bitstream_filter_close(bsfc); bsfc = next; } output_streams[i]->bitstream_filters = NULL; avcodec_free_frame(&output_streams[i]->filtered_frame); av_freep(&output_streams[i]->forced_keyframes); av_freep(&output_streams[i]->avfilter); av_freep(&output_streams[i]->logfile_prefix); av_freep(&output_streams[i]); } for (i = 0; i < nb_input_files; i++) { avformat_close_input(&input_files[i]->ctx); av_freep(&input_files[i]); } for (i = 0; i < nb_input_streams; i++) { av_frame_free(&input_streams[i]->decoded_frame); av_frame_free(&input_streams[i]->filter_frame); av_dict_free(&input_streams[i]->opts); av_freep(&input_streams[i]->filters); av_freep(&input_streams[i]); } if (vstats_file) fclose(vstats_file); av_free(vstats_filename); av_freep(&input_streams); 
av_freep(&input_files); av_freep(&output_streams); av_freep(&output_files); uninit_opts(); avfilter_uninit(); avformat_network_deinit(); if (received_sigterm) { av_log(NULL, AV_LOG_INFO, \"Received signal %d: terminating.\\n\", (int) received_sigterm); exit (255); } }"} {"target": 1, "idx": 8543, "func": "static int coroutine_fn cow_co_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *num_same) { int64_t bitnum = sector_num + sizeof(struct cow_header_v2) * 8; uint64_t offset = (bitnum / 8) & -BDRV_SECTOR_SIZE; uint8_t bitmap[BDRV_SECTOR_SIZE]; int ret; int changed; ret = bdrv_pread(bs->file, offset, &bitmap, sizeof(bitmap)); if (ret < 0) { return ret; } bitnum &= BITS_PER_BITMAP_SECTOR - 1; changed = cow_test_bit(bitnum, bitmap); *num_same = cow_find_streak(bitmap, changed, bitnum, nb_sectors); return changed; }"} {"target": 0, "idx": 8565, "func": "bool timerlist_expired(QEMUTimerList *timer_list) { int64_t expire_time; if (!atomic_read(&timer_list->active_timers)) { return false; } qemu_mutex_lock(&timer_list->active_timers_lock); if (!timer_list->active_timers) { qemu_mutex_unlock(&timer_list->active_timers_lock); return false; } expire_time = timer_list->active_timers->expire_time; qemu_mutex_unlock(&timer_list->active_timers_lock); return expire_time < qemu_clock_get_ns(timer_list->clock->type); }"} {"target": 0, "idx": 8568, "func": "static void check_refcounts(BlockDriverState *bs) { BDRVQcowState *s = bs->opaque; int64_t size; int nb_clusters, refcount1, refcount2, i; QCowSnapshot *sn; uint16_t *refcount_table; size = bdrv_getlength(s->hd); nb_clusters = size_to_clusters(s, size); refcount_table = qemu_mallocz(nb_clusters * sizeof(uint16_t)); /* header */ inc_refcounts(bs, refcount_table, nb_clusters, 0, s->cluster_size); check_refcounts_l1(bs, refcount_table, nb_clusters, s->l1_table_offset, s->l1_size, 1); /* snapshots */ for(i = 0; i < s->nb_snapshots; i++) { sn = s->snapshots + i; check_refcounts_l1(bs, refcount_table, nb_clusters, sn->l1_table_offset, sn->l1_size, 0); } inc_refcounts(bs, refcount_table, nb_clusters, s->snapshots_offset, s->snapshots_size); /* refcount data */ inc_refcounts(bs, refcount_table, nb_clusters, s->refcount_table_offset, s->refcount_table_size * sizeof(uint64_t)); for(i = 0; i < s->refcount_table_size; i++) { int64_t offset; offset = s->refcount_table[i]; if (offset != 0) { inc_refcounts(bs, refcount_table, nb_clusters, offset, s->cluster_size); } } /* compare ref counts */ for(i = 0; i < nb_clusters; i++) { refcount1 = get_refcount(bs, i); refcount2 = refcount_table[i]; if (refcount1 != refcount2) fprintf(stderr, \"ERROR cluster %d refcount=%d reference=%d\\n\", i, refcount1, refcount2); } qemu_free(refcount_table); }"} {"target": 1, "idx": 8577, "func": "static int ipoctal_init(IPackDevice *ip) { IPOctalState *s = IPOCTAL(ip); unsigned i; for (i = 0; i < N_CHANNELS; i++) { SCC2698Channel *ch = &s->ch[i]; ch->ipoctal = s; /* Redirect IP-Octal channels to host character devices */ if (ch->devpath) { const char chr_name[] = \"ipoctal\"; char label[ARRAY_SIZE(chr_name) + 2]; static int index; snprintf(label, sizeof(label), \"%s%d\", chr_name, index); ch->dev = qemu_chr_new(label, ch->devpath, NULL); if (ch->dev) { index++; qemu_chr_add_handlers(ch->dev, hostdev_can_receive, hostdev_receive, hostdev_event, ch); DPRINTF(\"Redirecting channel %u to %s (%s)\\n\", i, ch->devpath, label); } else { DPRINTF(\"Could not redirect channel %u to %s\\n\", i, ch->devpath); } } } return 0; }"} {"target": 1, "idx": 8592, "func": "static 
AddrRange addrrange_intersection(AddrRange r1, AddrRange r2) { uint64_t start = MAX(r1.start, r2.start); /* off-by-one arithmetic to prevent overflow */ uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1); return addrrange_make(start, end - start + 1); }"} {"target": 0, "idx": 8617, "func": "static int wc3_read_header(AVFormatContext *s, AVFormatParameters *ap) { Wc3DemuxContext *wc3 = s->priv_data; ByteIOContext *pb = s->pb; unsigned int fourcc_tag; unsigned int size; AVStream *st; unsigned char preamble[WC3_PREAMBLE_SIZE]; int ret = 0; int current_palette = 0; int bytes_to_read; int i; unsigned char rotate; /* default context members */ wc3->width = WC3_DEFAULT_WIDTH; wc3->height = WC3_DEFAULT_HEIGHT; wc3->palettes = NULL; wc3->palette_count = 0; wc3->pts = 0; wc3->video_stream_index = wc3->audio_stream_index = 0; /* skip the first 3 32-bit numbers */ url_fseek(pb, 12, SEEK_CUR); /* traverse through the chunks and load the header information before * the first BRCH tag */ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) != WC3_PREAMBLE_SIZE) return AVERROR(EIO); fourcc_tag = AV_RL32(&preamble[0]); size = (AV_RB32(&preamble[4]) + 1) & (~1); do { switch (fourcc_tag) { case SOND_TAG: case INDX_TAG: /* SOND unknown, INDX unnecessary; ignore both */ url_fseek(pb, size, SEEK_CUR); break; case _PC__TAG: /* need the number of palettes */ url_fseek(pb, 8, SEEK_CUR); if ((ret = get_buffer(pb, preamble, 4)) != 4) return AVERROR(EIO); wc3->palette_count = AV_RL32(&preamble[0]); if((unsigned)wc3->palette_count >= UINT_MAX / PALETTE_SIZE){ wc3->palette_count= 0; return -1; } wc3->palettes = av_malloc(wc3->palette_count * PALETTE_SIZE); break; case BNAM_TAG: /* load up the name */ if ((unsigned)size < 512) bytes_to_read = size; else bytes_to_read = 512; if ((ret = get_buffer(pb, s->title, bytes_to_read)) != bytes_to_read) return AVERROR(EIO); break; case SIZE_TAG: /* video resolution override */ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) != WC3_PREAMBLE_SIZE) return AVERROR(EIO); wc3->width = AV_RL32(&preamble[0]); wc3->height = AV_RL32(&preamble[4]); break; case PALT_TAG: /* one of several palettes */ if ((unsigned)current_palette >= wc3->palette_count) return AVERROR_INVALIDDATA; if ((ret = get_buffer(pb, &wc3->palettes[current_palette * PALETTE_SIZE], PALETTE_SIZE)) != PALETTE_SIZE) return AVERROR(EIO); /* transform the current palette in place */ for (i = current_palette * PALETTE_SIZE; i < (current_palette + 1) * PALETTE_SIZE; i++) { /* rotate each palette component left by 2 and use the result * as an index into the color component table */ rotate = ((wc3->palettes[i] << 2) & 0xFF) | ((wc3->palettes[i] >> 6) & 0xFF); wc3->palettes[i] = wc3_pal_lookup[rotate]; } current_palette++; break; default: av_log(s, AV_LOG_ERROR, \" unrecognized WC3 chunk: %c%c%c%c (0x%02X%02X%02X%02X)\\n\", preamble[0], preamble[1], preamble[2], preamble[3], preamble[0], preamble[1], preamble[2], preamble[3]); return AVERROR_INVALIDDATA; break; } if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) != WC3_PREAMBLE_SIZE) return AVERROR(EIO); fourcc_tag = AV_RL32(&preamble[0]); /* chunk sizes are 16-bit aligned */ size = (AV_RB32(&preamble[4]) + 1) & (~1); } while (fourcc_tag != BRCH_TAG); /* initialize the decoder streams */ st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); av_set_pts_info(st, 33, 1, 90000); wc3->video_stream_index = st->index; st->codec->codec_type = CODEC_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_XAN_WC3; st->codec->codec_tag = 0; /* no fourcc */ 
st->codec->width = wc3->width; st->codec->height = wc3->height; /* palette considerations */ st->codec->palctrl = &wc3->palette_control; st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); av_set_pts_info(st, 33, 1, 90000); wc3->audio_stream_index = st->index; st->codec->codec_type = CODEC_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_PCM_S16LE; st->codec->codec_tag = 1; st->codec->channels = WC3_AUDIO_CHANNELS; st->codec->bits_per_sample = WC3_AUDIO_BITS; st->codec->sample_rate = WC3_SAMPLE_RATE; st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_sample; st->codec->block_align = WC3_AUDIO_BITS * WC3_AUDIO_CHANNELS; return 0; }"} {"target": 0, "idx": 8673, "func": "static int64_t do_strtosz(const char *nptr, char **end, const char default_suffix, int64_t unit) { int64_t retval; char *endptr; unsigned char c; int mul_required = 0; double val, mul, integral, fraction; errno = 0; val = strtod(nptr, &endptr); if (isnan(val) || endptr == nptr || errno != 0) { retval = -EINVAL; goto out; } fraction = modf(val, &integral); if (fraction != 0) { mul_required = 1; } c = *endptr; mul = suffix_mul(c, unit); if (mul >= 0) { endptr++; } else { mul = suffix_mul(default_suffix, unit); assert(mul >= 0); } if (mul == 1 && mul_required) { retval = -EINVAL; goto out; } if ((val * mul >= INT64_MAX) || val < 0) { retval = -ERANGE; goto out; } retval = val * mul; out: if (end) { *end = endptr; } else if (*endptr) { retval = -EINVAL; } return retval; }"} {"target": 1, "idx": 8707, "func": "static int check_refcounts_l1(BlockDriverState *bs, BdrvCheckResult *res, uint16_t **refcount_table, int64_t *refcount_table_size, int64_t l1_table_offset, int l1_size, int flags) { BDRVQcowState *s = bs->opaque; uint64_t *l1_table = NULL, l2_offset, l1_size2; int i, ret; l1_size2 = l1_size * sizeof(uint64_t); /* Mark L1 table as used */ ret = inc_refcounts(bs, res, refcount_table, refcount_table_size, l1_table_offset, l1_size2); if (ret < 0) { goto fail; } /* Read L1 table entries from disk */ if (l1_size2 > 0) { l1_table = g_try_malloc(l1_size2); if (l1_table == NULL) { ret = -ENOMEM; res->check_errors++; goto fail; } ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2); if (ret < 0) { fprintf(stderr, \"ERROR: I/O error in check_refcounts_l1\\n\"); res->check_errors++; goto fail; } for(i = 0;i < l1_size; i++) be64_to_cpus(&l1_table[i]); } /* Do the actual checks */ for(i = 0; i < l1_size; i++) { l2_offset = l1_table[i]; if (l2_offset) { /* Mark L2 table as used */ l2_offset &= L1E_OFFSET_MASK; ret = inc_refcounts(bs, res, refcount_table, refcount_table_size, l2_offset, s->cluster_size); if (ret < 0) { goto fail; } /* L2 tables are cluster aligned */ if (offset_into_cluster(s, l2_offset)) { fprintf(stderr, \"ERROR l2_offset=%\" PRIx64 \": Table is not \" \"cluster aligned; L1 entry corrupted\\n\", l2_offset); res->corruptions++; } /* Process and check L2 entries */ ret = check_refcounts_l2(bs, res, refcount_table, refcount_table_size, l2_offset, flags); if (ret < 0) { goto fail; } } } g_free(l1_table); return 0; fail: g_free(l1_table); return ret; }"} {"target": 1, "idx": 8720, "func": "static uint32_t dcr_read_pob (void *opaque, int dcrn) { ppc4xx_pob_t *pob; uint32_t ret; pob = opaque; switch (dcrn) { case POB0_BEAR: ret = pob->bear; break; case POB0_BESR0: case POB0_BESR1: ret = pob->besr[dcrn - POB0_BESR0]; break; default: /* Avoid gcc warning */ ret = 0; break; } return ret; }"} {"target": 0, "idx": 8755, "func": "static inline int onenand_erase(OneNANDState *s, int 
sec, int num) { uint8_t *blankbuf, *tmpbuf; blankbuf = g_malloc(512); if (!blankbuf) { return 1; } tmpbuf = g_malloc(512); if (!tmpbuf) { g_free(blankbuf); return 1; } memset(blankbuf, 0xff, 512); for (; num > 0; num--, sec++) { if (s->bdrv_cur) { int erasesec = s->secs_cur + (sec >> 5); if (bdrv_write(s->bdrv_cur, sec, blankbuf, 1) < 0) { goto fail; } if (bdrv_read(s->bdrv_cur, erasesec, tmpbuf, 1) < 0) { goto fail; } memcpy(tmpbuf + ((sec & 31) << 4), blankbuf, 1 << 4); if (bdrv_write(s->bdrv_cur, erasesec, tmpbuf, 1) < 0) { goto fail; } } else { if (sec + 1 > s->secs_cur) { goto fail; } memcpy(s->current + (sec << 9), blankbuf, 512); memcpy(s->current + (s->secs_cur << 9) + (sec << 4), blankbuf, 1 << 4); } } g_free(tmpbuf); g_free(blankbuf); return 0; fail: g_free(tmpbuf); g_free(blankbuf); return 1; }"} {"target": 1, "idx": 8793, "func": "static int vmdk_parse_extents(const char *desc, BlockDriverState *bs, const char *desc_file_path, Error **errp) { int ret; char access[11]; char type[11]; char fname[512]; const char *p = desc; int64_t sectors = 0; int64_t flat_offset; char extent_path[PATH_MAX]; BlockDriverState *extent_file; BDRVVmdkState *s = bs->opaque; VmdkExtent *extent; while (*p) { /* parse extent line: * RW [size in sectors] FLAT \"file-name.vmdk\" OFFSET * or * RW [size in sectors] SPARSE \"file-name.vmdk\" */ flat_offset = -1; ret = sscanf(p, \"%10s %\" SCNd64 \" %10s \\\"%511[^\\n\\r\\\"]\\\" %\" SCNd64, access, &sectors, type, fname, &flat_offset); if (ret < 4 || strcmp(access, \"RW\")) { goto next_line; } else if (!strcmp(type, \"FLAT\")) { if (ret != 5 || flat_offset < 0) { error_setg(errp, \"Invalid extent lines: \\n%s\", p); return -EINVAL; } } else if (!strcmp(type, \"VMFS\")) { if (ret == 4) { flat_offset = 0; } else { error_setg(errp, \"Invalid extent lines:\\n%s\", p); return -EINVAL; } } else if (ret != 4) { error_setg(errp, \"Invalid extent lines:\\n%s\", p); return -EINVAL; } if (sectors <= 0 || (strcmp(type, \"FLAT\") && strcmp(type, \"SPARSE\") && strcmp(type, \"VMFS\") && strcmp(type, \"VMFSSPARSE\")) || (strcmp(access, \"RW\"))) { goto next_line; } path_combine(extent_path, sizeof(extent_path), desc_file_path, fname); extent_file = NULL; ret = bdrv_open(&extent_file, extent_path, NULL, NULL, bs->open_flags | BDRV_O_PROTOCOL, NULL, errp); if (ret) { return ret; } /* save to extents array */ if (!strcmp(type, \"FLAT\") || !strcmp(type, \"VMFS\")) { /* FLAT extent */ ret = vmdk_add_extent(bs, extent_file, true, sectors, 0, 0, 0, 0, 0, &extent, errp); if (ret < 0) { return ret; } extent->flat_start_offset = flat_offset << 9; } else if (!strcmp(type, \"SPARSE\") || !strcmp(type, \"VMFSSPARSE\")) { /* SPARSE extent and VMFSSPARSE extent are both \"COWD\" sparse file*/ char *buf = vmdk_read_desc(extent_file, 0, errp); if (!buf) { ret = -EINVAL; } else { ret = vmdk_open_sparse(bs, extent_file, bs->open_flags, buf, errp); } if (ret) { g_free(buf); return ret; } extent = &s->extents[s->num_extents - 1]; } else { error_setg(errp, \"Unsupported extent type '%s'\", type); return -ENOTSUP; } extent->type = g_strdup(type); next_line: /* move to next line */ while (*p) { if (*p == '\\n') { p++; break; } p++; } } return 0; }"} {"target": 1, "idx": 8794, "func": "static int default_lockmgr_cb(void **arg, enum AVLockOp op) { void * volatile * mutex = arg; int err; switch (op) { case AV_LOCK_CREATE: return 0; case AV_LOCK_OBTAIN: if (!*mutex) { pthread_mutex_t *tmp = av_malloc(sizeof(pthread_mutex_t)); if (!tmp) return AVERROR(ENOMEM); if ((err = pthread_mutex_init(tmp, NULL))) {
av_free(tmp); return AVERROR(err); } if (avpriv_atomic_ptr_cas(mutex, NULL, tmp)) { pthread_mutex_destroy(tmp); av_free(tmp); } } if ((err = pthread_mutex_lock(*mutex))) return AVERROR(err); return 0; case AV_LOCK_RELEASE: if ((err = pthread_mutex_unlock(*mutex))) return AVERROR(err); return 0; case AV_LOCK_DESTROY: if (*mutex) pthread_mutex_destroy(*mutex); av_free(*mutex); avpriv_atomic_ptr_cas(mutex, *mutex, NULL); return 0; } return 1; }"} {"target": 1, "idx": 8815, "func": "static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int p_type){ MotionEstContext * const c= &s->me; Picture *p= s->current_picture_ptr; int mb_xy= mb_x + mb_y*s->mb_stride; int xy= 2*mb_x + 2*mb_y*s->b8_stride; int mb_type= s->current_picture.mb_type[mb_xy]; int flags= c->flags; int shift= (flags&FLAG_QPEL) + 1; int mask= (1<dsp.sse[0]; me_cmp_func chroma_cmpf= s->dsp.sse[1]; assert(p_type==0 || !USES_LIST(mb_type, 1)); assert(IS_INTRA(mb_type) || USES_LIST(mb_type,0) || USES_LIST(mb_type,1)); if(IS_INTERLACED(mb_type)){ int xy2= xy + s->b8_stride; s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTRA; c->stride<<=1; c->uvstride<<=1; init_interlaced_ref(s, 2); assert(s->flags & CODEC_FLAG_INTERLACED_ME); if(USES_LIST(mb_type, 0)){ int field_select0= p->ref_index[0][xy ]; int field_select1= p->ref_index[0][xy2]; assert(field_select0==0 ||field_select0==1); assert(field_select1==0 ||field_select1==1); if(p_type){ s->p_field_select_table[0][mb_xy]= field_select0; s->p_field_select_table[1][mb_xy]= field_select1; *(uint32_t*)s->p_field_mv_table[0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[0][xy ]; *(uint32_t*)s->p_field_mv_table[1][field_select1][mb_xy]= *(uint32_t*)p->motion_val[0][xy2]; s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER_I; }else{ s->b_field_select_table[0][0][mb_xy]= field_select0; s->b_field_select_table[0][1][mb_xy]= field_select1; *(uint32_t*)s->b_field_mv_table[0][0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[0][xy ]; *(uint32_t*)s->b_field_mv_table[0][1][field_select1][mb_xy]= *(uint32_t*)p->motion_val[0][xy2]; s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_FORWARD_I; } x= p->motion_val[0][xy ][0]; y= p->motion_val[0][xy ][1]; d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select0, 0, cmpf, chroma_cmpf, flags); x= p->motion_val[0][xy2][0]; y= p->motion_val[0][xy2][1]; d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select1, 1, cmpf, chroma_cmpf, flags); } if(USES_LIST(mb_type, 1)){ int field_select0= p->ref_index[1][xy ]; int field_select1= p->ref_index[1][xy2]; assert(field_select0==0 ||field_select0==1); assert(field_select1==0 ||field_select1==1); s->b_field_select_table[1][0][mb_xy]= field_select0; s->b_field_select_table[1][1][mb_xy]= field_select1; *(uint32_t*)s->b_field_mv_table[1][0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[1][xy ]; *(uint32_t*)s->b_field_mv_table[1][1][field_select1][mb_xy]= *(uint32_t*)p->motion_val[1][xy2]; if(USES_LIST(mb_type, 0)){ s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_BIDIR_I; }else{ s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_BACKWARD_I; } x= p->motion_val[1][xy ][0]; y= p->motion_val[1][xy ][1]; d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select0+2, 0, cmpf, chroma_cmpf, flags); x= p->motion_val[1][xy2][0]; y= p->motion_val[1][xy2][1]; d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select1+2, 1, cmpf, chroma_cmpf, flags); //FIXME bidir scores } c->stride>>=1; c->uvstride>>=1; }else if(IS_8X8(mb_type)){ cmpf= s->dsp.sse[1]; chroma_cmpf= s->dsp.sse[1]; init_mv4_ref(s); for(i=0; i<4; i++){ xy= 
s->block_index[i]; x= p->motion_val[0][xy][0]; y= p->motion_val[0][xy][1]; d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 1, 8, i, i, cmpf, chroma_cmpf, flags); } s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER4V; }else{ if(USES_LIST(mb_type, 0)){ if(p_type){ *(uint32_t*)s->p_mv_table[mb_xy]= *(uint32_t*)p->motion_val[0][xy]; s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER; }else if(USES_LIST(mb_type, 1)){ *(uint32_t*)s->b_bidir_forw_mv_table[mb_xy]= *(uint32_t*)p->motion_val[0][xy]; *(uint32_t*)s->b_bidir_back_mv_table[mb_xy]= *(uint32_t*)p->motion_val[1][xy]; s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_BIDIR; }else{ *(uint32_t*)s->b_forw_mv_table[mb_xy]= *(uint32_t*)p->motion_val[0][xy]; s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_FORWARD; } x= p->motion_val[0][xy][0]; y= p->motion_val[0][xy][1]; d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 16, 0, 0, cmpf, chroma_cmpf, flags); }else if(USES_LIST(mb_type, 1)){ *(uint32_t*)s->b_back_mv_table[mb_xy]= *(uint32_t*)p->motion_val[1][xy]; s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_BACKWARD; x= p->motion_val[1][xy][0]; y= p->motion_val[1][xy][1]; d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 16, 2, 0, cmpf, chroma_cmpf, flags); }else s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTRA; } return d; }"} {"target": 0, "idx": 8825, "func": "void ff_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y) { UINT8 *pix, *ppix; int sum, varc, vard, mx, my, range, dmin, xx, yy; int xmin, ymin, xmax, ymax; int rel_xmin, rel_ymin, rel_xmax, rel_ymax; int pred_x=0, pred_y=0; int P[6][2]; const int shift= 1+s->quarter_sample; int mb_type=0; uint8_t *ref_picture= s->last_picture[0]; get_limits(s, &range, &xmin, &ymin, &xmax, &ymax, s->f_code); switch(s->me_method) { case ME_ZERO: default: no_motion_search(s, &mx, &my); dmin = 0; break; case ME_FULL: dmin = full_motion_search(s, &mx, &my, range, xmin, ymin, xmax, ymax, ref_picture); break; case ME_LOG: dmin = log_motion_search(s, &mx, &my, range / 2, xmin, ymin, xmax, ymax, ref_picture); break; case ME_PHODS: dmin = phods_motion_search(s, &mx, &my, range / 2, xmin, ymin, xmax, ymax, ref_picture); break; case ME_X1: case ME_EPZS: { const int mot_stride = s->block_wrap[0]; const int mot_xy = s->block_index[0]; rel_xmin= xmin - mb_x*16; rel_xmax= xmax - mb_x*16; rel_ymin= ymin - mb_y*16; rel_ymax= ymax - mb_y*16; P[0][0] = s->motion_val[mot_xy ][0]; P[0][1] = s->motion_val[mot_xy ][1]; P[1][0] = s->motion_val[mot_xy - 1][0]; P[1][1] = s->motion_val[mot_xy - 1][1]; if(P[1][0] > (rel_xmax<first_slice_line || s->first_gob_line)) { P[4][0] = P[1][0]; P[4][1] = P[1][1]; } else { P[2][0] = s->motion_val[mot_xy - mot_stride ][0]; P[2][1] = s->motion_val[mot_xy - mot_stride ][1]; P[3][0] = s->motion_val[mot_xy - mot_stride + 2 ][0]; P[3][1] = s->motion_val[mot_xy - mot_stride + 2 ][1]; if(P[2][1] > (rel_ymax< (rel_ymax<out_format == FMT_H263){ pred_x = P[4][0]; pred_y = P[4][1]; }else { /* mpeg1 at least */ pred_x= P[1][0]; pred_y= P[1][1]; } } dmin = epzs_motion_search(s, &mx, &my, P, pred_x, pred_y, rel_xmin, rel_ymin, rel_xmax, rel_ymax, ref_picture); mx+= mb_x*16; my+= mb_y*16; break; } if(s->flags&CODEC_FLAG_4MV){ int block; mb_type|= MB_TYPE_INTER4V; for(block=0; block<4; block++){ int mx4, my4; int pred_x4, pred_y4; int dmin4; static const int off[4]= {2, 1, 1, -1}; const int mot_stride = s->block_wrap[0]; const int mot_xy = s->block_index[block]; const int block_x= mb_x*2 + (block&1); const int block_y= mb_y*2 + (block>>1); const int rel_xmin4= xmin - block_x*8; const int rel_xmax4= xmax - block_x*8 + 8; const int rel_ymin4= ymin - 
block_y*8; const int rel_ymax4= ymax - block_y*8 + 8; P[0][0] = s->motion_val[mot_xy ][0]; P[0][1] = s->motion_val[mot_xy ][1]; P[1][0] = s->motion_val[mot_xy - 1][0]; P[1][1] = s->motion_val[mot_xy - 1][1]; if(P[1][0] > (rel_xmax4<first_slice_line || s->first_gob_line) && block<2) { P[4][0] = P[1][0]; P[4][1] = P[1][1]; } else { P[2][0] = s->motion_val[mot_xy - mot_stride ][0]; P[2][1] = s->motion_val[mot_xy - mot_stride ][1]; P[3][0] = s->motion_val[mot_xy - mot_stride + off[block]][0]; P[3][1] = s->motion_val[mot_xy - mot_stride + off[block]][1]; if(P[2][1] > (rel_ymax4< (rel_xmax4< (rel_ymax4<out_format == FMT_H263){ pred_x4 = P[4][0]; pred_y4 = P[4][1]; }else { /* mpeg1 at least */ pred_x4= P[1][0]; pred_y4= P[1][1]; } P[5][0]= mx - mb_x*16; P[5][1]= my - mb_y*16; dmin4 = epzs_motion_search4(s, block, &mx4, &my4, P, pred_x4, pred_y4, rel_xmin4, rel_ymin4, rel_xmax4, rel_ymax4, ref_picture); halfpel_motion_search4(s, &mx4, &my4, dmin4, rel_xmin4, rel_ymin4, rel_xmax4, rel_ymax4, pred_x4, pred_y4, block_x, block_y, ref_picture); s->motion_val[ s->block_index[block] ][0]= mx4; s->motion_val[ s->block_index[block] ][1]= my4; } } /* intra / predictive decision */ xx = mb_x * 16; yy = mb_y * 16; pix = s->new_picture[0] + (yy * s->linesize) + xx; /* At this point (mx,my) are full-pell and the absolute displacement */ ppix = ref_picture + (my * s->linesize) + mx; sum = pix_sum(pix, s->linesize); #if 0 varc = pix_dev(pix, s->linesize, (sum+128)>>8) + INTER_BIAS; vard = pix_abs16x16(pix, ppix, s->linesize); #else sum= (sum+8)>>4; varc = ((pix_norm1(pix, s->linesize) - sum*sum + 128 + 500)>>8); vard = (pix_norm(pix, ppix, s->linesize)+128)>>8; #endif s->mb_var[s->mb_width * mb_y + mb_x] = varc; s->avg_mb_var+= varc; s->mc_mb_var += vard; #if 0 printf(\"varc=%4d avg_var=%4d (sum=%4d) vard=%4d mx=%2d my=%2d\\n\", varc, s->avg_mb_var, sum, vard, mx - xx, my - yy); #endif if(s->flags&CODEC_FLAG_HQ){ if (vard*2 + 200 > varc) mb_type|= MB_TYPE_INTRA; if (varc*2 + 200 > vard){ mb_type|= MB_TYPE_INTER; halfpel_motion_search(s, &mx, &my, dmin, xmin, ymin, xmax, ymax, pred_x, pred_y, ref_picture); }else{ mx = mx*2 - mb_x*32; my = my*2 - mb_y*32; } }else{ if (vard <= 64 || vard < varc) { mb_type|= MB_TYPE_INTER; if (s->me_method != ME_ZERO) { halfpel_motion_search(s, &mx, &my, dmin, xmin, ymin, xmax, ymax, pred_x, pred_y, ref_picture); } else { mx -= 16 * mb_x; my -= 16 * mb_y; } #if 0 if (vard < 10) { skip++; fprintf(stderr,\"\\nEarly skip: %d vard: %2d varc: %5d dmin: %d\", skip, vard, varc, dmin); } #endif }else{ mb_type|= MB_TYPE_INTRA; mx = 0;//mx*2 - 32 * mb_x; my = 0;//my*2 - 32 * mb_y; } } s->mb_type[mb_y*s->mb_width + mb_x]= mb_type; set_p_mv_tables(s, mx, my); }"} {"target": 0, "idx": 8835, "func": "av_cold int ff_vaapi_encode_close(AVCodecContext *avctx) { VAAPIEncodeContext *ctx = avctx->priv_data; VAAPIEncodePicture *pic, *next; for (pic = ctx->pic_start; pic; pic = next) { next = pic->next; vaapi_encode_free(avctx, pic); } if (ctx->va_context != VA_INVALID_ID) { vaDestroyContext(ctx->hwctx->display, ctx->va_context); ctx->va_context = VA_INVALID_ID; } if (ctx->va_config != VA_INVALID_ID) { vaDestroyConfig(ctx->hwctx->display, ctx->va_config); ctx->va_config = VA_INVALID_ID; } if (ctx->codec->close) ctx->codec->close(avctx); av_buffer_pool_uninit(&ctx->output_buffer_pool); av_freep(&ctx->codec_sequence_params); av_freep(&ctx->codec_picture_params); av_buffer_unref(&ctx->recon_frames_ref); av_buffer_unref(&ctx->input_frames_ref); av_buffer_unref(&ctx->device_ref); av_freep(&ctx->priv_data); 
return 0; }"} {"target": 0, "idx": 8846, "func": "static ssize_t socket_read(int sockfd, void *buff, size_t size) { ssize_t retval, total = 0; while (size) { retval = read(sockfd, buff, size); if (retval == 0) { return -EIO; } if (retval < 0) { if (errno == EINTR) { continue; } return -errno; } size -= retval; buff += retval; total += retval; } return total; }"} {"target": 0, "idx": 8855, "func": "AioContext *blk_get_aio_context(BlockBackend *blk) { return bdrv_get_aio_context(blk->bs); }"} {"target": 0, "idx": 8858, "func": "static int vfio_load_rom(VFIODevice *vdev) { uint64_t size = vdev->rom_size; char name[32]; off_t off = 0, voff = vdev->rom_offset; ssize_t bytes; void *ptr; /* If loading ROM from file, pci handles it */ if (vdev->pdev.romfile || !vdev->pdev.rom_bar || !size) { return 0; } DPRINTF(\"%s(%04x:%02x:%02x.%x)\\n\", __func__, vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function); snprintf(name, sizeof(name), \"vfio[%04x:%02x:%02x.%x].rom\", vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function); memory_region_init_ram(&vdev->pdev.rom, OBJECT(vdev), name, size); ptr = memory_region_get_ram_ptr(&vdev->pdev.rom); memset(ptr, 0xff, size); while (size) { bytes = pread(vdev->fd, ptr + off, size, voff + off); if (bytes == 0) { break; /* expect that we could get back less than the ROM BAR */ } else if (bytes > 0) { off += bytes; size -= bytes; } else { if (errno == EINTR || errno == EAGAIN) { continue; } error_report(\"vfio: Error reading device ROM: %m\"); memory_region_destroy(&vdev->pdev.rom); return -errno; } } pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, 0, &vdev->pdev.rom); vdev->pdev.has_rom = true; return 0; }"} {"target": 0, "idx": 8866, "func": "UuidInfo *qmp_query_uuid(Error **errp) { UuidInfo *info = g_malloc0(sizeof(*info)); char uuid[64]; snprintf(uuid, sizeof(uuid), UUID_FMT, qemu_uuid[0], qemu_uuid[1], qemu_uuid[2], qemu_uuid[3], qemu_uuid[4], qemu_uuid[5], qemu_uuid[6], qemu_uuid[7], qemu_uuid[8], qemu_uuid[9], qemu_uuid[10], qemu_uuid[11], qemu_uuid[12], qemu_uuid[13], qemu_uuid[14], qemu_uuid[15]); info->UUID = g_strdup(uuid); return info; }"} {"target": 0, "idx": 8878, "func": "static int mpc7_decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size; MPCContext *c = avctx->priv_data; GetBitContext gb; int i, ch; int mb = -1; Band *bands = c->bands; int off, ret, last_frame, skip; int bits_used, bits_avail; memset(bands, 0, sizeof(*bands) * (c->maxbands + 1)); buf_size = avpkt->size & ~3; if (buf_size <= 0) { av_log(avctx, AV_LOG_ERROR, \"packet size is too small (%i bytes)\\n\", avpkt->size); return AVERROR_INVALIDDATA; } if (buf_size != avpkt->size) { av_log(avctx, AV_LOG_WARNING, \"packet size is not a multiple of 4. \" \"extra bytes at the end will be skipped.\\n\"); } skip = buf[0]; last_frame = buf[1]; buf += 4; buf_size -= 4; /* get output buffer */ c->frame.nb_samples = last_frame ? 
c->lastframelen : MPC_FRAME_SIZE; if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; } av_fast_padded_malloc(&c->bits, &c->buf_size, buf_size); if (!c->bits) return AVERROR(ENOMEM); c->dsp.bswap_buf((uint32_t *)c->bits, (const uint32_t *)buf, buf_size >> 2); init_get_bits(&gb, c->bits, buf_size * 8); skip_bits_long(&gb, skip); /* read subband indexes */ for(i = 0; i <= c->maxbands; i++){ for(ch = 0; ch < 2; ch++){ int t = 4; if(i) t = get_vlc2(&gb, hdr_vlc.table, MPC7_HDR_BITS, 1) - 5; if(t == 4) bands[i].res[ch] = get_bits(&gb, 4); else bands[i].res[ch] = bands[i-1].res[ch] + t; } if(bands[i].res[0] || bands[i].res[1]){ mb = i; if(c->MSS) bands[i].msf = get_bits1(&gb); } } /* get scale indexes coding method */ for(i = 0; i <= mb; i++) for(ch = 0; ch < 2; ch++) if(bands[i].res[ch]) bands[i].scfi[ch] = get_vlc2(&gb, scfi_vlc.table, MPC7_SCFI_BITS, 1); /* get scale indexes */ for(i = 0; i <= mb; i++){ for(ch = 0; ch < 2; ch++){ if(bands[i].res[ch]){ bands[i].scf_idx[ch][2] = c->oldDSCF[ch][i]; bands[i].scf_idx[ch][0] = get_scale_idx(&gb, bands[i].scf_idx[ch][2]); switch(bands[i].scfi[ch]){ case 0: bands[i].scf_idx[ch][1] = get_scale_idx(&gb, bands[i].scf_idx[ch][0]); bands[i].scf_idx[ch][2] = get_scale_idx(&gb, bands[i].scf_idx[ch][1]); break; case 1: bands[i].scf_idx[ch][1] = get_scale_idx(&gb, bands[i].scf_idx[ch][0]); bands[i].scf_idx[ch][2] = bands[i].scf_idx[ch][1]; break; case 2: bands[i].scf_idx[ch][1] = bands[i].scf_idx[ch][0]; bands[i].scf_idx[ch][2] = get_scale_idx(&gb, bands[i].scf_idx[ch][1]); break; case 3: bands[i].scf_idx[ch][2] = bands[i].scf_idx[ch][1] = bands[i].scf_idx[ch][0]; break; } c->oldDSCF[ch][i] = bands[i].scf_idx[ch][2]; } } } /* get quantizers */ memset(c->Q, 0, sizeof(c->Q)); off = 0; for(i = 0; i < BANDS; i++, off += SAMPLES_PER_BAND) for(ch = 0; ch < 2; ch++) idx_to_quant(c, &gb, bands[i].res[ch], c->Q[ch] + off); ff_mpc_dequantize_and_synth(c, mb, c->frame.data[0], 2); bits_used = get_bits_count(&gb); bits_avail = buf_size * 8; if (!last_frame && ((bits_avail < bits_used) || (bits_used + 32 <= bits_avail))) { av_log(avctx, AV_LOG_ERROR, \"Error decoding frame: used %i of %i bits\\n\", bits_used, bits_avail); return -1; } if(c->frames_to_skip){ c->frames_to_skip--; *got_frame_ptr = 0; return avpkt->size; } *got_frame_ptr = 1; *(AVFrame *)data = c->frame; return avpkt->size; }"} {"target": 1, "idx": 8900, "func": "static int rle_unpack(const unsigned char *src, int src_len, int src_count, unsigned char *dest, int dest_len) { const unsigned char *ps; const unsigned char *ps_end; unsigned char *pd; int i, l; unsigned char *dest_end = dest + dest_len; ps = src; ps_end = src + src_len; pd = dest; if (src_count & 1) { if (ps_end - ps < 1) return 0; *pd++ = *ps++; } src_count >>= 1; i = 0; do { if (ps_end - ps < 1) break; l = *ps++; if (l & 0x80) { l = (l & 0x7F) * 2; if (pd + l > dest_end || ps_end - ps < l) return ps - src; memcpy(pd, ps, l); ps += l; pd += l; } else { if (pd + i > dest_end || ps_end - ps < 2) return ps - src; for (i = 0; i < l; i++) { *pd++ = ps[0]; *pd++ = ps[1]; } ps += 2; } i += l; } while (i < src_count); return ps - src; }"} {"target": 1, "idx": 8901, "func": "int vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb) { av_log(avctx, AV_LOG_DEBUG, \"Header: %0X\\n\", show_bits(gb, 32)); v->profile = get_bits(gb, 2); if (v->profile == PROFILE_COMPLEX) { av_log(avctx, AV_LOG_WARNING, \"WMV3 Complex Profile is not fully supported\\n\"); } if 
(v->profile == PROFILE_ADVANCED) { v->zz_8x4 = ff_vc1_adv_progressive_8x4_zz; v->zz_4x8 = ff_vc1_adv_progressive_4x8_zz; return decode_sequence_header_adv(v, gb); } else { v->zz_8x4 = wmv2_scantableA; v->zz_4x8 = wmv2_scantableB; v->res_y411 = get_bits1(gb); v->res_sprite = get_bits1(gb); if (v->res_y411) { av_log(avctx, AV_LOG_ERROR, \"Old interlaced mode is not supported\\n\"); return -1; } if (v->res_sprite) { av_log(avctx, AV_LOG_ERROR, \"WMVP is not fully supported\\n\"); } } // (fps-2)/4 (->30) v->frmrtq_postproc = get_bits(gb, 3); //common // (bitrate-32kbps)/64kbps v->bitrtq_postproc = get_bits(gb, 5); //common v->s.loop_filter = get_bits1(gb); //common if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE) { av_log(avctx, AV_LOG_ERROR, \"LOOPFILTER shall not be enabled in Simple Profile\\n\"); } if(v->s.avctx->skip_loop_filter >= AVDISCARD_ALL) v->s.loop_filter = 0; v->res_x8 = get_bits1(gb); //reserved v->multires = get_bits1(gb); v->res_fasttx = get_bits1(gb); if (!v->res_fasttx) { v->vc1dsp.vc1_inv_trans_8x8 = ff_simple_idct_8; v->vc1dsp.vc1_inv_trans_8x4 = ff_simple_idct84_add; v->vc1dsp.vc1_inv_trans_4x8 = ff_simple_idct48_add; v->vc1dsp.vc1_inv_trans_4x4 = ff_simple_idct44_add; v->vc1dsp.vc1_inv_trans_8x8_dc = ff_simple_idct_add_8; v->vc1dsp.vc1_inv_trans_8x4_dc = ff_simple_idct84_add; v->vc1dsp.vc1_inv_trans_4x8_dc = ff_simple_idct48_add; v->vc1dsp.vc1_inv_trans_4x4_dc = ff_simple_idct44_add; } v->fastuvmc = get_bits1(gb); //common if (!v->profile && !v->fastuvmc) { av_log(avctx, AV_LOG_ERROR, \"FASTUVMC unavailable in Simple Profile\\n\"); return -1; } v->extended_mv = get_bits1(gb); //common if (!v->profile && v->extended_mv) { av_log(avctx, AV_LOG_ERROR, \"Extended MVs unavailable in Simple Profile\\n\"); return -1; } v->dquant = get_bits(gb, 2); //common v->vstransform = get_bits1(gb); //common v->res_transtab = get_bits1(gb); if (v->res_transtab) { av_log(avctx, AV_LOG_ERROR, \"1 for reserved RES_TRANSTAB is forbidden\\n\"); return -1; } v->overlap = get_bits1(gb); //common v->s.resync_marker = get_bits1(gb); v->rangered = get_bits1(gb); if (v->rangered && v->profile == PROFILE_SIMPLE) { av_log(avctx, AV_LOG_INFO, \"RANGERED should be set to 0 in Simple Profile\\n\"); } v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common v->quantizer_mode = get_bits(gb, 2); //common v->finterpflag = get_bits1(gb); //common if (v->res_sprite) { v->s.avctx->width = v->s.avctx->coded_width = get_bits(gb, 11); v->s.avctx->height = v->s.avctx->coded_height = get_bits(gb, 11); skip_bits(gb, 5); //frame rate v->res_x8 = get_bits1(gb); if (get_bits1(gb)) { // something to do with DC VLC selection av_log(avctx, AV_LOG_ERROR, \"Unsupported sprite feature\\n\"); return -1; } skip_bits(gb, 3); //slice code v->res_rtm_flag = 0; } else { v->res_rtm_flag = get_bits1(gb); //reserved } if (!v->res_rtm_flag) { // av_log(avctx, AV_LOG_ERROR, // \"0 for reserved RES_RTM_FLAG is forbidden\\n\"); av_log(avctx, AV_LOG_ERROR, \"Old WMV3 version detected, some frames may be decoded incorrectly\\n\"); //return -1; } //TODO: figure out what they mean (always 0x402F) if(!v->res_fasttx) skip_bits(gb, 16); av_log(avctx, AV_LOG_DEBUG, \"Profile %i:\\nfrmrtq_postproc=%i, bitrtq_postproc=%i\\n\" \"LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\\n\" \"Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\\n\" \"DQuant=%i, Quantizer mode=%i, Max B frames=%i\\n\", v->profile, v->frmrtq_postproc, v->bitrtq_postproc, v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv, v->rangered, 
v->vstransform, v->overlap, v->s.resync_marker, v->dquant, v->quantizer_mode, avctx->max_b_frames ); return 0; }"} {"target": 1, "idx": 8903, "func": "static int s390_virtio_rng_init(VirtIOS390Device *s390_dev) { VirtIORNGS390 *dev = VIRTIO_RNG_S390(s390_dev); DeviceState *vdev = DEVICE(&dev->vdev); qdev_set_parent_bus(vdev, BUS(&s390_dev->bus)); if (qdev_init(vdev) < 0) { return -1; } object_property_set_link(OBJECT(dev), OBJECT(dev->vdev.conf.default_backend), \"rng\", NULL); return s390_virtio_device_init(s390_dev, VIRTIO_DEVICE(vdev)); }"} {"target": 0, "idx": 8927, "func": "static void apic_update_irq(APICState *s) { int irrv, ppr; if (!(s->spurious_vec & APIC_SV_ENABLE)) return; irrv = get_highest_priority_int(s->irr); if (irrv < 0) return; ppr = apic_get_ppr(s); if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) return; cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD); }"} {"target": 0, "idx": 8931, "func": "setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask, abi_ulong fpstate_addr) { CPUState *cs = CPU(x86_env_get_cpu(env)); int err = 0; uint16_t magic; /* already locked in setup_frame() */ __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs); __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs); __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es); __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds); __put_user(env->regs[R_EDI], &sc->edi); __put_user(env->regs[R_ESI], &sc->esi); __put_user(env->regs[R_EBP], &sc->ebp); __put_user(env->regs[R_ESP], &sc->esp); __put_user(env->regs[R_EBX], &sc->ebx); __put_user(env->regs[R_EDX], &sc->edx); __put_user(env->regs[R_ECX], &sc->ecx); __put_user(env->regs[R_EAX], &sc->eax); __put_user(cs->exception_index, &sc->trapno); __put_user(env->error_code, &sc->err); __put_user(env->eip, &sc->eip); __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs); __put_user(env->eflags, &sc->eflags); __put_user(env->regs[R_ESP], &sc->esp_at_signal); __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss); cpu_x86_fsave(env, fpstate_addr, 1); fpstate->status = fpstate->sw; magic = 0xffff; __put_user(magic, &fpstate->magic); __put_user(fpstate_addr, &sc->fpstate); /* non-iBCS2 extensions.. */ __put_user(mask, &sc->oldmask); __put_user(env->cr[2], &sc->cr2); return err; }"} {"target": 0, "idx": 8946, "func": "static void coroutine_fn qed_aio_complete(QEDAIOCB *acb) { BDRVQEDState *s = acb_to_s(acb); /* Free resources */ qemu_iovec_destroy(&acb->cur_qiov); qed_unref_l2_cache_entry(acb->request.l2_table); /* Free the buffer we may have allocated for zero writes */ if (acb->flags & QED_AIOCB_ZERO) { qemu_vfree(acb->qiov->iov[0].iov_base); acb->qiov->iov[0].iov_base = NULL; } /* Start next allocating write request waiting behind this one. Note that * requests enqueue themselves when they first hit an unallocated cluster * but they wait until the entire request is finished before waking up the * next request in the queue. This ensures that we don't cycle through * requests multiple times but rather finish one at a time completely. 
*/ if (acb == s->allocating_acb) { s->allocating_acb = NULL; if (!qemu_co_queue_empty(&s->allocating_write_reqs)) { qemu_co_enter_next(&s->allocating_write_reqs); } else if (s->header.features & QED_F_NEED_CHECK) { qed_start_need_check_timer(s); } } }"} {"target": 0, "idx": 8949, "func": "static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, uint8_t devfn, hwaddr addr, bool is_write, IOMMUTLBEntry *entry) { IntelIOMMUState *s = vtd_as->iommu_state; VTDContextEntry ce; uint8_t bus_num = pci_bus_num(bus); VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry; uint64_t slpte, page_mask; uint32_t level; uint16_t source_id = vtd_make_source_id(bus_num, devfn); int ret_fr; bool is_fpd_set = false; bool reads = true; bool writes = true; uint8_t access_flags; VTDIOTLBEntry *iotlb_entry; /* * We have standalone memory region for interrupt addresses, we * should never receive translation requests in this region. */ assert(!vtd_is_interrupt_addr(addr)); /* Try to fetch slpte form IOTLB */ iotlb_entry = vtd_lookup_iotlb(s, source_id, addr); if (iotlb_entry) { trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte, iotlb_entry->domain_id); slpte = iotlb_entry->slpte; access_flags = iotlb_entry->access_flags; page_mask = iotlb_entry->mask; goto out; } /* Try to fetch context-entry from cache first */ if (cc_entry->context_cache_gen == s->context_cache_gen) { trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi, cc_entry->context_entry.lo, cc_entry->context_cache_gen); ce = cc_entry->context_entry; is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; } else { ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce); is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; if (ret_fr) { ret_fr = -ret_fr; if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) { trace_vtd_fault_disabled(); } else { vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write); } goto error; } /* Update context-cache */ trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo, cc_entry->context_cache_gen, s->context_cache_gen); cc_entry->context_entry = ce; cc_entry->context_cache_gen = s->context_cache_gen; } /* * We don't need to translate for pass-through context entries. * Also, let's ignore IOTLB caching as well for PT devices. */ if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) { entry->iova = addr & VTD_PAGE_MASK_4K; entry->translated_addr = entry->iova; entry->addr_mask = ~VTD_PAGE_MASK_4K; entry->perm = IOMMU_RW; trace_vtd_translate_pt(source_id, entry->iova); /* * When this happens, it means firstly caching-mode is not * enabled, and this is the first passthrough translation for * the device. Let's enable the fast path for passthrough. * * When passthrough is disabled again for the device, we can * capture it via the context entry invalidation, then the * IOMMU region can be swapped back. 
*/ vtd_pt_enable_fast_path(s, source_id); return true; } ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level, &reads, &writes); if (ret_fr) { ret_fr = -ret_fr; if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) { trace_vtd_fault_disabled(); } else { vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write); } goto error; } page_mask = vtd_slpt_level_page_mask(level); access_flags = IOMMU_ACCESS_FLAG(reads, writes); vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte, access_flags, level); out: entry->iova = addr & page_mask; entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask; entry->addr_mask = ~page_mask; entry->perm = access_flags; return true; error: entry->iova = 0; entry->translated_addr = 0; entry->addr_mask = 0; entry->perm = IOMMU_NONE; return false; }"} {"target": 0, "idx": 8955, "func": "int load_multiboot(void *fw_cfg, FILE *f, const char *kernel_filename, const char *initrd_filename, const char *kernel_cmdline, int kernel_file_size, uint8_t *header) { int i, is_multiboot = 0; uint32_t flags = 0; uint32_t mh_entry_addr; uint32_t mh_load_addr; uint32_t mb_kernel_size; MultibootState mbs; uint8_t bootinfo[MBI_SIZE]; uint8_t *mb_bootinfo_data; /* Ok, let's see if it is a multiboot image. The header is 12x32bit long, so the latest entry may be 8192 - 48. */ for (i = 0; i < (8192 - 48); i += 4) { if (ldl_p(header+i) == 0x1BADB002) { uint32_t checksum = ldl_p(header+i+8); flags = ldl_p(header+i+4); checksum += flags; checksum += (uint32_t)0x1BADB002; if (!checksum) { is_multiboot = 1; break; } } } if (!is_multiboot) return 0; /* no multiboot */ mb_debug(\"qemu: I believe we found a multiboot image!\\n\"); memset(bootinfo, 0, sizeof(bootinfo)); memset(&mbs, 0, sizeof(mbs)); if (flags & 0x00000004) { /* MULTIBOOT_HEADER_HAS_VBE */ fprintf(stderr, \"qemu: multiboot knows VBE. we don't.\\n\"); } if (!(flags & 0x00010000)) { /* MULTIBOOT_HEADER_HAS_ADDR */ uint64_t elf_entry; uint64_t elf_low, elf_high; int kernel_size; fclose(f); if (((struct elf64_hdr*)header)->e_machine == EM_X86_64) { fprintf(stderr, \"Cannot load x86-64 image, give a 32bit one.\\n\"); exit(1); } kernel_size = load_elf(kernel_filename, NULL, NULL, &elf_entry, &elf_low, &elf_high, 0, ELF_MACHINE, 0); if (kernel_size < 0) { fprintf(stderr, \"Error while loading elf kernel\\n\"); exit(1); } mh_load_addr = elf_low; mb_kernel_size = elf_high - elf_low; mh_entry_addr = elf_entry; mbs.mb_buf = g_malloc(mb_kernel_size); if (rom_copy(mbs.mb_buf, mh_load_addr, mb_kernel_size) != mb_kernel_size) { fprintf(stderr, \"Error while fetching elf kernel from rom\\n\"); exit(1); } mb_debug(\"qemu: loading multiboot-elf kernel (%#x bytes) with entry %#zx\\n\", mb_kernel_size, (size_t)mh_entry_addr); } else { /* Valid if mh_flags sets MULTIBOOT_HEADER_HAS_ADDR. */ uint32_t mh_header_addr = ldl_p(header+i+12); uint32_t mh_load_end_addr = ldl_p(header+i+20); uint32_t mh_bss_end_addr = ldl_p(header+i+24); mh_load_addr = ldl_p(header+i+16); uint32_t mb_kernel_text_offset = i - (mh_header_addr - mh_load_addr); uint32_t mb_load_size = 0; mh_entry_addr = ldl_p(header+i+28); if (mh_load_end_addr) { mb_kernel_size = mh_bss_end_addr - mh_load_addr; mb_load_size = mh_load_end_addr - mh_load_addr; } else { mb_kernel_size = kernel_file_size - mb_kernel_text_offset; mb_load_size = mb_kernel_size; } /* Valid if mh_flags sets MULTIBOOT_HEADER_HAS_VBE. 
uint32_t mh_mode_type = ldl_p(header+i+32); uint32_t mh_width = ldl_p(header+i+36); uint32_t mh_height = ldl_p(header+i+40); uint32_t mh_depth = ldl_p(header+i+44); */ mb_debug(\"multiboot: mh_header_addr = %#x\\n\", mh_header_addr); mb_debug(\"multiboot: mh_load_addr = %#x\\n\", mh_load_addr); mb_debug(\"multiboot: mh_load_end_addr = %#x\\n\", mh_load_end_addr); mb_debug(\"multiboot: mh_bss_end_addr = %#x\\n\", mh_bss_end_addr); mb_debug(\"qemu: loading multiboot kernel (%#x bytes) at %#x\\n\", mb_load_size, mh_load_addr); mbs.mb_buf = g_malloc(mb_kernel_size); fseek(f, mb_kernel_text_offset, SEEK_SET); if (fread(mbs.mb_buf, 1, mb_load_size, f) != mb_load_size) { fprintf(stderr, \"fread() failed\\n\"); exit(1); } memset(mbs.mb_buf + mb_load_size, 0, mb_kernel_size - mb_load_size); fclose(f); } mbs.mb_buf_phys = mh_load_addr; mbs.mb_buf_size = TARGET_PAGE_ALIGN(mb_kernel_size); mbs.offset_mbinfo = mbs.mb_buf_size; /* Calculate space for cmdlines and mb_mods */ mbs.mb_buf_size += strlen(kernel_filename) + 1; mbs.mb_buf_size += strlen(kernel_cmdline) + 1; if (initrd_filename) { const char *r = initrd_filename; mbs.mb_buf_size += strlen(r) + 1; mbs.mb_mods_avail = 1; while (*(r = get_opt_value(NULL, 0, r))) { mbs.mb_mods_avail++; r++; } mbs.mb_buf_size += MB_MOD_SIZE * mbs.mb_mods_avail; } mbs.mb_buf_size = TARGET_PAGE_ALIGN(mbs.mb_buf_size); /* enlarge mb_buf to hold cmdlines and mb-info structs */ mbs.mb_buf = g_realloc(mbs.mb_buf, mbs.mb_buf_size); mbs.offset_cmdlines = mbs.offset_mbinfo + mbs.mb_mods_avail * MB_MOD_SIZE; if (initrd_filename) { char *next_initrd, not_last; mbs.offset_mods = mbs.mb_buf_size; do { char *next_space; int mb_mod_length; uint32_t offs = mbs.mb_buf_size; next_initrd = (char *)get_opt_value(NULL, 0, initrd_filename); not_last = *next_initrd; *next_initrd = '\\0'; /* if a space comes after the module filename, treat everything after that as parameters */ target_phys_addr_t c = mb_add_cmdline(&mbs, initrd_filename); if ((next_space = strchr(initrd_filename, ' '))) *next_space = '\\0'; mb_debug(\"multiboot loading module: %s\\n\", initrd_filename); mb_mod_length = get_image_size(initrd_filename); if (mb_mod_length < 0) { fprintf(stderr, \"Failed to open file '%s'\\n\", initrd_filename); exit(1); } mbs.mb_buf_size = TARGET_PAGE_ALIGN(mb_mod_length + mbs.mb_buf_size); mbs.mb_buf = g_realloc(mbs.mb_buf, mbs.mb_buf_size); load_image(initrd_filename, (unsigned char *)mbs.mb_buf + offs); mb_add_mod(&mbs, mbs.mb_buf_phys + offs, mbs.mb_buf_phys + offs + mb_mod_length, c); mb_debug(\"mod_start: %p\\nmod_end: %p\\n cmdline: \"TARGET_FMT_plx\"\\n\", (char *)mbs.mb_buf + offs, (char *)mbs.mb_buf + offs + mb_mod_length, c); initrd_filename = next_initrd+1; } while (not_last); } /* Commandline support */ char kcmdline[strlen(kernel_filename) + strlen(kernel_cmdline) + 2]; snprintf(kcmdline, sizeof(kcmdline), \"%s %s\", kernel_filename, kernel_cmdline); stl_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline)); stl_p(bootinfo + MBI_MODS_ADDR, mbs.mb_buf_phys + mbs.offset_mbinfo); stl_p(bootinfo + MBI_MODS_COUNT, mbs.mb_mods_count); /* mods_count */ /* the kernel is where we want it to be now */ stl_p(bootinfo + MBI_FLAGS, MULTIBOOT_FLAGS_MEMORY | MULTIBOOT_FLAGS_BOOT_DEVICE | MULTIBOOT_FLAGS_CMDLINE | MULTIBOOT_FLAGS_MODULES | MULTIBOOT_FLAGS_MMAP); stl_p(bootinfo + MBI_MEM_LOWER, 640); stl_p(bootinfo + MBI_MEM_UPPER, (ram_size / 1024) - 1024); stl_p(bootinfo + MBI_BOOT_DEVICE, 0x8000ffff); /* XXX: use the -boot switch? 
*/ stl_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP); mb_debug(\"multiboot: mh_entry_addr = %#x\\n\", mh_entry_addr); mb_debug(\" mb_buf_phys = \"TARGET_FMT_plx\"\\n\", mbs.mb_buf_phys); mb_debug(\" mod_start = \"TARGET_FMT_plx\"\\n\", mbs.mb_buf_phys + mbs.offset_mods); mb_debug(\" mb_mods_count = %d\\n\", mbs.mb_mods_count); /* save bootinfo off the stack */ mb_bootinfo_data = g_malloc(sizeof(bootinfo)); memcpy(mb_bootinfo_data, bootinfo, sizeof(bootinfo)); /* Pass variables to option rom */ fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ENTRY, mh_entry_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, mh_load_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, mbs.mb_buf_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, mbs.mb_buf, mbs.mb_buf_size); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, ADDR_MBI); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, sizeof(bootinfo)); fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, mb_bootinfo_data, sizeof(bootinfo)); option_rom[nb_option_roms].name = \"multiboot.bin\"; option_rom[nb_option_roms].bootindex = 0; nb_option_roms++; return 1; /* yes, we are multiboot */ }"} {"target": 0, "idx": 8963, "func": "static void FUNC(hevc_h_loop_filter_luma)(uint8_t *pix, ptrdiff_t stride, int *beta, int *tc, uint8_t *no_p, uint8_t *no_q) { FUNC(hevc_loop_filter_luma)(pix, stride, sizeof(pixel), beta, tc, no_p, no_q); }"} {"target": 0, "idx": 8970, "func": "static void tcg_out_brcond(TCGContext *s, TCGMemOp ext, TCGCond c, TCGArg a, TCGArg b, bool b_const, TCGLabel *l) { intptr_t offset; bool need_cmp; if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) { need_cmp = false; } else { need_cmp = true; tcg_out_cmp(s, ext, a, b, b_const); } if (!l->has_value) { tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0); offset = tcg_in32(s) >> 5; } else { offset = l->u.value_ptr - s->code_ptr; assert(offset == sextract64(offset, 0, 19)); } if (need_cmp) { tcg_out_insn(s, 3202, B_C, c, offset); } else if (c == TCG_COND_EQ) { tcg_out_insn(s, 3201, CBZ, ext, a, offset); } else { tcg_out_insn(s, 3201, CBNZ, ext, a, offset); } }"} {"target": 0, "idx": 8988, "func": "static int nbd_negotiate_options(NBDClient *client, Error **errp) { uint32_t flags; bool fixedNewstyle = false; /* Client sends: [ 0 .. 3] client flags [ 0 .. 7] NBD_OPTS_MAGIC [ 8 .. 11] NBD option [12 .. 15] Data length ... Rest of request [ 0 .. 7] NBD_OPTS_MAGIC [ 8 .. 11] Second NBD option [12 .. 15] Data length ... 
Rest of request */ if (nbd_read(client->ioc, &flags, sizeof(flags), errp) < 0) { error_prepend(errp, \"read failed: \"); return -EIO; } trace_nbd_negotiate_options_flags(); be32_to_cpus(&flags); if (flags & NBD_FLAG_C_FIXED_NEWSTYLE) { trace_nbd_negotiate_options_newstyle(); fixedNewstyle = true; flags &= ~NBD_FLAG_C_FIXED_NEWSTYLE; } if (flags & NBD_FLAG_C_NO_ZEROES) { trace_nbd_negotiate_options_no_zeroes(); client->no_zeroes = true; flags &= ~NBD_FLAG_C_NO_ZEROES; } if (flags != 0) { error_setg(errp, \"Unknown client flags 0x%\" PRIx32 \" received\", flags); return -EIO; } while (1) { int ret; uint32_t option, length; uint64_t magic; if (nbd_read(client->ioc, &magic, sizeof(magic), errp) < 0) { error_prepend(errp, \"read failed: \"); return -EINVAL; } magic = be64_to_cpu(magic); trace_nbd_negotiate_options_check_magic(magic); if (magic != NBD_OPTS_MAGIC) { error_setg(errp, \"Bad magic received\"); return -EINVAL; } if (nbd_read(client->ioc, &option, sizeof(option), errp) < 0) { error_prepend(errp, \"read failed: \"); return -EINVAL; } option = be32_to_cpu(option); if (nbd_read(client->ioc, &length, sizeof(length), errp) < 0) { error_prepend(errp, \"read failed: \"); return -EINVAL; } length = be32_to_cpu(length); trace_nbd_negotiate_options_check_option(option); if (client->tlscreds && client->ioc == (QIOChannel *)client->sioc) { QIOChannel *tioc; if (!fixedNewstyle) { error_setg(errp, \"Unsupported option 0x%\" PRIx32, option); return -EINVAL; } switch (option) { case NBD_OPT_STARTTLS: tioc = nbd_negotiate_handle_starttls(client, length, errp); if (!tioc) { return -EIO; } object_unref(OBJECT(client->ioc)); client->ioc = QIO_CHANNEL(tioc); break; case NBD_OPT_EXPORT_NAME: /* No way to return an error to client, so drop connection */ error_setg(errp, \"Option 0x%x not permitted before TLS\", option); return -EINVAL; default: if (nbd_drop(client->ioc, length, errp) < 0) { return -EIO; } ret = nbd_negotiate_send_rep_err(client->ioc, NBD_REP_ERR_TLS_REQD, option, errp, \"Option 0x%\" PRIx32 \"not permitted before TLS\", option); if (ret < 0) { return ret; } /* Let the client keep trying, unless they asked to * quit. In this mode, we've already sent an error, so * we can't ack the abort. */ if (option == NBD_OPT_ABORT) { return 1; } break; } } else if (fixedNewstyle) { switch (option) { case NBD_OPT_LIST: ret = nbd_negotiate_handle_list(client, length, errp); if (ret < 0) { return ret; } break; case NBD_OPT_ABORT: /* NBD spec says we must try to reply before * disconnecting, but that we must also tolerate * guests that don't wait for our reply. 
*/ nbd_negotiate_send_rep(client->ioc, NBD_REP_ACK, option, NULL); return 1; case NBD_OPT_EXPORT_NAME: return nbd_negotiate_handle_export_name(client, length, errp); case NBD_OPT_STARTTLS: if (nbd_drop(client->ioc, length, errp) < 0) { return -EIO; } if (client->tlscreds) { ret = nbd_negotiate_send_rep_err(client->ioc, NBD_REP_ERR_INVALID, option, errp, \"TLS already enabled\"); } else { ret = nbd_negotiate_send_rep_err(client->ioc, NBD_REP_ERR_POLICY, option, errp, \"TLS not configured\"); } if (ret < 0) { return ret; } break; default: if (nbd_drop(client->ioc, length, errp) < 0) { return -EIO; } ret = nbd_negotiate_send_rep_err(client->ioc, NBD_REP_ERR_UNSUP, option, errp, \"Unsupported option 0x%\" PRIx32, option); if (ret < 0) { return ret; } break; } } else { /* * If broken new-style we should drop the connection * for anything except NBD_OPT_EXPORT_NAME */ switch (option) { case NBD_OPT_EXPORT_NAME: return nbd_negotiate_handle_export_name(client, length, errp); default: error_setg(errp, \"Unsupported option 0x%\" PRIx32, option); return -EINVAL; } } } }"} {"target": 1, "idx": 9025, "func": "static void pl181_fifo_run(pl181_state *s) { uint32_t bits; uint32_t value; int n; int is_read; is_read = (s->datactrl & PL181_DATA_DIRECTION) != 0; if (s->datacnt != 0 && (!is_read || sd_data_ready(s->card)) && !s->linux_hack) { if (is_read) { n = 0; value = 0; while (s->datacnt && s->fifo_len < PL181_FIFO_LEN) { value |= (uint32_t)sd_read_data(s->card) << (n * 8); s->datacnt--; n++; if (n == 4) { pl181_fifo_push(s, value); n = 0; value = 0; } } if (n != 0) { pl181_fifo_push(s, value); } } else { /* write */ n = 0; while (s->datacnt > 0 && (s->fifo_len > 0 || n > 0)) { if (n == 0) { value = pl181_fifo_pop(s); n = 4; } n--; s->datacnt--; sd_write_data(s->card, value & 0xff); value >>= 8; } } } s->status &= ~(PL181_STATUS_RX_FIFO | PL181_STATUS_TX_FIFO); if (s->datacnt == 0) { s->status |= PL181_STATUS_DATAEND; /* HACK: */ s->status |= PL181_STATUS_DATABLOCKEND; DPRINTF(\"Transfer Complete\\n\"); } if (s->datacnt == 0 && s->fifo_len == 0) { s->datactrl &= ~PL181_DATA_ENABLE; DPRINTF(\"Data engine idle\\n\"); } else { /* Update FIFO bits. */ bits = PL181_STATUS_TXACTIVE | PL181_STATUS_RXACTIVE; if (s->fifo_len == 0) { bits |= PL181_STATUS_TXFIFOEMPTY; bits |= PL181_STATUS_RXFIFOEMPTY; } else { bits |= PL181_STATUS_TXDATAAVLBL; bits |= PL181_STATUS_RXDATAAVLBL; } if (s->fifo_len == 16) { bits |= PL181_STATUS_TXFIFOFULL; bits |= PL181_STATUS_RXFIFOFULL; } if (s->fifo_len <= 8) { bits |= PL181_STATUS_TXFIFOHALFEMPTY; } if (s->fifo_len >= 8) { bits |= PL181_STATUS_RXFIFOHALFFULL; } if (s->datactrl & PL181_DATA_DIRECTION) { bits &= PL181_STATUS_RX_FIFO; } else { bits &= PL181_STATUS_TX_FIFO; } s->status |= bits; } }"} {"target": 0, "idx": 9036, "func": "static int flv_read_metabody(AVFormatContext *s, int64_t next_pos) { AMFDataType type; AVStream *stream, *astream, *vstream; AVIOContext *ioc; int i; // only needs to hold the string \"onMetaData\". // Anything longer is something we don't want. char buffer[11]; astream = NULL; vstream = NULL; ioc = s->pb; // first object needs to be \"onMetaData\" string type = avio_r8(ioc); if (type != AMF_DATA_TYPE_STRING || amf_get_string(ioc, buffer, sizeof(buffer)) < 0) return -1; if (!strcmp(buffer, \"onTextData\")) return 1; if (strcmp(buffer, \"onMetaData\")) return -1; // find the streams now so that amf_parse_object doesn't need to do // the lookup every time it is called. 
for (i = 0; i < s->nb_streams; i++) { stream = s->streams[i]; if (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) astream = stream; else if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO) vstream = stream; } // parse the second object (we want a mixed array) if (amf_parse_object(s, astream, vstream, buffer, next_pos, 0) < 0) return -1; return 0; }"} {"target": 0, "idx": 9038, "func": "static void mptsas_scsi_init(PCIDevice *dev, Error **errp) { DeviceState *d = DEVICE(dev); MPTSASState *s = MPT_SAS(dev); Error *err = NULL; int ret; dev->config[PCI_LATENCY_TIMER] = 0; dev->config[PCI_INTERRUPT_PIN] = 0x01; if (s->msi != ON_OFF_AUTO_OFF) { ret = msi_init(dev, 0, 1, true, false, &err); /* Any error other than -ENOTSUP(board's MSI support is broken) * is a programming error */ assert(!ret || ret == -ENOTSUP); if (ret && s->msi == ON_OFF_AUTO_ON) { /* Can't satisfy user's explicit msi=on request, fail */ error_append_hint(&err, \"You have to use msi=auto (default) or \" \"msi=off with this machine type.\\n\"); error_propagate(errp, err); s->msi_in_use = false; return; } else if (ret) { /* With msi=auto, we fall back to MSI off silently */ error_free(err); s->msi_in_use = false; } else { s->msi_in_use = true; } } memory_region_init_io(&s->mmio_io, OBJECT(s), &mptsas_mmio_ops, s, \"mptsas-mmio\", 0x4000); memory_region_init_io(&s->port_io, OBJECT(s), &mptsas_port_ops, s, \"mptsas-io\", 256); memory_region_init_io(&s->diag_io, OBJECT(s), &mptsas_diag_ops, s, \"mptsas-diag\", 0x10000); pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->port_io); pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32, &s->mmio_io); pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32, &s->diag_io); if (!s->sas_addr) { s->sas_addr = ((NAA_LOCALLY_ASSIGNED_ID << 24) | IEEE_COMPANY_LOCALLY_ASSIGNED) << 36; s->sas_addr |= (pci_bus_num(dev->bus) << 16); s->sas_addr |= (PCI_SLOT(dev->devfn) << 8); s->sas_addr |= PCI_FUNC(dev->devfn); } s->max_devices = MPTSAS_NUM_PORTS; s->request_bh = qemu_bh_new(mptsas_fetch_requests, s); QTAILQ_INIT(&s->pending); scsi_bus_new(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info, NULL); if (!d->hotplugged) { scsi_bus_legacy_handle_cmdline(&s->bus, errp); } }"} {"target": 0, "idx": 9039, "func": "static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) { int i; GICState *s = KVM_ARM_GIC(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s); kgc->parent_realize(dev, errp); if (error_is_set(errp)) { return; } i = s->num_irq - GIC_INTERNAL; /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU. * GPIO array layout is thus: * [0..N-1] SPIs * [N..N+31] PPIs for CPU 0 * [N+32..N+63] PPIs for CPU 1 * ... */ i += (GIC_INTERNAL * s->num_cpu); qdev_init_gpio_in(dev, kvm_arm_gic_set_irq, i); /* We never use our outbound IRQ lines but provide them so that * we maintain the same interface as the non-KVM GIC. */ for (i = 0; i < s->num_cpu; i++) { sysbus_init_irq(sbd, &s->parent_irq[i]); } /* Distributor */ memory_region_init_reservation(&s->iomem, OBJECT(s), \"kvm-gic_dist\", 0x1000); sysbus_init_mmio(sbd, &s->iomem); kvm_arm_register_device(&s->iomem, (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) | KVM_VGIC_V2_ADDR_TYPE_DIST); /* CPU interface for current core. Unlike arm_gic, we don't * provide the \"interface for core #N\" memory regions, because * cores with a VGIC don't have those. 
*/ memory_region_init_reservation(&s->cpuiomem[0], OBJECT(s), \"kvm-gic_cpu\", 0x1000); sysbus_init_mmio(sbd, &s->cpuiomem[0]); kvm_arm_register_device(&s->cpuiomem[0], (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) | KVM_VGIC_V2_ADDR_TYPE_CPU); }"} {"target": 0, "idx": 9047, "func": "static int write_fragments(struct Tracks *tracks, int start_index, AVIOContext *in) { char dirname[100], filename[500]; int i, j; for (i = start_index; i < tracks->nb_tracks; i++) { struct Track *track = tracks->tracks[i]; const char *type = track->is_video ? \"video\" : \"audio\"; snprintf(dirname, sizeof(dirname), \"QualityLevels(%d)\", track->bitrate); mkdir(dirname, 0777); for (j = 0; j < track->chunks; j++) { snprintf(filename, sizeof(filename), \"%s/Fragments(%s=%\"PRId64\")\", dirname, type, track->offsets[j].time); avio_seek(in, track->offsets[j].offset, SEEK_SET); write_fragment(filename, in); } } return 0; }"} {"target": 0, "idx": 9049, "func": "char *qemu_find_file(int type, const char *name) { int len; const char *subdir; char *buf; /* If name contains path separators then try it as a straight path. */ if ((strchr(name, '/') || strchr(name, '\\\\')) && access(name, R_OK) == 0) { return g_strdup(name); } switch (type) { case QEMU_FILE_TYPE_BIOS: subdir = \"\"; break; case QEMU_FILE_TYPE_KEYMAP: subdir = \"keymaps/\"; break; default: abort(); } len = strlen(data_dir) + strlen(name) + strlen(subdir) + 2; buf = g_malloc0(len); snprintf(buf, len, \"%s/%s%s\", data_dir, subdir, name); if (access(buf, R_OK)) { g_free(buf); return NULL; } return buf; }"} {"target": 0, "idx": 9055, "func": "void term_flush(void) { }"} {"target": 0, "idx": 9067, "func": "static uint64_t elcr_ioport_read(void *opaque, target_phys_addr_t addr, unsigned size) { PICCommonState *s = opaque; return s->elcr; }"} {"target": 0, "idx": 9072, "func": "static int inc_refcounts(BlockDriverState *bs, BdrvCheckResult *res, uint16_t *refcount_table, int64_t refcount_table_size, int64_t offset, int64_t size) { BDRVQcowState *s = bs->opaque; uint64_t start, last, cluster_offset, k; if (size <= 0) { return 0; } start = start_of_cluster(s, offset); last = start_of_cluster(s, offset + size - 1); for(cluster_offset = start; cluster_offset <= last; cluster_offset += s->cluster_size) { k = cluster_offset >> s->cluster_bits; if (k >= refcount_table_size) { fprintf(stderr, \"Warning: cluster offset=0x%\" PRIx64 \" is after \" \"the end of the image file, can't properly check refcounts.\\n\", cluster_offset); res->check_errors++; } else { if (++refcount_table[k] == 0) { fprintf(stderr, \"ERROR: overflow cluster offset=0x%\" PRIx64 \"\\n\", cluster_offset); res->corruptions++; } } } return 0; }"} {"target": 1, "idx": 9087, "func": "static int nbd_co_request(BlockDriverState *bs, NBDRequest *request, QEMUIOVector *qiov) { NBDClientSession *client = nbd_get_client_session(bs); int ret; if (qiov) { assert(request->type == NBD_CMD_WRITE || request->type == NBD_CMD_READ); assert(request->len == iov_size(qiov->iov, qiov->niov)); } else { assert(request->type != NBD_CMD_WRITE && request->type != NBD_CMD_READ); } ret = nbd_co_send_request(bs, request, request->type == NBD_CMD_WRITE ? qiov : NULL); if (ret < 0) { return ret; } return nbd_co_receive_reply(client, request->handle, request->type == NBD_CMD_READ ? 
qiov : NULL); }"} {"target": 1, "idx": 9091, "func": "static ExitStatus trans_log(DisasContext *ctx, uint32_t insn, const DisasInsn *di) { unsigned r2 = extract32(insn, 21, 5); unsigned r1 = extract32(insn, 16, 5); unsigned cf = extract32(insn, 12, 4); unsigned rt = extract32(insn, 0, 5); TCGv tcg_r1, tcg_r2; ExitStatus ret; if (cf) { nullify_over(ctx); } tcg_r1 = load_gpr(ctx, r1); tcg_r2 = load_gpr(ctx, r2); ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f_ttt); return nullify_end(ctx, ret); }"} {"target": 0, "idx": 9094, "func": "static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg, int chroma444, int simple, int pixel_shift) { int deblock_topleft; int deblock_top; int top_idx = 1; uint8_t *top_border_m1; uint8_t *top_border; if (!simple && FRAME_MBAFF(h)) { if (h->mb_y & 1) { if (!MB_MBAFF(h)) return; } else { top_idx = MB_MBAFF(h) ? 0 : 1; } } if (h->deblocking_filter == 2) { deblock_topleft = h->slice_table[h->mb_xy - 1 - h->mb_stride] == h->slice_num; deblock_top = h->top_type; } else { deblock_topleft = (h->mb_x > 0); deblock_top = (h->mb_y > !!MB_FIELD(h)); } src_y -= linesize + 1 + pixel_shift; src_cb -= uvlinesize + 1 + pixel_shift; src_cr -= uvlinesize + 1 + pixel_shift; top_border_m1 = h->top_borders[top_idx][h->mb_x - 1]; top_border = h->top_borders[top_idx][h->mb_x]; #define XCHG(a, b, xchg) \\ if (pixel_shift) { \\ if (xchg) { \\ AV_SWAP64(b + 0, a + 0); \\ AV_SWAP64(b + 8, a + 8); \\ } else { \\ AV_COPY128(b, a); \\ } \\ } else if (xchg) \\ AV_SWAP64(b, a); \\ else \\ AV_COPY64(b, a); if (deblock_top) { if (deblock_topleft) { XCHG(top_border_m1 + (8 << pixel_shift), src_y - (7 << pixel_shift), 1); } XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg); XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1); if (h->mb_x + 1 < h->mb_width) { XCHG(h->top_borders[top_idx][h->mb_x + 1], src_y + (17 << pixel_shift), 1); } } if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { if (chroma444) { if (deblock_topleft) { XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1); XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1); } XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg); XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1); XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg); XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1); if (h->mb_x + 1 < h->mb_width) { XCHG(h->top_borders[top_idx][h->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1); XCHG(h->top_borders[top_idx][h->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1); } } else { if (deblock_top) { if (deblock_topleft) { XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1); XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1); } XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1); XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1); } } } }"} {"target": 1, "idx": 9095, "func": "static int gif_image_write_image(AVCodecContext *avctx, uint8_t **bytestream, uint8_t *end, const uint8_t *buf, int linesize) { GIFContext *s = avctx->priv_data; int len, height; const uint8_t *ptr; /* image block */ bytestream_put_byte(bytestream, 0x2c); bytestream_put_le16(bytestream, 0); bytestream_put_le16(bytestream, 0); bytestream_put_le16(bytestream, avctx->width); 
bytestream_put_le16(bytestream, avctx->height); bytestream_put_byte(bytestream, 0x00); /* flags */ /* no local clut */ bytestream_put_byte(bytestream, 0x08); ff_lzw_encode_init(s->lzw, s->buf, avctx->width*avctx->height, 12, FF_LZW_GIF, put_bits); ptr = buf; for (height = avctx->height; height--;) { len += ff_lzw_encode(s->lzw, ptr, avctx->width); ptr += linesize; } len += ff_lzw_encode_flush(s->lzw, flush_put_bits); ptr = s->buf; while (len > 0) { int size = FFMIN(255, len); bytestream_put_byte(bytestream, size); if (end - *bytestream < size) return -1; bytestream_put_buffer(bytestream, ptr, size); ptr += size; len -= size; } bytestream_put_byte(bytestream, 0x00); /* end of image block */ bytestream_put_byte(bytestream, 0x3b); return 0; }"} {"target": 1, "idx": 9101, "func": "static int mov_write_stbl_tag(AVIOContext *pb, MOVTrack *track) { int64_t pos = avio_tell(pb); avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, \"stbl\"); mov_write_stsd_tag(pb, track); mov_write_stts_tag(pb, track); if ((track->enc->codec_type == AVMEDIA_TYPE_VIDEO || track->enc->codec_tag == MKTAG('r','t','p',' ')) && track->has_keyframes && track->has_keyframes < track->entry) mov_write_stss_tag(pb, track, MOV_SYNC_SAMPLE); if (track->mode == MODE_MOV && track->flags & MOV_TRACK_STPS) mov_write_stss_tag(pb, track, MOV_PARTIAL_SYNC_SAMPLE); if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO && track->flags & MOV_TRACK_CTTS && track->entry) mov_write_ctts_tag(pb, track); mov_write_stsc_tag(pb, track); mov_write_stsz_tag(pb, track); mov_write_stco_tag(pb, track); return update_size(pb, pos); }"} {"target": 1, "idx": 9106, "func": "uint64_t qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset, int *num) { BDRVQcowState *s = bs->opaque; int l1_index, l2_index; uint64_t l2_offset, *l2_table, cluster_offset; int l1_bits, c; int index_in_cluster, nb_available, nb_needed, nb_clusters; index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1); nb_needed = *num + index_in_cluster; l1_bits = s->l2_bits + s->cluster_bits; /* compute how many bytes there are between the offset and * the end of the l1 entry */ nb_available = (1 << l1_bits) - (offset & ((1 << l1_bits) - 1)); /* compute the number of available sectors */ nb_available = (nb_available >> 9) + index_in_cluster; if (nb_needed > nb_available) { nb_needed = nb_available; } cluster_offset = 0; /* seek the the l2 offset in the l1 table */ l1_index = offset >> l1_bits; if (l1_index >= s->l1_size) goto out; l2_offset = s->l1_table[l1_index]; /* seek the l2 table of the given l2 offset */ if (!l2_offset) goto out; /* load the l2 table in memory */ l2_offset &= ~QCOW_OFLAG_COPIED; l2_table = l2_load(bs, l2_offset); if (l2_table == NULL) return 0; /* find the cluster offset for the given disk offset */ l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1); cluster_offset = be64_to_cpu(l2_table[l2_index]); nb_clusters = size_to_clusters(s, nb_needed << 9); if (!cluster_offset) { /* how many empty clusters ? */ c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]); } else { /* how many allocated clusters ? 
*/ c = count_contiguous_clusters(nb_clusters, s->cluster_size, &l2_table[l2_index], 0, QCOW_OFLAG_COPIED); } nb_available = (c * s->cluster_sectors); out: if (nb_available > nb_needed) nb_available = nb_needed; *num = nb_available - index_in_cluster; return cluster_offset & ~QCOW_OFLAG_COPIED; }"} {"target": 1, "idx": 9109, "func": "static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]){ int y, h_size; if(c->srcFormat == PIX_FMT_YUV422P){ srcStride[1] *= 2; srcStride[2] *= 2; } h_size= (c->dstW+7)&~7; if(h_size*2 > FFABS(dstStride[0])) h_size-=8; __asm__ __volatile__ (\"pxor %mm4, %mm4;\" /* zero mm4 */ ); //printf(\"%X %X %X %X %X %X %X %X %X %X\\n\", (int)&c->redDither, (int)&b5Dither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0], //srcStride[0],srcStride[1],srcStride[2],dstStride[0]); for (y= 0; y>1)*srcStride[1]; uint8_t *_pv = src[2] + (y>>1)*srcStride[2]; long index= -h_size/2; b5Dither= dither8[y&1]; g6Dither= dither4[y&1]; g5Dither= dither8[y&1]; r5Dither= dither8[(y+1)&1]; /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8 pixels in each iteration */ __asm__ __volatile__ ( /* load data for start of next scan line */ \"movd (%2, %0), %%mm0;\" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ \"movd (%3, %0), %%mm1;\" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ \"movq (%5, %0, 2), %%mm6;\" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ // \".balign 16 \\n\\t\" \"1: \\n\\t\" /* no speed diference on my p3@500 with prefetch, * if it is faster for anyone with -benchmark then tell me PREFETCH\" 64(%0) \\n\\t\" PREFETCH\" 64(%1) \\n\\t\" PREFETCH\" 64(%2) \\n\\t\" */ YUV2RGB #ifdef DITHER1XBPP \"paddusb \"MANGLE(b5Dither)\", %%mm0;\" \"paddusb \"MANGLE(g6Dither)\", %%mm2;\" \"paddusb \"MANGLE(r5Dither)\", %%mm1;\" #endif /* mask unneeded bits off */ \"pand \"MANGLE(mmx_redmask)\", %%mm0;\" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */ \"pand \"MANGLE(mmx_grnmask)\", %%mm2;\" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */ \"pand \"MANGLE(mmx_redmask)\", %%mm1;\" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */ \"psrlw $3,%%mm0;\" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */ \"pxor %%mm4, %%mm4;\" /* zero mm4 */ \"movq %%mm0, %%mm5;\" /* Copy B7-B0 */ \"movq %%mm2, %%mm7;\" /* Copy G7-G0 */ /* convert rgb24 plane to rgb16 pack for pixel 0-3 */ \"punpcklbw %%mm4, %%mm2;\" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */ \"punpcklbw %%mm1, %%mm0;\" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */ \"psllw $3, %%mm2;\" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */ \"por %%mm2, %%mm0;\" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */ \"movq 8 (%5, %0, 2), %%mm6;\" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ MOVNTQ \" %%mm0, (%1);\" /* store pixel 0-3 */ /* convert rgb24 plane to rgb16 pack for pixel 0-3 */ \"punpckhbw %%mm4, %%mm7;\" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */ \"punpckhbw %%mm1, %%mm5;\" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */ \"psllw $3, %%mm7;\" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */ \"movd 4 (%2, %0), %%mm0;\" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ \"por %%mm7, %%mm5;\" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */ \"movd 4 (%3, %0), %%mm1;\" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ MOVNTQ \" %%mm5, 8 (%1);\" /* store pixel 4-7 */ \"add $16, %1 \\n\\t\" \"add $4, %0 \\n\\t\" \" js 1b \\n\\t\" : \"+r\" (index), \"+r\" (_image) : \"r\" (_pu - index), \"r\" (_pv - index), \"r\"(&c->redDither), \"r\" (_py - 2*index) ); } __asm__ __volatile__ (EMMS); return srcSliceH; }"} {"target": 1, "idx": 9122, "func": "void 
memory_region_add_eventfd(MemoryRegion *mr, hwaddr addr, unsigned size, bool match_data, uint64_t data, EventNotifier *e) { MemoryRegionIoeventfd mrfd = { .addr.start = int128_make64(addr), .addr.size = int128_make64(size), .match_data = match_data, .data = data, .e = e, }; unsigned i; adjust_endianness(mr, &mrfd.data, size); memory_region_transaction_begin(); for (i = 0; i < mr->ioeventfd_nb; ++i) { if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) { break; } } ++mr->ioeventfd_nb; mr->ioeventfds = g_realloc(mr->ioeventfds, sizeof(*mr->ioeventfds) * mr->ioeventfd_nb); memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i], sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i)); mr->ioeventfds[i] = mrfd; ioeventfd_update_pending |= mr->enabled; memory_region_transaction_commit(); }"} {"target": 0, "idx": 9134, "func": "enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFormat target) { if (codec && codec->pix_fmts) { const enum AVPixelFormat *p = codec->pix_fmts; int has_alpha= av_pix_fmt_desc_get(target)->nb_components % 2 == 0; enum AVPixelFormat best= AV_PIX_FMT_NONE; if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) { if (st->codec->codec_id == AV_CODEC_ID_MJPEG) { p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE }; } else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) { p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE }; } } for (; *p != AV_PIX_FMT_NONE; p++) { best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL); if (*p == target) break; } if (*p == AV_PIX_FMT_NONE) { if (target != AV_PIX_FMT_NONE) av_log(NULL, AV_LOG_WARNING, \"Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\\n\", av_get_pix_fmt_name(target), codec->name, av_get_pix_fmt_name(best)); return best; } } return target; }"} {"target": 1, "idx": 9150, "func": "static void gic_set_irq(void *opaque, int irq, int level) { /* Meaning of the 'irq' parameter: * [0..N-1] : external interrupts * [N..N+31] : PPI (internal) interrupts for CPU 0 * [N+32..N+63] : PPI (internal interrupts for CPU 1 * ... */ GICState *s = (GICState *)opaque; int cm, target; if (irq < (s->num_irq - GIC_INTERNAL)) { /* The first external input line is internal interrupt 32. 
*/ cm = ALL_CPU_MASK; irq += GIC_INTERNAL; target = GIC_TARGET(irq); } else { int cpu; irq -= (s->num_irq - GIC_INTERNAL); cpu = irq / GIC_INTERNAL; irq %= GIC_INTERNAL; cm = 1 << cpu; target = cm; } assert(irq >= GIC_NR_SGIS); if (level == GIC_TEST_LEVEL(irq, cm)) { return; } if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { gic_set_irq_11mpcore(s, irq, level, cm, target); } else { gic_set_irq_generic(s, irq, level, cm, target); } gic_update(s); }"} {"target": 1, "idx": 9162, "func": "static int decode_mb_info(IVI5DecContext *ctx, IVIBandDesc *band, IVITile *tile, AVCodecContext *avctx) { int x, y, mv_x, mv_y, mv_delta, offs, mb_offset, mv_scale, blks_per_mb; IVIMbInfo *mb, *ref_mb; int row_offset = band->mb_size * band->pitch; mb = tile->mbs; ref_mb = tile->ref_mbs; offs = tile->ypos * band->pitch + tile->xpos; /* scale factor for motion vectors */ mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3); mv_x = mv_y = 0; for (y = tile->ypos; y < (tile->ypos + tile->height); y += band->mb_size) { mb_offset = offs; for (x = tile->xpos; x < (tile->xpos + tile->width); x += band->mb_size) { mb->xpos = x; mb->ypos = y; mb->buf_offs = mb_offset; if (get_bits1(&ctx->gb)) { if (ctx->frame_type == FRAMETYPE_INTRA) { av_log(avctx, AV_LOG_ERROR, \"Empty macroblock in an INTRA picture!\\n\"); return -1; } mb->type = 1; /* empty macroblocks are always INTER */ mb->cbp = 0; /* all blocks are empty */ mb->q_delta = 0; if (!band->plane && !band->band_num && (ctx->frame_flags & 8)) { mb->q_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mb->q_delta = IVI_TOSIGNED(mb->q_delta); } mb->mv_x = mb->mv_y = 0; /* no motion vector coded */ if (band->inherit_mv){ /* motion vector inheritance */ if (mv_scale) { mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale); mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale); } else { mb->mv_x = ref_mb->mv_x; mb->mv_y = ref_mb->mv_y; } } } else { if (band->inherit_mv) { mb->type = ref_mb->type; /* copy mb_type from corresponding reference mb */ } else if (ctx->frame_type == FRAMETYPE_INTRA) { mb->type = 0; /* mb_type is always INTRA for intra-frames */ } else { mb->type = get_bits1(&ctx->gb); } blks_per_mb = band->mb_size != band->blk_size ? 
4 : 1; mb->cbp = get_bits(&ctx->gb, blks_per_mb); mb->q_delta = 0; if (band->qdelta_present) { if (band->inherit_qdelta) { if (ref_mb) mb->q_delta = ref_mb->q_delta; } else if (mb->cbp || (!band->plane && !band->band_num && (ctx->frame_flags & 8))) { mb->q_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mb->q_delta = IVI_TOSIGNED(mb->q_delta); } } if (!mb->type) { mb->mv_x = mb->mv_y = 0; /* there is no motion vector in intra-macroblocks */ } else { if (band->inherit_mv){ /* motion vector inheritance */ if (mv_scale) { mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale); mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale); } else { mb->mv_x = ref_mb->mv_x; mb->mv_y = ref_mb->mv_y; } } else { /* decode motion vector deltas */ mv_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mv_y += IVI_TOSIGNED(mv_delta); mv_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mv_x += IVI_TOSIGNED(mv_delta); mb->mv_x = mv_x; mb->mv_y = mv_y; } } } mb++; if (ref_mb) ref_mb++; mb_offset += band->mb_size; } offs += row_offset; } align_get_bits(&ctx->gb); return 0; }"} {"target": 1, "idx": 9172, "func": "VIOsPAPRDevice *vty_lookup(sPAPRMachineState *spapr, target_ulong reg) { VIOsPAPRDevice *sdev; sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg); if (!sdev && reg == 0) { /* Hack for kernel early debug, which always specifies reg==0. * We search all VIO devices, and grab the vty with the lowest * reg. This attempts to mimic existing PowerVM behaviour * (early debug does work there, despite having no vty with * reg==0. */ return spapr_vty_get_default(spapr->vio_bus); return sdev;"} {"target": 1, "idx": 9174, "func": "void coroutine_fn block_job_pause_point(BlockJob *job) { assert(job && block_job_started(job)); if (!block_job_should_pause(job)) { return; } if (block_job_is_cancelled(job)) { return; } if (job->driver->pause) { job->driver->pause(job); } if (block_job_should_pause(job) && !block_job_is_cancelled(job)) { job->paused = true; job->busy = false; qemu_coroutine_yield(); /* wait for block_job_resume() */ job->busy = true; job->paused = false; } if (job->driver->resume) { job->driver->resume(job); } }"} {"target": 1, "idx": 9176, "func": "int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf1, int count1) { BlockDriver *drv = bs->drv; if (!drv) return -ENOMEDIUM; if (!drv->bdrv_pread) return bdrv_pread_em(bs, offset, buf1, count1); return drv->bdrv_pread(bs, offset, buf1, count1); }"} {"target": 1, "idx": 9178, "func": "int avpriv_dca_convert_bitstream(const uint8_t *src, int src_size, uint8_t *dst, int max_size) { uint32_t mrk; int i, tmp; const uint16_t *ssrc = (const uint16_t *) src; uint16_t *sdst = (uint16_t *) dst; PutBitContext pb; if ((unsigned) src_size > (unsigned) max_size) src_size = max_size; mrk = AV_RB32(src); switch (mrk) { case DCA_SYNCWORD_CORE_BE: memcpy(dst, src, src_size); return src_size; case DCA_SYNCWORD_CORE_LE: for (i = 0; i < (src_size + 1) >> 1; i++) *sdst++ = av_bswap16(*ssrc++); return src_size; case DCA_SYNCWORD_CORE_14B_BE: case DCA_SYNCWORD_CORE_14B_LE: init_put_bits(&pb, dst, max_size); for (i = 0; i < (src_size + 1) >> 1; i++, src += 2) { tmp = ((mrk == DCA_SYNCWORD_CORE_14B_BE) ? 
AV_RB16(src) : AV_RL16(src)) & 0x3FFF; put_bits(&pb, 14, tmp); } flush_put_bits(&pb); return (put_bits_count(&pb) + 7) >> 3; default: return AVERROR_INVALIDDATA; } }"} {"target": 1, "idx": 9189, "func": "static TRBCCode xhci_disable_ep(XHCIState *xhci, unsigned int slotid, unsigned int epid) { XHCISlot *slot; XHCIEPContext *epctx; int i; trace_usb_xhci_ep_disable(slotid, epid); assert(slotid >= 1 && slotid <= xhci->numslots); assert(epid >= 1 && epid <= 31); slot = &xhci->slots[slotid-1]; if (!slot->eps[epid-1]) { DPRINTF(\"xhci: slot %d ep %d already disabled\\n\", slotid, epid); return CC_SUCCESS; xhci_ep_nuke_xfers(xhci, slotid, epid); epctx = slot->eps[epid-1]; if (epctx->nr_pstreams) { xhci_free_streams(epctx); xhci_set_ep_state(xhci, epctx, NULL, EP_DISABLED); timer_free(epctx->kick_timer); g_free(epctx); slot->eps[epid-1] = NULL; return CC_SUCCESS;"} {"target": 1, "idx": 9193, "func": "int pvpanic_init(ISABus *bus) { isa_create_simple(bus, TYPE_ISA_PVPANIC_DEVICE); return 0; }"} {"target": 0, "idx": 9210, "func": "static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) { GICState *s = (GICState *)opaque; uint32_t res; int irq; int i; int cpu; int cm; int mask; cpu = gic_get_current_cpu(s); cm = 1 << cpu; if (offset < 0x100) { if (offset == 0) return s->enabled; if (offset == 4) /* Interrupt Controller Type Register */ return ((s->num_irq / 32) - 1) | ((NUM_CPU(s) - 1) << 5) | (s->security_extn << 10); if (offset < 0x08) return 0; if (offset >= 0x80) { /* Interrupt Group Registers: these RAZ/WI if this is an NS * access to a GIC with the security extensions, or if the GIC * doesn't have groups at all. */ res = 0; if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { /* Every byte offset holds 8 group status bits */ irq = (offset - 0x080) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) { goto bad_reg; } for (i = 0; i < 8; i++) { if (GIC_TEST_GROUP(irq + i, cm)) { res |= (1 << i); } } } return res; } goto bad_reg; } else if (offset < 0x200) { /* Interrupt Set/Clear Enable. */ if (offset < 0x180) irq = (offset - 0x100) * 8; else irq = (offset - 0x180) * 8; irq += GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; for (i = 0; i < 8; i++) { if (GIC_TEST_ENABLED(irq + i, cm)) { res |= (1 << i); } } } else if (offset < 0x300) { /* Interrupt Set/Clear Pending. */ if (offset < 0x280) irq = (offset - 0x200) * 8; else irq = (offset - 0x280) * 8; irq += GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; for (i = 0; i < 8; i++) { if (gic_test_pending(s, irq + i, mask)) { res |= (1 << i); } } } else if (offset < 0x400) { /* Interrupt Active. */ irq = (offset - 0x300) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; for (i = 0; i < 8; i++) { if (GIC_TEST_ACTIVE(irq + i, mask)) { res |= (1 << i); } } } else if (offset < 0x800) { /* Interrupt Priority. */ irq = (offset - 0x400) + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = GIC_GET_PRIORITY(irq, cpu); } else if (offset < 0xc00) { /* Interrupt CPU Target. */ if (s->num_cpu == 1 && s->revision != REV_11MPCORE) { /* For uniprocessor GICs these RAZ/WI */ res = 0; } else { irq = (offset - 0x800) + GIC_BASE_IRQ; if (irq >= s->num_irq) { goto bad_reg; } if (irq >= 29 && irq <= 31) { res = cm; } else { res = GIC_TARGET(irq); } } } else if (offset < 0xf00) { /* Interrupt Configuration. 
*/ irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; for (i = 0; i < 4; i++) { if (GIC_TEST_MODEL(irq + i)) res |= (1 << (i * 2)); if (GIC_TEST_EDGE_TRIGGER(irq + i)) res |= (2 << (i * 2)); } } else if (offset < 0xf10) { goto bad_reg; } else if (offset < 0xf30) { if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { goto bad_reg; } if (offset < 0xf20) { /* GICD_CPENDSGIRn */ irq = (offset - 0xf10); } else { irq = (offset - 0xf20); /* GICD_SPENDSGIRn */ } res = s->sgi_pending[irq][cpu]; } else if (offset < 0xfe0) { goto bad_reg; } else /* offset >= 0xfe0 */ { if (offset & 3) { res = 0; } else { res = gic_id[(offset - 0xfe0) >> 2]; } } return res; bad_reg: qemu_log_mask(LOG_GUEST_ERROR, \"gic_dist_readb: Bad offset %x\\n\", (int)offset); return 0; }"} {"target": 0, "idx": 9215, "func": "static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw, int type) { hwaddr hash; target_ulong vsid; int ds, pr, target_page_bits; int ret; target_ulong sr, pgidx; pr = msr_pr; ctx->eaddr = eaddr; sr = env->sr[eaddr >> 28]; ctx->key = (((sr & 0x20000000) && (pr != 0)) || ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; ds = sr & 0x80000000 ? 1 : 0; ctx->nx = sr & 0x10000000 ? 1 : 0; vsid = sr & 0x00FFFFFF; target_page_bits = TARGET_PAGE_BITS; qemu_log_mask(CPU_LOG_MMU, \"Check segment v=\" TARGET_FMT_lx \" %d \" TARGET_FMT_lx \" nip=\" TARGET_FMT_lx \" lr=\" TARGET_FMT_lx \" ir=%d dr=%d pr=%d %d t=%d\\n\", eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, (int)msr_dr, pr != 0 ? 1 : 0, rw, type); pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; hash = vsid ^ pgidx; ctx->ptem = (vsid << 7) | (pgidx >> 10); qemu_log_mask(CPU_LOG_MMU, \"pte segment: key=%d ds %d nx %d vsid \" TARGET_FMT_lx \"\\n\", ctx->key, ds, ctx->nx, vsid); ret = -1; if (!ds) { /* Check if instruction fetch is allowed, if needed */ if (type != ACCESS_CODE || ctx->nx == 0) { /* Page address translation */ qemu_log_mask(CPU_LOG_MMU, \"htab_base \" TARGET_FMT_plx \" htab_mask \" TARGET_FMT_plx \" hash \" TARGET_FMT_plx \"\\n\", env->htab_base, env->htab_mask, hash); ctx->hash[0] = hash; ctx->hash[1] = ~hash; /* Initialize real address with an invalid value */ ctx->raddr = (hwaddr)-1ULL; /* Software TLB search */ ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type); #if defined(DUMP_PAGE_TABLES) if (qemu_loglevel_mask(CPU_LOG_MMU)) { CPUState *cs = ENV_GET_CPU(env); hwaddr curaddr; uint32_t a0, a1, a2, a3; qemu_log(\"Page table: \" TARGET_FMT_plx \" len \" TARGET_FMT_plx \"\\n\", env->htab_base, env->htab_mask + 0x80); for (curaddr = env->htab_base; curaddr < (env->htab_base + env->htab_mask + 0x80); curaddr += 16) { a0 = ldl_phys(cs->as, curaddr); a1 = ldl_phys(cs->as, curaddr + 4); a2 = ldl_phys(cs->as, curaddr + 8); a3 = ldl_phys(cs->as, curaddr + 12); if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { qemu_log(TARGET_FMT_plx \": %08x %08x %08x %08x\\n\", curaddr, a0, a1, a2, a3); } } } #endif } else { qemu_log_mask(CPU_LOG_MMU, \"No access allowed\\n\"); ret = -3; } } else { target_ulong sr; qemu_log_mask(CPU_LOG_MMU, \"direct store...\\n\"); /* Direct-store segment : absolutely *BUGGY* for now */ /* Direct-store implies a 32-bit MMU. * Check the Segment Register's bus unit ID (BUID). */ sr = env->sr[eaddr >> 28]; if ((sr & 0x1FF00000) >> 20 == 0x07f) { /* Memory-forced I/O controller interface access */ /* If T=1 and BUID=x'07F', the 601 performs a memory access * to SR[28-31] LA[4-31], bypassing all protection mechanisms. 
*/ ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return 0; } switch (type) { case ACCESS_INT: /* Integer load/store : only access allowed */ break; case ACCESS_CODE: /* No code fetch is allowed in direct-store areas */ return -4; case ACCESS_FLOAT: /* Floating point load/store */ return -4; case ACCESS_RES: /* lwarx, ldarx or srwcx. */ return -4; case ACCESS_CACHE: /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */ /* Should make the instruction do no-op. * As it already do no-op, it's quite easy :-) */ ctx->raddr = eaddr; return 0; case ACCESS_EXT: /* eciwx or ecowx */ return -4; default: qemu_log_mask(CPU_LOG_MMU, \"ERROR: instruction should not need \" \"address translation\\n\"); return -4; } if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) { ctx->raddr = eaddr; ret = 2; } else { ret = -2; } } return ret; }"} {"target": 0, "idx": 9225, "func": "static void slirp_socket_save(QEMUFile *f, struct socket *so) { qemu_put_be32(f, so->so_urgc); qemu_put_be16(f, so->so_ffamily); switch (so->so_ffamily) { case AF_INET: qemu_put_be32(f, so->so_faddr.s_addr); qemu_put_be16(f, so->so_fport); break; default: error_report( \"so_ffamily unknown, unable to save so_faddr and so_fport\\n\"); } qemu_put_be16(f, so->so_lfamily); switch (so->so_lfamily) { case AF_INET: qemu_put_be32(f, so->so_laddr.s_addr); qemu_put_be16(f, so->so_lport); break; default: error_report( \"so_ffamily unknown, unable to save so_laddr and so_lport\\n\"); } qemu_put_byte(f, so->so_iptos); qemu_put_byte(f, so->so_emu); qemu_put_byte(f, so->so_type); qemu_put_be32(f, so->so_state); slirp_sbuf_save(f, &so->so_rcv); slirp_sbuf_save(f, &so->so_snd); slirp_tcp_save(f, so->so_tcpcb); }"} {"target": 1, "idx": 9231, "func": "void object_property_allow_set_link(Object *obj, const char *name, Object *val, Error **errp) { /* Allow the link to be set, always */ }"} {"target": 0, "idx": 9238, "func": "static inline void mix_3f_1r_to_mono(AC3DecodeContext *ctx) { int i; float (*output)[256] = ctx->audio_block.block_output; for (i = 0; i < 256; i++) output[1][i] = (output[2][i] + output[3][i] + output[4][i]); memset(output[2], 0, sizeof(output[2])); memset(output[3], 0, sizeof(output[3])); memset(output[4], 0, sizeof(output[4])); }"} {"target": 0, "idx": 9246, "func": "static inline void RENAME(nv21ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused) { RENAME(nvXXtoUV)(dstV, dstU, src1, width); }"} {"target": 0, "idx": 9255, "func": "static uint64_t lan9118_readl(void *opaque, target_phys_addr_t offset, unsigned size) { lan9118_state *s = (lan9118_state *)opaque; //DPRINTF(\"Read reg 0x%02x\\n\", (int)offset); if (offset < 0x20) { /* RX FIFO */ return rx_fifo_pop(s); } switch (offset) { case 0x40: return rx_status_fifo_pop(s); case 0x44: return s->rx_status_fifo[s->tx_status_fifo_head]; case 0x48: return tx_status_fifo_pop(s); case 0x4c: return s->tx_status_fifo[s->tx_status_fifo_head]; case CSR_ID_REV: return 0x01180001; case CSR_IRQ_CFG: return s->irq_cfg; case CSR_INT_STS: return s->int_sts; case CSR_INT_EN: return s->int_en; case CSR_BYTE_TEST: return 0x87654321; case CSR_FIFO_INT: return s->fifo_int; case CSR_RX_CFG: return s->rx_cfg; case CSR_TX_CFG: return s->tx_cfg; case CSR_HW_CFG: return s->hw_cfg; case CSR_RX_DP_CTRL: return 0; case CSR_RX_FIFO_INF: return (s->rx_status_fifo_used << 16) | (s->rx_fifo_used << 2); case CSR_TX_FIFO_INF: return (s->tx_status_fifo_used << 16) | (s->tx_fifo_size - 
s->txp->fifo_used); case CSR_PMT_CTRL: return s->pmt_ctrl; case CSR_GPIO_CFG: return s->gpio_cfg; case CSR_GPT_CFG: return s->gpt_cfg; case CSR_GPT_CNT: return ptimer_get_count(s->timer); case CSR_WORD_SWAP: return s->word_swap; case CSR_FREE_RUN: return (qemu_get_clock_ns(vm_clock) / 40) - s->free_timer_start; case CSR_RX_DROP: /* TODO: Implement dropped frames counter. */ return 0; case CSR_MAC_CSR_CMD: return s->mac_cmd; case CSR_MAC_CSR_DATA: return s->mac_data; case CSR_AFC_CFG: return s->afc_cfg; case CSR_E2P_CMD: return s->e2p_cmd; case CSR_E2P_DATA: return s->e2p_data; } hw_error(\"lan9118_read: Bad reg 0x%x\\n\", (int)offset); return 0; }"} {"target": 0, "idx": 9258, "func": "static int read_low_coeffs(AVCodecContext *avctx, int16_t *dst, int size, int width, ptrdiff_t stride) { PixletContext *ctx = avctx->priv_data; GetBitContext *b = &ctx->gbit; unsigned cnt1, nbits, k, j = 0, i = 0; int64_t value, state = 3; int rlen, escape, flag = 0; while (i < size) { nbits = FFMIN(ff_clz((state >> 8) + 3) ^ 0x1F, 14); cnt1 = get_unary(b, 0, 8); if (cnt1 < 8) { value = show_bits(b, nbits); if (value <= 1) { skip_bits(b, nbits - 1); escape = ((1 << nbits) - 1) * cnt1; } else { skip_bits(b, nbits); escape = value + ((1 << nbits) - 1) * cnt1 - 1; } } else { escape = get_bits(b, 16); } value = -((escape + flag) & 1) | 1; dst[j++] = value * ((escape + flag + 1) >> 1); i++; if (j == width) { j = 0; dst += stride; } state = 120 * (escape + flag) + state - (120 * state >> 8); flag = 0; if (state * 4 > 0xFF || i >= size) continue; nbits = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24; escape = av_mod_uintp2(16383, nbits); cnt1 = get_unary(b, 0, 8); if (cnt1 > 7) { rlen = get_bits(b, 16); } else { value = show_bits(b, nbits); if (value > 1) { skip_bits(b, nbits); rlen = value + escape * cnt1 - 1; } else { skip_bits(b, nbits - 1); rlen = escape * cnt1; } } if (i + rlen > size) return AVERROR_INVALIDDATA; i += rlen; for (k = 0; k < rlen; k++) { dst[j++] = 0; if (j == width) { j = 0; dst += stride; } } state = 0; flag = rlen < 0xFFFF ? 
1 : 0; } align_get_bits(b); return get_bits_count(b) >> 3; }"} {"target": 0, "idx": 9260, "func": "static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int offset) { BDRVNBDState *s = bs->opaque; struct nbd_request request; struct nbd_reply reply; request.type = NBD_CMD_READ; request.from = sector_num * 512; request.len = nb_sectors * 512; nbd_coroutine_start(s, &request); if (nbd_co_send_request(s, &request, NULL, 0) == -1) { reply.error = errno; } else { nbd_co_receive_reply(s, &request, &reply, qiov->iov, offset); } nbd_coroutine_end(s, &request); return -reply.error; }"} {"target": 0, "idx": 9282, "func": "static int request_frame(AVFilterLink *link) { AVFilterContext *ctx = link->src; IDETContext *idet = ctx->priv; do { int ret; if (idet->eof) return AVERROR_EOF; ret = ff_request_frame(link->src->inputs[0]); if (ret == AVERROR_EOF && idet->cur) { AVFrame *next = av_frame_clone(idet->next); if (!next) return AVERROR(ENOMEM); filter_frame(link->src->inputs[0], next); idet->eof = 1; } else if (ret < 0) { return ret; } } while (!idet->cur); return 0; }"} {"target": 1, "idx": 9283, "func": "static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { DPCMContext *s = avctx->priv_data; int in, out = 0; int predictor[2]; int channel_number = 0; short *output_samples = data; int shift[2]; unsigned char byte; short diff; if (!buf_size) return 0; switch(avctx->codec->id) { case CODEC_ID_ROQ_DPCM: if (s->channels == 1) predictor[0] = AV_RL16(&buf[6]); else { predictor[0] = buf[7] << 8; predictor[1] = buf[6] << 8; } SE_16BIT(predictor[0]); SE_16BIT(predictor[1]); /* decode the samples */ for (in = 8, out = 0; in < buf_size; in++, out++) { predictor[channel_number] += s->roq_square_array[buf[in]]; predictor[channel_number] = av_clip_int16(predictor[channel_number]); output_samples[out] = predictor[channel_number]; /* toggle channel */ channel_number ^= s->channels - 1; } break; case CODEC_ID_INTERPLAY_DPCM: in = 6; /* skip over the stream mask and stream length */ predictor[0] = AV_RL16(&buf[in]); in += 2; SE_16BIT(predictor[0]) output_samples[out++] = predictor[0]; if (s->channels == 2) { predictor[1] = AV_RL16(&buf[in]); in += 2; SE_16BIT(predictor[1]) output_samples[out++] = predictor[1]; } while (in < buf_size) { predictor[channel_number] += interplay_delta_table[buf[in++]]; predictor[channel_number] = av_clip_int16(predictor[channel_number]); output_samples[out++] = predictor[channel_number]; /* toggle channel */ channel_number ^= s->channels - 1; } break; case CODEC_ID_XAN_DPCM: in = 0; shift[0] = shift[1] = 4; predictor[0] = AV_RL16(&buf[in]); in += 2; SE_16BIT(predictor[0]); if (s->channels == 2) { predictor[1] = AV_RL16(&buf[in]); in += 2; SE_16BIT(predictor[1]); } while (in < buf_size) { byte = buf[in++]; diff = (byte & 0xFC) << 8; if ((byte & 0x03) == 3) shift[channel_number]++; else shift[channel_number] -= (2 * (byte & 3)); /* saturate the shifter to a lower limit of 0 */ if (shift[channel_number] < 0) shift[channel_number] = 0; diff >>= shift[channel_number]; predictor[channel_number] += diff; predictor[channel_number] = av_clip_int16(predictor[channel_number]); output_samples[out++] = predictor[channel_number]; /* toggle channel */ channel_number ^= s->channels - 1; } break; case CODEC_ID_SOL_DPCM: in = 0; if (avctx->codec_tag != 3) { if(*data_size/4 < buf_size) while (in < buf_size) { int n1, n2; n1 = (buf[in] >> 4) & 0xF; n2 = buf[in++] & 0xF; s->sample[0] += s->sol_table[n1]; if 
(s->sample[0] < 0) s->sample[0] = 0; if (s->sample[0] > 255) s->sample[0] = 255; output_samples[out++] = (s->sample[0] - 128) << 8; s->sample[s->channels - 1] += s->sol_table[n2]; if (s->sample[s->channels - 1] < 0) s->sample[s->channels - 1] = 0; if (s->sample[s->channels - 1] > 255) s->sample[s->channels - 1] = 255; output_samples[out++] = (s->sample[s->channels - 1] - 128) << 8; } } else { while (in < buf_size) { int n; n = buf[in++]; if (n & 0x80) s->sample[channel_number] -= s->sol_table[n & 0x7F]; else s->sample[channel_number] += s->sol_table[n & 0x7F]; s->sample[channel_number] = av_clip_int16(s->sample[channel_number]); output_samples[out++] = s->sample[channel_number]; /* toggle channel */ channel_number ^= s->channels - 1; } } break; } *data_size = out * sizeof(short); return buf_size; }"} {"target": 1, "idx": 9301, "func": "int ff_rm_read_mdpr_codecdata(AVFormatContext *s, AVIOContext *pb, AVStream *st, RMStream *rst, unsigned int codec_data_size, const uint8_t *mime) { unsigned int v; int size; int64_t codec_pos; int ret; if (codec_data_size > INT_MAX) return AVERROR_INVALIDDATA; avpriv_set_pts_info(st, 64, 1, 1000); codec_pos = avio_tell(pb); v = avio_rb32(pb); if (v == MKBETAG('M', 'L', 'T', 'I')) { int number_of_streams = avio_rb16(pb); int number_of_mdpr; int i; for (i = 0; icodec, codec_data_size)) < 0) return ret; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_tag = AV_RL32(st->codec->extradata); st->codec->codec_id = ff_codec_get_id(ff_rm_codec_tags, st->codec->codec_tag); } else if(mime && !strcmp(mime, \"logical-fileinfo\")){ int stream_count, rule_count, property_count, i; ff_free_stream(s, st); if (avio_rb16(pb) != 0) { av_log(s, AV_LOG_WARNING, \"Unsupported version\\n\"); goto skip; } stream_count = avio_rb16(pb); avio_skip(pb, 6*stream_count); rule_count = avio_rb16(pb); avio_skip(pb, 2*rule_count); property_count = avio_rb16(pb); for(i=0; imetadata, name, val, 0); break; default: avio_skip(pb, avio_rb16(pb)); } } } else { int fps; if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) { fail1: av_log(s, AV_LOG_WARNING, \"Unsupported stream type %08x\\n\", v); goto skip; } st->codec->codec_tag = avio_rl32(pb); st->codec->codec_id = ff_codec_get_id(ff_rm_codec_tags, st->codec->codec_tag); av_dlog(s, \"%X %X\\n\", st->codec->codec_tag, MKTAG('R', 'V', '2', '0')); if (st->codec->codec_id == AV_CODEC_ID_NONE) goto fail1; st->codec->width = avio_rb16(pb); st->codec->height = avio_rb16(pb); avio_skip(pb, 2); // looks like bits per sample avio_skip(pb, 4); // always zero? 
st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS; fps = avio_rb32(pb); if ((ret = rm_read_extradata(pb, st->codec, codec_data_size - (avio_tell(pb) - codec_pos))) < 0) return ret; if (fps > 0) { av_reduce(&st->avg_frame_rate.den, &st->avg_frame_rate.num, 0x10000, fps, (1 << 30) - 1); #if FF_API_R_FRAME_RATE st->r_frame_rate = st->avg_frame_rate; #endif } else if (s->error_recognition & AV_EF_EXPLODE) { av_log(s, AV_LOG_ERROR, \"Invalid framerate\\n\"); return AVERROR_INVALIDDATA; } } skip: /* skip codec info */ size = avio_tell(pb) - codec_pos; avio_skip(pb, codec_data_size - size); return 0; }"} {"target": 1, "idx": 9330, "func": "int sd_do_command(SDState *sd, SDRequest *req, uint8_t *response) { uint32_t last_status = sd->card_status; sd_rsp_type_t rtype; int rsplen; if (!sd->bdrv || !bdrv_is_inserted(sd->bdrv) || !sd->enable) { return 0; } if (sd_req_crc_validate(req)) { sd->card_status &= ~COM_CRC_ERROR; return 0; } sd->card_status &= ~CARD_STATUS_B; sd_set_status(sd); if (last_status & CARD_IS_LOCKED) { if (!cmd_valid_while_locked(sd, req)) { sd->card_status |= ILLEGAL_COMMAND; fprintf(stderr, \"SD: Card is locked\\n\"); return 0; } } if (last_status & APP_CMD) { rtype = sd_app_command(sd, *req); sd->card_status &= ~APP_CMD; } else rtype = sd_normal_command(sd, *req); sd->current_cmd = req->cmd; switch (rtype) { case sd_r1: case sd_r1b: sd_response_r1_make(sd, response, last_status); rsplen = 4; break; case sd_r2_i: memcpy(response, sd->cid, sizeof(sd->cid)); rsplen = 16; break; case sd_r2_s: memcpy(response, sd->csd, sizeof(sd->csd)); rsplen = 16; break; case sd_r3: sd_response_r3_make(sd, response); rsplen = 4; break; case sd_r6: sd_response_r6_make(sd, response); rsplen = 4; break; case sd_r7: sd_response_r7_make(sd, response); rsplen = 4; break; case sd_r0: default: rsplen = 0; break; } if (sd->card_status & ILLEGAL_COMMAND) rsplen = 0; #ifdef DEBUG_SD if (rsplen) { int i; DPRINTF(\"Response:\"); for (i = 0; i < rsplen; i++) printf(\" %02x\", response[i]); printf(\" state %d\\n\", sd->state); } else { DPRINTF(\"No response %d\\n\", sd->state); } #endif return rsplen; }"} {"target": 0, "idx": 9343, "func": "static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; DPCMContext *s = avctx->priv_data; int in, out = 0; int predictor[2]; int ch = 0; int stereo = s->channels - 1; short *output_samples = data; int shift[2]; unsigned char byte; short diff; if (!buf_size) return 0; // almost every DPCM variant expands one byte of data into two if(*data_size/2 < buf_size) return -1; switch(avctx->codec->id) { case CODEC_ID_ROQ_DPCM: if (stereo) { predictor[0] = buf[7] << 8; predictor[1] = buf[6] << 8; } else { predictor[0] = AV_RL16(&buf[6]); } SE_16BIT(predictor[0]); SE_16BIT(predictor[1]); /* decode the samples */ for (in = 8, out = 0; in < buf_size; in++, out++) { predictor[ch] += s->roq_square_array[buf[in]]; predictor[ch] = av_clip_int16(predictor[ch]); output_samples[out] = predictor[ch]; /* toggle channel */ ch ^= stereo; } break; case CODEC_ID_INTERPLAY_DPCM: in = 6; /* skip over the stream mask and stream length */ predictor[0] = AV_RL16(&buf[in]); in += 2; SE_16BIT(predictor[0]) output_samples[out++] = predictor[0]; if (stereo) { predictor[1] = AV_RL16(&buf[in]); in += 2; SE_16BIT(predictor[1]) output_samples[out++] = predictor[1]; } while (in < buf_size) { predictor[ch] += interplay_delta_table[buf[in++]]; predictor[ch] = 
av_clip_int16(predictor[ch]); output_samples[out++] = predictor[ch]; /* toggle channel */ ch ^= stereo; } break; case CODEC_ID_XAN_DPCM: in = 0; shift[0] = shift[1] = 4; predictor[0] = AV_RL16(&buf[in]); in += 2; SE_16BIT(predictor[0]); if (stereo) { predictor[1] = AV_RL16(&buf[in]); in += 2; SE_16BIT(predictor[1]); } while (in < buf_size) { byte = buf[in++]; diff = (byte & 0xFC) << 8; if ((byte & 0x03) == 3) shift[ch]++; else shift[ch] -= (2 * (byte & 3)); /* saturate the shifter to a lower limit of 0 */ if (shift[ch] < 0) shift[ch] = 0; diff >>= shift[ch]; predictor[ch] += diff; predictor[ch] = av_clip_int16(predictor[ch]); output_samples[out++] = predictor[ch]; /* toggle channel */ ch ^= stereo; } break; case CODEC_ID_SOL_DPCM: in = 0; if (avctx->codec_tag != 3) { if(*data_size/4 < buf_size) return -1; while (in < buf_size) { int n1, n2; n1 = (buf[in] >> 4) & 0xF; n2 = buf[in++] & 0xF; s->sample[0] += s->sol_table[n1]; if (s->sample[0] < 0) s->sample[0] = 0; if (s->sample[0] > 255) s->sample[0] = 255; output_samples[out++] = (s->sample[0] - 128) << 8; s->sample[stereo] += s->sol_table[n2]; if (s->sample[stereo] < 0) s->sample[stereo] = 0; if (s->sample[stereo] > 255) s->sample[stereo] = 255; output_samples[out++] = (s->sample[stereo] - 128) << 8; } } else { while (in < buf_size) { int n; n = buf[in++]; if (n & 0x80) s->sample[ch] -= s->sol_table[n & 0x7F]; else s->sample[ch] += s->sol_table[n & 0x7F]; s->sample[ch] = av_clip_int16(s->sample[ch]); output_samples[out++] = s->sample[ch]; /* toggle channel */ ch ^= stereo; } } break; } *data_size = out * sizeof(short); return buf_size; }"} {"target": 1, "idx": 9347, "func": "static void picmemset(PicContext *s, AVFrame *frame, unsigned value, int run, int *x, int *y, int *plane, int bits_per_plane) { uint8_t *d; int shift = *plane * bits_per_plane; unsigned mask = ((1 << bits_per_plane) - 1) << shift; value <<= shift; while (run > 0) { int j; for (j = 8-bits_per_plane; j >= 0; j -= bits_per_plane) { d = frame->data[0] + *y * frame->linesize[0]; d[*x] |= (value >> j) & mask; *x += 1; if (*x == s->width) { *y -= 1; *x = 0; if (*y < 0) { *y = s->height - 1; *plane += 1; if (*plane >= s->nb_planes) return; value <<= bits_per_plane; mask <<= bits_per_plane; } } } run--; } }"} {"target": 1, "idx": 9349, "func": "mp_image_t* vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h){ MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, next_vf)); mp_image_t* mpi=NULL; int w2; int number = mp_imgtype >> 16; av_assert0(vf->next == NULL); // all existing filters call this just on next //vf_dint needs these as it calls vf_get_image() before configuring the output if(vf->w==0 && w>0) vf->w=w; if(vf->h==0 && h>0) vf->h=h; av_assert0(w == -1 || w >= vf->w); av_assert0(h == -1 || h >= vf->h); av_assert0(vf->w > 0); av_assert0(vf->h > 0); av_log(m->avfctx, AV_LOG_DEBUG, \"get_image: %d:%d, vf: %d:%d\\n\", w,h,vf->w,vf->h); if (w == -1) w = vf->w; if (h == -1) h = vf->h; w2=(mp_imgflag&MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE)?((w+15)&(~15)):w; // Note: we should call libvo first to check if it supports direct rendering // and if not, then fallback to software buffers: switch(mp_imgtype & 0xff){ case MP_IMGTYPE_EXPORT: if(!vf->imgctx.export_images[0]) vf->imgctx.export_images[0]=new_mp_image(w2,h); mpi=vf->imgctx.export_images[0]; break; case MP_IMGTYPE_STATIC: if(!vf->imgctx.static_images[0]) vf->imgctx.static_images[0]=new_mp_image(w2,h); mpi=vf->imgctx.static_images[0]; break; case MP_IMGTYPE_TEMP: 
if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=new_mp_image(w2,h); mpi=vf->imgctx.temp_images[0]; break; case MP_IMGTYPE_IPB: if(!(mp_imgflag&MP_IMGFLAG_READABLE)){ // B frame: if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=new_mp_image(w2,h); mpi=vf->imgctx.temp_images[0]; break; } case MP_IMGTYPE_IP: if(!vf->imgctx.static_images[vf->imgctx.static_idx]) vf->imgctx.static_images[vf->imgctx.static_idx]=new_mp_image(w2,h); mpi=vf->imgctx.static_images[vf->imgctx.static_idx]; vf->imgctx.static_idx^=1; break; case MP_IMGTYPE_NUMBERED: if (number == -1) { int i; for (i = 0; i < NUM_NUMBERED_MPI; i++) if (!vf->imgctx.numbered_images[i] || !vf->imgctx.numbered_images[i]->usage_count) break; number = i; } if (number < 0 || number >= NUM_NUMBERED_MPI) return NULL; if (!vf->imgctx.numbered_images[number]) vf->imgctx.numbered_images[number] = new_mp_image(w2,h); mpi = vf->imgctx.numbered_images[number]; mpi->number = number; break; } if(mpi){ mpi->type=mp_imgtype; mpi->w=vf->w; mpi->h=vf->h; // keep buffer allocation status & color flags only: // mpi->flags&=~(MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE|MP_IMGFLAG_DIRECT); mpi->flags&=MP_IMGFLAG_ALLOCATED|MP_IMGFLAG_TYPE_DISPLAYED|MP_IMGFLAGMASK_COLORS; // accept restrictions, draw_slice and palette flags only: mpi->flags|=mp_imgflag&(MP_IMGFLAGMASK_RESTRICTIONS|MP_IMGFLAG_DRAW_CALLBACK|MP_IMGFLAG_RGB_PALETTE); if(!vf->draw_slice) mpi->flags&=~MP_IMGFLAG_DRAW_CALLBACK; if(mpi->width!=w2 || mpi->height!=h){ // printf(\"vf.c: MPI parameters changed! %dx%d -> %dx%d \\n\", mpi->width,mpi->height,w2,h); if(mpi->flags&MP_IMGFLAG_ALLOCATED){ if(mpi->widthheightplanes[0]); mpi->flags&=~MP_IMGFLAG_ALLOCATED; mp_msg(MSGT_VFILTER,MSGL_V,\"vf.c: have to REALLOCATE buffer memory :(\\n\"); } // } else { } { mpi->width=w2; mpi->chroma_width=(w2 + (1<chroma_x_shift) - 1)>>mpi->chroma_x_shift; mpi->height=h; mpi->chroma_height=(h + (1<chroma_y_shift) - 1)>>mpi->chroma_y_shift; } } if(!mpi->bpp) mp_image_setfmt(mpi,outfmt); if(!(mpi->flags&MP_IMGFLAG_ALLOCATED) && mpi->type>MP_IMGTYPE_EXPORT){ av_assert0(!vf->get_image); // check libvo first! if(vf->get_image) vf->get_image(vf,mpi); if(!(mpi->flags&MP_IMGFLAG_DIRECT)){ // non-direct and not yet allocated image. allocate it! if (!mpi->bpp) { // no way we can allocate this mp_msg(MSGT_DECVIDEO, MSGL_FATAL, \"vf_get_image: Tried to allocate a format that can not be allocated!\\n\"); return NULL; } // check if codec prefer aligned stride: if(mp_imgflag&MP_IMGFLAG_PREFER_ALIGNED_STRIDE){ int align=(mpi->flags&MP_IMGFLAG_PLANAR && mpi->flags&MP_IMGFLAG_YUV) ? (8<chroma_x_shift)-1 : 15; // -- maybe FIXME w2=((w+align)&(~align)); if(mpi->width!=w2){ #if 0 // we have to change width... check if we CAN co it: int flags=vf->query_format(vf,outfmt); // should not fail if(!(flags&3)) mp_msg(MSGT_DECVIDEO,MSGL_WARN,\"??? 
vf_get_image{vf->query_format(outfmt)} failed!\\n\"); // printf(\"query -> 0x%X \\n\",flags); if(flags&VFCAP_ACCEPT_STRIDE){ #endif mpi->width=w2; mpi->chroma_width=(w2 + (1<chroma_x_shift) - 1)>>mpi->chroma_x_shift; // } } } mp_image_alloc_planes(mpi); // printf(\"clearing img!\\n\"); vf_mpi_clear(mpi,0,0,mpi->width,mpi->height); } } av_assert0(!vf->start_slice); if(mpi->flags&MP_IMGFLAG_DRAW_CALLBACK) if(vf->start_slice) vf->start_slice(vf,mpi); if(!(mpi->flags&MP_IMGFLAG_TYPE_DISPLAYED)){ mp_msg(MSGT_DECVIDEO,MSGL_V,\"*** [%s] %s%s mp_image_t, %dx%dx%dbpp %s %s, %d bytes\\n\", \"NULL\"/*vf->info->name*/, (mpi->type==MP_IMGTYPE_EXPORT)?\"Exporting\": ((mpi->flags&MP_IMGFLAG_DIRECT)?\"Direct Rendering\":\"Allocating\"), (mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)?\" (slices)\":\"\", mpi->width,mpi->height,mpi->bpp, (mpi->flags&MP_IMGFLAG_YUV)?\"YUV\":((mpi->flags&MP_IMGFLAG_SWAPPED)?\"BGR\":\"RGB\"), (mpi->flags&MP_IMGFLAG_PLANAR)?\"planar\":\"packed\", mpi->bpp*mpi->width*mpi->height/8); mp_msg(MSGT_DECVIDEO,MSGL_DBG2,\"(imgfmt: %x, planes: %p,%p,%p strides: %d,%d,%d, chroma: %dx%d, shift: h:%d,v:%d)\\n\", mpi->imgfmt, mpi->planes[0], mpi->planes[1], mpi->planes[2], mpi->stride[0], mpi->stride[1], mpi->stride[2], mpi->chroma_width, mpi->chroma_height, mpi->chroma_x_shift, mpi->chroma_y_shift); mpi->flags|=MP_IMGFLAG_TYPE_DISPLAYED; } mpi->qscale = NULL; } mpi->usage_count++; // printf(\"\\rVF_MPI: %p %p %p %d %d %d \\n\", // mpi->planes[0],mpi->planes[1],mpi->planes[2], // mpi->stride[0],mpi->stride[1],mpi->stride[2]); return mpi; }"} {"target": 1, "idx": 9351, "func": "static int parse_MP4SLDescrTag(MP4DescrParseContext *d, int64_t off, int len) { Mp4Descr *descr = d->active_descr; int predefined; if (!descr) return -1; predefined = avio_r8(&d->pb); if (!predefined) { int lengths; int flags = avio_r8(&d->pb); descr->sl.use_au_start = !!(flags & 0x80); descr->sl.use_au_end = !!(flags & 0x40); descr->sl.use_rand_acc_pt = !!(flags & 0x20); descr->sl.use_padding = !!(flags & 0x08); descr->sl.use_timestamps = !!(flags & 0x04); descr->sl.use_idle = !!(flags & 0x02); descr->sl.timestamp_res = avio_rb32(&d->pb); avio_rb32(&d->pb); descr->sl.timestamp_len = avio_r8(&d->pb); descr->sl.ocr_len = avio_r8(&d->pb); descr->sl.au_len = avio_r8(&d->pb); descr->sl.inst_bitrate_len = avio_r8(&d->pb); lengths = avio_rb16(&d->pb); descr->sl.degr_prior_len = lengths >> 12; descr->sl.au_seq_num_len = (lengths >> 7) & 0x1f; descr->sl.packet_seq_num_len = (lengths >> 2) & 0x1f; } else { avpriv_report_missing_feature(d->s, \"Predefined SLConfigDescriptor\"); return 0;"} {"target": 1, "idx": 9352, "func": "target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int64_t t1 = extract64(r1, 0, 32); int64_t t2 = extract64(r2, 0, 32); int64_t result = t1 * t2; return suov32(env, result); }"} {"target": 1, "idx": 9366, "func": "static int parse_chunks(AVFormatContext *s, int mode, int64_t seekts, int *len_ptr) { WtvContext *wtv = s->priv_data; AVIOContext *pb = wtv->pb; while (!url_feof(pb)) { ff_asf_guid g; int len, sid, consumed; ff_get_guid(pb, &g); len = avio_rl32(pb); if (len < 32) break; sid = avio_rl32(pb) & 0x7FFF; avio_skip(pb, 8); consumed = 32; if (!ff_guidcmp(g, ff_SBE2_STREAM_DESC_EVENT)) { if (ff_find_stream_index(s, sid) < 0) { ff_asf_guid mediatype, subtype, formattype; int size; avio_skip(pb, 28); ff_get_guid(pb, &mediatype); ff_get_guid(pb, &subtype); avio_skip(pb, 12); ff_get_guid(pb, &formattype); size = avio_rl32(pb); parse_media_type(s, 0, sid, mediatype, subtype, 
formattype, size); consumed += 92 + size; } } else if (!ff_guidcmp(g, ff_stream2_guid)) { int stream_index = ff_find_stream_index(s, sid); if (stream_index >= 0 && !((WtvStream*)s->streams[stream_index]->priv_data)->seen_data) { ff_asf_guid mediatype, subtype, formattype; int size; avio_skip(pb, 12); ff_get_guid(pb, &mediatype); ff_get_guid(pb, &subtype); avio_skip(pb, 12); ff_get_guid(pb, &formattype); size = avio_rl32(pb); parse_media_type(s, s->streams[stream_index], sid, mediatype, subtype, formattype, size); consumed += 76 + size; } } else if (!ff_guidcmp(g, EVENTID_AudioDescriptorSpanningEvent) || !ff_guidcmp(g, EVENTID_CtxADescriptorSpanningEvent) || !ff_guidcmp(g, EVENTID_CSDescriptorSpanningEvent) || !ff_guidcmp(g, EVENTID_StreamIDSpanningEvent) || !ff_guidcmp(g, EVENTID_SubtitleSpanningEvent) || !ff_guidcmp(g, EVENTID_TeletextSpanningEvent)) { int stream_index = ff_find_stream_index(s, sid); if (stream_index >= 0) { AVStream *st = s->streams[stream_index]; uint8_t buf[258]; const uint8_t *pbuf = buf; int buf_size; avio_skip(pb, 8); consumed += 8; if (!ff_guidcmp(g, EVENTID_CtxADescriptorSpanningEvent) || !ff_guidcmp(g, EVENTID_CSDescriptorSpanningEvent)) { avio_skip(pb, 6); consumed += 6; } buf_size = FFMIN(len - consumed, sizeof(buf)); avio_read(pb, buf, buf_size); consumed += buf_size; ff_parse_mpeg2_descriptor(s, st, 0, &pbuf, buf + buf_size, NULL, 0, 0, NULL); } } else if (!ff_guidcmp(g, EVENTID_AudioTypeSpanningEvent)) { int stream_index = ff_find_stream_index(s, sid); if (stream_index >= 0) { AVStream *st = s->streams[stream_index]; int audio_type; avio_skip(pb, 8); audio_type = avio_r8(pb); if (audio_type == 2) st->disposition |= AV_DISPOSITION_HEARING_IMPAIRED; else if (audio_type == 3) st->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED; consumed += 9; } } else if (!ff_guidcmp(g, EVENTID_DVBScramblingControlSpanningEvent)) { int stream_index = ff_find_stream_index(s, sid); if (stream_index >= 0) { avio_skip(pb, 12); if (avio_rl32(pb)) av_log(s, AV_LOG_WARNING, \"DVB scrambled stream detected (st:%d), decoding will likely fail\\n\", stream_index); consumed += 16; } } else if (!ff_guidcmp(g, EVENTID_LanguageSpanningEvent)) { int stream_index = ff_find_stream_index(s, sid); if (stream_index >= 0) { AVStream *st = s->streams[stream_index]; uint8_t language[4]; avio_skip(pb, 12); avio_read(pb, language, 3); if (language[0]) { language[3] = 0; av_dict_set(&st->metadata, \"language\", language, 0); if (!strcmp(language, \"nar\") || !strcmp(language, \"NAR\")) st->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED; } consumed += 15; } } else if (!ff_guidcmp(g, ff_timestamp_guid)) { int stream_index = ff_find_stream_index(s, sid); if (stream_index >= 0) { avio_skip(pb, 8); wtv->pts = avio_rl64(pb); consumed += 16; if (wtv->pts == -1) wtv->pts = AV_NOPTS_VALUE; else { wtv->last_valid_pts = wtv->pts; if (wtv->epoch == AV_NOPTS_VALUE || wtv->pts < wtv->epoch) wtv->epoch = wtv->pts; if (mode == SEEK_TO_PTS && wtv->pts >= seekts) { avio_skip(pb, WTV_PAD8(len) - consumed); return 0; } } } } else if (!ff_guidcmp(g, ff_data_guid)) { int stream_index = ff_find_stream_index(s, sid); if (mode == SEEK_TO_DATA && stream_index >= 0 && len > 32 && s->streams[stream_index]->priv_data) { WtvStream *wst = s->streams[stream_index]->priv_data; wst->seen_data = 1; if (len_ptr) { *len_ptr = len; } return stream_index; } } else if (!ff_guidcmp(g, /* DSATTRIB_WMDRMProtectionInfo */ (const ff_asf_guid){0x83,0x95,0x74,0x40,0x9D,0x6B,0xEC,0x4E,0xB4,0x3C,0x67,0xA1,0x80,0x1E,0x1A,0x9B})) { int stream_index = 
ff_find_stream_index(s, sid); if (stream_index >= 0) av_log(s, AV_LOG_WARNING, \"encrypted stream detected (st:%d), decoding will likely fail\\n\", stream_index); } else if ( !ff_guidcmp(g, /* DSATTRIB_CAPTURE_STREAMTIME */ (const ff_asf_guid){0x14,0x56,0x1A,0x0C,0xCD,0x30,0x40,0x4F,0xBC,0xBF,0xD0,0x3E,0x52,0x30,0x62,0x07}) || !ff_guidcmp(g, /* DSATTRIB_PBDATAG_ATTRIBUTE */ (const ff_asf_guid){0x79,0x66,0xB5,0xE0,0xB9,0x12,0xCC,0x43,0xB7,0xDF,0x57,0x8C,0xAA,0x5A,0x7B,0x63}) || !ff_guidcmp(g, /* DSATTRIB_PicSampleSeq */ (const ff_asf_guid){0x02,0xAE,0x5B,0x2F,0x8F,0x7B,0x60,0x4F,0x82,0xD6,0xE4,0xEA,0x2F,0x1F,0x4C,0x99}) || !ff_guidcmp(g, /* DSATTRIB_TRANSPORT_PROPERTIES */ ff_DSATTRIB_TRANSPORT_PROPERTIES) || !ff_guidcmp(g, /* dvr_ms_vid_frame_rep_data */ (const ff_asf_guid){0xCC,0x32,0x64,0xDD,0x29,0xE2,0xDB,0x40,0x80,0xF6,0xD2,0x63,0x28,0xD2,0x76,0x1F}) || !ff_guidcmp(g, /* EVENTID_ChannelChangeSpanningEvent */ (const ff_asf_guid){0xE5,0xC5,0x67,0x90,0x5C,0x4C,0x05,0x42,0x86,0xC8,0x7A,0xFE,0x20,0xFE,0x1E,0xFA}) || !ff_guidcmp(g, /* EVENTID_ChannelInfoSpanningEvent */ (const ff_asf_guid){0x80,0x6D,0xF3,0x41,0x32,0x41,0xC2,0x4C,0xB1,0x21,0x01,0xA4,0x32,0x19,0xD8,0x1B}) || !ff_guidcmp(g, /* EVENTID_ChannelTypeSpanningEvent */ (const ff_asf_guid){0x51,0x1D,0xAB,0x72,0xD2,0x87,0x9B,0x48,0xBA,0x11,0x0E,0x08,0xDC,0x21,0x02,0x43}) || !ff_guidcmp(g, /* EVENTID_PIDListSpanningEvent */ (const ff_asf_guid){0x65,0x8F,0xFC,0x47,0xBB,0xE2,0x34,0x46,0x9C,0xEF,0xFD,0xBF,0xE6,0x26,0x1D,0x5C}) || !ff_guidcmp(g, /* EVENTID_SignalAndServiceStatusSpanningEvent */ (const ff_asf_guid){0xCB,0xC5,0x68,0x80,0x04,0x3C,0x2B,0x49,0xB4,0x7D,0x03,0x08,0x82,0x0D,0xCE,0x51}) || !ff_guidcmp(g, /* EVENTID_StreamTypeSpanningEvent */ (const ff_asf_guid){0xBC,0x2E,0xAF,0x82,0xA6,0x30,0x64,0x42,0xA8,0x0B,0xAD,0x2E,0x13,0x72,0xAC,0x60}) || !ff_guidcmp(g, (const ff_asf_guid){0x1E,0xBE,0xC3,0xC5,0x43,0x92,0xDC,0x11,0x85,0xE5,0x00,0x12,0x3F,0x6F,0x73,0xB9}) || !ff_guidcmp(g, (const ff_asf_guid){0x3B,0x86,0xA2,0xB1,0xEB,0x1E,0xC3,0x44,0x8C,0x88,0x1C,0xA3,0xFF,0xE3,0xE7,0x6A}) || !ff_guidcmp(g, (const ff_asf_guid){0x4E,0x7F,0x4C,0x5B,0xC4,0xD0,0x38,0x4B,0xA8,0x3E,0x21,0x7F,0x7B,0xBF,0x52,0xE7}) || !ff_guidcmp(g, (const ff_asf_guid){0x63,0x36,0xEB,0xFE,0xA1,0x7E,0xD9,0x11,0x83,0x08,0x00,0x07,0xE9,0x5E,0xAD,0x8D}) || !ff_guidcmp(g, (const ff_asf_guid){0x70,0xE9,0xF1,0xF8,0x89,0xA4,0x4C,0x4D,0x83,0x73,0xB8,0x12,0xE0,0xD5,0xF8,0x1E}) || !ff_guidcmp(g, ff_index_guid) || !ff_guidcmp(g, ff_sync_guid) || !ff_guidcmp(g, ff_stream1_guid) || !ff_guidcmp(g, (const ff_asf_guid){0xF7,0x10,0x02,0xB9,0xEE,0x7C,0xED,0x4E,0xBD,0x7F,0x05,0x40,0x35,0x86,0x18,0xA1})) { //ignore known guids } else av_log(s, AV_LOG_WARNING, \"unsupported chunk:\"FF_PRI_GUID\"\\n\", FF_ARG_GUID(g)); avio_skip(pb, WTV_PAD8(len) - consumed); } return AVERROR_EOF; }"} {"target": 1, "idx": 9399, "func": "static void virtio_rng_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); dc->props = virtio_rng_properties; set_bit(DEVICE_CATEGORY_MISC, dc->categories); vdc->realize = virtio_rng_device_realize; vdc->unrealize = virtio_rng_device_unrealize; vdc->get_features = get_features; vdc->load = virtio_rng_load_device; }"} {"target": 1, "idx": 9418, "func": "int kvm_init(int smp_cpus) { KVMState *s; int ret; int i; if (smp_cpus > 1) return -EINVAL; s = qemu_mallocz(sizeof(KVMState)); if (s == NULL) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(s->slots); i++) s->slots[i].slot = i; s->vmfd = -1; s->fd = 
open(\"/dev/kvm\", O_RDWR); if (s->fd == -1) { fprintf(stderr, \"Could not access KVM kernel module: %m\\n\"); ret = -errno; goto err; } ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0); if (ret < KVM_API_VERSION) { ret = -EINVAL; fprintf(stderr, \"kvm version too old\\n\"); goto err; } if (ret > KVM_API_VERSION) { ret = -EINVAL; fprintf(stderr, \"kvm version not supported\\n\"); goto err; } s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0); if (s->vmfd < 0) goto err; /* initially, KVM allocated its own memory and we had to jump through * hooks to make phys_ram_base point to this. Modern versions of KVM * just use a user allocated buffer so we can use phys_ram_base * unmodified. Make sure we have a sufficiently modern version of KVM. */ ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY); if (ret <= 0) { if (ret == 0) ret = -EINVAL; fprintf(stderr, \"kvm does not support KVM_CAP_USER_MEMORY\\n\"); goto err; } /* There was a nasty bug in < kvm-80 that prevents memory slots from being * destroyed properly. Since we rely on this capability, refuse to work * with any kernel without this capability. */ ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_DESTROY_MEMORY_REGION_WORKS); if (ret <= 0) { if (ret == 0) ret = -EINVAL; fprintf(stderr, \"KVM kernel module broken (DESTROY_MEMORY_REGION)\\n\" \"Please upgrade to at least kvm-81.\\n\"); goto err; } ret = kvm_arch_init(s, smp_cpus); if (ret < 0) goto err; kvm_state = s; return 0; err: if (s) { if (s->vmfd != -1) close(s->vmfd); if (s->fd != -1) close(s->fd); } qemu_free(s); return ret; }"} {"target": 1, "idx": 9434, "func": "static int64_t nfs_client_open(NFSClient *client, const char *filename, int flags, Error **errp) { int ret = -EINVAL, i; struct stat st; URI *uri; QueryParams *qp = NULL; char *file = NULL, *strp = NULL; uri = uri_parse(filename); if (!uri) { error_setg(errp, \"Invalid URL specified\"); goto fail; } if (!uri->server) { error_setg(errp, \"Invalid URL specified\"); goto fail; } strp = strrchr(uri->path, '/'); if (strp == NULL) { error_setg(errp, \"Invalid URL specified\"); goto fail; } file = g_strdup(strp); *strp = 0; client->context = nfs_init_context(); if (client->context == NULL) { error_setg(errp, \"Failed to init NFS context\"); goto fail; } qp = query_params_parse(uri->query); for (i = 0; i < qp->n; i++) { if (!qp->p[i].value) { error_setg(errp, \"Value for NFS parameter expected: %s\", qp->p[i].name); goto fail; } if (!strncmp(qp->p[i].name, \"uid\", 3)) { nfs_set_uid(client->context, atoi(qp->p[i].value)); } else if (!strncmp(qp->p[i].name, \"gid\", 3)) { nfs_set_gid(client->context, atoi(qp->p[i].value)); } else if (!strncmp(qp->p[i].name, \"tcp-syncnt\", 10)) { nfs_set_tcp_syncnt(client->context, atoi(qp->p[i].value)); } else { error_setg(errp, \"Unknown NFS parameter name: %s\", qp->p[i].name); goto fail; } } ret = nfs_mount(client->context, uri->server, uri->path); if (ret < 0) { error_setg(errp, \"Failed to mount nfs share: %s\", nfs_get_error(client->context)); goto fail; } if (flags & O_CREAT) { ret = nfs_creat(client->context, file, 0600, &client->fh); if (ret < 0) { error_setg(errp, \"Failed to create file: %s\", nfs_get_error(client->context)); goto fail; } } else { ret = nfs_open(client->context, file, flags, &client->fh); if (ret < 0) { error_setg(errp, \"Failed to open file : %s\", nfs_get_error(client->context)); goto fail; } } ret = nfs_fstat(client->context, client->fh, &st); if (ret < 0) { error_setg(errp, \"Failed to fstat file: %s\", nfs_get_error(client->context)); goto fail; } ret = 
DIV_ROUND_UP(st.st_size, BDRV_SECTOR_SIZE); client->has_zero_init = S_ISREG(st.st_mode); goto out; fail: nfs_client_close(client); out: if (qp) { query_params_free(qp); } uri_free(uri); g_free(file); return ret; }"} {"target": 1, "idx": 9435, "func": "static int nut_read_header(AVFormatContext * avf, AVFormatParameters * ap) { NUTContext * priv = avf->priv_data; AVIOContext * bc = avf->pb; nut_demuxer_opts_tt dopts = { .input = { .priv = bc, .seek = av_seek, .read = av_read, .eof = NULL, .file_pos = 0, }, .alloc = { av_malloc, av_realloc, av_free }, .read_index = 1, .cache_syncpoints = 1, }; nut_context_tt * nut = priv->nut = nut_demuxer_init(&dopts); nut_stream_header_tt * s; int ret, i; if ((ret = nut_read_headers(nut, &s, NULL))) { av_log(avf, AV_LOG_ERROR, \" NUT error: %s\\n\", nut_error(ret)); return -1; priv->s = s; for (i = 0; s[i].type != -1 && i < 2; i++) { AVStream * st = avformat_new_stream(avf, NULL); int j; for (j = 0; j < s[i].fourcc_len && j < 8; j++) st->codec->codec_tag |= s[i].fourcc[j]<<(j*8); st->codec->has_b_frames = s[i].decode_delay; st->codec->extradata_size = s[i].codec_specific_len; if (st->codec->extradata_size) { st->codec->extradata = av_mallocz(st->codec->extradata_size); memcpy(st->codec->extradata, s[i].codec_specific, st->codec->extradata_size); avpriv_set_pts_info(avf->streams[i], 60, s[i].time_base.num, s[i].time_base.den); st->start_time = 0; st->duration = s[i].max_pts; st->codec->codec_id = ff_codec_get_id(nut_tags, st->codec->codec_tag); switch(s[i].type) { case NUT_AUDIO_CLASS: st->codec->codec_type = AVMEDIA_TYPE_AUDIO; if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, st->codec->codec_tag); st->codec->channels = s[i].channel_count; st->codec->sample_rate = s[i].samplerate_num / s[i].samplerate_denom; break; case NUT_VIDEO_CLASS: st->codec->codec_type = AVMEDIA_TYPE_VIDEO; if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, st->codec->codec_tag); st->codec->width = s[i].width; st->codec->height = s[i].height; st->sample_aspect_ratio.num = s[i].sample_width; st->sample_aspect_ratio.den = s[i].sample_height; break; if (st->codec->codec_id == CODEC_ID_NONE) av_log(avf, AV_LOG_ERROR, \"Unknown codec?!\\n\"); return 0;"} {"target": 1, "idx": 9437, "func": "static void ppc_cpu_realizefn(DeviceState *dev, Error **errp) { CPUState *cs = CPU(dev); PowerPCCPU *cpu = POWERPC_CPU(dev); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); Error *local_err = NULL; #if !defined(CONFIG_USER_ONLY) int max_smt = kvm_enabled() ? kvmppc_smt_threads() : 1; #endif #if !defined(CONFIG_USER_ONLY) if (smp_threads > max_smt) { error_setg(errp, \"Cannot support more than %d threads on PPC with %s\", max_smt, kvm_enabled() ? \"KVM\" : \"TCG\"); if (!is_power_of_2(smp_threads)) { error_setg(errp, \"Cannot support %d threads on PPC with %s, \" \"threads count must be a power of 2.\", smp_threads, kvm_enabled() ? \"KVM\" : \"TCG\"); #endif cpu_exec_init(cs, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); #if !defined(CONFIG_USER_ONLY) cpu->cpu_dt_id = (cs->cpu_index / smp_threads) * max_smt + (cs->cpu_index % smp_threads); #endif if (tcg_enabled()) { if (ppc_fixup_cpu(cpu) != 0) { error_setg(errp, \"Unable to emulate selected CPU with TCG\"); #if defined(TARGET_PPCEMB) if (!ppc_cpu_is_valid(pcc)) { error_setg(errp, \"CPU does not possess a BookE or 4xx MMU. 
\" \"Please use qemu-system-ppc or qemu-system-ppc64 instead \" \"or choose another CPU model.\"); #endif create_ppc_opcodes(cpu, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); init_ppc_proc(cpu); if (pcc->insns_flags & PPC_FLOAT) { gdb_register_coprocessor(cs, gdb_get_float_reg, gdb_set_float_reg, 33, \"power-fpu.xml\", 0); if (pcc->insns_flags & PPC_ALTIVEC) { gdb_register_coprocessor(cs, gdb_get_avr_reg, gdb_set_avr_reg, 34, \"power-altivec.xml\", 0); if (pcc->insns_flags & PPC_SPE) { gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg, 34, \"power-spe.xml\", 0); if (pcc->insns_flags2 & PPC2_VSX) { gdb_register_coprocessor(cs, gdb_get_vsx_reg, gdb_set_vsx_reg, 32, \"power-vsx.xml\", 0); qemu_init_vcpu(cs); pcc->parent_realize(dev, errp); #if defined(PPC_DUMP_CPU) { CPUPPCState *env = &cpu->env; const char *mmu_model, *excp_model, *bus_model; switch (env->mmu_model) { case POWERPC_MMU_32B: mmu_model = \"PowerPC 32\"; break; case POWERPC_MMU_SOFT_6xx: mmu_model = \"PowerPC 6xx/7xx with software driven TLBs\"; break; case POWERPC_MMU_SOFT_74xx: mmu_model = \"PowerPC 74xx with software driven TLBs\"; break; case POWERPC_MMU_SOFT_4xx: mmu_model = \"PowerPC 4xx with software driven TLBs\"; break; case POWERPC_MMU_SOFT_4xx_Z: mmu_model = \"PowerPC 4xx with software driven TLBs \" \"and zones protections\"; break; case POWERPC_MMU_REAL: mmu_model = \"PowerPC real mode only\"; break; case POWERPC_MMU_MPC8xx: mmu_model = \"PowerPC MPC8xx\"; break; case POWERPC_MMU_BOOKE: mmu_model = \"PowerPC BookE\"; break; case POWERPC_MMU_BOOKE206: mmu_model = \"PowerPC BookE 2.06\"; break; case POWERPC_MMU_601: mmu_model = \"PowerPC 601\"; break; #if defined (TARGET_PPC64) case POWERPC_MMU_64B: mmu_model = \"PowerPC 64\"; break; #endif default: mmu_model = \"Unknown or invalid\"; break; switch (env->excp_model) { case POWERPC_EXCP_STD: excp_model = \"PowerPC\"; break; case POWERPC_EXCP_40x: excp_model = \"PowerPC 40x\"; break; case POWERPC_EXCP_601: excp_model = \"PowerPC 601\"; break; case POWERPC_EXCP_602: excp_model = \"PowerPC 602\"; break; case POWERPC_EXCP_603: excp_model = \"PowerPC 603\"; break; case POWERPC_EXCP_603E: excp_model = \"PowerPC 603e\"; break; case POWERPC_EXCP_604: excp_model = \"PowerPC 604\"; break; case POWERPC_EXCP_7x0: excp_model = \"PowerPC 740/750\"; break; case POWERPC_EXCP_7x5: excp_model = \"PowerPC 745/755\"; break; case POWERPC_EXCP_74xx: excp_model = \"PowerPC 74xx\"; break; case POWERPC_EXCP_BOOKE: excp_model = \"PowerPC BookE\"; break; #if defined (TARGET_PPC64) case POWERPC_EXCP_970: excp_model = \"PowerPC 970\"; break; #endif default: excp_model = \"Unknown or invalid\"; break; switch (env->bus_model) { case PPC_FLAGS_INPUT_6xx: bus_model = \"PowerPC 6xx\"; break; case PPC_FLAGS_INPUT_BookE: bus_model = \"PowerPC BookE\"; break; case PPC_FLAGS_INPUT_405: bus_model = \"PowerPC 405\"; break; case PPC_FLAGS_INPUT_401: bus_model = \"PowerPC 401/403\"; break; case PPC_FLAGS_INPUT_RCPU: bus_model = \"RCPU / MPC8xx\"; break; #if defined (TARGET_PPC64) case PPC_FLAGS_INPUT_970: bus_model = \"PowerPC 970\"; break; #endif default: bus_model = \"Unknown or invalid\"; break; printf(\"PowerPC %-12s : PVR %08x MSR %016\" PRIx64 \"\\n\" \" MMU model : %s\\n\", object_class_get_name(OBJECT_CLASS(pcc)), pcc->pvr, pcc->msr_mask, mmu_model); #if !defined(CONFIG_USER_ONLY) if (env->tlb.tlb6) { printf(\" %d %s TLB in %d ways\\n\", env->nb_tlb, env->id_tlbs ? 
\"splitted\" : \"merged\", env->nb_ways); #endif printf(\" Exceptions model : %s\\n\" \" Bus model : %s\\n\", excp_model, bus_model); printf(\" MSR features :\\n\"); if (env->flags & POWERPC_FLAG_SPE) printf(\" signal processing engine enable\" \"\\n\"); else if (env->flags & POWERPC_FLAG_VRE) printf(\" vector processor enable\\n\"); if (env->flags & POWERPC_FLAG_TGPR) printf(\" temporary GPRs\\n\"); else if (env->flags & POWERPC_FLAG_CE) printf(\" critical input enable\\n\"); if (env->flags & POWERPC_FLAG_SE) printf(\" single-step trace mode\\n\"); else if (env->flags & POWERPC_FLAG_DWE) printf(\" debug wait enable\\n\"); else if (env->flags & POWERPC_FLAG_UBLE) printf(\" user BTB lock enable\\n\"); if (env->flags & POWERPC_FLAG_BE) printf(\" branch-step trace mode\\n\"); else if (env->flags & POWERPC_FLAG_DE) printf(\" debug interrupt enable\\n\"); if (env->flags & POWERPC_FLAG_PX) printf(\" inclusive protection\\n\"); else if (env->flags & POWERPC_FLAG_PMM) printf(\" performance monitor mark\\n\"); if (env->flags == POWERPC_FLAG_NONE) printf(\" none\\n\"); printf(\" Time-base/decrementer clock source: %s\\n\", env->flags & POWERPC_FLAG_RTC_CLK ? \"RTC clock\" : \"bus clock\"); dump_ppc_insns(env); dump_ppc_sprs(env); fflush(stdout); #endif"} {"target": 1, "idx": 9443, "func": "static void ram_init(target_phys_addr_t addr, ram_addr_t RAM_size) { DeviceState *dev; SysBusDevice *s; RamDevice *d; /* allocate RAM */ dev = qdev_create(NULL, \"memory\"); s = sysbus_from_qdev(dev); d = FROM_SYSBUS(RamDevice, s); d->size = RAM_size; qdev_init(dev); sysbus_mmio_map(s, 0, addr); }"} {"target": 1, "idx": 9445, "func": "QemuOpts *qemu_chr_parse_compat(const char *label, const char *filename) { char host[65], port[33], width[8], height[8]; int pos; const char *p; QemuOpts *opts; Error *local_err = NULL; opts = qemu_opts_create(qemu_find_opts(\"chardev\"), label, 1, &local_err); if (error_is_set(&local_err)) { qerror_report_err(local_err); error_free(local_err); return NULL; } if (strstart(filename, \"mon:\", &p)) { filename = p; qemu_opt_set(opts, \"mux\", \"on\"); if (strcmp(filename, \"stdio\") == 0) { /* Monitor is muxed to stdio: do not exit on Ctrl+C by default * but pass it to the guest. Handle this only for compat syntax, * for -chardev syntax we have special option for this. * This is what -nographic did, redirecting+muxing serial+monitor * to stdio causing Ctrl+C to be passed to guest. 
*/ qemu_opt_set(opts, \"signal\", \"off\"); } } if (strcmp(filename, \"null\") == 0 || strcmp(filename, \"pty\") == 0 || strcmp(filename, \"msmouse\") == 0 || strcmp(filename, \"braille\") == 0 || strcmp(filename, \"stdio\") == 0) { qemu_opt_set(opts, \"backend\", filename); return opts; } if (strstart(filename, \"vc\", &p)) { qemu_opt_set(opts, \"backend\", \"vc\"); if (*p == ':') { if (sscanf(p+1, \"%8[0-9]x%8[0-9]\", width, height) == 2) { /* pixels */ qemu_opt_set(opts, \"width\", width); qemu_opt_set(opts, \"height\", height); } else if (sscanf(p+1, \"%8[0-9]Cx%8[0-9]C\", width, height) == 2) { /* chars */ qemu_opt_set(opts, \"cols\", width); qemu_opt_set(opts, \"rows\", height); } else { goto fail; } } return opts; } if (strcmp(filename, \"con:\") == 0) { qemu_opt_set(opts, \"backend\", \"console\"); return opts; } if (strstart(filename, \"COM\", NULL)) { qemu_opt_set(opts, \"backend\", \"serial\"); qemu_opt_set(opts, \"path\", filename); return opts; } if (strstart(filename, \"file:\", &p)) { qemu_opt_set(opts, \"backend\", \"file\"); qemu_opt_set(opts, \"path\", p); return opts; } if (strstart(filename, \"pipe:\", &p)) { qemu_opt_set(opts, \"backend\", \"pipe\"); qemu_opt_set(opts, \"path\", p); return opts; } if (strstart(filename, \"tcp:\", &p) || strstart(filename, \"telnet:\", &p)) { if (sscanf(p, \"%64[^:]:%32[^,]%n\", host, port, &pos) < 2) { host[0] = 0; if (sscanf(p, \":%32[^,]%n\", port, &pos) < 1) goto fail; } qemu_opt_set(opts, \"backend\", \"socket\"); qemu_opt_set(opts, \"host\", host); qemu_opt_set(opts, \"port\", port); if (p[pos] == ',') { if (qemu_opts_do_parse(opts, p+pos+1, NULL) != 0) goto fail; } if (strstart(filename, \"telnet:\", &p)) qemu_opt_set(opts, \"telnet\", \"on\"); return opts; } if (strstart(filename, \"udp:\", &p)) { qemu_opt_set(opts, \"backend\", \"udp\"); if (sscanf(p, \"%64[^:]:%32[^@,]%n\", host, port, &pos) < 2) { host[0] = 0; if (sscanf(p, \":%32[^@,]%n\", port, &pos) < 1) { goto fail; } } qemu_opt_set(opts, \"host\", host); qemu_opt_set(opts, \"port\", port); if (p[pos] == '@') { p += pos + 1; if (sscanf(p, \"%64[^:]:%32[^,]%n\", host, port, &pos) < 2) { host[0] = 0; if (sscanf(p, \":%32[^,]%n\", port, &pos) < 1) { goto fail; } } qemu_opt_set(opts, \"localaddr\", host); qemu_opt_set(opts, \"localport\", port); } return opts; } if (strstart(filename, \"unix:\", &p)) { qemu_opt_set(opts, \"backend\", \"socket\"); if (qemu_opts_do_parse(opts, p, \"path\") != 0) goto fail; return opts; } if (strstart(filename, \"/dev/parport\", NULL) || strstart(filename, \"/dev/ppi\", NULL)) { qemu_opt_set(opts, \"backend\", \"parport\"); qemu_opt_set(opts, \"path\", filename); return opts; } if (strstart(filename, \"/dev/\", NULL)) { qemu_opt_set(opts, \"backend\", \"tty\"); qemu_opt_set(opts, \"path\", filename); return opts; } fail: qemu_opts_del(opts); return NULL; }"} {"target": 1, "idx": 9457, "func": "POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); dc->fw_name = \"PowerPC,POWER8\"; dc->desc = \"POWER8\"; pcc->pvr = CPU_POWERPC_POWER8_BASE; pcc->pvr_mask = CPU_POWERPC_POWER8_MASK; pcc->init_proc = init_proc_POWER7; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_ALTIVEC | PPC_SEGMENT_64B | 
PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD; pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206; pcc->msr_mask = 0x800000000284FF36ULL; pcc->mmu_model = POWERPC_MMU_2_06; #if defined(CONFIG_SOFTMMU) pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault; #endif pcc->excp_model = POWERPC_EXCP_POWER7; pcc->bus_model = PPC_FLAGS_INPUT_POWER7; pcc->bfd_mach = bfd_mach_ppc64; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | POWERPC_FLAG_VSX; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; }"} {"target": 0, "idx": 9482, "func": "static int parse_object_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size) { PGSSubContext *ctx = avctx->priv_data; PGSSubObject *object; uint8_t sequence_desc; unsigned int rle_bitmap_len, width, height; int id; if (buf_size <= 4) return AVERROR_INVALIDDATA; buf_size -= 4; id = bytestream_get_be16(&buf); object = find_object(id, &ctx->objects); if (!object) { if (ctx->objects.count >= MAX_EPOCH_OBJECTS) { av_log(avctx, AV_LOG_ERROR, \"Too many objects in epoch\\n\"); return AVERROR_INVALIDDATA; } object = &ctx->objects.object[ctx->objects.count++]; object->id = id; } /* skip object version number */ buf += 1; /* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */ sequence_desc = bytestream_get_byte(&buf); if (!(sequence_desc & 0x80)) { /* Additional RLE data */ if (buf_size > object->rle_remaining_len) return AVERROR_INVALIDDATA; memcpy(object->rle + object->rle_data_len, buf, buf_size); object->rle_data_len += buf_size; object->rle_remaining_len -= buf_size; return 0; } if (buf_size <= 7) return AVERROR_INVALIDDATA; buf_size -= 7; /* Decode rle bitmap length, stored size includes width/height data */ rle_bitmap_len = bytestream_get_be24(&buf) - 2*2; if (buf_size > rle_bitmap_len) { av_log(avctx, AV_LOG_ERROR, \"Buffer dimension %d larger than the expected RLE data %d\\n\", buf_size, rle_bitmap_len); return AVERROR_INVALIDDATA; } /* Get bitmap dimensions from data */ width = bytestream_get_be16(&buf); height = bytestream_get_be16(&buf); /* Make sure the bitmap is not too large */ if (avctx->width < width || avctx->height < height || !width || !height) { av_log(avctx, AV_LOG_ERROR, \"Bitmap dimensions (%dx%d) invalid.\\n\", width, height); return AVERROR_INVALIDDATA; } object->w = width; object->h = height; av_fast_padded_malloc(&object->rle, &object->rle_buffer_size, rle_bitmap_len); if (!object->rle) return AVERROR(ENOMEM); memcpy(object->rle, buf, buf_size); object->rle_data_len = buf_size; object->rle_remaining_len = rle_bitmap_len - buf_size; return 0; }"} {"target": 1, "idx": 9508, "func": "static int qemu_rdma_register_and_get_keys(RDMAContext *rdma, RDMALocalBlock *block, uintptr_t host_addr, uint32_t *lkey, uint32_t *rkey, int chunk, uint8_t *chunk_start, uint8_t *chunk_end) { if (block->mr) { if (lkey) { *lkey = block->mr->lkey; } if (rkey) { *rkey = block->mr->rkey; } return 0; } /* allocate memory to store chunk MRs */ if (!block->pmr) { block->pmr = g_malloc0(block->nb_chunks * sizeof(struct ibv_mr *)); } /* * If 'rkey', then we're the destination, so grant access to the source. * * If 'lkey', then we're the source VM, so grant access only to ourselves. 
*/ if (!block->pmr[chunk]) { uint64_t len = chunk_end - chunk_start; trace_qemu_rdma_register_and_get_keys(len, chunk_start); block->pmr[chunk] = ibv_reg_mr(rdma->pd, chunk_start, len, (rkey ? (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE) : 0)); if (!block->pmr[chunk]) { perror(\"Failed to register chunk!\"); fprintf(stderr, \"Chunk details: block: %d chunk index %d\" \" start %\" PRIuPTR \" end %\" PRIuPTR \" host %\" PRIuPTR \" local %\" PRIuPTR \" registrations: %d\\n\", block->index, chunk, (uintptr_t)chunk_start, (uintptr_t)chunk_end, host_addr, (uintptr_t)block->local_host_addr, rdma->total_registrations); return -1; } rdma->total_registrations++; } if (lkey) { *lkey = block->pmr[chunk]->lkey; } if (rkey) { *rkey = block->pmr[chunk]->rkey; } return 0; }"} {"target": 1, "idx": 9509, "func": "static void vga_draw_graphic(VGAState *s, int full_update) { int y1, y, update, page_min, page_max, linesize, y_start, double_scan, mask, depth; int width, height, shift_control, line_offset, page0, page1, bwidth, bits; int disp_width, multi_scan, multi_run; uint8_t *d; uint32_t v, addr1, addr; vga_draw_line_func *vga_draw_line; full_update |= update_basic_params(s); if (!full_update) vga_sync_dirty_bitmap(s); s->get_resolution(s, &width, &height); disp_width = width; shift_control = (s->gr[0x05] >> 5) & 3; double_scan = (s->cr[0x09] >> 7); if (shift_control != 1) { multi_scan = (((s->cr[0x09] & 0x1f) + 1) << double_scan) - 1; } else { /* in CGA modes, multi_scan is ignored */ /* XXX: is it correct ? */ multi_scan = double_scan; } multi_run = multi_scan; if (shift_control != s->shift_control || double_scan != s->double_scan) { full_update = 1; s->shift_control = shift_control; s->double_scan = double_scan; } if (shift_control == 0) { full_update |= update_palette16(s); if (s->sr[0x01] & 8) { v = VGA_DRAW_LINE4D2; disp_width <<= 1; } else { v = VGA_DRAW_LINE4; } bits = 4; } else if (shift_control == 1) { full_update |= update_palette16(s); if (s->sr[0x01] & 8) { v = VGA_DRAW_LINE2D2; disp_width <<= 1; } else { v = VGA_DRAW_LINE2; } bits = 4; } else { switch(s->get_bpp(s)) { default: case 0: full_update |= update_palette256(s); v = VGA_DRAW_LINE8D2; bits = 4; break; case 8: full_update |= update_palette256(s); v = VGA_DRAW_LINE8; bits = 8; break; case 15: v = VGA_DRAW_LINE15; bits = 16; break; case 16: v = VGA_DRAW_LINE16; bits = 16; break; case 24: v = VGA_DRAW_LINE24; bits = 24; break; case 32: v = VGA_DRAW_LINE32; bits = 32; break; } } vga_draw_line = vga_draw_line_table[v * NB_DEPTHS + get_depth_index(s->ds)]; depth = s->get_bpp(s); if (s->line_offset != s->last_line_offset || disp_width != s->last_width || height != s->last_height || s->last_depth != depth) { if (depth == 16 || depth == 32) { if (is_graphic_console()) { qemu_free_displaysurface(s->ds->surface); s->ds->surface = qemu_create_displaysurface_from(disp_width, height, depth, s->line_offset, s->vram_ptr + (s->start_addr * 4)); dpy_resize(s->ds); } else { qemu_console_resize(s->ds, disp_width, height); } } else { qemu_console_resize(s->ds, disp_width, height); } s->last_scr_width = disp_width; s->last_scr_height = height; s->last_width = disp_width; s->last_height = height; s->last_line_offset = s->line_offset; s->last_depth = depth; full_update = 1; } else if (is_graphic_console() && is_buffer_shared(s->ds->surface) && (full_update || s->ds->surface->data != s->vram_ptr + (s->start_addr * 4))) { s->ds->surface->data = s->vram_ptr + (s->start_addr * 4); dpy_setdata(s->ds); } s->rgb_to_pixel = 
rgb_to_pixel_dup_table[get_depth_index(s->ds)]; if (!is_buffer_shared(s->ds->surface) && s->cursor_invalidate) s->cursor_invalidate(s); line_offset = s->line_offset; #if 0 printf(\"w=%d h=%d v=%d line_offset=%d cr[0x09]=0x%02x cr[0x17]=0x%02x linecmp=%d sr[0x01]=0x%02x\\n\", width, height, v, line_offset, s->cr[9], s->cr[0x17], s->line_compare, s->sr[0x01]); #endif addr1 = (s->start_addr * 4); bwidth = (width * bits + 7) / 8; y_start = -1; page_min = 0x7fffffff; page_max = -1; d = ds_get_data(s->ds); linesize = ds_get_linesize(s->ds); y1 = 0; for(y = 0; y < height; y++) { addr = addr1; if (!(s->cr[0x17] & 1)) { int shift; /* CGA compatibility handling */ shift = 14 + ((s->cr[0x17] >> 6) & 1); addr = (addr & ~(1 << shift)) | ((y1 & 1) << shift); } if (!(s->cr[0x17] & 2)) { addr = (addr & ~0x8000) | ((y1 & 2) << 14); } page0 = s->vram_offset + (addr & TARGET_PAGE_MASK); page1 = s->vram_offset + ((addr + bwidth - 1) & TARGET_PAGE_MASK); update = full_update | cpu_physical_memory_get_dirty(page0, VGA_DIRTY_FLAG) | cpu_physical_memory_get_dirty(page1, VGA_DIRTY_FLAG); if ((page1 - page0) > TARGET_PAGE_SIZE) { /* if wide line, can use another page */ update |= cpu_physical_memory_get_dirty(page0 + TARGET_PAGE_SIZE, VGA_DIRTY_FLAG); } /* explicit invalidation for the hardware cursor */ update |= (s->invalidated_y_table[y >> 5] >> (y & 0x1f)) & 1; if (update) { if (y_start < 0) y_start = y; if (page0 < page_min) page_min = page0; if (page1 > page_max) page_max = page1; if (!(is_buffer_shared(s->ds->surface))) { vga_draw_line(s, d, s->vram_ptr + addr, width); if (s->cursor_draw_line) s->cursor_draw_line(s, d, y); } } else { if (y_start >= 0) { /* flush to display */ dpy_update(s->ds, 0, y_start, disp_width, y - y_start); y_start = -1; } } if (!multi_run) { mask = (s->cr[0x17] & 3) ^ 3; if ((y1 & mask) == mask) addr1 += line_offset; y1++; multi_run = multi_scan; } else { multi_run--; } /* line compare acts on the displayed lines */ if (y == s->line_compare) addr1 = 0; d += linesize; } if (y_start >= 0) { /* flush to display */ dpy_update(s->ds, 0, y_start, disp_width, y - y_start); } /* reset modified pages */ if (page_max != -1) { cpu_physical_memory_reset_dirty(page_min, page_max + TARGET_PAGE_SIZE, VGA_DIRTY_FLAG); } memset(s->invalidated_y_table, 0, ((height + 31) >> 5) * 4); }"} {"target": 1, "idx": 9510, "func": "static int qemu_signalfd_compat(const sigset_t *mask) { pthread_attr_t attr; pthread_t tid; struct sigfd_compat_info *info; int fds[2]; info = malloc(sizeof(*info)); if (info == NULL) { errno = ENOMEM; return -1; } if (pipe(fds) == -1) { free(info); return -1; } memcpy(&info->mask, mask, sizeof(*mask)); info->fd = fds[1]; pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); pthread_create(&tid, &attr, sigwait_compat, info); pthread_attr_destroy(&attr); return fds[0]; }"} {"target": 1, "idx": 9516, "func": "static int protocol_client_auth_sasl_start_len(VncState *vs, uint8_t *data, size_t len) { uint32_t startlen = read_u32(data, 0); VNC_DEBUG(\"Got client start len %d\\n\", startlen); if (startlen > SASL_DATA_MAX_LEN) { VNC_DEBUG(\"Too much SASL data %d\\n\", startlen); vnc_client_error(vs); return -1; } if (startlen == 0) return protocol_client_auth_sasl_start(vs, NULL, 0); vnc_read_when(vs, protocol_client_auth_sasl_start, startlen); return 0; }"} {"target": 1, "idx": 9528, "func": "static inline void mv_pred_direct(AVSContext *h, cavs_vector *pmv_fw, cavs_vector *col_mv) { cavs_vector *pmv_bw = pmv_fw + MV_BWD_OFFS; int den = 
h->direct_den[col_mv->ref]; int m = FF_SIGNBIT(col_mv->x); pmv_fw->dist = h->dist[1]; pmv_bw->dist = h->dist[0]; pmv_fw->ref = 1; pmv_bw->ref = 0; /* scale the co-located motion vector according to its temporal span */ pmv_fw->x = (((den + (den * col_mv->x * pmv_fw->dist ^ m) - m - 1) >> 14) ^ m) - m; pmv_bw->x = m - (((den + (den * col_mv->x * pmv_bw->dist ^ m) - m - 1) >> 14) ^ m); m = FF_SIGNBIT(col_mv->y); pmv_fw->y = (((den + (den * col_mv->y * pmv_fw->dist ^ m) - m - 1) >> 14) ^ m) - m; pmv_bw->y = m - (((den + (den * col_mv->y * pmv_bw->dist ^ m) - m - 1) >> 14) ^ m); }"} {"target": 0, "idx": 9533, "func": "static inline void h264_deblock_q1(register vector unsigned char p0, register vector unsigned char p1, register vector unsigned char p2, register vector unsigned char q0, register vector unsigned char tc0) { register vector unsigned char average = vec_avg(p0, q0); register vector unsigned char temp; register vector unsigned char uncliped; register vector unsigned char ones; register vector unsigned char max; register vector unsigned char min; temp = vec_xor(average, p2); average = vec_avg(average, p2); /*avg(p2, avg(p0, q0)) */ ones = vec_splat_u8(1); temp = vec_and(temp, ones); /*(p2^avg(p0, q0)) & 1 */ uncliped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */ max = vec_adds(p1, tc0); min = vec_subs(p1, tc0); p1 = vec_max(min, uncliped); p1 = vec_min(max, p1); }"} {"target": 0, "idx": 9546, "func": "static inline void gen_branch_a(DisasContext *dc, target_ulong pc1, target_ulong pc2, TCGv r_cond) { int l1; l1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1); gen_goto_tb(dc, 0, pc2, pc1); gen_set_label(l1); gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8); }"} {"target": 0, "idx": 9548, "func": "uint64_t qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset, int n_start, int n_end, int *num, QCowL2Meta *m) { BDRVQcowState *s = bs->opaque; int l2_index, ret; uint64_t l2_offset, *l2_table, cluster_offset; int nb_clusters, i = 0; QCowL2Meta *old_alloc; ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index); if (ret == 0) return 0; nb_clusters = size_to_clusters(s, n_end << 9); nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); cluster_offset = be64_to_cpu(l2_table[l2_index]); /* We keep all QCOW_OFLAG_COPIED clusters */ if (cluster_offset & QCOW_OFLAG_COPIED) { nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size, &l2_table[l2_index], 0, 0); cluster_offset &= ~QCOW_OFLAG_COPIED; m->nb_clusters = 0; goto out; } /* for the moment, multiple compressed clusters are not managed */ if (cluster_offset & QCOW_OFLAG_COMPRESSED) nb_clusters = 1; /* how many available clusters ? */ while (i < nb_clusters) { i += count_contiguous_clusters(nb_clusters - i, s->cluster_size, &l2_table[l2_index], i, 0); if(be64_to_cpu(l2_table[l2_index + i])) break; i += count_contiguous_free_clusters(nb_clusters - i, &l2_table[l2_index + i]); cluster_offset = be64_to_cpu(l2_table[l2_index + i]); if ((cluster_offset & QCOW_OFLAG_COPIED) || (cluster_offset & QCOW_OFLAG_COMPRESSED)) break; } nb_clusters = i; /* * Check if there already is an AIO write request in flight which allocates * the same cluster. In this case we need to wait until the previous * request has completed and updated the L2 table accordingly. 
*/ LIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) { uint64_t end_offset = offset + nb_clusters * s->cluster_size; uint64_t old_offset = old_alloc->offset; uint64_t old_end_offset = old_alloc->offset + old_alloc->nb_clusters * s->cluster_size; if (end_offset < old_offset || offset > old_end_offset) { /* No intersection */ } else { if (offset < old_offset) { /* Stop at the start of a running allocation */ nb_clusters = (old_offset - offset) >> s->cluster_bits; } else { nb_clusters = 0; } if (nb_clusters == 0) { /* Set dependency and wait for a callback */ m->depends_on = old_alloc; m->nb_clusters = 0; *num = 0; return 0; } } } if (!nb_clusters) { abort(); } LIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight); /* allocate a new cluster */ cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size); /* save info needed for meta data update */ m->offset = offset; m->n_start = n_start; m->nb_clusters = nb_clusters; out: m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end); *num = m->nb_available - n_start; return cluster_offset; }"} {"target": 0, "idx": 9574, "func": "static int tx_consume(Rocker *r, DescInfo *info) { PCIDevice *dev = PCI_DEVICE(r); char *buf = desc_get_buf(info, true); RockerTlv *tlv_frag; RockerTlv *tlvs[ROCKER_TLV_TX_MAX + 1]; struct iovec iov[ROCKER_TX_FRAGS_MAX] = { { 0, }, }; uint32_t pport; uint32_t port; uint16_t tx_offload = ROCKER_TX_OFFLOAD_NONE; uint16_t tx_l3_csum_off = 0; uint16_t tx_tso_mss = 0; uint16_t tx_tso_hdr_len = 0; int iovcnt = 0; int err = ROCKER_OK; int rem; int i; if (!buf) { return -ROCKER_ENXIO; } rocker_tlv_parse(tlvs, ROCKER_TLV_TX_MAX, buf, desc_tlv_size(info)); if (!tlvs[ROCKER_TLV_TX_FRAGS]) { return -ROCKER_EINVAL; } pport = rocker_get_pport_by_tx_ring(r, desc_get_ring(info)); if (!fp_port_from_pport(pport, &port)) { return -ROCKER_EINVAL; } if (tlvs[ROCKER_TLV_TX_OFFLOAD]) { tx_offload = rocker_tlv_get_u8(tlvs[ROCKER_TLV_TX_OFFLOAD]); } switch (tx_offload) { case ROCKER_TX_OFFLOAD_L3_CSUM: if (!tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) { return -ROCKER_EINVAL; } break; case ROCKER_TX_OFFLOAD_TSO: if (!tlvs[ROCKER_TLV_TX_TSO_MSS] || !tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) { return -ROCKER_EINVAL; } break; } if (tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) { tx_l3_csum_off = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]); } if (tlvs[ROCKER_TLV_TX_TSO_MSS]) { tx_tso_mss = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_MSS]); } if (tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) { tx_tso_hdr_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]); } rocker_tlv_for_each_nested(tlv_frag, tlvs[ROCKER_TLV_TX_FRAGS], rem) { hwaddr frag_addr; uint16_t frag_len; if (rocker_tlv_type(tlv_frag) != ROCKER_TLV_TX_FRAG) { err = -ROCKER_EINVAL; goto err_bad_attr; } rocker_tlv_parse_nested(tlvs, ROCKER_TLV_TX_FRAG_ATTR_MAX, tlv_frag); if (!tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || !tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) { err = -ROCKER_EINVAL; goto err_bad_attr; } frag_addr = rocker_tlv_get_le64(tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); frag_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); iov[iovcnt].iov_len = frag_len; iov[iovcnt].iov_base = g_malloc(frag_len); if (!iov[iovcnt].iov_base) { err = -ROCKER_ENOMEM; goto err_no_mem; } if (pci_dma_read(dev, frag_addr, iov[iovcnt].iov_base, iov[iovcnt].iov_len)) { err = -ROCKER_ENXIO; goto err_bad_io; } if (++iovcnt > ROCKER_TX_FRAGS_MAX) { goto err_too_many_frags; } } if (iovcnt) { /* XXX perform Tx offloads */ /* XXX silence compiler for now */ tx_l3_csum_off += tx_tso_mss = tx_tso_hdr_len = 0; } err = 
fp_port_eg(r->fp_port[port], iov, iovcnt); err_too_many_frags: err_bad_io: err_no_mem: err_bad_attr: for (i = 0; i < ROCKER_TX_FRAGS_MAX; i++) { if (iov[i].iov_base) { g_free(iov[i].iov_base); } } return err; }"} {"target": 0, "idx": 9576, "func": "AVFilterFormats *avfilter_all_colorspaces(void) { return avfilter_make_format_list(35, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_YUYV422, PIX_FMT_UYVY422, PIX_FMT_UYYVYY411, PIX_FMT_YUVJ444P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ420P, PIX_FMT_YUV440P, PIX_FMT_YUVJ440P, PIX_FMT_RGB32, PIX_FMT_BGR32, PIX_FMT_RGB32_1, PIX_FMT_BGR32_1, PIX_FMT_RGB24, PIX_FMT_BGR24, PIX_FMT_RGB565, PIX_FMT_BGR565, PIX_FMT_RGB555, PIX_FMT_BGR555, PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE,PIX_FMT_BGR4_BYTE, PIX_FMT_GRAY16BE, PIX_FMT_GRAY16LE, PIX_FMT_GRAY8, PIX_FMT_PAL8, PIX_FMT_MONOWHITE,PIX_FMT_MONOBLACK PIX_FMT_NV12, PIX_FMT_NV21); }"} {"target": 0, "idx": 9584, "func": "MemoryRegion *escc_init(target_phys_addr_t base, qemu_irq irqA, qemu_irq irqB, CharDriverState *chrA, CharDriverState *chrB, int clock, int it_shift) { DeviceState *dev; SysBusDevice *s; SerialState *d; dev = qdev_create(NULL, \"escc\"); qdev_prop_set_uint32(dev, \"disabled\", 0); qdev_prop_set_uint32(dev, \"frequency\", clock); qdev_prop_set_uint32(dev, \"it_shift\", it_shift); qdev_prop_set_chr(dev, \"chrB\", chrB); qdev_prop_set_chr(dev, \"chrA\", chrA); qdev_prop_set_uint32(dev, \"chnBtype\", ser); qdev_prop_set_uint32(dev, \"chnAtype\", ser); qdev_init_nofail(dev); s = sysbus_from_qdev(dev); sysbus_connect_irq(s, 0, irqB); sysbus_connect_irq(s, 1, irqA); if (base) { sysbus_mmio_map(s, 0, base); } d = FROM_SYSBUS(SerialState, s); return &d->mmio; }"} {"target": 1, "idx": 9598, "func": "static void free_geotags(TiffContext *const s) { int i; for (i = 0; i < s->geotag_count; i++) { if (s->geotags[i].val) av_freep(&s->geotags[i].val); } av_freep(&s->geotags); }"} {"target": 1, "idx": 9607, "func": "void ff_bink_idct_c(DCTELEM *block) { int i; DCTELEM temp[64]; for (i = 0; i < 8; i++) bink_idct_col(&temp[i], &block[i]); for (i = 0; i < 8; i++) { IDCT_ROW( (&block[8*i]), (&temp[8*i]) ); } }"} {"target": 1, "idx": 9616, "func": "PCIBus *pci_pmac_init(qemu_irq *pic) { DeviceState *dev; SysBusDevice *s; UNINState *d; /* Use values found on a real PowerMac */ /* Uninorth main bus */ dev = qdev_create(NULL, \"Uni-north main\"); qdev_init_nofail(dev); s = sysbus_from_qdev(dev); d = FROM_SYSBUS(UNINState, s); d->host_state.bus = pci_register_bus(&d->busdev.qdev, \"pci\", pci_unin_set_irq, pci_unin_map_irq, pic, 11 << 3, 4); pci_create_simple(d->host_state.bus, 11 << 3, \"Uni-north main\"); sysbus_mmio_map(s, 0, 0xf2800000); sysbus_mmio_map(s, 1, 0xf2c00000); /* DEC 21154 bridge */ #if 0 /* XXX: not activated as PPC BIOS doesn't handle multiple buses properly */ pci_create_simple(d->host_state.bus, 12 << 3, \"DEC 21154\"); #endif /* Uninorth AGP bus */ pci_create_simple(d->host_state.bus, 13 << 3, \"Uni-north AGP\"); /* Uninorth internal bus */ #if 0 /* XXX: not needed for now */ pci_create_simple(d->host_state.bus, 14 << 3, \"Uni-north internal\"); #endif return d->host_state.bus; }"} {"target": 0, "idx": 9619, "func": "static void json_message_process_token(JSONLexer *lexer, GString *input, JSONTokenType type, int x, int y) { JSONMessageParser *parser = container_of(lexer, JSONMessageParser, lexer); QDict *dict; switch (type) { case JSON_LCURLY: parser->brace_count++; break; case JSON_RCURLY: parser->brace_count--; break; case JSON_LSQUARE: 
parser->bracket_count++; break; case JSON_RSQUARE: parser->bracket_count--; break; default: break; } dict = qdict_new(); qdict_put(dict, \"type\", qint_from_int(type)); qdict_put(dict, \"token\", qstring_from_str(input->str)); qdict_put(dict, \"x\", qint_from_int(x)); qdict_put(dict, \"y\", qint_from_int(y)); parser->token_size += input->len; g_queue_push_tail(parser->tokens, dict); if (type == JSON_ERROR) { goto out_emit_bad; } else if (parser->brace_count < 0 || parser->bracket_count < 0 || (parser->brace_count == 0 && parser->bracket_count == 0)) { goto out_emit; } else if (parser->token_size > MAX_TOKEN_SIZE || parser->bracket_count + parser->brace_count > MAX_NESTING) { /* Security consideration, we limit total memory allocated per object * and the maximum recursion depth that a message can force. */ goto out_emit_bad; } return; out_emit_bad: /* * Clear out token list and tell the parser to emit an error * indication by passing it a NULL list */ json_message_free_tokens(parser); out_emit: /* send current list of tokens to parser and reset tokenizer */ parser->brace_count = 0; parser->bracket_count = 0; /* parser->emit takes ownership of parser->tokens. */ parser->emit(parser, parser->tokens); parser->tokens = g_queue_new(); parser->token_size = 0; }"} {"target": 0, "idx": 9629, "func": "static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env, target_ulong eaddr, uint32_t pid) { #if !defined(FLUSH_ALL_TLBS) CPUState *cs = CPU(ppc_env_get_cpu(env)); ppcemb_tlb_t *tlb; hwaddr raddr; target_ulong page, end; int i; for (i = 0; i < env->nb_tlb; i++) { tlb = &env->tlb.tlbe[i]; if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) { end = tlb->EPN + tlb->size; for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { tlb_flush_page(cs, page); } tlb->prot &= ~PAGE_VALID; break; } } #else ppc4xx_tlb_invalidate_all(env); #endif }"} {"target": 0, "idx": 9630, "func": "void pci_bridge_initfn(PCIDevice *dev, const char *typename) { PCIBus *parent = dev->bus; PCIBridge *br = PCI_BRIDGE(dev); PCIBus *sec_bus = &br->sec_bus; pci_word_test_and_set_mask(dev->config + PCI_STATUS, PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK); /* * TODO: We implement VGA Enable in the Bridge Control Register * therefore per the PCI to PCI bridge spec we must also implement * VGA Palette Snooping. When done, set this bit writable: * * pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, * PCI_COMMAND_VGA_PALETTE); */ pci_config_set_class(dev->config, PCI_CLASS_BRIDGE_PCI); dev->config[PCI_HEADER_TYPE] = (dev->config[PCI_HEADER_TYPE] & PCI_HEADER_TYPE_MULTI_FUNCTION) | PCI_HEADER_TYPE_BRIDGE; pci_set_word(dev->config + PCI_SEC_STATUS, PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK); /* * If we don't specify the name, the bus will be addressed as .0, where * id is the device id. * Since PCI Bridge devices have a single bus each, we don't need the index: * let users address the bus using the device name. */ if (!br->bus_name && dev->qdev.id && *dev->qdev.id) { br->bus_name = dev->qdev.id; } qbus_create_inplace(sec_bus, sizeof(br->sec_bus), typename, DEVICE(dev), br->bus_name); sec_bus->parent_dev = dev; sec_bus->map_irq = br->map_irq ? 
br->map_irq : pci_swizzle_map_irq_fn; sec_bus->address_space_mem = &br->address_space_mem; memory_region_init(&br->address_space_mem, OBJECT(br), \"pci_bridge_pci\", UINT64_MAX); sec_bus->address_space_io = &br->address_space_io; memory_region_init(&br->address_space_io, OBJECT(br), \"pci_bridge_io\", 65536); br->windows = pci_bridge_region_init(br); QLIST_INIT(&sec_bus->child); QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling); }"} {"target": 0, "idx": 9639, "func": "void qemu_pixman_linebuf_fill(pixman_image_t *linebuf, pixman_image_t *fb, int width, int y) { pixman_image_composite(PIXMAN_OP_SRC, fb, NULL, linebuf, 0, y, 0, 0, 0, 0, width, 1); }"} {"target": 0, "idx": 9643, "func": "static inline abi_long target_to_host_cmsg(struct msghdr *msgh, struct target_msghdr *target_msgh) { struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); abi_long msg_controllen; abi_ulong target_cmsg_addr; struct target_cmsghdr *target_cmsg; socklen_t space = 0; msg_controllen = tswapal(target_msgh->msg_controllen); if (msg_controllen < sizeof (struct target_cmsghdr)) goto the_end; target_cmsg_addr = tswapal(target_msgh->msg_control); target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1); if (!target_cmsg) return -TARGET_EFAULT; while (cmsg && target_cmsg) { void *data = CMSG_DATA(cmsg); void *target_data = TARGET_CMSG_DATA(target_cmsg); int len = tswapal(target_cmsg->cmsg_len) - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr)); space += CMSG_SPACE(len); if (space > msgh->msg_controllen) { space -= CMSG_SPACE(len); gemu_log(\"Host cmsg overflow\\n\"); break; } if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { cmsg->cmsg_level = SOL_SOCKET; } else { cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); } cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); cmsg->cmsg_len = CMSG_LEN(len); if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { gemu_log(\"Unsupported ancillary data: %d/%d\\n\", cmsg->cmsg_level, cmsg->cmsg_type); memcpy(data, target_data, len); } else { int *fd = (int *)data; int *target_fd = (int *)target_data; int i, numfds = len / sizeof(int); for (i = 0; i < numfds; i++) fd[i] = tswap32(target_fd[i]); } cmsg = CMSG_NXTHDR(msgh, cmsg); target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); } unlock_user(target_cmsg, target_cmsg_addr, 0); the_end: msgh->msg_controllen = space; return 0; }"} {"target": 0, "idx": 9647, "func": "static bool ga_open_pidfile(const char *pidfile) { int pidfd; char pidstr[32]; pidfd = open(pidfile, O_CREAT|O_WRONLY, S_IRUSR|S_IWUSR); if (pidfd == -1 || lockf(pidfd, F_TLOCK, 0)) { g_critical(\"Cannot lock pid file, %s\", strerror(errno)); if (pidfd != -1) { close(pidfd); } return false; } if (ftruncate(pidfd, 0) || lseek(pidfd, 0, SEEK_SET)) { g_critical(\"Failed to truncate pid file\"); goto fail; } sprintf(pidstr, \"%d\", getpid()); if (write(pidfd, pidstr, strlen(pidstr)) != strlen(pidstr)) { g_critical(\"Failed to write pid file\"); goto fail; } return true; fail: unlink(pidfile); return false; }"} {"target": 1, "idx": 9650, "func": "static void handle_pending_signal(CPUArchState *cpu_env, int sig) { CPUState *cpu = ENV_GET_CPU(cpu_env); abi_ulong handler; sigset_t set, old_set; target_sigset_t target_old_set; struct target_sigaction *sa; struct sigqueue *q; TaskState *ts = cpu->opaque; struct emulated_sigtable *k = &ts->sigtab[sig - 1]; trace_user_handle_signal(cpu_env, sig); /* dequeue signal */ q = k->first; k->first = q->next; if (!k->first) k->pending = 0; sig = gdb_handlesig(cpu, sig); if (!sig) { sa = NULL; handler 
= TARGET_SIG_IGN; } else { sa = &sigact_table[sig - 1]; handler = sa->_sa_handler; } if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) { /* Guest has blocked SIGSEGV but we got one anyway. Assume this * is a forced SIGSEGV (ie one the kernel handles via force_sig_info * because it got a real MMU fault), and treat as if default handler. */ handler = TARGET_SIG_DFL; } if (handler == TARGET_SIG_DFL) { /* default handler : ignore some signal. The other are job control or fatal */ if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) { kill(getpid(),SIGSTOP); } else if (sig != TARGET_SIGCHLD && sig != TARGET_SIGURG && sig != TARGET_SIGWINCH && sig != TARGET_SIGCONT) { force_sig(sig); } } else if (handler == TARGET_SIG_IGN) { /* ignore sig */ } else if (handler == TARGET_SIG_ERR) { force_sig(sig); } else { /* compute the blocked signals during the handler execution */ target_to_host_sigset(&set, &sa->sa_mask); /* SA_NODEFER indicates that the current signal should not be blocked during the handler */ if (!(sa->sa_flags & TARGET_SA_NODEFER)) sigaddset(&set, target_to_host_signal(sig)); /* block signals in the handler using Linux */ do_sigprocmask(SIG_BLOCK, &set, &old_set); /* save the previous blocked signal state to restore it at the end of the signal execution (see do_sigreturn) */ host_to_target_sigset_internal(&target_old_set, &old_set); /* if the CPU is in VM86 mode, we restore the 32 bit values */ #if defined(TARGET_I386) && !defined(TARGET_X86_64) { CPUX86State *env = cpu_env; if (env->eflags & VM_MASK) save_v86_state(env); } #endif /* prepare the stack frame of the virtual CPU */ #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \\ || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) /* These targets do not have traditional signals. 
*/ setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env); #else if (sa->sa_flags & TARGET_SA_SIGINFO) setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env); else setup_frame(sig, sa, &target_old_set, cpu_env); #endif if (sa->sa_flags & TARGET_SA_RESETHAND) { sa->_sa_handler = TARGET_SIG_DFL; } } if (q != &k->info) free_sigqueue(cpu_env, q); }"} {"target": 1, "idx": 9651, "func": "static void pcie_pci_bridge_write_config(PCIDevice *d, uint32_t address, uint32_t val, int len) { pci_bridge_write_config(d, address, val, len); msi_write_config(d, address, val, len); shpc_cap_write_config(d, address, val, len); }"} {"target": 0, "idx": 9670, "func": "static ram_addr_t find_ram_offset(ram_addr_t size) { RAMBlock *block, *next_block; ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX; assert(size != 0); /* it would hand out same offset multiple times */ if (QTAILQ_EMPTY(&ram_list.blocks)) return 0; QTAILQ_FOREACH(block, &ram_list.blocks, next) { ram_addr_t end, next = RAM_ADDR_MAX; end = block->offset + block->length; QTAILQ_FOREACH(next_block, &ram_list.blocks, next) { if (next_block->offset >= end) { next = MIN(next, next_block->offset); } } if (next - end >= size && next - end < mingap) { offset = end; mingap = next - end; } } if (offset == RAM_ADDR_MAX) { fprintf(stderr, \"Failed to find gap of requested size: %\" PRIu64 \"\\n\", (uint64_t)size); abort(); } return offset; }"} {"target": 0, "idx": 9683, "func": "CharDriverState *qemu_chr_alloc(void) { CharDriverState *chr = g_malloc0(sizeof(CharDriverState)); qemu_mutex_init(&chr->chr_write_lock); return chr; }"} {"target": 0, "idx": 9706, "func": "static int scsi_disk_emulate_command(SCSIDiskReq *r, uint8_t *outbuf) { SCSIRequest *req = &r->req; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); uint64_t nb_sectors; int buflen = 0; int ret; switch (req->cmd.buf[0]) { case TEST_UNIT_READY: if (!bdrv_is_inserted(s->bs)) goto not_ready; break; case REQUEST_SENSE: if (req->cmd.xfer < 4) goto illegal_request; buflen = scsi_device_get_sense(&s->qdev, outbuf, req->cmd.xfer, (req->cmd.buf[1] & 1) == 0); break; case INQUIRY: buflen = scsi_disk_emulate_inquiry(req, outbuf); if (buflen < 0) goto illegal_request; break; case MODE_SENSE: case MODE_SENSE_10: buflen = scsi_disk_emulate_mode_sense(req, outbuf); if (buflen < 0) goto illegal_request; break; case READ_TOC: buflen = scsi_disk_emulate_read_toc(req, outbuf); if (buflen < 0) goto illegal_request; break; case RESERVE: if (req->cmd.buf[1] & 1) goto illegal_request; break; case RESERVE_10: if (req->cmd.buf[1] & 3) goto illegal_request; break; case RELEASE: if (req->cmd.buf[1] & 1) goto illegal_request; break; case RELEASE_10: if (req->cmd.buf[1] & 3) goto illegal_request; break; case START_STOP: if (s->qdev.type == TYPE_ROM && (req->cmd.buf[4] & 2)) { /* load/eject medium */ bdrv_eject(s->bs, !(req->cmd.buf[4] & 1)); } break; case ALLOW_MEDIUM_REMOVAL: bdrv_set_locked(s->bs, req->cmd.buf[4] & 1); break; case READ_CAPACITY_10: /* The normal LEN field for this command is zero. */ memset(outbuf, 0, 8); bdrv_get_geometry(s->bs, &nb_sectors); if (!nb_sectors) goto not_ready; nb_sectors /= s->cluster_size; /* Returned value is the address of the last sector. */ nb_sectors--; /* Remember the new size for read/write sanity checking. */ s->max_lba = nb_sectors; /* Clip to 2TB, instead of returning capacity modulo 2TB. 
*/ if (nb_sectors > UINT32_MAX) nb_sectors = UINT32_MAX; outbuf[0] = (nb_sectors >> 24) & 0xff; outbuf[1] = (nb_sectors >> 16) & 0xff; outbuf[2] = (nb_sectors >> 8) & 0xff; outbuf[3] = nb_sectors & 0xff; outbuf[4] = 0; outbuf[5] = 0; outbuf[6] = s->cluster_size * 2; outbuf[7] = 0; buflen = 8; break; case SYNCHRONIZE_CACHE: ret = bdrv_flush(s->bs); if (ret < 0) { if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_FLUSH)) { return -1; } } break; case GET_CONFIGURATION: memset(outbuf, 0, 8); /* ??? This should probably return much more information. For now just return the basic header indicating the CD-ROM profile. */ outbuf[7] = 8; // CD-ROM buflen = 8; break; case SERVICE_ACTION_IN: /* Service Action In subcommands. */ if ((req->cmd.buf[1] & 31) == 0x10) { DPRINTF(\"SAI READ CAPACITY(16)\\n\"); memset(outbuf, 0, req->cmd.xfer); bdrv_get_geometry(s->bs, &nb_sectors); if (!nb_sectors) goto not_ready; nb_sectors /= s->cluster_size; /* Returned value is the address of the last sector. */ nb_sectors--; /* Remember the new size for read/write sanity checking. */ s->max_lba = nb_sectors; outbuf[0] = (nb_sectors >> 56) & 0xff; outbuf[1] = (nb_sectors >> 48) & 0xff; outbuf[2] = (nb_sectors >> 40) & 0xff; outbuf[3] = (nb_sectors >> 32) & 0xff; outbuf[4] = (nb_sectors >> 24) & 0xff; outbuf[5] = (nb_sectors >> 16) & 0xff; outbuf[6] = (nb_sectors >> 8) & 0xff; outbuf[7] = nb_sectors & 0xff; outbuf[8] = 0; outbuf[9] = 0; outbuf[10] = s->cluster_size * 2; outbuf[11] = 0; outbuf[12] = 0; outbuf[13] = get_physical_block_exp(&s->qdev.conf); /* set TPE bit if the format supports discard */ if (s->qdev.conf.discard_granularity) { outbuf[14] = 0x80; } /* Protection, exponent and lowest lba field left blank. */ buflen = req->cmd.xfer; break; } DPRINTF(\"Unsupported Service Action In\\n\"); goto illegal_request; case REPORT_LUNS: if (req->cmd.xfer < 16) goto illegal_request; memset(outbuf, 0, 16); outbuf[3] = 8; buflen = 16; break; case VERIFY_10: break; default: scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); return -1; } return buflen; not_ready: if (!bdrv_is_inserted(s->bs)) { scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); } else { scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); } return -1; illegal_request: scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); return -1; }"} {"target": 0, "idx": 9707, "func": "static void flush_queued_work(CPUState *cpu) { struct qemu_work_item *wi; if (cpu->queued_work_first == NULL) { return; } while ((wi = cpu->queued_work_first)) { cpu->queued_work_first = wi->next; wi->func(wi->data); wi->done = true; if (wi->free) { g_free(wi); } } cpu->queued_work_last = NULL; qemu_cond_broadcast(&qemu_work_cond); }"} {"target": 0, "idx": 9709, "func": "static int validate_guest_space(unsigned long guest_base, unsigned long guest_size) { unsigned long real_start, test_page_addr; /* We need to check that we can force a fault on access to the * commpage at 0xffff0fxx */ test_page_addr = guest_base + (0xffff0f00 & qemu_host_page_mask); /* If the commpage lies within the already allocated guest space, * then there is no way we can allocate it. 
*/ if (test_page_addr >= guest_base && test_page_addr <= (guest_base + guest_size)) { return -1; } /* Note it needs to be writeable to let us initialise it */ real_start = (unsigned long) mmap((void *)test_page_addr, qemu_host_page_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); /* If we can't map it then try another address */ if (real_start == -1ul) { return 0; } if (real_start != test_page_addr) { /* OS didn't put the page where we asked - unmap and reject */ munmap((void *)real_start, qemu_host_page_size); return 0; } /* Leave the page mapped * Populate it (mmap should have left it all 0'd) */ /* Kernel helper versions */ __put_user(5, (uint32_t *)g2h(0xffff0ffcul)); /* Now it's populated make it RO */ if (mprotect((void *)test_page_addr, qemu_host_page_size, PROT_READ)) { perror(\"Protecting guest commpage\"); exit(-1); } return 1; /* All good */ }"} {"target": 1, "idx": 9721, "func": "long do_sigreturn(CPUCRISState *env) { struct target_signal_frame *frame; abi_ulong frame_addr; target_sigset_t target_set; sigset_t set; int i; frame_addr = env->regs[R_SP]; /* Make sure the guest isn't playing games. */ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) goto badframe; /* Restore blocked signals */ if (__get_user(target_set.sig[0], &frame->sc.oldmask)) goto badframe; for(i = 1; i < TARGET_NSIG_WORDS; i++) { if (__get_user(target_set.sig[i], &frame->extramask[i - 1])) goto badframe; } target_to_host_sigset_internal(&set, &target_set); do_sigprocmask(SIG_SETMASK, &set, NULL); restore_sigcontext(&frame->sc, env); unlock_user_struct(frame, frame_addr, 0); return env->regs[10]; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); }"} {"target": 1, "idx": 9722, "func": "static void usbredir_interrupt_packet(void *priv, uint32_t id, struct usb_redir_interrupt_packet_header *interrupt_packet, uint8_t *data, int data_len) { USBRedirDevice *dev = priv; uint8_t ep = interrupt_packet->endpoint; DPRINTF(\"interrupt-in status %d ep %02X len %d id %u\\n\", interrupt_packet->status, ep, data_len, id); if (dev->endpoint[EP2I(ep)].type != USB_ENDPOINT_XFER_INT) { ERROR(\"received int packet for non interrupt endpoint %02X\\n\", ep); free(data); return; } if (ep & USB_DIR_IN) { if (dev->endpoint[EP2I(ep)].interrupt_started == 0) { DPRINTF(\"received int packet while not started ep %02X\\n\", ep); free(data); return; } /* bufp_alloc also adds the packet to the ep queue */ bufp_alloc(dev, data, data_len, interrupt_packet->status, ep); } else { int len = interrupt_packet->length; AsyncURB *aurb = async_find(dev, id); if (!aurb) { return; } if (aurb->interrupt_packet.endpoint != interrupt_packet->endpoint) { ERROR(\"return int packet mismatch, please report this!\\n\"); len = USB_RET_NAK; } if (aurb->packet) { aurb->packet->len = usbredir_handle_status(dev, interrupt_packet->status, len); usb_packet_complete(&dev->dev, aurb->packet); } async_free(dev, aurb); } }"} {"target": 0, "idx": 9727, "func": "void mpeg1_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y) { int i, cbp; const int mb_x = s->mb_x; const int mb_y = s->mb_y; const int first_mb= mb_x == s->resync_mb_x && mb_y == s->resync_mb_y; /* compute cbp */ cbp = 0; for(i=0;i<6;i++) { if (s->block_last_index[i] >= 0) cbp |= 1 << (5 - i); } if (cbp == 0 && !first_mb && (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) && ((s->pict_type == P_TYPE && s->mv_type == MV_TYPE_16X16 && (motion_x | motion_y) == 0) || 
(s->pict_type == B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) | ((s->mv_dir & MV_DIR_BACKWARD) ? ((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) { s->mb_skip_run++; s->qscale -= s->dquant; s->skip_count++; s->misc_bits++; s->last_bits++; if(s->pict_type == P_TYPE){ s->last_mv[0][1][0]= s->last_mv[0][0][0]= s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0; } } else { if(first_mb){ assert(s->mb_skip_run == 0); encode_mb_skip_run(s, s->mb_x); }else{ encode_mb_skip_run(s, s->mb_skip_run); } if (s->pict_type == I_TYPE) { if(s->dquant && cbp){ put_mb_modes(s, 2, 1, 0, 0); /* macroblock_type : macroblock_quant = 1 */ put_bits(&s->pb, 5, s->qscale); }else{ put_mb_modes(s, 1, 1, 0, 0); /* macroblock_type : macroblock_quant = 0 */ s->qscale -= s->dquant; } s->misc_bits+= get_bits_diff(s); s->i_count++; } else if (s->mb_intra) { if(s->dquant && cbp){ put_mb_modes(s, 6, 0x01, 0, 0); put_bits(&s->pb, 5, s->qscale); }else{ put_mb_modes(s, 5, 0x03, 0, 0); s->qscale -= s->dquant; } s->misc_bits+= get_bits_diff(s); s->i_count++; memset(s->last_mv, 0, sizeof(s->last_mv)); } else if (s->pict_type == P_TYPE) { if(s->mv_type == MV_TYPE_16X16){ if (cbp != 0) { if ((motion_x|motion_y) == 0) { if(s->dquant){ put_mb_modes(s, 5, 1, 0, 0); /* macroblock_pattern & quant */ put_bits(&s->pb, 5, s->qscale); }else{ put_mb_modes(s, 2, 1, 0, 0); /* macroblock_pattern only */ } s->misc_bits+= get_bits_diff(s); } else { if(s->dquant){ put_mb_modes(s, 5, 2, 1, 0); /* motion + cbp */ put_bits(&s->pb, 5, s->qscale); }else{ put_mb_modes(s, 1, 1, 1, 0); /* motion + cbp */ } s->misc_bits+= get_bits_diff(s); mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added s->mv_bits+= get_bits_diff(s); } } else { put_bits(&s->pb, 3, 1); /* motion only */ if (!s->frame_pred_frame_dct) put_bits(&s->pb, 2, 2); /* motion_type: frame */ s->misc_bits+= get_bits_diff(s); mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added s->qscale -= s->dquant; s->mv_bits+= get_bits_diff(s); } s->last_mv[0][1][0]= s->last_mv[0][0][0]= motion_x; s->last_mv[0][1][1]= s->last_mv[0][0][1]= motion_y; }else{ assert(!s->frame_pred_frame_dct && s->mv_type == MV_TYPE_FIELD); if (cbp) { if(s->dquant){ put_mb_modes(s, 5, 2, 1, 1); /* motion + cbp */ put_bits(&s->pb, 5, s->qscale); }else{ put_mb_modes(s, 1, 1, 1, 1); /* motion + cbp */ } } else { put_bits(&s->pb, 3, 1); /* motion only */ put_bits(&s->pb, 2, 1); /* motion_type: field */ s->qscale -= s->dquant; } s->misc_bits+= get_bits_diff(s); for(i=0; i<2; i++){ put_bits(&s->pb, 1, s->field_select[0][i]); mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code); mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code); s->last_mv[0][i][0]= s->mv[0][i][0]; s->last_mv[0][i][1]= 2*s->mv[0][i][1]; } s->mv_bits+= get_bits_diff(s); } if(cbp) put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); s->f_count++; } else{ static const int mb_type_len[4]={0,3,4,2}; //bak,for,bi if(s->mv_type == MV_TYPE_16X16){ if (cbp){ // With coded bloc pattern if (s->dquant) { if(s->mv_dir == MV_DIR_FORWARD) put_mb_modes(s, 6, 3, 1, 0); else put_mb_modes(s, 
mb_type_len[s->mv_dir]+3, 2, 1, 0); put_bits(&s->pb, 5, s->qscale); } else { put_mb_modes(s, mb_type_len[s->mv_dir], 3, 1, 0); } }else{ // No coded bloc pattern put_bits(&s->pb, mb_type_len[s->mv_dir], 2); if (!s->frame_pred_frame_dct) put_bits(&s->pb, 2, 2); /* motion_type: frame */ s->qscale -= s->dquant; } s->misc_bits += get_bits_diff(s); if (s->mv_dir&MV_DIR_FORWARD){ mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code); mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code); s->last_mv[0][0][0]=s->last_mv[0][1][0]= s->mv[0][0][0]; s->last_mv[0][0][1]=s->last_mv[0][1][1]= s->mv[0][0][1]; s->f_count++; } if (s->mv_dir&MV_DIR_BACKWARD){ mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code); mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code); s->last_mv[1][0][0]=s->last_mv[1][1][0]= s->mv[1][0][0]; s->last_mv[1][0][1]=s->last_mv[1][1][1]= s->mv[1][0][1]; s->b_count++; } }else{ assert(s->mv_type == MV_TYPE_FIELD); assert(!s->frame_pred_frame_dct); if (cbp){ // With coded bloc pattern if (s->dquant) { if(s->mv_dir == MV_DIR_FORWARD) put_mb_modes(s, 6, 3, 1, 1); else put_mb_modes(s, mb_type_len[s->mv_dir]+3, 2, 1, 1); put_bits(&s->pb, 5, s->qscale); } else { put_mb_modes(s, mb_type_len[s->mv_dir], 3, 1, 1); } }else{ // No coded bloc pattern put_bits(&s->pb, mb_type_len[s->mv_dir], 2); put_bits(&s->pb, 2, 1); /* motion_type: field */ s->qscale -= s->dquant; } s->misc_bits += get_bits_diff(s); if (s->mv_dir&MV_DIR_FORWARD){ for(i=0; i<2; i++){ put_bits(&s->pb, 1, s->field_select[0][i]); mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code); mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code); s->last_mv[0][i][0]= s->mv[0][i][0]; s->last_mv[0][i][1]= 2*s->mv[0][i][1]; } s->f_count++; } if (s->mv_dir&MV_DIR_BACKWARD){ for(i=0; i<2; i++){ put_bits(&s->pb, 1, s->field_select[1][i]); mpeg1_encode_motion(s, s->mv[1][i][0] - s->last_mv[1][i][0] , s->b_code); mpeg1_encode_motion(s, s->mv[1][i][1] - (s->last_mv[1][i][1]>>1), s->b_code); s->last_mv[1][i][0]= s->mv[1][i][0]; s->last_mv[1][i][1]= 2*s->mv[1][i][1]; } s->b_count++; } } s->mv_bits += get_bits_diff(s); if(cbp) put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); } for(i=0;i<6;i++) { if (cbp & (1 << (5 - i))) { mpeg1_encode_block(s, block[i], i); } } s->mb_skip_run = 0; if(s->mb_intra) s->i_tex_bits+= get_bits_diff(s); else s->p_tex_bits+= get_bits_diff(s); } }"} {"target": 0, "idx": 9744, "func": "static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp) { unsigned int cssid = 0; unsigned int ssid = 0; unsigned int schid; unsigned int devno; bool have_devno = false; bool found = false; SubchDev *sch; int num; DeviceState *parent = DEVICE(dev); Error *err = NULL; VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev); VirtIODevice *vdev; sch = g_malloc0(sizeof(SubchDev)); sch->driver_data = dev; dev->sch = sch; dev->indicators = NULL; /* Initialize subchannel structure. */ sch->channel_prog = 0x0; sch->last_cmd_valid = false; sch->thinint_active = false; /* * Use a device number if provided. Otherwise, fall back to subchannel * number. */ if (dev->bus_id) { num = sscanf(dev->bus_id, \"%x.%x.%04x\", &cssid, &ssid, &devno); if (num == 3) { if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) { error_setg(errp, \"Invalid cssid or ssid: cssid %x, ssid %x\", cssid, ssid); goto out_err; } /* Enforce use of virtual cssid. 
*/ if (cssid != VIRTUAL_CSSID) { error_setg(errp, \"cssid %x not valid for virtio devices\", cssid); goto out_err; } if (css_devno_used(cssid, ssid, devno)) { error_setg(errp, \"Device %x.%x.%04x already exists\", cssid, ssid, devno); goto out_err; } sch->cssid = cssid; sch->ssid = ssid; sch->devno = devno; have_devno = true; } else { error_setg(errp, \"Malformed devno parameter '%s'\", dev->bus_id); goto out_err; } } /* Find the next free id. */ if (have_devno) { for (schid = 0; schid <= MAX_SCHID; schid++) { if (!css_find_subch(1, cssid, ssid, schid)) { sch->schid = schid; css_subch_assign(cssid, ssid, schid, devno, sch); found = true; break; } } if (!found) { error_setg(errp, \"No free subchannel found for %x.%x.%04x\", cssid, ssid, devno); goto out_err; } trace_virtio_ccw_new_device(cssid, ssid, schid, devno, \"user-configured\"); } else { cssid = VIRTUAL_CSSID; for (ssid = 0; ssid <= MAX_SSID; ssid++) { for (schid = 0; schid <= MAX_SCHID; schid++) { if (!css_find_subch(1, cssid, ssid, schid)) { sch->cssid = cssid; sch->ssid = ssid; sch->schid = schid; devno = schid; /* * If the devno is already taken, look further in this * subchannel set. */ while (css_devno_used(cssid, ssid, devno)) { if (devno == MAX_SCHID) { devno = 0; } else if (devno == schid - 1) { error_setg(errp, \"No free devno found\"); goto out_err; } else { devno++; } } sch->devno = devno; css_subch_assign(cssid, ssid, schid, devno, sch); found = true; break; } } if (found) { break; } } if (!found) { error_setg(errp, \"Virtual channel subsystem is full!\"); goto out_err; } trace_virtio_ccw_new_device(cssid, ssid, schid, devno, \"auto-configured\"); } /* Build initial schib. */ css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE); sch->ccw_cb = virtio_ccw_cb; /* Build senseid data. */ memset(&sch->id, 0, sizeof(SenseId)); sch->id.reserved = 0xff; sch->id.cu_type = VIRTIO_CCW_CU_TYPE; if (k->realize) { k->realize(dev, &err); } if (err) { error_propagate(errp, err); css_subch_assign(cssid, ssid, schid, devno, NULL); goto out_err; } /* device_id is only set after vdev has been realized */ vdev = virtio_ccw_get_vdev(sch); sch->id.cu_model = vdev->device_id; /* Only the first 32 feature bits are used. 
*/ dev->host_features[0] = virtio_bus_get_vdev_features(&dev->bus, dev->host_features[0]); virtio_add_feature(&dev->host_features[0], VIRTIO_F_NOTIFY_ON_EMPTY); virtio_add_feature(&dev->host_features[0], VIRTIO_F_BAD_FEATURE); css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, parent->hotplugged, 1); return; out_err: dev->sch = NULL; g_free(sch); }"} {"target": 0, "idx": 9754, "func": "void op_cp1_registers(void) { if (!(env->CP0_Status & (1 << CP0St_FR)) && (PARAM1 & 1)) { CALL_FROM_TB1(do_raise_exception, EXCP_RI); } RETURN(); }"} {"target": 1, "idx": 9761, "func": "static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size) { unsigned i; #ifdef HAVE_MMX long mmx_size= 23 - src_size; asm volatile ( \"test %%\"REG_a\", %%\"REG_a\" \\n\\t\" \"jns 2f \\n\\t\" \"movq \"MANGLE(mask24r)\", %%mm5 \\n\\t\" \"movq \"MANGLE(mask24g)\", %%mm6 \\n\\t\" \"movq \"MANGLE(mask24b)\", %%mm7 \\n\\t\" ASMALIGN(4) \"1: \\n\\t\" PREFETCH\" 32(%1, %%\"REG_a\") \\n\\t\" \"movq (%1, %%\"REG_a\"), %%mm0 \\n\\t\" // BGR BGR BG \"movq (%1, %%\"REG_a\"), %%mm1 \\n\\t\" // BGR BGR BG \"movq 2(%1, %%\"REG_a\"), %%mm2 \\n\\t\" // R BGR BGR B \"psllq $16, %%mm0 \\n\\t\" // 00 BGR BGR \"pand %%mm5, %%mm0 \\n\\t\" \"pand %%mm6, %%mm1 \\n\\t\" \"pand %%mm7, %%mm2 \\n\\t\" \"por %%mm0, %%mm1 \\n\\t\" \"por %%mm2, %%mm1 \\n\\t\" \"movq 6(%1, %%\"REG_a\"), %%mm0 \\n\\t\" // BGR BGR BG MOVNTQ\" %%mm1, (%2, %%\"REG_a\")\\n\\t\" // RGB RGB RG \"movq 8(%1, %%\"REG_a\"), %%mm1 \\n\\t\" // R BGR BGR B \"movq 10(%1, %%\"REG_a\"), %%mm2 \\n\\t\" // GR BGR BGR \"pand %%mm7, %%mm0 \\n\\t\" \"pand %%mm5, %%mm1 \\n\\t\" \"pand %%mm6, %%mm2 \\n\\t\" \"por %%mm0, %%mm1 \\n\\t\" \"por %%mm2, %%mm1 \\n\\t\" \"movq 14(%1, %%\"REG_a\"), %%mm0 \\n\\t\" // R BGR BGR B MOVNTQ\" %%mm1, 8(%2, %%\"REG_a\")\\n\\t\" // B RGB RGB R \"movq 16(%1, %%\"REG_a\"), %%mm1 \\n\\t\" // GR BGR BGR \"movq 18(%1, %%\"REG_a\"), %%mm2 \\n\\t\" // BGR BGR BG \"pand %%mm6, %%mm0 \\n\\t\" \"pand %%mm7, %%mm1 \\n\\t\" \"pand %%mm5, %%mm2 \\n\\t\" \"por %%mm0, %%mm1 \\n\\t\" \"por %%mm2, %%mm1 \\n\\t\" MOVNTQ\" %%mm1, 16(%2, %%\"REG_a\")\\n\\t\" \"add $24, %%\"REG_a\" \\n\\t\" \" js 1b \\n\\t\" \"2: \\n\\t\" : \"+a\" (mmx_size) : \"r\" (src-mmx_size), \"r\"(dst-mmx_size) ); __asm __volatile(SFENCE:::\"memory\"); __asm __volatile(EMMS:::\"memory\"); if(mmx_size==23) return; //finihsed, was multiple of 8 src+= src_size; dst+= src_size; src_size= 23-mmx_size; src-= src_size; dst-= src_size; #endif for(i=0; ipriv_data; struct vda_context *vda_ctx = avctx->hwaccel_context; AVFrame *frame = &h->cur_pic_ptr->f; struct vda_buffer *context; AVBufferRef *buffer; int status; if (!vda_ctx->decoder || !vda_ctx->priv_bitstream) status = vda_sync_decode(vda_ctx); frame->data[3] = (void*)vda_ctx->cv_buffer; if (status) av_log(avctx, AV_LOG_ERROR, \"Failed to decode frame (%d)\\n\", status);"} {"target": 1, "idx": 9781, "func": "static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; H264Context *h = avctx->priv_data; AVFrame *pict = data; int buf_index = 0; Picture *out; int i, out_idx; int ret; h->flags = avctx->flags; /* end of stream, output what is still in the buffers */ if (buf_size == 0) { out: h->cur_pic_ptr = NULL; h->first_field = 0; // FIXME factorize this with the output code below out = h->delayed_pic[0]; out_idx = 0; for (i = 1; h->delayed_pic[i] && !h->delayed_pic[i]->f.key_frame && !h->delayed_pic[i]->mmco_reset; i++) if 
(h->delayed_pic[i]->poc < out->poc) { out = h->delayed_pic[i]; out_idx = i; } for (i = out_idx; h->delayed_pic[i]; i++) h->delayed_pic[i] = h->delayed_pic[i + 1]; if (out) { out->reference &= ~DELAYED_PIC_REF; ret = output_frame(h, pict, &out->f); if (ret < 0) return ret; *got_frame = 1; } return buf_index; } if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){ int cnt= buf[5]&0x1f; const uint8_t *p= buf+6; while(cnt--){ int nalsize= AV_RB16(p) + 2; if(nalsize > buf_size - (p-buf) || p[2]!=0x67) goto not_extra; p += nalsize; } cnt = *(p++); if(!cnt) goto not_extra; while(cnt--){ int nalsize= AV_RB16(p) + 2; if(nalsize > buf_size - (p-buf) || p[2]!=0x68) goto not_extra; p += nalsize; } return ff_h264_decode_extradata(h, buf, buf_size); } not_extra: buf_index = decode_nal_units(h, buf, buf_size, 0); if (buf_index < 0) return -1; if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) { av_assert0(buf_index <= buf_size); goto out; } if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) { if (avctx->skip_frame >= AVDISCARD_NONREF || buf_size >= 4 && !memcmp(\"Q264\", buf, 4)) return buf_size; av_log(avctx, AV_LOG_ERROR, \"no frame!\\n\"); return -1; } if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) || (h->mb_y >= h->mb_height && h->mb_height)) { if (avctx->flags2 & CODEC_FLAG2_CHUNKS) decode_postinit(h, 1); field_end(h, 0); /* Wait for second field. */ *got_frame = 0; if (h->next_output_pic && (h->next_output_pic->sync || h->sync>1)) { ret = output_frame(h, pict, &h->next_output_pic->f); if (ret < 0) return ret; *got_frame = 1; if (CONFIG_MPEGVIDEO) { ff_print_debug_info2(h->avctx, h->next_output_pic, pict, h->er.mbskip_table, &h->low_delay, h->mb_width, h->mb_height, h->mb_stride, 1); } } } assert(pict->data[0] || !*got_frame); return get_consumed_bytes(buf_index, buf_size); }"} {"target": 1, "idx": 9787, "func": "GIOStatus ga_channel_write_all(GAChannel *c, const char *buf, size_t size) { GIOStatus status = G_IO_STATUS_NORMAL; size_t count; while (size) { status = ga_channel_write(c, buf, size, &count); if (status == G_IO_STATUS_NORMAL) { size -= count; buf += count; } else if (status != G_IO_STATUS_AGAIN) { break; } } return status; }"} {"target": 1, "idx": 9789, "func": "static int channelmap_query_formats(AVFilterContext *ctx) { ChannelMapContext *s = ctx->priv; ff_set_common_formats(ctx, ff_planar_sample_fmts()); ff_set_common_samplerates(ctx, ff_all_samplerates()); ff_channel_layouts_ref(ff_all_channel_layouts(), &ctx->inputs[0]->out_channel_layouts); ff_channel_layouts_ref(s->channel_layouts, &ctx->outputs[0]->in_channel_layouts); return 0; }"} {"target": 0, "idx": 9805, "func": "static void flush_queued_work(CPUState *env) { struct qemu_work_item *wi; if (!env->queued_work_first) return; while ((wi = env->queued_work_first)) { env->queued_work_first = wi->next; wi->func(wi->data); wi->done = true; } env->queued_work_last = NULL; qemu_cond_broadcast(&qemu_work_cond); }"} {"target": 0, "idx": 9817, "func": "static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, struct iovec *iov, int niov, bool create, enum AIOCBState aiocb_type) { int nr_copies = s->inode.nr_copies; SheepdogObjReq hdr; unsigned int wlen; int ret; uint64_t oid = aio_req->oid; unsigned int datalen = aio_req->data_len; uint64_t offset = aio_req->offset; uint8_t flags = aio_req->flags; uint64_t old_oid = aio_req->base_oid; if (!nr_copies) { error_report(\"bug\"); } memset(&hdr, 0, sizeof(hdr)); if (aiocb_type == AIOCB_READ_UDATA) { 
wlen = 0; hdr.opcode = SD_OP_READ_OBJ; hdr.flags = flags; } else if (create) { wlen = datalen; hdr.opcode = SD_OP_CREATE_AND_WRITE_OBJ; hdr.flags = SD_FLAG_CMD_WRITE | flags; } else { wlen = datalen; hdr.opcode = SD_OP_WRITE_OBJ; hdr.flags = SD_FLAG_CMD_WRITE | flags; } if (s->cache_flags) { hdr.flags |= s->cache_flags; } hdr.oid = oid; hdr.cow_oid = old_oid; hdr.copies = s->inode.nr_copies; hdr.data_length = datalen; hdr.offset = offset; hdr.id = aio_req->id; qemu_co_mutex_lock(&s->lock); s->co_send = qemu_coroutine_self(); qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, aio_flush_request, s); socket_set_cork(s->fd, 1); /* send a header */ ret = qemu_co_send(s->fd, &hdr, sizeof(hdr)); if (ret < 0) { qemu_co_mutex_unlock(&s->lock); error_report(\"failed to send a req, %s\", strerror(errno)); return -errno; } if (wlen) { ret = qemu_co_sendv(s->fd, iov, niov, aio_req->iov_offset, wlen); if (ret < 0) { qemu_co_mutex_unlock(&s->lock); error_report(\"failed to send a data, %s\", strerror(errno)); return -errno; } } socket_set_cork(s->fd, 0); qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, aio_flush_request, s); qemu_co_mutex_unlock(&s->lock); return 0; }"} {"target": 1, "idx": 9842, "func": "static int img_write_packet(AVFormatContext *s, int stream_index, UINT8 *buf, int size) { VideoData *img = s->priv_data; AVStream *st = s->streams[stream_index]; ByteIOContext pb1, *pb; AVPicture picture; int width, height, ret, size1; char filename[1024]; width = st->codec.width; height = st->codec.height; switch(st->codec.pix_fmt) { case PIX_FMT_YUV420P: size1 = (width * height * 3) / 2; if (size != size1) return -EIO; picture.data[0] = buf; picture.data[1] = picture.data[0] + width * height; picture.data[2] = picture.data[1] + (width * height) / 4; picture.linesize[0] = width; picture.linesize[1] = width >> 1; picture.linesize[2] = width >> 1; break; case PIX_FMT_RGB24: size1 = (width * height * 3); if (size != size1) return -EIO; picture.data[0] = buf; picture.linesize[0] = width * 3; break; default: return -EIO; } if (get_frame_filename(filename, sizeof(filename), img->path, img->img_number) < 0) return -EIO; if (!img->is_pipe) { pb = &pb1; if (url_fopen(pb, filename, URL_WRONLY) < 0) return -EIO; } else { pb = &s->pb; } switch(img->img_fmt) { case IMGFMT_PGMYUV: ret = pgm_save(&picture, width, height, pb, 1); break; case IMGFMT_PGM: ret = pgm_save(&picture, width, height, pb, 0); break; case IMGFMT_YUV: ret = yuv_save(&picture, width, height, filename); break; case IMGFMT_PPM: ret = ppm_save(&picture, width, height, pb); break; } if (!img->is_pipe) { url_fclose(pb); } img->img_number++; return 0; }"} {"target": 1, "idx": 9845, "func": "int pci_drive_hot_add(Monitor *mon, const QDict *qdict, DriveInfo *dinfo) { /* On non-x86 we don't do PCI hotplug */ monitor_printf(mon, \"Can't hot-add drive to type %d\\n\", dinfo->type); return -1; }"} {"target": 1, "idx": 9846, "func": "static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) { uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY; // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] ); uint8_t *ysrc = src[0]; uint8_t *usrc = src[1]; uint8_t *vsrc = src[2]; const int width = c->srcW; const int height = srcSliceH; const int lumStride = srcStride[0]; const int chromStride = srcStride[1]; const int dstStride = dstStride_a[0]; const vector unsigned char yperm = vec_lvsl(0, ysrc); 
const int vertLumPerChroma = 2; register unsigned int y; if(width&15){ yv12toyuy2( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride); return srcSliceH; } /* this code assume: 1) dst is 16 bytes-aligned 2) dstStride is a multiple of 16 3) width is a multiple of 16 4) lum&chrom stride are multiple of 8 */ for(y=0; y> 1; vector unsigned char v_yA = vec_ld(i, ysrc); vector unsigned char v_yB = vec_ld(i + 16, ysrc); vector unsigned char v_yC = vec_ld(i + 32, ysrc); vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm); vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm); vector unsigned char v_uA = vec_ld(j, usrc); vector unsigned char v_uB = vec_ld(j + 16, usrc); vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc)); vector unsigned char v_vA = vec_ld(j, vsrc); vector unsigned char v_vB = vec_ld(j + 16, vsrc); vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc)); vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); vector unsigned char v_uv_b = vec_mergel(v_u, v_v); vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a); vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a); vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b); vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b); vec_st(v_yuy2_0, (i << 1), dst); vec_st(v_yuy2_1, (i << 1) + 16, dst); vec_st(v_yuy2_2, (i << 1) + 32, dst); vec_st(v_yuy2_3, (i << 1) + 48, dst); } if (i < width) { const unsigned int j = i >> 1; vector unsigned char v_y1 = vec_ld(i, ysrc); vector unsigned char v_u = vec_ld(j, usrc); vector unsigned char v_v = vec_ld(j, vsrc); vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a); vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a); vec_st(v_yuy2_0, (i << 1), dst); vec_st(v_yuy2_1, (i << 1) + 16, dst); } if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) { usrc += chromStride; vsrc += chromStride; } ysrc += lumStride; dst += dstStride; } return srcSliceH; }"} {"target": 0, "idx": 9864, "func": "void cpu_loop(CPUAlphaState *env) { CPUState *cs = CPU(alpha_env_get_cpu(env)); int trapnr; target_siginfo_t info; abi_long sysret; while (1) { cpu_exec_start(cs); trapnr = cpu_alpha_exec(cs); cpu_exec_end(cs); /* All of the traps imply a transition through PALcode, which implies an REI instruction has been executed. Which means that the intr_flag should be cleared. */ env->intr_flag = 0; switch (trapnr) { case EXCP_RESET: fprintf(stderr, \"Reset requested. Exit\\n\"); exit(EXIT_FAILURE); break; case EXCP_MCHK: fprintf(stderr, \"Machine check exception. Exit\\n\"); exit(EXIT_FAILURE); break; case EXCP_SMP_INTERRUPT: case EXCP_CLK_INTERRUPT: case EXCP_DEV_INTERRUPT: fprintf(stderr, \"External interrupt. Exit\\n\"); exit(EXIT_FAILURE); break; case EXCP_MMFAULT: env->lock_addr = -1; info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID ? 
TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR); info._sifields._sigfault._addr = env->trap_arg0; queue_signal(env, info.si_signo, &info); break; case EXCP_UNALIGN: env->lock_addr = -1; info.si_signo = TARGET_SIGBUS; info.si_errno = 0; info.si_code = TARGET_BUS_ADRALN; info._sifields._sigfault._addr = env->trap_arg0; queue_signal(env, info.si_signo, &info); break; case EXCP_OPCDEC: do_sigill: env->lock_addr = -1; info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_ILLOPC; info._sifields._sigfault._addr = env->pc; queue_signal(env, info.si_signo, &info); break; case EXCP_ARITH: env->lock_addr = -1; info.si_signo = TARGET_SIGFPE; info.si_errno = 0; info.si_code = TARGET_FPE_FLTINV; info._sifields._sigfault._addr = env->pc; queue_signal(env, info.si_signo, &info); break; case EXCP_FEN: /* No-op. Linux simply re-enables the FPU. */ break; case EXCP_CALL_PAL: env->lock_addr = -1; switch (env->error_code) { case 0x80: /* BPT */ info.si_signo = TARGET_SIGTRAP; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; info._sifields._sigfault._addr = env->pc; queue_signal(env, info.si_signo, &info); break; case 0x81: /* BUGCHK */ info.si_signo = TARGET_SIGTRAP; info.si_errno = 0; info.si_code = 0; info._sifields._sigfault._addr = env->pc; queue_signal(env, info.si_signo, &info); break; case 0x83: /* CALLSYS */ trapnr = env->ir[IR_V0]; sysret = do_syscall(env, trapnr, env->ir[IR_A0], env->ir[IR_A1], env->ir[IR_A2], env->ir[IR_A3], env->ir[IR_A4], env->ir[IR_A5], 0, 0); if (trapnr == TARGET_NR_sigreturn || trapnr == TARGET_NR_rt_sigreturn) { break; } /* Syscall writes 0 to V0 to bypass error check, similar to how this is handled internal to Linux kernel. (Ab)use trapnr temporarily as boolean indicating error. */ trapnr = (env->ir[IR_V0] != 0 && sysret < 0); env->ir[IR_V0] = (trapnr ? -sysret : sysret); env->ir[IR_A3] = trapnr; break; case 0x86: /* IMB */ /* ??? We can probably elide the code using page_unprotect that is checking for self-modifying code. Instead we could simply call tb_flush here. Until we work out the changes required to turn off the extra write protection, this can be a no-op. */ break; case 0x9E: /* RDUNIQUE */ /* Handled in the translator for usermode. */ abort(); case 0x9F: /* WRUNIQUE */ /* Handled in the translator for usermode. */ abort(); case 0xAA: /* GENTRAP */ info.si_signo = TARGET_SIGFPE; switch (env->ir[IR_A0]) { case TARGET_GEN_INTOVF: info.si_code = TARGET_FPE_INTOVF; break; case TARGET_GEN_INTDIV: info.si_code = TARGET_FPE_INTDIV; break; case TARGET_GEN_FLTOVF: info.si_code = TARGET_FPE_FLTOVF; break; case TARGET_GEN_FLTUND: info.si_code = TARGET_FPE_FLTUND; break; case TARGET_GEN_FLTINV: info.si_code = TARGET_FPE_FLTINV; break; case TARGET_GEN_FLTINE: info.si_code = TARGET_FPE_FLTRES; break; case TARGET_GEN_ROPRAND: info.si_code = 0; break; default: info.si_signo = TARGET_SIGTRAP; info.si_code = 0; break; } info.si_errno = 0; info._sifields._sigfault._addr = env->pc; queue_signal(env, info.si_signo, &info); break; default: goto do_sigill; } break; case EXCP_DEBUG: info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP); if (info.si_signo) { env->lock_addr = -1; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, &info); } break; case EXCP_STL_C: case EXCP_STQ_C: do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C); break; case EXCP_INTERRUPT: /* Just indicate that signals should be handled asap. 
*/ break; default: printf (\"Unhandled trap: 0x%x\\n\", trapnr); cpu_dump_state(cs, stderr, fprintf, 0); exit(EXIT_FAILURE); } process_pending_signals (env); } }"} {"target": 0, "idx": 9870, "func": "static S390PCIBusDevice *s390_pci_find_dev_by_target(const char *target) { int i; S390PCIBusDevice *pbdev; S390pciState *s = s390_get_phb(); if (!target) { return NULL; } for (i = 0; i < PCI_SLOT_MAX; i++) { pbdev = s->pbdev[i]; if (!pbdev) { continue; } if (!strcmp(pbdev->target, target)) { return pbdev; } } return NULL; }"} {"target": 0, "idx": 9896, "func": "static void cin_decode_lzss(const unsigned char *src, int src_size, unsigned char *dst, int dst_size) { uint16_t cmd; int i, sz, offset, code; unsigned char *dst_end = dst + dst_size; const unsigned char *src_end = src + src_size; while (src < src_end && dst < dst_end) { code = *src++; for (i = 0; i < 8 && src < src_end && dst < dst_end; ++i) { if (code & (1 << i)) { *dst++ = *src++; } else { cmd = AV_RL16(src); src += 2; offset = cmd >> 4; sz = (cmd & 0xF) + 2; /* don't use memcpy/memmove here as the decoding routine (ab)uses */ /* buffer overlappings to repeat bytes in the destination */ sz = FFMIN(sz, dst_end - dst); while (sz--) { *dst = *(dst - offset - 1); ++dst; } } } } }"} {"target": 1, "idx": 9913, "func": "ff_rdt_parse_open(AVFormatContext *ic, int first_stream_of_set_idx, void *priv_data, RTPDynamicProtocolHandler *handler) { RDTDemuxContext *s = av_mallocz(sizeof(RDTDemuxContext)); if (!s) return NULL; s->ic = ic; s->streams = &ic->streams[first_stream_of_set_idx]; do { s->n_streams++; } while (first_stream_of_set_idx + s->n_streams < ic->nb_streams && s->streams[s->n_streams]->priv_data == s->streams[0]->priv_data); s->prev_set_id = -1; s->prev_stream_id = -1; s->prev_timestamp = -1; s->parse_packet = handler->parse_packet; s->dynamic_protocol_context = priv_data; return s; }"} {"target": 1, "idx": 9922, "func": "static int qcow2_co_writev(BlockDriverState *bs, int64_t sector_num, int remaining_sectors, QEMUIOVector *qiov) { BDRVQcowState *s = bs->opaque; int index_in_cluster; int n_end; int ret; int cur_nr_sectors; /* number of sectors in current iteration */ QCowL2Meta l2meta; uint64_t cluster_offset; QEMUIOVector hd_qiov; uint64_t bytes_done = 0; uint8_t *cluster_data = NULL; l2meta.nb_clusters = 0; qemu_co_queue_init(&l2meta.dependent_requests); qemu_iovec_init(&hd_qiov, qiov->niov); s->cluster_cache_offset = -1; /* disable compressed cache */ qemu_co_mutex_lock(&s->lock); while (remaining_sectors != 0) { index_in_cluster = sector_num & (s->cluster_sectors - 1); n_end = index_in_cluster + remaining_sectors; if (s->crypt_method && n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors) { n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors; } ret = qcow2_alloc_cluster_offset(bs, sector_num << 9, index_in_cluster, n_end, &cur_nr_sectors, &l2meta); if (ret < 0) { goto fail; } cluster_offset = l2meta.cluster_offset; assert((cluster_offset & 511) == 0); qemu_iovec_reset(&hd_qiov); qemu_iovec_copy(&hd_qiov, qiov, bytes_done, cur_nr_sectors * 512); if (s->crypt_method) { if (!cluster_data) { cluster_data = g_malloc0(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); } assert(hd_qiov.size <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); qemu_iovec_to_buffer(&hd_qiov, cluster_data); qcow2_encrypt_sectors(s, sector_num, cluster_data, cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key); qemu_iovec_reset(&hd_qiov); qemu_iovec_add(&hd_qiov, cluster_data, cur_nr_sectors * 512); } BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 
qemu_co_mutex_unlock(&s->lock); ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + index_in_cluster, cur_nr_sectors, &hd_qiov); qemu_co_mutex_lock(&s->lock); if (ret < 0) { goto fail; } ret = qcow2_alloc_cluster_link_l2(bs, &l2meta); run_dependent_requests(s, &l2meta); if (ret < 0) { goto fail; } remaining_sectors -= cur_nr_sectors; sector_num += cur_nr_sectors; bytes_done += cur_nr_sectors * 512; } ret = 0; fail: qemu_co_mutex_unlock(&s->lock); qemu_iovec_destroy(&hd_qiov); return ret; }"} {"target": 1, "idx": 9929, "func": "int vhost_dev_init(struct vhost_dev *hdev, void *opaque, VhostBackendType backend_type, bool force) { uint64_t features; int i, r; if (vhost_set_backend_type(hdev, backend_type) < 0) { close((uintptr_t)opaque); return -1; } if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) { close((uintptr_t)opaque); return -errno; } r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL); if (r < 0) { goto fail; } r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features); if (r < 0) { goto fail; } for (i = 0; i < hdev->nvqs; ++i) { r = vhost_virtqueue_init(hdev, hdev->vqs + i, i); if (r < 0) { goto fail_vq; } } hdev->features = features; hdev->memory_listener = (MemoryListener) { .begin = vhost_begin, .commit = vhost_commit, .region_add = vhost_region_add, .region_del = vhost_region_del, .region_nop = vhost_region_nop, .log_start = vhost_log_start, .log_stop = vhost_log_stop, .log_sync = vhost_log_sync, .log_global_start = vhost_log_global_start, .log_global_stop = vhost_log_global_stop, .eventfd_add = vhost_eventfd_add, .eventfd_del = vhost_eventfd_del, .priority = 10 }; hdev->migration_blocker = NULL; if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) { error_setg(&hdev->migration_blocker, \"Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.\"); migrate_add_blocker(hdev->migration_blocker); } hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions)); hdev->n_mem_sections = 0; hdev->mem_sections = NULL; hdev->log = NULL; hdev->log_size = 0; hdev->log_enabled = false; hdev->started = false; hdev->memory_changed = false; memory_listener_register(&hdev->memory_listener, &address_space_memory); hdev->force = force; return 0; fail_vq: while (--i >= 0) { vhost_virtqueue_cleanup(hdev->vqs + i); } fail: r = -errno; hdev->vhost_ops->vhost_backend_cleanup(hdev); return r; }"} {"target": 1, "idx": 9934, "func": "int ff_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt) { int i; for (i = 0; i < 256; i++) { int r, g, b; switch (pix_fmt) { case AV_PIX_FMT_RGB8: r = (i>>5 )*36; g = ((i>>2)&7)*36; b = (i&3 )*85; break; case AV_PIX_FMT_BGR8: b = (i>>6 )*85; g = ((i>>3)&7)*36; r = (i&7 )*36; break; case AV_PIX_FMT_RGB4_BYTE: r = (i>>3 )*255; g = ((i>>1)&3)*85; b = (i&1 )*255; break; case AV_PIX_FMT_BGR4_BYTE: b = (i>>3 )*255; g = ((i>>1)&3)*85; r = (i&1 )*255; break; case AV_PIX_FMT_GRAY8: r = b = g = i; break; default: return AVERROR(EINVAL); } pal[i] = b + (g<<8) + (r<<16) + (0xFF<<24); } return 0; }"} {"target": 0, "idx": 9943, "func": "void qmp_transaction(TransactionActionList *dev_list, Error **errp) { TransactionActionList *dev_entry = dev_list; BlkTransactionState *state, *next; Error *local_err = NULL; QSIMPLEQ_HEAD(snap_bdrv_states, BlkTransactionState) snap_bdrv_states; QSIMPLEQ_INIT(&snap_bdrv_states); /* drain all i/o before any operations */ bdrv_drain_all(); /* We don't do anything in this loop that commits us to the operations */ while (NULL != dev_entry) { TransactionAction *dev_info = NULL; const BdrvActionOps *ops; dev_info = 
dev_entry->value; dev_entry = dev_entry->next; assert(dev_info->kind < ARRAY_SIZE(actions)); ops = &actions[dev_info->kind]; assert(ops->instance_size > 0); state = g_malloc0(ops->instance_size); state->ops = ops; state->action = dev_info; QSIMPLEQ_INSERT_TAIL(&snap_bdrv_states, state, entry); state->ops->prepare(state, &local_err); if (local_err) { error_propagate(errp, local_err); goto delete_and_fail; } } QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) { if (state->ops->commit) { state->ops->commit(state); } } /* success */ goto exit; delete_and_fail: /* failure, and it is all-or-none; roll back all operations */ QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) { if (state->ops->abort) { state->ops->abort(state); } } exit: QSIMPLEQ_FOREACH_SAFE(state, &snap_bdrv_states, entry, next) { if (state->ops->clean) { state->ops->clean(state); } g_free(state); } }"} {"target": 0, "idx": 9956, "func": "static int intel_hda_init(PCIDevice *pci) { IntelHDAState *d = DO_UPCAST(IntelHDAState, pci, pci); uint8_t *conf = d->pci.config; d->name = d->pci.qdev.info->name; pci_config_set_vendor_id(conf, PCI_VENDOR_ID_INTEL); pci_config_set_device_id(conf, 0x2668); pci_config_set_revision(conf, 1); pci_config_set_class(conf, PCI_CLASS_MULTIMEDIA_HD_AUDIO); pci_config_set_interrupt_pin(conf, 1); /* HDCTL off 0x40 bit 0 selects signaling mode (1-HDA, 0 - Ac97) 18.1.19 */ conf[0x40] = 0x01; d->mmio_addr = cpu_register_io_memory(intel_hda_mmio_read, intel_hda_mmio_write, d, DEVICE_NATIVE_ENDIAN); pci_register_bar_simple(&d->pci, 0, 0x4000, 0, d->mmio_addr); if (d->msi) { msi_init(&d->pci, 0x50, 1, true, false); } hda_codec_bus_init(&d->pci.qdev, &d->codecs, intel_hda_response, intel_hda_xfer); return 0; }"} {"target": 0, "idx": 9961, "func": "static BlockDriverAIOCB *raw_aio_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { RawAIOCB *acb; acb = raw_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque); if (!acb) return NULL; if (qemu_paio_write(&acb->aiocb) < 0) { raw_aio_remove(acb); return NULL; } return &acb->common; }"} {"target": 0, "idx": 9977, "func": "static inline void pred_direct_motion(H264Context * const h, int *mb_type){ MpegEncContext * const s = &h->s; const int mb_xy = s->mb_x + s->mb_y*s->mb_stride; const int b8_xy = 2*s->mb_x + 2*s->mb_y*h->b8_stride; const int b4_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride; const int mb_type_col = h->ref_list[1][0].mb_type[mb_xy]; const int16_t (*l1mv0)[2] = (const int16_t (*)[2]) &h->ref_list[1][0].motion_val[0][b4_xy]; const int8_t *l1ref0 = &h->ref_list[1][0].ref_index[0][b8_xy]; const int is_b8x8 = IS_8X8(*mb_type); int sub_mb_type; int i8, i4; if(IS_8X8(mb_type_col) && !h->sps.direct_8x8_inference_flag){ /* FIXME save sub mb types from previous frames (or derive from MVs) * so we know exactly what block size to use */ sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */ *mb_type = MB_TYPE_8x8; }else if(!is_b8x8 && (IS_16X16(mb_type_col) || IS_INTRA(mb_type_col))){ sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */ *mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */ }else{ sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */ *mb_type = MB_TYPE_8x8; } if(!is_b8x8) *mb_type |= MB_TYPE_DIRECT2; tprintf(\"mb_type = %08x, sub_mb_type = %08x, is_b8x8 = %d, mb_type_col = %08x\\n\", *mb_type, sub_mb_type, is_b8x8, mb_type_col); 
if(h->direct_spatial_mv_pred){ int ref[2]; int mv[2][2]; int list; /* ref = min(neighbors) */ for(list=0; list<2; list++){ int refa = h->ref_cache[list][scan8[0] - 1]; int refb = h->ref_cache[list][scan8[0] - 8]; int refc = h->ref_cache[list][scan8[0] - 8 + 4]; if(refc == -2) refc = h->ref_cache[list][scan8[0] - 8 - 1]; ref[list] = refa; if(ref[list] < 0 || (refb < ref[list] && refb >= 0)) ref[list] = refb; if(ref[list] < 0 || (refc < ref[list] && refc >= 0)) ref[list] = refc; if(ref[list] < 0) ref[list] = -1; } if(ref[0] < 0 && ref[1] < 0){ ref[0] = ref[1] = 0; mv[0][0] = mv[0][1] = mv[1][0] = mv[1][1] = 0; }else{ for(list=0; list<2; list++){ if(ref[list] >= 0) pred_motion(h, 0, 4, list, ref[list], &mv[list][0], &mv[list][1]); else mv[list][0] = mv[list][1] = 0; } } if(ref[1] < 0){ *mb_type &= ~MB_TYPE_P0L1; sub_mb_type &= ~MB_TYPE_P0L1; }else if(ref[0] < 0){ *mb_type &= ~MB_TYPE_P0L0; sub_mb_type &= ~MB_TYPE_P0L0; } if(IS_16X16(*mb_type)){ fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref[0], 1); fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, ref[1], 1); if(!IS_INTRA(mb_type_col) && l1ref0[0] == 0 && ABS(l1mv0[0][0]) <= 1 && ABS(l1mv0[0][1]) <= 1){ if(ref[0] > 0) fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4); else fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4); if(ref[1] > 0) fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv[1][0],mv[1][1]), 4); else fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4); }else{ fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4); fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv[1][0],mv[1][1]), 4); } }else{ for(i8=0; i8<4; i8++){ const int x8 = i8&1; const int y8 = i8>>1; if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8])) continue; h->sub_mb_type[i8] = sub_mb_type; fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mv[0][0],mv[0][1]), 4); fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mv[1][0],mv[1][1]), 4); fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref[0], 1); fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, ref[1], 1); /* col_zero_flag */ if(!IS_INTRA(mb_type_col) && l1ref0[x8 + y8*h->b8_stride] == 0){ for(i4=0; i4<4; i4++){ const int16_t *mv_col = l1mv0[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride]; if(ABS(mv_col[0]) <= 1 && ABS(mv_col[1]) <= 1){ if(ref[0] == 0) *(uint32_t*)h->mv_cache[0][scan8[i8*4+i4]] = 0; if(ref[1] == 0) *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] = 0; } } } } } }else{ /* direct temporal mv pred */ /* FIXME assumes that L1ref0 used the same ref lists as current frame */ if(IS_16X16(*mb_type)){ fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1); if(IS_INTRA(mb_type_col)){ fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1); fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, 0, 4); fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, 0, 4); }else{ const int ref0 = l1ref0[0]; const int dist_scale_factor = h->dist_scale_factor[ref0]; const int16_t *mv_col = l1mv0[0]; int mv_l0[2]; mv_l0[0] = (dist_scale_factor * mv_col[0] + 128) >> 8; mv_l0[1] = (dist_scale_factor * mv_col[1] + 128) >> 8; fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref0, 1); fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0],mv_l0[1]), 4); fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]), 4); } }else{ for(i8=0; i8<4; i8++){ const int x8 = i8&1; const int y8 = i8>>1; int ref0, dist_scale_factor; if(is_b8x8 
&& !IS_DIRECT(h->sub_mb_type[i8])) continue; h->sub_mb_type[i8] = sub_mb_type; if(IS_INTRA(mb_type_col)){ fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1); fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1); fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4); fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4); continue; } ref0 = l1ref0[x8 + y8*h->b8_stride]; dist_scale_factor = h->dist_scale_factor[ref0]; fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1); fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1); for(i4=0; i4<4; i4++){ const int16_t *mv_col = l1mv0[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride]; int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]]; mv_l0[0] = (dist_scale_factor * mv_col[0] + 128) >> 8; mv_l0[1] = (dist_scale_factor * mv_col[1] + 128) >> 8; *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] = pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]); } } } } }"} {"target": 0, "idx": 10000, "func": "static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot) { uint32_t ret; switch (ot) { case MO_8: ret = cpu_ldub_code(env, s->pc); s->pc++; break; case MO_16: ret = cpu_lduw_code(env, s->pc); s->pc += 2; break; case MO_32: #ifdef TARGET_X86_64 case MO_64: #endif ret = cpu_ldl_code(env, s->pc); s->pc += 4; break; default: tcg_abort(); } return ret; }"} {"target": 0, "idx": 10026, "func": "static void bdrv_throttle_write_timer_cb(void *opaque) { BlockDriverState *bs = opaque; qemu_co_enter_next(&bs->throttled_reqs[1]); }"} {"target": 0, "idx": 10049, "func": "static int find_pte32(CPUPPCState *env, mmu_ctx_t *ctx, int h, int rw, int type, int target_page_bits) { hwaddr pteg_off; target_ulong pte0, pte1; int i, good = -1; int ret, r; ret = -1; /* No entry found */ pteg_off = get_pteg_offset(env, ctx->hash[h], HASH_PTE_SIZE_32); for (i = 0; i < 8; i++) { if (env->external_htab) { pte0 = ldl_p(env->external_htab + pteg_off + (i * 8)); pte1 = ldl_p(env->external_htab + pteg_off + (i * 8) + 4); } else { pte0 = ldl_phys(env->htab_base + pteg_off + (i * 8)); pte1 = ldl_phys(env->htab_base + pteg_off + (i * 8) + 4); } r = pte_check_hash32(ctx, pte0, pte1, h, rw, type); LOG_MMU(\"Load pte from %08\" HWADDR_PRIx \" => \" TARGET_FMT_lx \" \" TARGET_FMT_lx \" %d %d %d \" TARGET_FMT_lx \"\\n\", pteg_off + (i * 8), pte0, pte1, (int)(pte0 >> 31), h, (int)((pte0 >> 6) & 1), ctx->ptem); switch (r) { case -3: /* PTE inconsistency */ return -1; case -2: /* Access violation */ ret = -2; good = i; break; case -1: default: /* No PTE match */ break; case 0: /* access granted */ /* XXX: we should go on looping to check all PTEs consistency * but if we can speed-up the whole thing as the * result would be undefined if PTEs are not consistent. 
*/ ret = 0; good = i; goto done; } } if (good != -1) { done: LOG_MMU(\"found PTE at addr %08\" HWADDR_PRIx \" prot=%01x ret=%d\\n\", ctx->raddr, ctx->prot, ret); /* Update page flags */ pte1 = ctx->raddr; if (pte_update_flags(ctx, &pte1, ret, rw) == 1) { if (env->external_htab) { stl_p(env->external_htab + pteg_off + (good * 8) + 4, pte1); } else { stl_phys_notdirty(env->htab_base + pteg_off + (good * 8) + 4, pte1); } } } /* We have a TLB that saves 4K pages, so let's * split a huge page to 4k chunks */ if (target_page_bits != TARGET_PAGE_BITS) { ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1)) & TARGET_PAGE_MASK; } return ret; }"} {"target": 0, "idx": 10055, "func": "static int kvm_put_xcrs(CPUState *env) { #ifdef KVM_CAP_XCRS struct kvm_xcrs xcrs; if (!kvm_has_xcrs()) return 0; xcrs.nr_xcrs = 1; xcrs.flags = 0; xcrs.xcrs[0].xcr = 0; xcrs.xcrs[0].value = env->xcr0; return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs); #else return 0; #endif }"} {"target": 0, "idx": 10064, "func": "static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond, TCGv cmp, int32_t disp) { uint64_t dest = ctx->pc + (disp << 2); int lab_true = gen_new_label(); if (use_goto_tb(ctx, dest)) { tcg_gen_brcondi_i64(cond, cmp, 0, lab_true); tcg_gen_goto_tb(0); tcg_gen_movi_i64(cpu_pc, ctx->pc); tcg_gen_exit_tb((uintptr_t)ctx->tb); gen_set_label(lab_true); tcg_gen_goto_tb(1); tcg_gen_movi_i64(cpu_pc, dest); tcg_gen_exit_tb((uintptr_t)ctx->tb + 1); return EXIT_GOTO_TB; } else { TCGv_i64 z = tcg_const_i64(0); TCGv_i64 d = tcg_const_i64(dest); TCGv_i64 p = tcg_const_i64(ctx->pc); tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p); tcg_temp_free_i64(z); tcg_temp_free_i64(d); tcg_temp_free_i64(p); return EXIT_PC_UPDATED; } }"} {"target": 0, "idx": 10081, "func": "static uint32_t gt64120_read_config(PCIDevice *d, uint32_t address, int len) { uint32_t val = pci_default_read_config(d, address, len); #ifdef TARGET_WORDS_BIGENDIAN val = bswap32(val); #endif return val; }"} {"target": 1, "idx": 10104, "func": "static void rtas_get_time_of_day(PowerPCCPU *cpu, sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { struct tm tm; if (nret != 8) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } qemu_get_timedate(&tm, spapr->rtc_offset); rtas_st(rets, 0, RTAS_OUT_SUCCESS); rtas_st(rets, 1, tm.tm_year + 1900); rtas_st(rets, 2, tm.tm_mon + 1); rtas_st(rets, 3, tm.tm_mday); rtas_st(rets, 4, tm.tm_hour); rtas_st(rets, 5, tm.tm_min); rtas_st(rets, 6, tm.tm_sec); rtas_st(rets, 7, 0); /* we don't do nanoseconds */ }"} {"target": 0, "idx": 10128, "func": "void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp) { BlockJob *job = find_block_job(device); if (!job) { error_set(errp, QERR_BLOCK_JOB_NOT_ACTIVE, device); return; } block_job_set_speed(job, speed, errp); }"} {"target": 0, "idx": 10161, "func": "static int create_stream(AVFormatContext *s) { XCBGrabContext *c = s->priv_data; AVStream *st = avformat_new_stream(s, NULL); xcb_get_geometry_cookie_t gc; xcb_get_geometry_reply_t *geo; int ret; if (!st) return AVERROR(ENOMEM); ret = av_parse_video_size(&c->width, &c->height, c->video_size); if (ret < 0) return ret; ret = av_parse_video_rate(&st->avg_frame_rate, c->framerate); if (ret < 0) return ret; avpriv_set_pts_info(st, 64, 1, 1000000); gc = xcb_get_geometry(c->conn, c->screen->root); geo = xcb_get_geometry_reply(c->conn, gc, NULL); c->width = FFMIN(geo->width, c->width); c->height = FFMIN(geo->height, c->height); c->time_base = (AVRational){ 
st->avg_frame_rate.den, st->avg_frame_rate.num }; c->time_frame = av_gettime(); st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = AV_CODEC_ID_RAWVIDEO; st->codec->width = c->width; st->codec->height = c->height; st->codec->time_base = c->time_base; ret = pixfmt_from_pixmap_format(s, geo->depth, &st->codec->pix_fmt); free(geo); return ret; }"} {"target": 0, "idx": 10175, "func": "static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc) { unsigned cb; uint8_t *tmp_vlc_bits; uint32_t *tmp_vlc_codes; GetBitContext *gb = &vc->gb; uint16_t *codebook_multiplicands; int ret = 0; vc->codebook_count = get_bits(gb, 8) + 1; av_dlog(NULL, \" Codebooks: %d \\n\", vc->codebook_count); vc->codebooks = av_mallocz(vc->codebook_count * sizeof(*vc->codebooks)); tmp_vlc_bits = av_mallocz(V_MAX_VLCS * sizeof(*tmp_vlc_bits)); tmp_vlc_codes = av_mallocz(V_MAX_VLCS * sizeof(*tmp_vlc_codes)); codebook_multiplicands = av_malloc(V_MAX_VLCS * sizeof(*codebook_multiplicands)); for (cb = 0; cb < vc->codebook_count; ++cb) { vorbis_codebook *codebook_setup = &vc->codebooks[cb]; unsigned ordered, t, entries, used_entries = 0; av_dlog(NULL, \" %u. Codebook\\n\", cb); if (get_bits(gb, 24) != 0x564342) { av_log(vc->avctx, AV_LOG_ERROR, \" %u. Codebook setup data corrupt.\\n\", cb); ret = AVERROR_INVALIDDATA; goto error; } codebook_setup->dimensions=get_bits(gb, 16); if (codebook_setup->dimensions > 16 || codebook_setup->dimensions == 0) { av_log(vc->avctx, AV_LOG_ERROR, \" %u. Codebook's dimension is invalid (%d).\\n\", cb, codebook_setup->dimensions); ret = AVERROR_INVALIDDATA; goto error; } entries = get_bits(gb, 24); if (entries > V_MAX_VLCS) { av_log(vc->avctx, AV_LOG_ERROR, \" %u. Codebook has too many entries (%u).\\n\", cb, entries); ret = AVERROR_INVALIDDATA; goto error; } ordered = get_bits1(gb); av_dlog(NULL, \" codebook_dimensions %d, codebook_entries %u\\n\", codebook_setup->dimensions, entries); if (!ordered) { unsigned ce, flag; unsigned sparse = get_bits1(gb); av_dlog(NULL, \" not ordered \\n\"); if (sparse) { av_dlog(NULL, \" sparse \\n\"); used_entries = 0; for (ce = 0; ce < entries; ++ce) { flag = get_bits1(gb); if (flag) { tmp_vlc_bits[ce] = get_bits(gb, 5) + 1; ++used_entries; } else tmp_vlc_bits[ce] = 0; } } else { av_dlog(NULL, \" not sparse \\n\"); used_entries = entries; for (ce = 0; ce < entries; ++ce) tmp_vlc_bits[ce] = get_bits(gb, 5) + 1; } } else { unsigned current_entry = 0; unsigned current_length = get_bits(gb, 5) + 1; av_dlog(NULL, \" ordered, current length: %u\\n\", current_length); //FIXME used_entries = entries; for (; current_entry < used_entries && current_length <= 32; ++current_length) { unsigned i, number; av_dlog(NULL, \" number bits: %u \", ilog(entries - current_entry)); number = get_bits(gb, ilog(entries - current_entry)); av_dlog(NULL, \" number: %u\\n\", number); for (i = current_entry; i < number+current_entry; ++i) if (i < used_entries) tmp_vlc_bits[i] = current_length; current_entry+=number; } if (current_entry>used_entries) { av_log(vc->avctx, AV_LOG_ERROR, \" More codelengths than codes in codebook. \\n\"); ret = AVERROR_INVALIDDATA; goto error; } } codebook_setup->lookup_type = get_bits(gb, 4); av_dlog(NULL, \" lookup type: %d : %s \\n\", codebook_setup->lookup_type, codebook_setup->lookup_type ? \"vq\" : \"no lookup\"); // If the codebook is used for (inverse) VQ, calculate codevectors. 
if (codebook_setup->lookup_type == 1) { unsigned i, j, k; unsigned codebook_lookup_values = ff_vorbis_nth_root(entries, codebook_setup->dimensions); float codebook_minimum_value = vorbisfloat2float(get_bits_long(gb, 32)); float codebook_delta_value = vorbisfloat2float(get_bits_long(gb, 32)); unsigned codebook_value_bits = get_bits(gb, 4) + 1; unsigned codebook_sequence_p = get_bits1(gb); av_dlog(NULL, \" We expect %d numbers for building the codevectors. \\n\", codebook_lookup_values); av_dlog(NULL, \" delta %f minmum %f \\n\", codebook_delta_value, codebook_minimum_value); for (i = 0; i < codebook_lookup_values; ++i) { codebook_multiplicands[i] = get_bits(gb, codebook_value_bits); av_dlog(NULL, \" multiplicands*delta+minmum : %e \\n\", (float)codebook_multiplicands[i] * codebook_delta_value + codebook_minimum_value); av_dlog(NULL, \" multiplicand %u\\n\", codebook_multiplicands[i]); } // Weed out unused vlcs and build codevector vector codebook_setup->codevectors = used_entries ? av_mallocz(used_entries * codebook_setup->dimensions * sizeof(*codebook_setup->codevectors)) : NULL; for (j = 0, i = 0; i < entries; ++i) { unsigned dim = codebook_setup->dimensions; if (tmp_vlc_bits[i]) { float last = 0.0; unsigned lookup_offset = i; av_dlog(vc->avctx, \"Lookup offset %u ,\", i); for (k = 0; k < dim; ++k) { unsigned multiplicand_offset = lookup_offset % codebook_lookup_values; codebook_setup->codevectors[j * dim + k] = codebook_multiplicands[multiplicand_offset] * codebook_delta_value + codebook_minimum_value + last; if (codebook_sequence_p) last = codebook_setup->codevectors[j * dim + k]; lookup_offset/=codebook_lookup_values; } tmp_vlc_bits[j] = tmp_vlc_bits[i]; av_dlog(vc->avctx, \"real lookup offset %u, vector: \", j); for (k = 0; k < dim; ++k) av_dlog(vc->avctx, \" %f \", codebook_setup->codevectors[j * dim + k]); av_dlog(vc->avctx, \"\\n\"); ++j; } } if (j != used_entries) { av_log(vc->avctx, AV_LOG_ERROR, \"Bug in codevector vector building code. \\n\"); ret = AVERROR_INVALIDDATA; goto error; } entries = used_entries; } else if (codebook_setup->lookup_type >= 2) { av_log(vc->avctx, AV_LOG_ERROR, \"Codebook lookup type not supported. \\n\"); ret = AVERROR_INVALIDDATA; goto error; } // Initialize VLC table if (ff_vorbis_len2vlc(tmp_vlc_bits, tmp_vlc_codes, entries)) { av_log(vc->avctx, AV_LOG_ERROR, \" Invalid code lengths while generating vlcs. \\n\"); ret = AVERROR_INVALIDDATA; goto error; } codebook_setup->maxdepth = 0; for (t = 0; t < entries; ++t) if (tmp_vlc_bits[t] >= codebook_setup->maxdepth) codebook_setup->maxdepth = tmp_vlc_bits[t]; if (codebook_setup->maxdepth > 3 * V_NB_BITS) codebook_setup->nb_bits = V_NB_BITS2; else codebook_setup->nb_bits = V_NB_BITS; codebook_setup->maxdepth = (codebook_setup->maxdepth+codebook_setup->nb_bits - 1) / codebook_setup->nb_bits; if ((ret = init_vlc(&codebook_setup->vlc, codebook_setup->nb_bits, entries, tmp_vlc_bits, sizeof(*tmp_vlc_bits), sizeof(*tmp_vlc_bits), tmp_vlc_codes, sizeof(*tmp_vlc_codes), sizeof(*tmp_vlc_codes), INIT_VLC_LE))) { av_log(vc->avctx, AV_LOG_ERROR, \" Error generating vlc tables. 
\\n\"); goto error; } } av_free(tmp_vlc_bits); av_free(tmp_vlc_codes); av_free(codebook_multiplicands); return 0; // Error: error: av_free(tmp_vlc_bits); av_free(tmp_vlc_codes); av_free(codebook_multiplicands); return ret; }"} {"target": 1, "idx": 10180, "func": "static int htab_save_later_pass(QEMUFile *f, sPAPREnvironment *spapr, int64_t max_ns) { bool final = max_ns < 0; int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; int examined = 0, sent = 0; int index = spapr->htab_save_index; int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); assert(!spapr->htab_first_pass); do { int chunkstart, invalidstart; /* Consume non-dirty HPTEs */ while ((index < htabslots) && !HPTE_DIRTY(HPTE(spapr->htab, index))) { index++; examined++; } chunkstart = index; /* Consume valid dirty HPTEs */ while ((index < htabslots) && HPTE_DIRTY(HPTE(spapr->htab, index)) && HPTE_VALID(HPTE(spapr->htab, index))) { CLEAN_HPTE(HPTE(spapr->htab, index)); index++; examined++; } invalidstart = index; /* Consume invalid dirty HPTEs */ while ((index < htabslots) && HPTE_DIRTY(HPTE(spapr->htab, index)) && !HPTE_VALID(HPTE(spapr->htab, index))) { CLEAN_HPTE(HPTE(spapr->htab, index)); index++; examined++; } if (index > chunkstart) { int n_valid = invalidstart - chunkstart; int n_invalid = index - invalidstart; qemu_put_be32(f, chunkstart); qemu_put_be16(f, n_valid); qemu_put_be16(f, n_invalid); qemu_put_buffer(f, HPTE(spapr->htab, chunkstart), HASH_PTE_SIZE_64 * n_valid); sent += index - chunkstart; if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) { break; } } if (examined >= htabslots) { break; } if (index >= htabslots) { assert(index == htabslots); index = 0; } } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final)); if (index >= htabslots) { assert(index == htabslots); index = 0; } spapr->htab_save_index = index; return (examined >= htabslots) && (sent == 0) ? 
1 : 0; }"} {"target": 0, "idx": 10190, "func": "static abi_long do_socket(int domain, int type, int protocol) { int target_type = type; int ret; ret = target_to_host_sock_type(&type); if (ret) { return ret; } if (domain == PF_NETLINK) return -TARGET_EAFNOSUPPORT; if (domain == AF_PACKET || (domain == AF_INET && type == SOCK_PACKET)) { protocol = tswap16(protocol); } ret = get_errno(socket(domain, type, protocol)); if (ret >= 0) { ret = sock_flags_fixup(ret, target_type); if (type == SOCK_PACKET) { /* Manage an obsolete case : * if socket type is SOCK_PACKET, bind by name */ fd_trans_register(ret, &target_packet_trans); } } return ret; }"} {"target": 0, "idx": 10192, "func": "int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len) { PowerPCCPU *cpu = ppc_env_get_cpu(env); CPUState *cs = CPU(cpu); uint32_t *hc = (uint32_t*)buf; struct kvm_ppc_pvinfo pvinfo; if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) && !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) { memcpy(buf, pvinfo.hcall, buf_len); return 0; } /* * Fallback to always fail hypercalls: * * li r3, -1 * nop * nop * nop */ hc[0] = 0x3860ffff; hc[1] = 0x60000000; hc[2] = 0x60000000; hc[3] = 0x60000000; return 0; }"} {"target": 0, "idx": 10194, "func": "void process_incoming_migration(QEMUFile *f) { if (qemu_loadvm_state(f) < 0) { fprintf(stderr, \"load of migration failed\\n\"); exit(0); } qemu_announce_self(); DPRINTF(\"successfully loaded vm state\\n\"); incoming_expected = false; if (autostart) { vm_start(); } else { runstate_set(RSTATE_PRE_LAUNCH); } }"} {"target": 0, "idx": 10213, "func": "static int rv40_decode_mb_info(RV34DecContext *r) { MpegEncContext *s = &r->s; GetBitContext *gb = &s->gb; int q, i; int prev_type = 0; int mb_pos = s->mb_x + s->mb_y * s->mb_stride; int blocks[RV34_MB_TYPES] = {0}; int count = 0; if(!r->s.mb_skip_run) r->s.mb_skip_run = svq3_get_ue_golomb(gb) + 1; if(--r->s.mb_skip_run) return RV34_MB_SKIP; if(r->avail_cache[6-1]) blocks[r->mb_type[mb_pos - 1]]++; if(r->avail_cache[6-4]){ blocks[r->mb_type[mb_pos - s->mb_stride]]++; if(r->avail_cache[6-2]) blocks[r->mb_type[mb_pos - s->mb_stride + 1]]++; if(r->avail_cache[6-5]) blocks[r->mb_type[mb_pos - s->mb_stride - 1]]++; } for(i = 0; i < RV34_MB_TYPES; i++){ if(blocks[i] > count){ count = blocks[i]; prev_type = i; } } if(s->pict_type == AV_PICTURE_TYPE_P){ prev_type = block_num_to_ptype_vlc_num[prev_type]; q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1); if(q < PBTYPE_ESCAPE) return q; q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1); av_log(s->avctx, AV_LOG_ERROR, \"Dquant for P-frame\\n\"); }else{ prev_type = block_num_to_btype_vlc_num[prev_type]; q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1); if(q < PBTYPE_ESCAPE) return q; q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1); av_log(s->avctx, AV_LOG_ERROR, \"Dquant for B-frame\\n\"); } return 0; }"} {"target": 0, "idx": 10233, "func": "static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, uint8_t *dst, long width, long height, long srcStride1, long srcStride2, long srcStride3, long dstStride) { x86_reg x; long y,w,h; w=width/2; h=height; for (y=0;y>2); const uint8_t* vp=src3+srcStride3*(y>>2); uint8_t* d=dst+dstStride*y; x=0; #if COMPILE_TEMPLATE_MMX for (;xdata; int buf_size = avpkt->size; PCMDecode *s = avctx->priv_data; int sample_size, c, n, i; uint8_t *samples; const uint8_t *src, *src8, *src2[MAX_CHANNELS]; int32_t *dst_int32_t; samples = data; src = 
buf; if (avctx->sample_fmt!=avctx->codec->sample_fmts[0]) { av_log(avctx, AV_LOG_ERROR, \"invalid sample_fmt\\n\"); return -1; } if(avctx->channels <= 0 || avctx->channels > MAX_CHANNELS){ av_log(avctx, AV_LOG_ERROR, \"PCM channels out of bounds\\n\"); return -1; } sample_size = av_get_bits_per_sample(avctx->codec_id)/8; /* av_get_bits_per_sample returns 0 for CODEC_ID_PCM_DVD */ if (CODEC_ID_PCM_DVD == avctx->codec_id) /* 2 samples are interleaved per block in PCM_DVD */ sample_size = avctx->bits_per_coded_sample * 2 / 8; else if (avctx->codec_id == CODEC_ID_PCM_LXF) /* we process 40-bit blocks per channel for LXF */ sample_size = 5; if (sample_size == 0) { av_log(avctx, AV_LOG_ERROR, \"Invalid sample_size\\n\"); return AVERROR(EINVAL); } n = avctx->channels * sample_size; if(n && buf_size % n){ if (buf_size < n) { av_log(avctx, AV_LOG_ERROR, \"invalid PCM packet\\n\"); return -1; }else buf_size -= buf_size % n; } buf_size= FFMIN(buf_size, *data_size/2); n = buf_size/sample_size; switch(avctx->codec->id) { case CODEC_ID_PCM_U32LE: DECODE(32, le32, src, samples, n, 0, 0x80000000) break; case CODEC_ID_PCM_U32BE: DECODE(32, be32, src, samples, n, 0, 0x80000000) break; case CODEC_ID_PCM_S24LE: DECODE(32, le24, src, samples, n, 8, 0) break; case CODEC_ID_PCM_S24BE: DECODE(32, be24, src, samples, n, 8, 0) break; case CODEC_ID_PCM_U24LE: DECODE(32, le24, src, samples, n, 8, 0x800000) break; case CODEC_ID_PCM_U24BE: DECODE(32, be24, src, samples, n, 8, 0x800000) break; case CODEC_ID_PCM_S24DAUD: for(;n>0;n--) { uint32_t v = bytestream_get_be24(&src); v >>= 4; // sync flags are here AV_WN16A(samples, av_reverse[(v >> 8) & 0xff] + (av_reverse[v & 0xff] << 8)); samples += 2; } break; case CODEC_ID_PCM_S16LE_PLANAR: n /= avctx->channels; for(c=0;cchannels;c++) src2[c] = &src[c*n*2]; for(;n>0;n--) for(c=0;cchannels;c++) { AV_WN16A(samples, bytestream_get_le16(&src2[c])); samples += 2; } src = src2[avctx->channels-1]; break; case CODEC_ID_PCM_U16LE: DECODE(16, le16, src, samples, n, 0, 0x8000) break; case CODEC_ID_PCM_U16BE: DECODE(16, be16, src, samples, n, 0, 0x8000) break; case CODEC_ID_PCM_S8: for(;n>0;n--) { *samples++ = *src++ + 128; } break; #if HAVE_BIGENDIAN case CODEC_ID_PCM_F64LE: DECODE(64, le64, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_F32LE: DECODE(32, le32, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S16LE: DECODE(16, le16, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F64BE: case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_S32BE: case CODEC_ID_PCM_S16BE: #else case CODEC_ID_PCM_F64BE: DECODE(64, be64, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_S32BE: DECODE(32, be32, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S16BE: DECODE(16, be16, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F64LE: case CODEC_ID_PCM_F32LE: case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_S16LE: #endif /* HAVE_BIGENDIAN */ case CODEC_ID_PCM_U8: memcpy(samples, src, n*sample_size); src += n*sample_size; samples += n * sample_size; break; case CODEC_ID_PCM_ZORK: for(;n>0;n--) { int x= *src++; if(x&128) x-= 128; else x = -x; AV_WN16A(samples, x << 8); samples += 2; } break; case CODEC_ID_PCM_ALAW: case CODEC_ID_PCM_MULAW: for(;n>0;n--) { AV_WN16A(samples, s->table[*src++]); samples += 2; } break; case CODEC_ID_PCM_DVD: dst_int32_t = data; n /= avctx->channels; switch (avctx->bits_per_coded_sample) { case 20: while (n--) { c = avctx->channels; src8 = src + 4*c; while (c--) { *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8 &0xf0) << 8); 
*dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++ &0x0f) << 12); } src = src8; } break; case 24: while (n--) { c = avctx->channels; src8 = src + 4*c; while (c--) { *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++) << 8); *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++) << 8); } src = src8; } break; default: av_log(avctx, AV_LOG_ERROR, \"PCM DVD unsupported sample depth\\n\"); return -1; } samples = (uint8_t *) dst_int32_t; break; case CODEC_ID_PCM_LXF: dst_int32_t = data; n /= avctx->channels; //unpack and de-planerize for (i = 0; i < n; i++) { for (c = 0, src8 = src + i*5; c < avctx->channels; c++, src8 += n*5) { //extract low 20 bits and expand to 32 bits *dst_int32_t++ = (src8[2] << 28) | (src8[1] << 20) | (src8[0] << 12) | ((src8[2] & 0xF) << 8) | src8[1]; } for (c = 0, src8 = src + i*5; c < avctx->channels; c++, src8 += n*5) { //extract high 20 bits and expand to 32 bits *dst_int32_t++ = (src8[4] << 24) | (src8[3] << 16) | ((src8[2] & 0xF0) << 8) | (src8[4] << 4) | (src8[3] >> 4); } } src += n * avctx->channels * 5; samples = (uint8_t *) dst_int32_t; break; default: return -1; } *data_size = samples - (uint8_t *)data; return src - buf; }"} {"target": 0, "idx": 10253, "func": "static int parse_presentation_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int64_t pts) { PGSSubContext *ctx = avctx->priv_data; int x, y, ret; int w = bytestream_get_be16(&buf); int h = bytestream_get_be16(&buf); ctx->presentation.pts = pts; av_dlog(avctx, \"Video Dimensions %dx%d\\n\", w, h); ret = ff_set_dimensions(avctx, w, h); if (ret < 0) return ret; /* Skip 1 bytes of unknown, frame rate? */ buf++; ctx->presentation.id_number = bytestream_get_be16(&buf); /* * Skip 3 bytes of unknown: * state * palette_update_flag (0x80), * palette_id_to_use, */ buf += 3; ctx->presentation.object_number = bytestream_get_byte(&buf); ctx->presentation.composition_flag = 0; if (!ctx->presentation.object_number) return 0; /* * Skip 3 bytes of unknown: * object_id_ref (2 bytes), * window_id_ref, */ buf += 3; ctx->presentation.composition_flag = bytestream_get_byte(&buf); x = bytestream_get_be16(&buf); y = bytestream_get_be16(&buf); /* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/ av_dlog(avctx, \"Subtitle Placement x=%d, y=%d\\n\", x, y); if (x > avctx->width || y > avctx->height) { av_log(avctx, AV_LOG_ERROR, \"Subtitle out of video bounds. 
x = %d, y = %d, video width = %d, video height = %d.\\n\", x, y, avctx->width, avctx->height); x = 0; y = 0; } /* Fill in dimensions */ ctx->presentation.x = x; ctx->presentation.y = y; return 0; }"} {"target": 0, "idx": 10266, "func": "void qemu_coroutine_delete(Coroutine *co_) { CoroutineThreadState *s = coroutine_get_thread_state(); CoroutineUContext *co = DO_UPCAST(CoroutineUContext, base, co_); if (s->pool_size < POOL_MAX_SIZE) { QLIST_INSERT_HEAD(&s->pool, &co->base, pool_next); co->base.caller = NULL; s->pool_size++; return; } g_free(co->stack); g_free(co); }"} {"target": 0, "idx": 10268, "func": "int float64_eq_signaling( float64 a, float64 b STATUS_PARAM ) { if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { float_raise( float_flag_invalid STATUS_VAR); return 0; } return ( a == b ) || ( (bits64) ( ( a | b )<<1 ) == 0 ); }"} {"target": 0, "idx": 10269, "func": "static int tpm_passthrough_unix_tx_bufs(int tpm_fd, const uint8_t *in, uint32_t in_len, uint8_t *out, uint32_t out_len) { int ret; ret = tpm_passthrough_unix_write(tpm_fd, in, in_len); if (ret != in_len) { error_report(\"tpm_passthrough: error while transmitting data \" \"to TPM: %s (%i)\\n\", strerror(errno), errno); goto err_exit; } ret = tpm_passthrough_unix_read(tpm_fd, out, out_len); if (ret < 0) { error_report(\"tpm_passthrough: error while reading data from \" \"TPM: %s (%i)\\n\", strerror(errno), errno); } else if (ret < sizeof(struct tpm_resp_hdr) || tpm_passthrough_get_size_from_buffer(out) != ret) { ret = -1; error_report(\"tpm_passthrough: received invalid response \" \"packet from TPM\\n\"); } err_exit: if (ret < 0) { tpm_write_fatal_error_response(out, out_len); } return ret; }"} {"target": 0, "idx": 10278, "func": "int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, uint8_t *buf, int len, int is_write) { int l; target_phys_addr_t phys_addr; target_ulong page; while (len > 0) { page = addr & TARGET_PAGE_MASK; phys_addr = cpu_get_phys_page_debug(env, page); /* if no physical page mapped, return an error */ if (phys_addr == -1) return -1; l = (page + TARGET_PAGE_SIZE) - addr; if (l > len) l = len; phys_addr += (addr & ~TARGET_PAGE_MASK); if (is_write) cpu_physical_memory_write_rom(phys_addr, buf, l); else cpu_physical_memory_rw(phys_addr, buf, l, is_write); len -= l; buf += l; addr += l; } return 0; }"} {"target": 0, "idx": 10282, "func": "struct omap_uwire_s *omap_uwire_init(MemoryRegion *system_memory, target_phys_addr_t base, qemu_irq *irq, qemu_irq dma, omap_clk clk) { struct omap_uwire_s *s = (struct omap_uwire_s *) g_malloc0(sizeof(struct omap_uwire_s)); s->txirq = irq[0]; s->rxirq = irq[1]; s->txdrq = dma; omap_uwire_reset(s); memory_region_init_io(&s->iomem, &omap_uwire_ops, s, \"omap-uwire\", 0x800); memory_region_add_subregion(system_memory, base, &s->iomem); return s; }"} {"target": 0, "idx": 10291, "func": "static int send_sub_rect(VncState *vs, int x, int y, int w, int h) { VncPalette *palette = &color_count_palette; uint32_t bg = 0, fg = 0; int colors; int ret = 0; #ifdef CONFIG_VNC_JPEG bool force_jpeg = false; bool allow_jpeg = true; #endif vnc_framebuffer_update(vs, x, y, w, h, vs->tight.type); vnc_tight_start(vs); vnc_raw_send_framebuffer_update(vs, x, y, w, h); vnc_tight_stop(vs); #ifdef CONFIG_VNC_JPEG if (!vs->vd->non_adaptive && vs->tight.quality != (uint8_t)-1) { double freq = vnc_update_freq(vs, x, y, w, h); if (freq < tight_jpeg_conf[vs->tight.quality].jpeg_freq_min) { allow_jpeg = 
false; } if (freq >= tight_jpeg_conf[vs->tight.quality].jpeg_freq_threshold) { force_jpeg = true; vnc_sent_lossy_rect(vs, x, y, w, h); } } #endif colors = tight_fill_palette(vs, x, y, w * h, &bg, &fg, palette); #ifdef CONFIG_VNC_JPEG if (allow_jpeg && vs->tight.quality != (uint8_t)-1) { ret = send_sub_rect_jpeg(vs, x, y, w, h, bg, fg, colors, palette, force_jpeg); } else { ret = send_sub_rect_nojpeg(vs, x, y, w, h, bg, fg, colors, palette); } #else ret = send_sub_rect_nojpeg(vs, x, y, w, h, bg, fg, colors, palette); #endif return ret; }"} {"target": 0, "idx": 10295, "func": "static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) { uint32_t palcode; int32_t disp21, disp16; #ifndef CONFIG_USER_ONLY int32_t disp12; #endif uint16_t fn11; uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit; uint8_t lit; ExitStatus ret; /* Decode all instruction fields */ opc = insn >> 26; ra = (insn >> 21) & 0x1F; rb = (insn >> 16) & 0x1F; rc = insn & 0x1F; real_islit = islit = (insn >> 12) & 1; if (rb == 31 && !islit) { islit = 1; lit = 0; } else lit = (insn >> 13) & 0xFF; palcode = insn & 0x03FFFFFF; disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11; disp16 = (int16_t)(insn & 0x0000FFFF); #ifndef CONFIG_USER_ONLY disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20; #endif fn11 = (insn >> 5) & 0x000007FF; fpfn = fn11 & 0x3F; fn7 = (insn >> 5) & 0x0000007F; LOG_DISAS(\"opc %02x ra %2d rb %2d rc %2d disp16 %6d\\n\", opc, ra, rb, rc, disp16); ret = NO_EXIT; switch (opc) { case 0x00: /* CALL_PAL */ ret = gen_call_pal(ctx, palcode); break; case 0x01: /* OPC01 */ goto invalid_opc; case 0x02: /* OPC02 */ goto invalid_opc; case 0x03: /* OPC03 */ goto invalid_opc; case 0x04: /* OPC04 */ goto invalid_opc; case 0x05: /* OPC05 */ goto invalid_opc; case 0x06: /* OPC06 */ goto invalid_opc; case 0x07: /* OPC07 */ goto invalid_opc; case 0x08: /* LDA */ if (likely(ra != 31)) { if (rb != 31) { tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16); } else { tcg_gen_movi_i64(cpu_ir[ra], disp16); } } break; case 0x09: /* LDAH */ if (likely(ra != 31)) { if (rb != 31) { tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16); } else { tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16); } } break; case 0x0A: /* LDBU */ if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) { gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); break; } goto invalid_opc; case 0x0B: /* LDQ_U */ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1); break; case 0x0C: /* LDWU */ if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) { gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); break; } goto invalid_opc; case 0x0D: /* STW */ gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0); break; case 0x0E: /* STB */ gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0); break; case 0x0F: /* STQ_U */ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1); break; case 0x10: switch (fn7) { case 0x00: /* ADDL */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit); tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } else { tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], lit); } else { tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x02: /* S4ADDL */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); if (islit) { tcg_gen_addi_i64(tmp, tmp, lit); } else { tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]); } tcg_gen_ext32s_i64(cpu_ir[rc], tmp); 
tcg_temp_free(tmp); } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], lit); } else { tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x09: /* SUBL */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit); } else { tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], -lit); } else { tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } } } break; case 0x0B: /* S4SUBL */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); if (islit) { tcg_gen_subi_i64(tmp, tmp, lit); } else { tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]); } tcg_gen_ext32s_i64(cpu_ir[rc], tmp); tcg_temp_free(tmp); } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], -lit); } else { tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } } } break; case 0x0F: /* CMPBGE */ gen_cmpbge(ra, rb, rc, islit, lit); break; case 0x12: /* S8ADDL */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); if (islit) { tcg_gen_addi_i64(tmp, tmp, lit); } else { tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]); } tcg_gen_ext32s_i64(cpu_ir[rc], tmp); tcg_temp_free(tmp); } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], lit); } else { tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x1B: /* S8SUBL */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); if (islit) { tcg_gen_subi_i64(tmp, tmp, lit); } else { tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]); } tcg_gen_ext32s_i64(cpu_ir[rc], tmp); tcg_temp_free(tmp); } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], -lit); } else { tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } } } break; case 0x1D: /* CMPULT */ gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit); break; case 0x20: /* ADDQ */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit); } else { tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], lit); } else { tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x22: /* S4ADDQ */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); if (islit) { tcg_gen_addi_i64(cpu_ir[rc], tmp, lit); } else { tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]); } tcg_temp_free(tmp); } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], lit); } else { tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x29: /* SUBQ */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit); } else { tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], -lit); } else { tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x2B: /* S4SUBQ */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); if (islit) { tcg_gen_subi_i64(cpu_ir[rc], tmp, lit); } else { tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]); } tcg_temp_free(tmp); } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], -lit); } else { tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x2D: /* CMPEQ */ gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit); break; case 0x32: /* S8ADDQ */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 
3); if (islit) { tcg_gen_addi_i64(cpu_ir[rc], tmp, lit); } else { tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]); } tcg_temp_free(tmp); } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], lit); } else { tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x3B: /* S8SUBQ */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); if (islit) { tcg_gen_subi_i64(cpu_ir[rc], tmp, lit); } else { tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]); } tcg_temp_free(tmp); } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], -lit); } else { tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x3D: /* CMPULE */ gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit); break; case 0x40: /* ADDL/V */ gen_addlv(ra, rb, rc, islit, lit); break; case 0x49: /* SUBL/V */ gen_sublv(ra, rb, rc, islit, lit); break; case 0x4D: /* CMPLT */ gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit); break; case 0x60: /* ADDQ/V */ gen_addqv(ra, rb, rc, islit, lit); break; case 0x69: /* SUBQ/V */ gen_subqv(ra, rb, rc, islit, lit); break; case 0x6D: /* CMPLE */ gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit); break; default: goto invalid_opc; } break; case 0x11: switch (fn7) { case 0x00: /* AND */ if (likely(rc != 31)) { if (ra == 31) { tcg_gen_movi_i64(cpu_ir[rc], 0); } else if (islit) { tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit); } else { tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } } break; case 0x08: /* BIC */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit); } else { tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } } else tcg_gen_movi_i64(cpu_ir[rc], 0); } break; case 0x14: /* CMOVLBS */ gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1); break; case 0x16: /* CMOVLBC */ gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1); break; case 0x20: /* BIS */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit); } else { tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], lit); } else { tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x24: /* CMOVEQ */ gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0); break; case 0x26: /* CMOVNE */ gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0); break; case 0x28: /* ORNOT */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit); } else { tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], ~lit); } else { tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x40: /* XOR */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit); } else { tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], lit); } else { tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x44: /* CMOVLT */ gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0); break; case 0x46: /* CMOVGE */ gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0); break; case 0x48: /* EQV */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit); } else { tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } } else { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], ~lit); } else { tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]); } } } break; case 0x61: /* AMASK */ if (likely(rc != 31)) { uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT; if (islit) { tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask); } else { 
tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask); } } break; case 0x64: /* CMOVLE */ gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0); break; case 0x66: /* CMOVGT */ gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0); break; case 0x6C: /* IMPLVER */ if (rc != 31) { tcg_gen_movi_i64(cpu_ir[rc], ctx->implver); } break; default: goto invalid_opc; } break; case 0x12: switch (fn7) { case 0x02: /* MSKBL */ gen_msk_l(ra, rb, rc, islit, lit, 0x01); break; case 0x06: /* EXTBL */ gen_ext_l(ra, rb, rc, islit, lit, 0x01); break; case 0x0B: /* INSBL */ gen_ins_l(ra, rb, rc, islit, lit, 0x01); break; case 0x12: /* MSKWL */ gen_msk_l(ra, rb, rc, islit, lit, 0x03); break; case 0x16: /* EXTWL */ gen_ext_l(ra, rb, rc, islit, lit, 0x03); break; case 0x1B: /* INSWL */ gen_ins_l(ra, rb, rc, islit, lit, 0x03); break; case 0x22: /* MSKLL */ gen_msk_l(ra, rb, rc, islit, lit, 0x0f); break; case 0x26: /* EXTLL */ gen_ext_l(ra, rb, rc, islit, lit, 0x0f); break; case 0x2B: /* INSLL */ gen_ins_l(ra, rb, rc, islit, lit, 0x0f); break; case 0x30: /* ZAP */ gen_zap(ra, rb, rc, islit, lit); break; case 0x31: /* ZAPNOT */ gen_zapnot(ra, rb, rc, islit, lit); break; case 0x32: /* MSKQL */ gen_msk_l(ra, rb, rc, islit, lit, 0xff); break; case 0x34: /* SRL */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f); } else { TCGv shift = tcg_temp_new(); tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f); tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift); tcg_temp_free(shift); } } else tcg_gen_movi_i64(cpu_ir[rc], 0); } break; case 0x36: /* EXTQL */ gen_ext_l(ra, rb, rc, islit, lit, 0xff); break; case 0x39: /* SLL */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f); } else { TCGv shift = tcg_temp_new(); tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f); tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift); tcg_temp_free(shift); } } else tcg_gen_movi_i64(cpu_ir[rc], 0); } break; case 0x3B: /* INSQL */ gen_ins_l(ra, rb, rc, islit, lit, 0xff); break; case 0x3C: /* SRA */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f); } else { TCGv shift = tcg_temp_new(); tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f); tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift); tcg_temp_free(shift); } } else tcg_gen_movi_i64(cpu_ir[rc], 0); } break; case 0x52: /* MSKWH */ gen_msk_h(ra, rb, rc, islit, lit, 0x03); break; case 0x57: /* INSWH */ gen_ins_h(ra, rb, rc, islit, lit, 0x03); break; case 0x5A: /* EXTWH */ gen_ext_h(ra, rb, rc, islit, lit, 0x03); break; case 0x62: /* MSKLH */ gen_msk_h(ra, rb, rc, islit, lit, 0x0f); break; case 0x67: /* INSLH */ gen_ins_h(ra, rb, rc, islit, lit, 0x0f); break; case 0x6A: /* EXTLH */ gen_ext_h(ra, rb, rc, islit, lit, 0x0f); break; case 0x72: /* MSKQH */ gen_msk_h(ra, rb, rc, islit, lit, 0xff); break; case 0x77: /* INSQH */ gen_ins_h(ra, rb, rc, islit, lit, 0xff); break; case 0x7A: /* EXTQH */ gen_ext_h(ra, rb, rc, islit, lit, 0xff); break; default: goto invalid_opc; } break; case 0x13: switch (fn7) { case 0x00: /* MULL */ if (likely(rc != 31)) { if (ra == 31) { tcg_gen_movi_i64(cpu_ir[rc], 0); } else { if (islit) { tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit); } else { tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } } break; case 0x20: /* MULQ */ if (likely(rc != 31)) { if (ra == 31) { tcg_gen_movi_i64(cpu_ir[rc], 0); } else if (islit) { tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit); } else { tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], 
cpu_ir[rb]); } } break; case 0x30: /* UMULH */ { TCGv low; if (unlikely(rc == 31)){ break; } if (ra == 31) { tcg_gen_movi_i64(cpu_ir[rc], 0); break; } low = tcg_temp_new(); if (islit) { tcg_gen_movi_tl(low, lit); tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low); } else { tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } tcg_temp_free(low); } break; case 0x40: /* MULL/V */ gen_mullv(ra, rb, rc, islit, lit); break; case 0x60: /* MULQ/V */ gen_mulqv(ra, rb, rc, islit, lit); break; default: goto invalid_opc; } break; case 0x14: switch (fpfn) { /* fn11 & 0x3F */ case 0x04: /* ITOFS */ if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { goto invalid_opc; } if (likely(rc != 31)) { if (ra != 31) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]); gen_helper_memory_to_s(cpu_fir[rc], tmp); tcg_temp_free_i32(tmp); } else tcg_gen_movi_i64(cpu_fir[rc], 0); } break; case 0x0A: /* SQRTF */ if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) { gen_fsqrtf(rb, rc); break; } goto invalid_opc; case 0x0B: /* SQRTS */ if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) { gen_fsqrts(ctx, rb, rc, fn11); break; } goto invalid_opc; case 0x14: /* ITOFF */ if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { goto invalid_opc; } if (likely(rc != 31)) { if (ra != 31) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]); gen_helper_memory_to_f(cpu_fir[rc], tmp); tcg_temp_free_i32(tmp); } else tcg_gen_movi_i64(cpu_fir[rc], 0); } break; case 0x24: /* ITOFT */ if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { goto invalid_opc; } if (likely(rc != 31)) { if (ra != 31) { tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]); } else { tcg_gen_movi_i64(cpu_fir[rc], 0); } } break; case 0x2A: /* SQRTG */ if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) { gen_fsqrtg(rb, rc); break; } goto invalid_opc; case 0x02B: /* SQRTT */ if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) { gen_fsqrtt(ctx, rb, rc, fn11); break; } goto invalid_opc; default: goto invalid_opc; } break; case 0x15: /* VAX floating point */ /* XXX: rounding mode and trap are ignored (!) 
*/ switch (fpfn) { /* fn11 & 0x3F */ case 0x00: /* ADDF */ gen_faddf(ra, rb, rc); break; case 0x01: /* SUBF */ gen_fsubf(ra, rb, rc); break; case 0x02: /* MULF */ gen_fmulf(ra, rb, rc); break; case 0x03: /* DIVF */ gen_fdivf(ra, rb, rc); break; case 0x1E: /* CVTDG */ #if 0 // TODO gen_fcvtdg(rb, rc); #else goto invalid_opc; #endif break; case 0x20: /* ADDG */ gen_faddg(ra, rb, rc); break; case 0x21: /* SUBG */ gen_fsubg(ra, rb, rc); break; case 0x22: /* MULG */ gen_fmulg(ra, rb, rc); break; case 0x23: /* DIVG */ gen_fdivg(ra, rb, rc); break; case 0x25: /* CMPGEQ */ gen_fcmpgeq(ra, rb, rc); break; case 0x26: /* CMPGLT */ gen_fcmpglt(ra, rb, rc); break; case 0x27: /* CMPGLE */ gen_fcmpgle(ra, rb, rc); break; case 0x2C: /* CVTGF */ gen_fcvtgf(rb, rc); break; case 0x2D: /* CVTGD */ #if 0 // TODO gen_fcvtgd(rb, rc); #else goto invalid_opc; #endif break; case 0x2F: /* CVTGQ */ gen_fcvtgq(rb, rc); break; case 0x3C: /* CVTQF */ gen_fcvtqf(rb, rc); break; case 0x3E: /* CVTQG */ gen_fcvtqg(rb, rc); break; default: goto invalid_opc; } break; case 0x16: /* IEEE floating-point */ switch (fpfn) { /* fn11 & 0x3F */ case 0x00: /* ADDS */ gen_fadds(ctx, ra, rb, rc, fn11); break; case 0x01: /* SUBS */ gen_fsubs(ctx, ra, rb, rc, fn11); break; case 0x02: /* MULS */ gen_fmuls(ctx, ra, rb, rc, fn11); break; case 0x03: /* DIVS */ gen_fdivs(ctx, ra, rb, rc, fn11); break; case 0x20: /* ADDT */ gen_faddt(ctx, ra, rb, rc, fn11); break; case 0x21: /* SUBT */ gen_fsubt(ctx, ra, rb, rc, fn11); break; case 0x22: /* MULT */ gen_fmult(ctx, ra, rb, rc, fn11); break; case 0x23: /* DIVT */ gen_fdivt(ctx, ra, rb, rc, fn11); break; case 0x24: /* CMPTUN */ gen_fcmptun(ctx, ra, rb, rc, fn11); break; case 0x25: /* CMPTEQ */ gen_fcmpteq(ctx, ra, rb, rc, fn11); break; case 0x26: /* CMPTLT */ gen_fcmptlt(ctx, ra, rb, rc, fn11); break; case 0x27: /* CMPTLE */ gen_fcmptle(ctx, ra, rb, rc, fn11); break; case 0x2C: if (fn11 == 0x2AC || fn11 == 0x6AC) { /* CVTST */ gen_fcvtst(ctx, rb, rc, fn11); } else { /* CVTTS */ gen_fcvtts(ctx, rb, rc, fn11); } break; case 0x2F: /* CVTTQ */ gen_fcvttq(ctx, rb, rc, fn11); break; case 0x3C: /* CVTQS */ gen_fcvtqs(ctx, rb, rc, fn11); break; case 0x3E: /* CVTQT */ gen_fcvtqt(ctx, rb, rc, fn11); break; default: goto invalid_opc; } break; case 0x17: switch (fn11) { case 0x010: /* CVTLQ */ gen_fcvtlq(rb, rc); break; case 0x020: if (likely(rc != 31)) { if (ra == rb) { /* FMOV */ if (ra == 31) { tcg_gen_movi_i64(cpu_fir[rc], 0); } else { tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]); } } else { /* CPYS */ gen_fcpys(ra, rb, rc); } } break; case 0x021: /* CPYSN */ gen_fcpysn(ra, rb, rc); break; case 0x022: /* CPYSE */ gen_fcpyse(ra, rb, rc); break; case 0x024: /* MT_FPCR */ if (likely(ra != 31)) { gen_helper_store_fpcr(cpu_env, cpu_fir[ra]); } else { TCGv tmp = tcg_const_i64(0); gen_helper_store_fpcr(cpu_env, tmp); tcg_temp_free(tmp); } break; case 0x025: /* MF_FPCR */ if (likely(ra != 31)) { gen_helper_load_fpcr(cpu_fir[ra], cpu_env); } break; case 0x02A: /* FCMOVEQ */ gen_fcmov(TCG_COND_EQ, ra, rb, rc); break; case 0x02B: /* FCMOVNE */ gen_fcmov(TCG_COND_NE, ra, rb, rc); break; case 0x02C: /* FCMOVLT */ gen_fcmov(TCG_COND_LT, ra, rb, rc); break; case 0x02D: /* FCMOVGE */ gen_fcmov(TCG_COND_GE, ra, rb, rc); break; case 0x02E: /* FCMOVLE */ gen_fcmov(TCG_COND_LE, ra, rb, rc); break; case 0x02F: /* FCMOVGT */ gen_fcmov(TCG_COND_GT, ra, rb, rc); break; case 0x030: /* CVTQL */ gen_fcvtql(rb, rc); break; case 0x130: /* CVTQL/V */ case 0x530: /* CVTQL/SV */ /* ??? 
I'm pretty sure there's nothing that /sv needs to do that /v doesn't do. The only thing I can think is that /sv is a valid instruction merely for completeness in the ISA. */ gen_fcvtql_v(ctx, rb, rc); break; default: goto invalid_opc; } break; case 0x18: switch ((uint16_t)disp16) { case 0x0000: /* TRAPB */ /* No-op. */ break; case 0x0400: /* EXCB */ /* No-op. */ break; case 0x4000: /* MB */ /* No-op */ break; case 0x4400: /* WMB */ /* No-op */ break; case 0x8000: /* FETCH */ /* No-op */ break; case 0xA000: /* FETCH_M */ /* No-op */ break; case 0xC000: /* RPCC */ if (ra != 31) { if (use_icount) { gen_io_start(); gen_helper_load_pcc(cpu_ir[ra], cpu_env); gen_io_end(); ret = EXIT_PC_STALE; } else { gen_helper_load_pcc(cpu_ir[ra], cpu_env); } } break; case 0xE000: /* RC */ gen_rx(ra, 0); break; case 0xE800: /* ECB */ break; case 0xF000: /* RS */ gen_rx(ra, 1); break; case 0xF800: /* WH64 */ /* No-op */ break; default: goto invalid_opc; } break; case 0x19: /* HW_MFPR (PALcode) */ #ifndef CONFIG_USER_ONLY if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { return gen_mfpr(ra, insn & 0xffff); } #endif goto invalid_opc; case 0x1A: /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch prediction stack action, which of course we don't implement. */ if (rb != 31) { tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3); } else { tcg_gen_movi_i64(cpu_pc, 0); } if (ra != 31) { tcg_gen_movi_i64(cpu_ir[ra], ctx->pc); } ret = EXIT_PC_UPDATED; break; case 0x1B: /* HW_LD (PALcode) */ #ifndef CONFIG_USER_ONLY if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { TCGv addr; if (ra == 31) { break; } addr = tcg_temp_new(); if (rb != 31) { tcg_gen_addi_i64(addr, cpu_ir[rb], disp12); } else { tcg_gen_movi_i64(addr, disp12); } switch ((insn >> 12) & 0xF) { case 0x0: /* Longword physical access (hw_ldl/p) */ gen_helper_ldl_phys(cpu_ir[ra], cpu_env, addr); break; case 0x1: /* Quadword physical access (hw_ldq/p) */ gen_helper_ldq_phys(cpu_ir[ra], cpu_env, addr); break; case 0x2: /* Longword physical access with lock (hw_ldl_l/p) */ gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr); break; case 0x3: /* Quadword physical access with lock (hw_ldq_l/p) */ gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr); break; case 0x4: /* Longword virtual PTE fetch (hw_ldl/v) */ goto invalid_opc; case 0x5: /* Quadword virtual PTE fetch (hw_ldq/v) */ goto invalid_opc; break; case 0x6: /* Incpu_ir[ra]id */ goto invalid_opc; case 0x7: /* Incpu_ir[ra]id */ goto invalid_opc; case 0x8: /* Longword virtual access (hw_ldl) */ goto invalid_opc; case 0x9: /* Quadword virtual access (hw_ldq) */ goto invalid_opc; case 0xA: /* Longword virtual access with protection check (hw_ldl/w) */ tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LESL); break; case 0xB: /* Quadword virtual access with protection check (hw_ldq/w) */ tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LEQ); break; case 0xC: /* Longword virtual access with alt access mode (hw_ldl/a)*/ goto invalid_opc; case 0xD: /* Quadword virtual access with alt access mode (hw_ldq/a) */ goto invalid_opc; case 0xE: /* Longword virtual access with alternate access mode and protection checks (hw_ldl/wa) */ tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LESL); break; case 0xF: /* Quadword virtual access with alternate access mode and protection checks (hw_ldq/wa) */ tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LEQ); break; } tcg_temp_free(addr); break; } #endif goto invalid_opc; case 0x1C: switch (fn7) { case 0x00: /* SEXTB */ if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) { goto invalid_opc; 
} if (likely(rc != 31)) { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit)); } else { tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x01: /* SEXTW */ if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) { if (likely(rc != 31)) { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit)); } else { tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]); } } break; } goto invalid_opc; case 0x30: /* CTPOP */ if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) { if (likely(rc != 31)) { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit)); } else { gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]); } } break; } goto invalid_opc; case 0x31: /* PERR */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { gen_perr(ra, rb, rc, islit, lit); break; } goto invalid_opc; case 0x32: /* CTLZ */ if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) { if (likely(rc != 31)) { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], clz64(lit)); } else { gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]); } } break; } goto invalid_opc; case 0x33: /* CTTZ */ if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) { if (likely(rc != 31)) { if (islit) { tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit)); } else { gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]); } } break; } goto invalid_opc; case 0x34: /* UNPKBW */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { if (real_islit || ra != 31) { goto invalid_opc; } gen_unpkbw(rb, rc); break; } goto invalid_opc; case 0x35: /* UNPKBL */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { if (real_islit || ra != 31) { goto invalid_opc; } gen_unpkbl(rb, rc); break; } goto invalid_opc; case 0x36: /* PKWB */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { if (real_islit || ra != 31) { goto invalid_opc; } gen_pkwb(rb, rc); break; } goto invalid_opc; case 0x37: /* PKLB */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { if (real_islit || ra != 31) { goto invalid_opc; } gen_pklb(rb, rc); break; } goto invalid_opc; case 0x38: /* MINSB8 */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { gen_minsb8(ra, rb, rc, islit, lit); break; } goto invalid_opc; case 0x39: /* MINSW4 */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { gen_minsw4(ra, rb, rc, islit, lit); break; } goto invalid_opc; case 0x3A: /* MINUB8 */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { gen_minub8(ra, rb, rc, islit, lit); break; } goto invalid_opc; case 0x3B: /* MINUW4 */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { gen_minuw4(ra, rb, rc, islit, lit); break; } goto invalid_opc; case 0x3C: /* MAXUB8 */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { gen_maxub8(ra, rb, rc, islit, lit); break; } goto invalid_opc; case 0x3D: /* MAXUW4 */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { gen_maxuw4(ra, rb, rc, islit, lit); break; } goto invalid_opc; case 0x3E: /* MAXSB8 */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { gen_maxsb8(ra, rb, rc, islit, lit); break; } goto invalid_opc; case 0x3F: /* MAXSW4 */ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { gen_maxsw4(ra, rb, rc, islit, lit); break; } goto invalid_opc; case 0x70: /* FTOIT */ if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { goto invalid_opc; } if (likely(rc != 31)) { if (ra != 31) { tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]); } else { tcg_gen_movi_i64(cpu_ir[rc], 0); } } break; case 0x78: /* FTOIS */ if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { goto invalid_opc; } if (rc != 31) { TCGv_i32 tmp1 = tcg_temp_new_i32(); if (ra != 31) { gen_helper_s_to_memory(tmp1, cpu_fir[ra]); } else { TCGv tmp2 = tcg_const_i64(0); gen_helper_s_to_memory(tmp1, tmp2); tcg_temp_free(tmp2); } tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1); tcg_temp_free_i32(tmp1); } break; default: goto invalid_opc; } break; case 0x1D: /* HW_MTPR 
(PALcode) */ #ifndef CONFIG_USER_ONLY if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { return gen_mtpr(ctx, rb, insn & 0xffff); } #endif goto invalid_opc; case 0x1E: /* HW_RET (PALcode) */ #ifndef CONFIG_USER_ONLY if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { if (rb == 31) { /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return address from EXC_ADDR. This turns out to be useful for our emulation PALcode, so continue to accept it. */ TCGv tmp = tcg_temp_new(); tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr)); gen_helper_hw_ret(cpu_env, tmp); tcg_temp_free(tmp); } else { gen_helper_hw_ret(cpu_env, cpu_ir[rb]); } ret = EXIT_PC_UPDATED; break; } #endif goto invalid_opc; case 0x1F: /* HW_ST (PALcode) */ #ifndef CONFIG_USER_ONLY if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { TCGv addr, val; addr = tcg_temp_new(); if (rb != 31) { tcg_gen_addi_i64(addr, cpu_ir[rb], disp12); } else { tcg_gen_movi_i64(addr, disp12); } if (ra != 31) { val = cpu_ir[ra]; } else { val = tcg_temp_new(); tcg_gen_movi_i64(val, 0); } switch ((insn >> 12) & 0xF) { case 0x0: /* Longword physical access */ gen_helper_stl_phys(cpu_env, addr, val); break; case 0x1: /* Quadword physical access */ gen_helper_stq_phys(cpu_env, addr, val); break; case 0x2: /* Longword physical access with lock */ gen_helper_stl_c_phys(val, cpu_env, addr, val); break; case 0x3: /* Quadword physical access with lock */ gen_helper_stq_c_phys(val, cpu_env, addr, val); break; case 0x4: /* Longword virtual access */ goto invalid_opc; case 0x5: /* Quadword virtual access */ goto invalid_opc; case 0x6: /* Invalid */ goto invalid_opc; case 0x7: /* Invalid */ goto invalid_opc; case 0x8: /* Invalid */ goto invalid_opc; case 0x9: /* Invalid */ goto invalid_opc; case 0xA: /* Invalid */ goto invalid_opc; case 0xB: /* Invalid */ goto invalid_opc; case 0xC: /* Longword virtual access with alternate access mode */ goto invalid_opc; case 0xD: /* Quadword virtual access with alternate access mode */ goto invalid_opc; case 0xE: /* Invalid */ goto invalid_opc; case 0xF: /* Invalid */ goto invalid_opc; } if (ra == 31) { tcg_temp_free(val); } tcg_temp_free(addr); break; } #endif goto invalid_opc; case 0x20: /* LDF */ gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0); break; case 0x21: /* LDG */ gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0); break; case 0x22: /* LDS */ gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0); break; case 0x23: /* LDT */ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0); break; case 0x24: /* STF */ gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0); break; case 0x25: /* STG */ gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0); break; case 0x26: /* STS */ gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0); break; case 0x27: /* STT */ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0); break; case 0x28: /* LDL */ gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0); break; case 0x29: /* LDQ */ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0); break; case 0x2A: /* LDL_L */ gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0); break; case 0x2B: /* LDQ_L */ gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0); break; case 0x2C: /* STL */ gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0); break; case 0x2D: /* STQ */ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0); break; case 0x2E: /* STL_C */ ret = gen_store_conditional(ctx, ra, rb, disp16, 0); break; case 0x2F: /* STQ_C */ ret = gen_store_conditional(ctx, ra, rb, disp16, 1); break; case 
0x30: /* BR */ ret = gen_bdirect(ctx, ra, disp21); break; case 0x31: /* FBEQ */ ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21); break; case 0x32: /* FBLT */ ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21); break; case 0x33: /* FBLE */ ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21); break; case 0x34: /* BSR */ ret = gen_bdirect(ctx, ra, disp21); break; case 0x35: /* FBNE */ ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21); break; case 0x36: /* FBGE */ ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21); break; case 0x37: /* FBGT */ ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21); break; case 0x38: /* BLBC */ ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1); break; case 0x39: /* BEQ */ ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0); break; case 0x3A: /* BLT */ ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0); break; case 0x3B: /* BLE */ ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0); break; case 0x3C: /* BLBS */ ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1); break; case 0x3D: /* BNE */ ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0); break; case 0x3E: /* BGE */ ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0); break; case 0x3F: /* BGT */ ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0); break; invalid_opc: ret = gen_invalid(ctx); break; } return ret; }"} {"target": 0, "idx": 10308, "func": "inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, const uint8_t *src1, const uint8_t *src2, int srcW, int xInc, int flags, const int16_t *hChrFilter, const int16_t *hChrFilterPos, int hChrFilterSize, int srcFormat, uint8_t *formatConvBuffer, uint32_t *pal) { int32_t av_unused *mmx2FilterPos = c->chrMmx2FilterPos; int16_t av_unused *mmx2Filter = c->chrMmx2Filter; int av_unused canMMX2BeUsed = c->canMMX2BeUsed; void av_unused *mmx2FilterCode= c->chrMmx2FilterCode; if (isGray(srcFormat) || srcFormat==PIX_FMT_MONOBLACK || srcFormat==PIX_FMT_MONOWHITE) return; if (srcFormat==PIX_FMT_RGB32_1 || srcFormat==PIX_FMT_BGR32_1) { src1 += ALT32_CORR; src2 += ALT32_CORR; } if (srcFormat==PIX_FMT_RGB48LE) { src1++; src2++; } if (c->hcscale_internal) { c->hcscale_internal(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); src1= formatConvBuffer; src2= formatConvBuffer+VOFW; } #if COMPILE_TEMPLATE_MMX // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one). 
if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed)) #else if (!(flags&SWS_FAST_BILINEAR)) #endif { c->hScale(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); c->hScale(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); } else { // fast bilinear upscale / crap downscale #if ARCH_X86 && CONFIG_GPL #if COMPILE_TEMPLATE_MMX2 int i; #if defined(PIC) DECLARE_ALIGNED(8, uint64_t, ebxsave); #endif if (canMMX2BeUsed) { __asm__ volatile( #if defined(PIC) \"mov %%\"REG_b\", %6 \\n\\t\" #endif \"pxor %%mm7, %%mm7 \\n\\t\" \"mov %0, %%\"REG_c\" \\n\\t\" \"mov %1, %%\"REG_D\" \\n\\t\" \"mov %2, %%\"REG_d\" \\n\\t\" \"mov %3, %%\"REG_b\" \\n\\t\" \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\" // i PREFETCH\" (%%\"REG_c\") \\n\\t\" PREFETCH\" 32(%%\"REG_c\") \\n\\t\" PREFETCH\" 64(%%\"REG_c\") \\n\\t\" CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\" // i \"mov %5, %%\"REG_c\" \\n\\t\" // src \"mov %1, %%\"REG_D\" \\n\\t\" // buf1 \"add $\"AV_STRINGIFY(VOF)\", %%\"REG_D\" \\n\\t\" PREFETCH\" (%%\"REG_c\") \\n\\t\" PREFETCH\" 32(%%\"REG_c\") \\n\\t\" PREFETCH\" 64(%%\"REG_c\") \\n\\t\" CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE #if defined(PIC) \"mov %6, %%\"REG_b\" \\n\\t\" #endif :: \"m\" (src1), \"m\" (dst), \"m\" (mmx2Filter), \"m\" (mmx2FilterPos), \"m\" (mmx2FilterCode), \"m\" (src2) #if defined(PIC) ,\"m\" (ebxsave) #endif : \"%\"REG_a, \"%\"REG_c, \"%\"REG_d, \"%\"REG_S, \"%\"REG_D #if !defined(PIC) ,\"%\"REG_b #endif ); for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) { //printf(\"%d %d %d\\n\", dstWidth, i, srcW); dst[i] = src1[srcW-1]*128; dst[i+VOFW] = src2[srcW-1]*128; } } else { #endif /* COMPILE_TEMPLATE_MMX2 */ x86_reg xInc_shr16 = (x86_reg) (xInc >> 16); uint16_t xInc_mask = xInc & 0xffff; __asm__ volatile( \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\" // i \"xor %%\"REG_d\", %%\"REG_d\" \\n\\t\" // xx \"xorl %%ecx, %%ecx \\n\\t\" // xalpha ASMALIGN(4) \"1: \\n\\t\" \"mov %0, %%\"REG_S\" \\n\\t\" \"movzbl (%%\"REG_S\", %%\"REG_d\"), %%edi \\n\\t\" //src[xx] \"movzbl 1(%%\"REG_S\", %%\"REG_d\"), %%esi \\n\\t\" //src[xx+1] FAST_BILINEAR_X86 \"movw %%si, (%%\"REG_D\", %%\"REG_a\", 2) \\n\\t\" \"movzbl (%5, %%\"REG_d\"), %%edi \\n\\t\" //src[xx] \"movzbl 1(%5, %%\"REG_d\"), %%esi \\n\\t\" //src[xx+1] FAST_BILINEAR_X86 \"movw %%si, \"AV_STRINGIFY(VOF)\"(%%\"REG_D\", %%\"REG_a\", 2) \\n\\t\" \"addw %4, %%cx \\n\\t\" //xalpha += xInc&0xFFFF \"adc %3, %%\"REG_d\" \\n\\t\" //xx+= xInc>>16 + carry \"add $1, %%\"REG_a\" \\n\\t\" \"cmp %2, %%\"REG_a\" \\n\\t\" \" jb 1b \\n\\t\" /* GCC 3.3 makes MPlayer crash on IA-32 machines when using \"g\" operand here, which is needed to support GCC 4.0. 
*/ #if ARCH_X86_64 && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) :: \"m\" (src1), \"m\" (dst), \"g\" (dstWidth), \"m\" (xInc_shr16), \"m\" (xInc_mask), #else :: \"m\" (src1), \"m\" (dst), \"m\" (dstWidth), \"m\" (xInc_shr16), \"m\" (xInc_mask), #endif \"r\" (src2) : \"%\"REG_a, \"%\"REG_d, \"%ecx\", \"%\"REG_D, \"%esi\" ); #if COMPILE_TEMPLATE_MMX2 } //if MMX2 can't be used #endif #else c->hcscale_fast(c, dst, dstWidth, src1, src2, srcW, xInc); #endif /* ARCH_X86 */ } if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))) { int i; //FIXME all pal and rgb srcFormats could do this convertion as well //FIXME all scalers more complex than bilinear could do half of this transform if(c->srcRange) { for (i=0; i<dstWidth; i++) { dst[i]= (dst[i]*1799 + 4081085)>>11; //1469 dst[i+VOFW]= (dst[i+VOFW]*1799 + 4081085)>>11; //1469 } } else { for (i=0; i<dstWidth; i++) { dst[i]= (FFMIN(dst[i],30775)*4663 - 9289992)>>12; //-264 dst[i+VOFW]= (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264 } } } }"} {"target": 0, "idx": 10316, "func": "float32 HELPER(ucf64_abss)(float32 a) { return float32_abs(a); }"} {"target": 0, "idx": 10322, "func": "int qemu_thread_equal(QemuThread *thread1, QemuThread *thread2) { return pthread_equal(thread1->thread, thread2->thread); }"} {"target": 0, "idx": 10327, "func": "static void ivshmem_io_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { IVShmemState *s = opaque; uint64_t write_one = 1; uint16_t dest = val >> 16; uint16_t vector = val & 0xff; addr &= 0xfc; IVSHMEM_DPRINTF(\"writing to addr \" TARGET_FMT_plx \"\\n\", addr); switch (addr) { case INTRMASK: ivshmem_IntrMask_write(s, val); break; case INTRSTATUS: ivshmem_IntrStatus_write(s, val); break; case DOORBELL: /* check that dest VM ID is reasonable */ if ((dest < 0) || (dest > s->max_peer)) { IVSHMEM_DPRINTF(\"Invalid destination VM ID (%d)\\n\", dest); break; } /* check doorbell range */ if ((vector >= 0) && (vector < s->peers[dest].nb_eventfds)) { IVSHMEM_DPRINTF(\"Writing %\" PRId64 \" to VM %d on vector %d\\n\", write_one, dest, vector); if (write(s->peers[dest].eventfds[vector], &(write_one), 8) != 8) { IVSHMEM_DPRINTF(\"error writing to eventfd\\n\"); } } break; default: IVSHMEM_DPRINTF(\"Invalid VM Doorbell VM %d\\n\", dest); } }"} {"target": 0, "idx": 10333, "func": "static inline void gen_outs(DisasContext *s, TCGMemOp ot) { if (use_icount) gen_io_start(); gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, cpu_T[0], cpu_A0); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]); tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); gen_op_movl_T0_Dshift(ot); gen_op_add_reg_T0(s->aflag, R_ESI); if (use_icount) gen_io_end(); }"} {"target": 0, "idx": 10338, "func": "void test_fcmp(double a, double b) { long eflags, fpus; fpu_clear_exceptions(); asm(\"fcom %2\\n\" \"fstsw %%ax\\n\" : \"=a\" (fpus) : \"t\" (a), \"u\" (b)); printf(\"fcom(%f %f)=%04lx \\n\", a, b, fpus & (0x4500 | FPUS_EMASK)); fpu_clear_exceptions(); asm(\"fucom %2\\n\" \"fstsw %%ax\\n\" : \"=a\" (fpus) : \"t\" (a), \"u\" (b)); printf(\"fucom(%f %f)=%04lx\\n\", a, b, fpus & (0x4500 | FPUS_EMASK)); if (TEST_FCOMI) { /* test f(u)comi instruction */ fpu_clear_exceptions(); asm(\"fcomi %3, %2\\n\" \"fstsw %%ax\\n\" \"pushf\\n\" \"pop %0\\n\" : \"=r\" (eflags), \"=a\" (fpus) : \"t\" (a), \"u\" (b)); printf(\"fcomi(%f %f)=%04lx %02lx\\n\", a, b, fpus & FPUS_EMASK, eflags & (CC_Z | CC_P | CC_C)); fpu_clear_exceptions(); asm(\"fucomi %3, %2\\n\" \"fstsw %%ax\\n\" \"pushf\\n\" \"pop %0\\n\" : \"=r\" (eflags), \"=a\" (fpus) : 
\"t\" (a), \"u\" (b)); printf(\"fucomi(%f %f)=%04lx %02lx\\n\", a, b, fpus & FPUS_EMASK, eflags & (CC_Z | CC_P | CC_C)); } fpu_clear_exceptions(); asm volatile(\"fxam\\n\" \"fstsw %%ax\\n\" : \"=a\" (fpus) : \"t\" (a)); printf(\"fxam(%f)=%04lx\\n\", a, fpus & 0x4700); fpu_clear_exceptions(); }"} {"target": 1, "idx": 10351, "func": "void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, hwaddr paddr, MemTxAttrs attrs, int prot, int mmu_idx, target_ulong size) { CPUArchState *env = cpu->env_ptr; MemoryRegionSection *section; unsigned int index; target_ulong address; target_ulong code_address; uintptr_t addend; CPUTLBEntry *te; hwaddr iotlb, xlat, sz; unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE; int asidx = cpu_asidx_from_attrs(cpu, attrs); assert_cpu_is_self(cpu); assert(size >= TARGET_PAGE_SIZE); if (size != TARGET_PAGE_SIZE) { tlb_add_large_page(env, vaddr, size); } sz = size; section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz); assert(sz >= TARGET_PAGE_SIZE); tlb_debug(\"vaddr=\" TARGET_FMT_lx \" paddr=0x\" TARGET_FMT_plx \" prot=%x idx=%d\\n\", vaddr, paddr, prot, mmu_idx); address = vaddr; if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) { /* IO memory case */ address |= TLB_MMIO; addend = 0; } else { /* TLB_MMIO for rom/romd handled below */ addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; } code_address = address; iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat, prot, &address); index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); te = &env->tlb_table[mmu_idx][index]; /* do not discard the translation in te, evict it into a victim tlb */ env->tlb_v_table[mmu_idx][vidx] = *te; env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; /* refill the tlb */ env->iotlb[mmu_idx][index].addr = iotlb - vaddr; env->iotlb[mmu_idx][index].attrs = attrs; te->addend = addend - vaddr; if (prot & PAGE_READ) { te->addr_read = address; } else { te->addr_read = -1; } if (prot & PAGE_EXEC) { te->addr_code = code_address; } else { te->addr_code = -1; } if (prot & PAGE_WRITE) { if ((memory_region_is_ram(section->mr) && section->readonly) || memory_region_is_romd(section->mr)) { /* Write access calls the I/O callback. */ te->addr_write = address | TLB_MMIO; } else if (memory_region_is_ram(section->mr) && cpu_physical_memory_is_clean( memory_region_get_ram_addr(section->mr) + xlat)) { te->addr_write = address | TLB_NOTDIRTY; } else { te->addr_write = address; } } else { te->addr_write = -1; } }"} {"target": 1, "idx": 10355, "func": "static int qcrypto_cipher_init_des_rfb(QCryptoCipher *cipher, const uint8_t *key, size_t nkey, Error **errp) { QCryptoCipherBuiltin *ctxt; if (cipher->mode != QCRYPTO_CIPHER_MODE_ECB) { error_setg(errp, \"Unsupported cipher mode %d\", cipher->mode); return -1; } ctxt = g_new0(QCryptoCipherBuiltin, 1); ctxt->state.desrfb.key = g_new0(uint8_t, nkey); memcpy(ctxt->state.desrfb.key, key, nkey); ctxt->state.desrfb.nkey = nkey; ctxt->free = qcrypto_cipher_free_des_rfb; ctxt->setiv = qcrypto_cipher_setiv_des_rfb; ctxt->encrypt = qcrypto_cipher_encrypt_des_rfb; ctxt->decrypt = qcrypto_cipher_decrypt_des_rfb; cipher->opaque = ctxt; return 0; }"} {"target": 0, "idx": 10364, "func": "static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len, bool is_legacy) { int ret; VqInfoBlock info; VqInfoBlockLegacy linfo; size_t info_len = is_legacy ? 
sizeof(linfo) : sizeof(info); if (check_len) { if (ccw.count != info_len) { return -EINVAL; } } else if (ccw.count < info_len) { /* Can't execute command. */ return -EINVAL; } if (!ccw.cda) { return -EFAULT; } if (is_legacy) { linfo.queue = address_space_ldq_be(&address_space_memory, ccw.cda, MEMTXATTRS_UNSPECIFIED, NULL); linfo.align = address_space_ldl_be(&address_space_memory, ccw.cda + sizeof(linfo.queue), MEMTXATTRS_UNSPECIFIED, NULL); linfo.index = address_space_lduw_be(&address_space_memory, ccw.cda + sizeof(linfo.queue) + sizeof(linfo.align), MEMTXATTRS_UNSPECIFIED, NULL); linfo.num = address_space_lduw_be(&address_space_memory, ccw.cda + sizeof(linfo.queue) + sizeof(linfo.align) + sizeof(linfo.index), MEMTXATTRS_UNSPECIFIED, NULL); ret = virtio_ccw_set_vqs(sch, NULL, &linfo); } else { info.desc = address_space_ldq_be(&address_space_memory, ccw.cda, MEMTXATTRS_UNSPECIFIED, NULL); info.index = address_space_lduw_be(&address_space_memory, ccw.cda + sizeof(info.desc) + sizeof(info.res0), MEMTXATTRS_UNSPECIFIED, NULL); info.num = address_space_lduw_be(&address_space_memory, ccw.cda + sizeof(info.desc) + sizeof(info.res0) + sizeof(info.index), MEMTXATTRS_UNSPECIFIED, NULL); info.avail = address_space_ldq_be(&address_space_memory, ccw.cda + sizeof(info.desc) + sizeof(info.res0) + sizeof(info.index) + sizeof(info.num), MEMTXATTRS_UNSPECIFIED, NULL); info.used = address_space_ldq_be(&address_space_memory, ccw.cda + sizeof(info.desc) + sizeof(info.res0) + sizeof(info.index) + sizeof(info.num) + sizeof(info.avail), MEMTXATTRS_UNSPECIFIED, NULL); ret = virtio_ccw_set_vqs(sch, &info, NULL); } sch->curr_status.scsw.count = 0; return ret; }"} {"target": 0, "idx": 10370, "func": "static void vnc_client_cache_addr(VncState *client) { Error *err = NULL; client->info = g_malloc0(sizeof(*client->info)); client->info->base = g_malloc0(sizeof(*client->info->base)); vnc_init_basic_info_from_remote_addr(client->csock, client->info->base, &err); if (err) { qapi_free_VncClientInfo(client->info); client->info = NULL; error_free(err); } }"} {"target": 0, "idx": 10374, "func": "int kvm_irqchip_add_irqfd(KVMState *s, int fd, int virq) { return kvm_irqchip_assign_irqfd(s, fd, virq, true); }"} {"target": 0, "idx": 10385, "func": "static gboolean udp_chr_read(GIOChannel *chan, GIOCondition cond, void *opaque) { CharDriverState *chr = opaque; NetCharDriver *s = chr->opaque; gsize bytes_read = 0; GIOStatus status; if (s->max_size == 0) return FALSE; status = g_io_channel_read_chars(s->chan, (gchar *)s->buf, sizeof(s->buf), &bytes_read, NULL); s->bufcnt = bytes_read; s->bufptr = s->bufcnt; if (status != G_IO_STATUS_NORMAL) { return FALSE; } s->bufptr = 0; while (s->max_size > 0 && s->bufptr < s->bufcnt) { qemu_chr_be_write(chr, &s->buf[s->bufptr], 1); s->bufptr++; s->max_size = qemu_chr_be_can_write(chr); } return TRUE; }"} {"target": 1, "idx": 10406, "func": "PCIBus *pci_prep_init(qemu_irq *pic) { PREPPCIState *s; PCIDevice *d; int PPC_io_memory; s = qemu_mallocz(sizeof(PREPPCIState)); s->bus = pci_register_bus(prep_set_irq, prep_map_irq, pic, 0, 2); register_ioport_write(0xcf8, 4, 4, pci_prep_addr_writel, s); register_ioport_read(0xcf8, 4, 4, pci_prep_addr_readl, s); register_ioport_write(0xcfc, 4, 1, pci_host_data_writeb, s); register_ioport_write(0xcfc, 4, 2, pci_host_data_writew, s); register_ioport_write(0xcfc, 4, 4, pci_host_data_writel, s); register_ioport_read(0xcfc, 4, 1, pci_host_data_readb, s); register_ioport_read(0xcfc, 4, 2, pci_host_data_readw, s); register_ioport_read(0xcfc, 4, 4, 
pci_host_data_readl, s); PPC_io_memory = cpu_register_io_memory(0, PPC_PCIIO_read, PPC_PCIIO_write, s); cpu_register_physical_memory(0x80800000, 0x00400000, PPC_io_memory); /* PCI host bridge */ d = pci_register_device(s->bus, \"PREP Host Bridge - Motorola Raven\", sizeof(PCIDevice), 0, NULL, NULL); d->config[0x00] = 0x57; // vendor_id : Motorola d->config[0x01] = 0x10; d->config[0x02] = 0x01; // device_id : Raven d->config[0x03] = 0x48; d->config[0x08] = 0x00; // revision d->config[0x0A] = 0x00; // class_sub = pci host d->config[0x0B] = 0x06; // class_base = PCI_bridge d->config[0x0C] = 0x08; // cache_line_size d->config[0x0D] = 0x10; // latency_timer d->config[0x0E] = 0x00; // header_type d->config[0x34] = 0x00; // capabilities_pointer return s->bus; }"} {"target": 0, "idx": 10410, "func": "static int raw_init_encoder(AVCodecContext *avctx) { avctx->coded_frame = (AVFrame *)avctx->priv_data; avctx->coded_frame->pict_type = FF_I_TYPE; avctx->coded_frame->key_frame = 1; avctx->codec_tag = findFourCC(avctx->pix_fmt); return 0; }"} {"target": 1, "idx": 10414, "func": "static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb) { v->res_rtm_flag = 1; v->level = get_bits(gb, 3); if(v->level >= 5) { av_log(v->s.avctx, AV_LOG_ERROR, \"Reserved LEVEL %i\\n\",v->level); } v->chromaformat = get_bits(gb, 2); if (v->chromaformat != 1) { av_log(v->s.avctx, AV_LOG_ERROR, \"Only 4:2:0 chroma format supported\\n\"); return -1; } // (fps-2)/4 (->30) v->frmrtq_postproc = get_bits(gb, 3); //common // (bitrate-32kbps)/64kbps v->bitrtq_postproc = get_bits(gb, 5); //common v->postprocflag = get_bits(gb, 1); //common v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1; v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1; v->broadcast = get_bits1(gb); v->interlace = get_bits1(gb); if(v->interlace){ av_log(v->s.avctx, AV_LOG_ERROR, \"Interlaced mode not supported (yet)\\n\"); return -1; } v->tfcntrflag = get_bits1(gb); v->finterpflag = get_bits1(gb); get_bits1(gb); // reserved v->psf = get_bits1(gb); if(v->psf) { //PsF, 6.1.13 av_log(v->s.avctx, AV_LOG_ERROR, \"Progressive Segmented Frame mode: not supported (yet)\\n\"); return -1; } if(get_bits1(gb)) { //Display Info - decoding is not affected by it int w, h, ar = 0; av_log(v->s.avctx, AV_LOG_INFO, \"Display extended info:\\n\"); w = get_bits(gb, 14); h = get_bits(gb, 14); av_log(v->s.avctx, AV_LOG_INFO, \"Display dimensions: %ix%i\\n\", w, h); //TODO: store aspect ratio in AVCodecContext if(get_bits1(gb)) ar = get_bits(gb, 4); if(ar == 15) { w = get_bits(gb, 8); h = get_bits(gb, 8); } if(get_bits1(gb)){ //framerate stuff if(get_bits1(gb)) { get_bits(gb, 16); } else { get_bits(gb, 8); get_bits(gb, 4); } } if(get_bits1(gb)){ v->color_prim = get_bits(gb, 8); v->transfer_char = get_bits(gb, 8); v->matrix_coef = get_bits(gb, 8); } } v->hrd_param_flag = get_bits1(gb); if(v->hrd_param_flag) { int i; v->hrd_num_leaky_buckets = get_bits(gb, 5); get_bits(gb, 4); //bitrate exponent get_bits(gb, 4); //buffer size exponent for(i = 0; i < v->hrd_num_leaky_buckets; i++) { get_bits(gb, 16); //hrd_rate[n] get_bits(gb, 16); //hrd_buffer[n] } } return 0; }"} {"target": 1, "idx": 10416, "func": "int page_unprotect(target_ulong address, uintptr_t pc, void *puc) { unsigned int prot; PageDesc *p; target_ulong host_start, host_end, addr; /* Technically this isn't safe inside a signal handler. However we know this only ever happens in a synchronous SEGV handler, so in practice it seems to be ok. 
*/ mmap_lock(); p = page_find(address >> TARGET_PAGE_BITS); if (!p) { mmap_unlock(); return 0; } /* if the page was really writable, then we change its protection back to writable */ if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) { host_start = address & qemu_host_page_mask; host_end = host_start + qemu_host_page_size; prot = 0; for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) { p = page_find(addr >> TARGET_PAGE_BITS); p->flags |= PAGE_WRITE; prot |= p->flags; /* and since the content will be modified, we must invalidate the corresponding translated code. */ tb_invalidate_phys_page(addr, pc, puc); #ifdef DEBUG_TB_CHECK tb_invalidate_check(addr); #endif } mprotect((void *)g2h(host_start), qemu_host_page_size, prot & PAGE_BITS); mmap_unlock(); return 1; } mmap_unlock(); return 0; }"} {"target": 0, "idx": 10429, "func": "void ff_put_h264_qpel8_mc23_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_midv_qrt_8w_msa(src - (2 * stride) - 2, stride, dst, stride, 8, 1); }"} {"target": 0, "idx": 10449, "func": "static int film_probe(AVProbeData *p) { if (p->buf_size < 4) return 0; if (AV_RB32(&p->buf[0]) != FILM_TAG) return 0; return AVPROBE_SCORE_MAX; }"} {"target": 0, "idx": 10478, "func": "static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val, unsigned int size, int mem_index) { int l1 = gen_new_label(); TCGv taddr = tcg_temp_local_new(); TCGv tval = tcg_temp_local_new(); TCGv t1 = tcg_temp_local_new(); dc->postinc = 0; cris_evaluate_flags(dc); tcg_gen_mov_tl(taddr, addr); tcg_gen_mov_tl(tval, val); /* Store only if F flag isn't set */ tcg_gen_andi_tl(t1, cpu_PR[PR_CCS], F_FLAG_V10); tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); if (size == 1) { tcg_gen_qemu_st8(tval, taddr, mem_index); } else if (size == 2) { tcg_gen_qemu_st16(tval, taddr, mem_index); } else { tcg_gen_qemu_st32(tval, taddr, mem_index); } gen_set_label(l1); tcg_gen_shri_tl(t1, t1, 1); /* shift F to P position */ tcg_gen_or_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], t1); /*P=F*/ tcg_temp_free(t1); tcg_temp_free(tval); tcg_temp_free(taddr); }"} {"target": 0, "idx": 10480, "func": "static void ide_trim_bh_cb(void *opaque) { TrimAIOCB *iocb = opaque; iocb->common.cb(iocb->common.opaque, iocb->ret); qemu_bh_delete(iocb->bh); iocb->bh = NULL; qemu_aio_unref(iocb); }"} {"target": 0, "idx": 10485, "func": "static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, abi_long cmd, abi_long arg) { void *argptr; struct dm_ioctl *host_dm; abi_long guest_data; uint32_t guest_data_size; int target_size; const argtype *arg_type = ie->arg_type; abi_long ret; void *big_buf = NULL; char *host_data; arg_type++; target_size = thunk_type_size(arg_type, 0); argptr = lock_user(VERIFY_READ, arg, target_size, 1); if (!argptr) { ret = -TARGET_EFAULT; goto out; } thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); unlock_user(argptr, arg, 0); /* buf_temp is too small, so fetch things into a bigger buffer */ big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); memcpy(big_buf, buf_temp, target_size); buf_temp = big_buf; host_dm = big_buf; guest_data = arg + host_dm->data_start; if ((guest_data - arg) < 0) { ret = -EINVAL; goto out; } guest_data_size = host_dm->data_size - host_dm->data_start; host_data = (char*)host_dm + host_dm->data_start; argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); switch (ie->host_cmd) { case DM_REMOVE_ALL: case DM_LIST_DEVICES: case DM_DEV_CREATE: case DM_DEV_REMOVE: case DM_DEV_SUSPEND: case DM_DEV_STATUS: case 
DM_DEV_WAIT: case DM_TABLE_STATUS: case DM_TABLE_CLEAR: case DM_TABLE_DEPS: case DM_LIST_VERSIONS: /* no input data */ break; case DM_DEV_RENAME: case DM_DEV_SET_GEOMETRY: /* data contains only strings */ memcpy(host_data, argptr, guest_data_size); break; case DM_TARGET_MSG: memcpy(host_data, argptr, guest_data_size); *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); break; case DM_TABLE_LOAD: { void *gspec = argptr; void *cur_data = host_data; const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; int spec_size = thunk_type_size(arg_type, 0); int i; for (i = 0; i < host_dm->target_count; i++) { struct dm_target_spec *spec = cur_data; uint32_t next; int slen; thunk_convert(spec, gspec, arg_type, THUNK_HOST); slen = strlen((char*)gspec + spec_size) + 1; next = spec->next; spec->next = sizeof(*spec) + slen; strcpy((char*)&spec[1], gspec + spec_size); gspec += next; cur_data += spec->next; } break; } default: ret = -TARGET_EINVAL; goto out; } unlock_user(argptr, guest_data, 0); ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); if (!is_error(ret)) { guest_data = arg + host_dm->data_start; guest_data_size = host_dm->data_size - host_dm->data_start; argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); switch (ie->host_cmd) { case DM_REMOVE_ALL: case DM_DEV_CREATE: case DM_DEV_REMOVE: case DM_DEV_RENAME: case DM_DEV_SUSPEND: case DM_DEV_STATUS: case DM_TABLE_LOAD: case DM_TABLE_CLEAR: case DM_TARGET_MSG: case DM_DEV_SET_GEOMETRY: /* no return data */ break; case DM_LIST_DEVICES: { struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; uint32_t remaining_data = guest_data_size; void *cur_data = argptr; const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; int nl_size = 12; /* can't use thunk_size due to alignment */ while (1) { uint32_t next = nl->next; if (next) { nl->next = nl_size + (strlen(nl->name) + 1); } if (remaining_data < nl->next) { host_dm->flags |= DM_BUFFER_FULL_FLAG; break; } thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); strcpy(cur_data + nl_size, nl->name); cur_data += nl->next; remaining_data -= nl->next; if (!next) { break; } nl = (void*)nl + next; } break; } case DM_DEV_WAIT: case DM_TABLE_STATUS: { struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; void *cur_data = argptr; const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; int spec_size = thunk_type_size(arg_type, 0); int i; for (i = 0; i < host_dm->target_count; i++) { uint32_t next = spec->next; int slen = strlen((char*)&spec[1]) + 1; spec->next = (cur_data - argptr) + spec_size + slen; if (guest_data_size < spec->next) { host_dm->flags |= DM_BUFFER_FULL_FLAG; break; } thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); strcpy(cur_data + spec_size, (char*)&spec[1]); cur_data = argptr + spec->next; spec = (void*)host_dm + host_dm->data_start + next; } break; } case DM_TABLE_DEPS: { void *hdata = (void*)host_dm + host_dm->data_start; int count = *(uint32_t*)hdata; uint64_t *hdev = hdata + 8; uint64_t *gdev = argptr + 8; int i; *(uint32_t*)argptr = tswap32(count); for (i = 0; i < count; i++) { *gdev = tswap64(*hdev); gdev++; hdev++; } break; } case DM_LIST_VERSIONS: { struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; uint32_t remaining_data = guest_data_size; void *cur_data = argptr; const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; int vers_size = thunk_type_size(arg_type, 0); while (1) { uint32_t next = vers->next; if (next) { vers->next = vers_size + (strlen(vers->name) + 1); } if (remaining_data < 
vers->next) { host_dm->flags |= DM_BUFFER_FULL_FLAG; break; } thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); strcpy(cur_data + vers_size, vers->name); cur_data += vers->next; remaining_data -= vers->next; if (!next) { break; } vers = (void*)vers + next; } break; } default: ret = -TARGET_EINVAL; goto out; } unlock_user(argptr, guest_data, guest_data_size); argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); if (!argptr) { ret = -TARGET_EFAULT; goto out; } thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); unlock_user(argptr, arg, target_size); } out: if (big_buf) { free(big_buf); } return ret; }"} {"target": 0, "idx": 10498, "func": "static int dxva2_retrieve_data(AVCodecContext *s, AVFrame *frame) { InputStream *ist = s->opaque; DXVA2Context *ctx = ist->hwaccel_ctx; int ret; ret = av_hwframe_transfer_data(ctx->tmp_frame, frame, 0); if (ret < 0) return ret; ret = av_frame_copy_props(ctx->tmp_frame, frame); if (ret < 0) { av_frame_unref(ctx->tmp_frame); return ret; } av_frame_unref(frame); av_frame_move_ref(frame, ctx->tmp_frame); return 0; }"} {"target": 1, "idx": 10502, "func": "void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref, int layout) { VP56RangeCoder *c = &s->c; if (s->segmentation.update_map) *segment = vp8_rac_get_tree(c, vp8_segmentid_tree, s->prob->segmentid); else if (s->segmentation.enabled) *segment = ref ? *ref : *segment; mb->segment = *segment; mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0; if (s->keyframe) { mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra, vp8_pred16x16_prob_intra); if (mb->mode == MODE_I4x4) { decode_intra4x4_modes(s, c, mb, mb_x, 1, layout); } else { const uint32_t modes = vp8_pred4x4_mode[mb->mode] * 0x01010101u; if (s->mb_layout == 1) AV_WN32A(mb->intra4x4_pred_mode_top, modes); else AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes); AV_WN32A(s->intra4x4_pred_mode_left, modes); } mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, vp8_pred8x8c_prob_intra); mb->ref_frame = VP56_FRAME_CURRENT; } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) { // inter MB, 16.2 if (vp56_rac_get_prob_branchy(c, s->prob->last)) mb->ref_frame = vp56_rac_get_prob(c, s->prob->golden) ? 
VP56_FRAME_GOLDEN2 /* altref */ : VP56_FRAME_GOLDEN; else mb->ref_frame = VP56_FRAME_PREVIOUS; s->ref_count[mb->ref_frame - 1]++; // motion vectors, 16.3 decode_mvs(s, mb, mb_x, mb_y, layout); } else { // intra MB, 16.1 mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16); if (mb->mode == MODE_I4x4) decode_intra4x4_modes(s, c, mb, mb_x, 0, layout); mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, s->prob->pred8x8c); mb->ref_frame = VP56_FRAME_CURRENT; mb->partitioning = VP8_SPLITMVMODE_NONE; AV_ZERO32(&mb->bmv[0]); } }"} {"target": 1, "idx": 10504, "func": "static int decode_micromips_opc (CPUMIPSState *env, DisasContext *ctx, int *is_branch) { uint32_t op; /* make sure instructions are on a halfword boundary */ if (ctx->pc & 0x1) { env->CP0_BadVAddr = ctx->pc; generate_exception(ctx, EXCP_AdEL); ctx->bstate = BS_STOP; return 2; } op = (ctx->opcode >> 10) & 0x3f; /* Enforce properly-sized instructions in a delay slot */ if (ctx->hflags & MIPS_HFLAG_BMASK) { int bits = ctx->hflags & MIPS_HFLAG_BMASK_EXT; switch (op) { case POOL32A: case POOL32B: case POOL32I: case POOL32C: case ADDI32: case ADDIU32: case ORI32: case XORI32: case SLTI32: case SLTIU32: case ANDI32: case JALX32: case LBU32: case LHU32: case POOL32F: case JALS32: case BEQ32: case BNE32: case J32: case JAL32: case SB32: case SH32: case POOL32S: case ADDIUPC: case SWC132: case SDC132: case SD32: case SW32: case LB32: case LH32: case DADDIU32: case LWC132: case LDC132: case LD32: case LW32: if (bits & MIPS_HFLAG_BDS16) { generate_exception(ctx, EXCP_RI); /* Just stop translation; the user is confused. */ ctx->bstate = BS_STOP; return 2; } break; case POOL16A: case POOL16B: case POOL16C: case LWGP16: case POOL16F: case LBU16: case LHU16: case LWSP16: case LW16: case SB16: case SH16: case SWSP16: case SW16: case MOVE16: case ANDI16: case POOL16D: case POOL16E: case BEQZ16: case BNEZ16: case B16: case LI16: if (bits & MIPS_HFLAG_BDS32) { generate_exception(ctx, EXCP_RI); /* Just stop translation; the user is confused. */ ctx->bstate = BS_STOP; return 2; } break; default: break; } } switch (op) { case POOL16A: { int rd = mmreg(uMIPS_RD(ctx->opcode)); int rs1 = mmreg(uMIPS_RS1(ctx->opcode)); int rs2 = mmreg(uMIPS_RS2(ctx->opcode)); uint32_t opc = 0; switch (ctx->opcode & 0x1) { case ADDU16: opc = OPC_ADDU; break; case SUBU16: opc = OPC_SUBU; break; } gen_arith(ctx, opc, rd, rs1, rs2); } break; case POOL16B: { int rd = mmreg(uMIPS_RD(ctx->opcode)); int rs = mmreg(uMIPS_RS(ctx->opcode)); int amount = (ctx->opcode >> 1) & 0x7; uint32_t opc = 0; amount = amount == 0 ? 
8 : amount; switch (ctx->opcode & 0x1) { case SLL16: opc = OPC_SLL; break; case SRL16: opc = OPC_SRL; break; } gen_shift_imm(ctx, opc, rd, rs, amount); } break; case POOL16C: gen_pool16c_insn(ctx, is_branch); break; case LWGP16: { int rd = mmreg(uMIPS_RD(ctx->opcode)); int rb = 28; /* GP */ int16_t offset = SIMM(ctx->opcode, 0, 7) << 2; gen_ld(ctx, OPC_LW, rd, rb, offset); } break; case POOL16F: if (ctx->opcode & 1) { generate_exception(ctx, EXCP_RI); } else { /* MOVEP */ int enc_dest = uMIPS_RD(ctx->opcode); int enc_rt = uMIPS_RS2(ctx->opcode); int enc_rs = uMIPS_RS1(ctx->opcode); int rd, rs, re, rt; static const int rd_enc[] = { 5, 5, 6, 4, 4, 4, 4, 4 }; static const int re_enc[] = { 6, 7, 7, 21, 22, 5, 6, 7 }; static const int rs_rt_enc[] = { 0, 17, 2, 3, 16, 18, 19, 20 }; rd = rd_enc[enc_dest]; re = re_enc[enc_dest]; rs = rs_rt_enc[enc_rs]; rt = rs_rt_enc[enc_rt]; gen_arith_imm(ctx, OPC_ADDIU, rd, rs, 0); gen_arith_imm(ctx, OPC_ADDIU, re, rt, 0); } break; case LBU16: { int rd = mmreg(uMIPS_RD(ctx->opcode)); int rb = mmreg(uMIPS_RS(ctx->opcode)); int16_t offset = ZIMM(ctx->opcode, 0, 4); offset = (offset == 0xf ? -1 : offset); gen_ld(ctx, OPC_LBU, rd, rb, offset); } break; case LHU16: { int rd = mmreg(uMIPS_RD(ctx->opcode)); int rb = mmreg(uMIPS_RS(ctx->opcode)); int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1; gen_ld(ctx, OPC_LHU, rd, rb, offset); } break; case LWSP16: { int rd = (ctx->opcode >> 5) & 0x1f; int rb = 29; /* SP */ int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2; gen_ld(ctx, OPC_LW, rd, rb, offset); } break; case LW16: { int rd = mmreg(uMIPS_RD(ctx->opcode)); int rb = mmreg(uMIPS_RS(ctx->opcode)); int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2; gen_ld(ctx, OPC_LW, rd, rb, offset); } break; case SB16: { int rd = mmreg2(uMIPS_RD(ctx->opcode)); int rb = mmreg(uMIPS_RS(ctx->opcode)); int16_t offset = ZIMM(ctx->opcode, 0, 4); gen_st(ctx, OPC_SB, rd, rb, offset); } break; case SH16: { int rd = mmreg2(uMIPS_RD(ctx->opcode)); int rb = mmreg(uMIPS_RS(ctx->opcode)); int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1; gen_st(ctx, OPC_SH, rd, rb, offset); } break; case SWSP16: { int rd = (ctx->opcode >> 5) & 0x1f; int rb = 29; /* SP */ int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2; gen_st(ctx, OPC_SW, rd, rb, offset); } break; case SW16: { int rd = mmreg2(uMIPS_RD(ctx->opcode)); int rb = mmreg(uMIPS_RS(ctx->opcode)); int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2; gen_st(ctx, OPC_SW, rd, rb, offset); } break; case MOVE16: { int rd = uMIPS_RD5(ctx->opcode); int rs = uMIPS_RS5(ctx->opcode); gen_arith_imm(ctx, OPC_ADDIU, rd, rs, 0); } break; case ANDI16: gen_andi16(ctx); break; case POOL16D: switch (ctx->opcode & 0x1) { case ADDIUS5: gen_addius5(ctx); break; case ADDIUSP: gen_addiusp(ctx); break; } break; case POOL16E: switch (ctx->opcode & 0x1) { case ADDIUR2: gen_addiur2(ctx); break; case ADDIUR1SP: gen_addiur1sp(ctx); break; } break; case B16: gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, SIMM(ctx->opcode, 0, 10) << 1); *is_branch = 1; break; case BNEZ16: case BEQZ16: gen_compute_branch(ctx, op == BNEZ16 ? OPC_BNE : OPC_BEQ, 2, mmreg(uMIPS_RD(ctx->opcode)), 0, SIMM(ctx->opcode, 0, 7) << 1); *is_branch = 1; break; case LI16: { int reg = mmreg(uMIPS_RD(ctx->opcode)); int imm = ZIMM(ctx->opcode, 0, 7); imm = (imm == 0x7f ? 
-1 : imm); tcg_gen_movi_tl(cpu_gpr[reg], imm); } break; case RES_20: case RES_28: case RES_29: case RES_30: case RES_31: case RES_38: case RES_39: generate_exception(ctx, EXCP_RI); break; default: decode_micromips32_opc (env, ctx, op, is_branch); return 4; } return 2; }"} {"target": 0, "idx": 10513, "func": "static void decouple_info(COOKContext *q, COOKSubpacket *p, int *decouple_tab) { int i; int vlc = get_bits1(&q->gb); int start = cplband[p->js_subband_start]; int end = cplband[p->subbands - 1]; int length = end - start + 1; if (start > end) return; if (vlc) for (i = 0; i < length; i++) decouple_tab[start + i] = get_vlc2(&q->gb, p->ccpl.table, p->ccpl.bits, 2); else for (i = 0; i < length; i++) decouple_tab[start + i] = get_bits(&q->gb, p->js_vlc_bits); }"} {"target": 1, "idx": 10520, "func": "static int prepare_packet(AVPacket *pkt,const FailingMuxerPacketData *pkt_data, int64_t pts) { int ret; FailingMuxerPacketData *data = av_malloc(sizeof(*data)); memcpy(data, pkt_data, sizeof(FailingMuxerPacketData)); ret = av_packet_from_data(pkt, (uint8_t*) data, sizeof(*data)); pkt->pts = pkt->dts = pts; pkt->duration = 1; return ret;"} {"target": 0, "idx": 10539, "func": "static void find_best_state(uint8_t best_state[256][256], const uint8_t one_state[256]){ int i,j,k,m; double l2tab[256]; for(i=1; i<256; i++) l2tab[i]= log2(i/256.0); for(i=0; i<256; i++){ double best_len[256]; double p= i/256.0; for(j=0; j<256; j++) best_len[j]= 1<<30; for(j=FFMAX(i-10,1); jsps->log2_min_pu_size; int x_pu = x >> log2_min_pu_size; int y_pu = y >> log2_min_pu_size; if (x < 0 || x_pu >= s->sps->min_pu_width || y < 0 || y_pu >= s->sps->min_pu_height) return 2; return s->is_pcm[y_pu * s->sps->min_pu_width + x_pu]; }"} {"target": 0, "idx": 10554, "func": "static int h264_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; H264Context *h = avctx->priv_data; AVFrame *pict = data; int buf_index = 0; int ret; const uint8_t *new_extradata; int new_extradata_size; h->flags = avctx->flags; h->setup_finished = 0; /* end of stream, output what is still in the buffers */ out: if (buf_size == 0) { H264Picture *out; int i, out_idx; h->cur_pic_ptr = NULL; // FIXME factorize this with the output code below out = h->delayed_pic[0]; out_idx = 0; for (i = 1; h->delayed_pic[i] && !h->delayed_pic[i]->f->key_frame && !h->delayed_pic[i]->mmco_reset; i++) if (h->delayed_pic[i]->poc < out->poc) { out = h->delayed_pic[i]; out_idx = i; } for (i = out_idx; h->delayed_pic[i]; i++) h->delayed_pic[i] = h->delayed_pic[i + 1]; if (out) { ret = output_frame(h, pict, out->f); if (ret < 0) return ret; *got_frame = 1; } return buf_index; } new_extradata_size = 0; new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &new_extradata_size); if (new_extradata_size > 0 && new_extradata) { ret = ff_h264_decode_extradata(new_extradata, new_extradata_size, &h->ps, &h->is_avc, &h->nal_length_size, avctx->err_recognition, avctx); if (ret < 0) return ret; } buf_index = decode_nal_units(h, buf, buf_size); if (buf_index < 0) return AVERROR_INVALIDDATA; if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) { buf_size = 0; goto out; } if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) { if (avctx->skip_frame >= AVDISCARD_NONREF) return 0; av_log(avctx, AV_LOG_ERROR, \"no frame!\\n\"); return AVERROR_INVALIDDATA; } if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) || (h->mb_y >= h->mb_height && h->mb_height)) { if (avctx->flags2 & 
AV_CODEC_FLAG2_CHUNKS) decode_postinit(h, 1); ff_h264_field_end(h, &h->slice_ctx[0], 0); *got_frame = 0; if (h->next_output_pic && ((avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) || h->next_output_pic->recovered)) { if (!h->next_output_pic->recovered) h->next_output_pic->f->flags |= AV_FRAME_FLAG_CORRUPT; ret = output_frame(h, pict, h->next_output_pic->f); if (ret < 0) return ret; *got_frame = 1; } } assert(pict->buf[0] || !*got_frame); return get_consumed_bytes(buf_index, buf_size); }"} {"target": 0, "idx": 10556, "func": "static inline int16_t calc_lowcomp(int16_t a, int16_t b0, int16_t b1, uint8_t bin) { if (bin < 7) { if ((b0 + 256) == b1) a = 384; else if (b0 > b1) a = FFMAX(0, a - 64); } else if (bin < 20) { if ((b0 + 256) == b1) a = 320; else if (b0 > b1) a = FFMAX(0, a - 64); } else { a = FFMAX(0, a - 128); } return a; }"} {"target": 0, "idx": 10564, "func": "static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, long width, long height, long lumStride, long chromStride, long srcStride) { long y; const x86_reg chromWidth= width>>1; for (y=0; ycpu_model == NULL) { machine->cpu_model = \"e500v2_v30\"; } irqs = g_malloc0(smp_cpus * sizeof(qemu_irq *)); irqs[0] = g_malloc0(smp_cpus * sizeof(qemu_irq) * OPENPIC_OUTPUT_NB); for (i = 0; i < smp_cpus; i++) { PowerPCCPU *cpu; CPUState *cs; qemu_irq *input; cpu = cpu_ppc_init(machine->cpu_model); if (cpu == NULL) { fprintf(stderr, \"Unable to initialize CPU!\\n\"); exit(1); } env = &cpu->env; cs = CPU(cpu); if (!firstenv) { firstenv = env; } irqs[i] = irqs[0] + (i * OPENPIC_OUTPUT_NB); input = (qemu_irq *)env->irq_inputs; irqs[i][OPENPIC_OUTPUT_INT] = input[PPCE500_INPUT_INT]; irqs[i][OPENPIC_OUTPUT_CINT] = input[PPCE500_INPUT_CINT]; env->spr_cb[SPR_BOOKE_PIR].default_value = cs->cpu_index = i; env->mpic_iack = params->ccsrbar_base + MPC8544_MPIC_REGS_OFFSET + 0xa0; ppc_booke_timers_init(cpu, 400000000, PPC_TIMER_E500); /* Register reset handler */ if (!i) { /* Primary CPU */ struct boot_info *boot_info; boot_info = g_malloc0(sizeof(struct boot_info)); qemu_register_reset(ppce500_cpu_reset, cpu); env->load_info = boot_info; } else { /* Secondary CPUs */ qemu_register_reset(ppce500_cpu_reset_sec, cpu); } } env = firstenv; /* Fixup Memory size on a alignment boundary */ ram_size &= ~(RAM_SIZES_ALIGN - 1); machine->ram_size = ram_size; /* Register Memory */ memory_region_allocate_system_memory(ram, NULL, \"mpc8544ds.ram\", ram_size); memory_region_add_subregion(address_space_mem, 0, ram); dev = qdev_create(NULL, \"e500-ccsr\"); object_property_add_child(qdev_get_machine(), \"e500-ccsr\", OBJECT(dev), NULL); qdev_init_nofail(dev); ccsr = CCSR(dev); ccsr_addr_space = &ccsr->ccsr_space; memory_region_add_subregion(address_space_mem, params->ccsrbar_base, ccsr_addr_space); mpic = ppce500_init_mpic(params, ccsr_addr_space, irqs); /* Serial */ if (serial_hds[0]) { serial_mm_init(ccsr_addr_space, MPC8544_SERIAL0_REGS_OFFSET, 0, mpic[42], 399193, serial_hds[0], DEVICE_BIG_ENDIAN); } if (serial_hds[1]) { serial_mm_init(ccsr_addr_space, MPC8544_SERIAL1_REGS_OFFSET, 0, mpic[42], 399193, serial_hds[1], DEVICE_BIG_ENDIAN); } /* General Utility device */ dev = qdev_create(NULL, \"mpc8544-guts\"); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); memory_region_add_subregion(ccsr_addr_space, MPC8544_UTIL_OFFSET, sysbus_mmio_get_region(s, 0)); /* PCI */ dev = qdev_create(NULL, \"e500-pcihost\"); qdev_prop_set_uint32(dev, \"first_slot\", params->pci_first_slot); qdev_prop_set_uint32(dev, \"first_pin_irq\", 
pci_irq_nrs[0]); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); for (i = 0; i < PCI_NUM_PINS; i++) { sysbus_connect_irq(s, i, mpic[pci_irq_nrs[i]]); } memory_region_add_subregion(ccsr_addr_space, MPC8544_PCI_REGS_OFFSET, sysbus_mmio_get_region(s, 0)); pci_bus = (PCIBus *)qdev_get_child_bus(dev, \"pci.0\"); if (!pci_bus) printf(\"couldn't create PCI controller!\\n\"); if (pci_bus) { /* Register network interfaces. */ for (i = 0; i < nb_nics; i++) { pci_nic_init_nofail(&nd_table[i], pci_bus, \"virtio\", NULL); } } /* Register spinning region */ sysbus_create_simple(\"e500-spin\", params->spin_base, NULL); if (cur_base < (32 * 1024 * 1024)) { /* u-boot occupies memory up to 32MB, so load blobs above */ cur_base = (32 * 1024 * 1024); } if (params->has_mpc8xxx_gpio) { qemu_irq poweroff_irq; dev = qdev_create(NULL, \"mpc8xxx_gpio\"); s = SYS_BUS_DEVICE(dev); qdev_init_nofail(dev); sysbus_connect_irq(s, 0, mpic[MPC8XXX_GPIO_IRQ]); memory_region_add_subregion(ccsr_addr_space, MPC8XXX_GPIO_OFFSET, sysbus_mmio_get_region(s, 0)); /* Power Off GPIO at Pin 0 */ poweroff_irq = qemu_allocate_irq(ppce500_power_off, NULL, 0); qdev_connect_gpio_out(dev, 0, poweroff_irq); } /* Platform Bus Device */ if (params->has_platform_bus) { dev = qdev_create(NULL, TYPE_PLATFORM_BUS_DEVICE); dev->id = TYPE_PLATFORM_BUS_DEVICE; qdev_prop_set_uint32(dev, \"num_irqs\", params->platform_bus_num_irqs); qdev_prop_set_uint32(dev, \"mmio_size\", params->platform_bus_size); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); for (i = 0; i < params->platform_bus_num_irqs; i++) { int irqn = params->platform_bus_first_irq + i; sysbus_connect_irq(s, i, mpic[irqn]); } memory_region_add_subregion(address_space_mem, params->platform_bus_base, sysbus_mmio_get_region(s, 0)); } /* Load kernel. */ if (machine->kernel_filename) { kernel_base = cur_base; kernel_size = load_image_targphys(machine->kernel_filename, cur_base, ram_size - cur_base); if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", machine->kernel_filename); exit(1); } cur_base += kernel_size; } /* Load initrd. */ if (machine->initrd_filename) { initrd_base = (cur_base + INITRD_LOAD_PAD) & ~INITRD_PAD_MASK; initrd_size = load_image_targphys(machine->initrd_filename, initrd_base, ram_size - initrd_base); if (initrd_size < 0) { fprintf(stderr, \"qemu: could not load initial ram disk '%s'\\n\", machine->initrd_filename); exit(1); } cur_base = initrd_base + initrd_size; } /* * Smart firmware defaults ahead! * * We follow the following table to select which payload we execute. * * -kernel | -bios | payload * ---------+-------+--------- * N | Y | u-boot * N | N | u-boot * Y | Y | u-boot * Y | N | kernel * * This ensures backwards compatibility with how we used to expose * -kernel to users but allows them to run through u-boot as well. */ if (bios_name == NULL) { if (machine->kernel_filename) { bios_name = machine->kernel_filename; } else { bios_name = \"u-boot.e500\"; } } filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); bios_size = load_elf(filename, NULL, NULL, &bios_entry, &loadaddr, NULL, 1, ELF_MACHINE, 0); if (bios_size < 0) { /* * Hrm. No ELF image? 
Try a uImage, maybe someone is giving us an * ePAPR compliant kernel */ kernel_size = load_uimage(filename, &bios_entry, &loadaddr, NULL, NULL, NULL); if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load firmware '%s'\\n\", filename); exit(1); } } /* Reserve space for dtb */ dt_base = (loadaddr + bios_size + DTC_LOAD_PAD) & ~DTC_PAD_MASK; dt_size = ppce500_prep_device_tree(machine, params, dt_base, initrd_base, initrd_size, kernel_base, kernel_size); if (dt_size < 0) { fprintf(stderr, \"couldn't load device tree\\n\"); exit(1); } assert(dt_size < DTB_MAX_SIZE); boot_info = env->load_info; boot_info->entry = bios_entry; boot_info->dt_base = dt_base; boot_info->dt_size = dt_size; if (kvm_enabled()) { kvmppc_init(); } }"} {"target": 1, "idx": 10609, "func": "static av_always_inline int coeff_abs_level_remaining_decode(HEVCContext *s, int rc_rice_param) { int prefix = 0; int suffix = 0; int last_coeff_abs_level_remaining; int i; while (prefix < CABAC_MAX_BIN && get_cabac_bypass(&s->HEVClc->cc)) prefix++; if (prefix < 3) { for (i = 0; i < rc_rice_param; i++) suffix = (suffix << 1) | get_cabac_bypass(&s->HEVClc->cc); last_coeff_abs_level_remaining = (prefix << rc_rice_param) + suffix; } else { int prefix_minus3 = prefix - 3; if (prefix == CABAC_MAX_BIN) { av_log(s->avctx, AV_LOG_ERROR, \"CABAC_MAX_BIN : %d\\n\", prefix); return 0; } for (i = 0; i < prefix_minus3 + rc_rice_param; i++) suffix = (suffix << 1) | get_cabac_bypass(&s->HEVClc->cc); last_coeff_abs_level_remaining = (((1 << prefix_minus3) + 3 - 1) << rc_rice_param) + suffix; } return last_coeff_abs_level_remaining; }"} {"target": 0, "idx": 10620, "func": "static int segment_hls_window(AVFormatContext *s, int last) { SegmentContext *seg = s->priv_data; int i, ret = 0; char buf[1024]; if ((ret = avio_open2(&seg->pb, seg->list, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL)) < 0) goto fail; avio_printf(seg->pb, \"#EXTM3U\\n\"); avio_printf(seg->pb, \"#EXT-X-VERSION:3\\n\"); avio_printf(seg->pb, \"#EXT-X-TARGETDURATION:%d\\n\", (int)seg->time); avio_printf(seg->pb, \"#EXT-X-MEDIA-SEQUENCE:%d\\n\", FFMAX(0, seg->number - seg->size)); av_log(s, AV_LOG_VERBOSE, \"EXT-X-MEDIA-SEQUENCE:%d\\n\", FFMAX(0, seg->number - seg->size)); for (i = FFMAX(0, seg->number - seg->size); i < seg->number; i++) { avio_printf(seg->pb, \"#EXTINF:%d,\\n\", (int)seg->time); if (seg->entry_prefix) { avio_printf(seg->pb, \"%s\", seg->entry_prefix); } ret = av_get_frame_filename(buf, sizeof(buf), s->filename, i); if (ret < 0) { ret = AVERROR(EINVAL); goto fail; } avio_printf(seg->pb, \"%s\\n\", buf); } if (last) avio_printf(seg->pb, \"#EXT-X-ENDLIST\\n\"); fail: avio_closep(&seg->pb); return ret; }"} {"target": 1, "idx": 10640, "func": "static inline int cris_addc_pi_m(int a, int **b) { asm volatile (\"addc [%1+], %0\\n\" : \"+r\" (a), \"+b\" (*b)); return a; }"} {"target": 1, "idx": 10648, "func": "static void vmgenid_query_monitor_test(void) { QemuUUID expected, measured; gchar *cmd; g_assert(qemu_uuid_parse(VGID_GUID, &expected) == 0); cmd = g_strdup_printf(\"-machine accel=tcg -device vmgenid,id=testvgid,\" \"guid=%s\", VGID_GUID); qtest_start(cmd); /* Read the GUID via the monitor */ read_guid_from_monitor(&measured); g_assert(memcmp(measured.data, expected.data, sizeof(measured.data)) == 0); qtest_quit(global_qtest); g_free(cmd); }"} {"target": 1, "idx": 10665, "func": "static int decode_frame_byterun1(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { IffContext *s = avctx->priv_data; const uint8_t *buf = avpkt->size >= 2 ? 
avpkt->data + AV_RB16(avpkt->data) : NULL; const int buf_size = avpkt->size >= 2 ? avpkt->size - AV_RB16(avpkt->data) : 0; const uint8_t *buf_end = buf+buf_size; int y, plane, res; if ((res = extract_header(avctx, avpkt)) < 0) return res; if (s->init) { if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, \"reget_buffer() failed\\n\"); return res; } } else if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return res; } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != PIX_FMT_GRAY8) { if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0) return res; } s->init = 1; if (avctx->codec_tag == MKTAG('I','L','B','M')) { //interleaved if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) { for(y = 0; y < avctx->height ; y++ ) { uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ]; memset(row, 0, avctx->width); for (plane = 0; plane < s->bpp; plane++) { buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end); decodeplane8(row, s->planebuf, s->planesize, plane); } } } else if (s->ham) { // HAM to PIX_FMT_BGR32 for (y = 0; y < avctx->height ; y++) { uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]]; memset(s->ham_buf, 0, avctx->width); for (plane = 0; plane < s->bpp; plane++) { buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end); decodeplane8(s->ham_buf, s->planebuf, s->planesize, plane); } decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize); } } else { //PIX_FMT_BGR32 for(y = 0; y < avctx->height ; y++ ) { uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]]; memset(row, 0, avctx->width << 2); for (plane = 0; plane < s->bpp; plane++) { buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end); decodeplane32((uint32_t *) row, s->planebuf, s->planesize, plane); } } } } else if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) { // IFF-PBM for(y = 0; y < avctx->height ; y++ ) { uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]]; buf += decode_byterun(row, avctx->width, buf, buf_end); } } else { // IFF-PBM: HAM to PIX_FMT_BGR32 for (y = 0; y < avctx->height ; y++) { uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]]; buf += decode_byterun(s->ham_buf, avctx->width, buf, buf_end); decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, avctx->width); } } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; }"} {"target": 1, "idx": 10670, "func": "int alloc_picture(MpegEncContext *s, Picture *pic, int shared){ const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11 const int mb_array_size= s->mb_stride*s->mb_height; const int b8_array_size= s->b8_stride*s->mb_height*2; const int b4_array_size= s->b4_stride*s->mb_height*4; int i; if(shared){ assert(pic->data[0]); assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED); pic->type= FF_BUFFER_TYPE_SHARED; }else{ int r; assert(!pic->data[0]); r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic); if(r<0 || !pic->age || !pic->type || !pic->data[0]){ av_log(s->avctx, AV_LOG_ERROR, \"get_buffer() failed (%d %d %d %p)\\n\", r, pic->age, pic->type, pic->data[0]); return -1; } if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){ av_log(s->avctx, AV_LOG_ERROR, \"get_buffer() failed (stride changed)\\n\"); return -1; } if(pic->linesize[1] != pic->linesize[2]){ av_log(s->avctx, AV_LOG_ERROR, \"get_buffer() 
failed (uv stride mismatch)\\n\"); return -1; } s->linesize = pic->linesize[0]; s->uvlinesize= pic->linesize[1]; } if(pic->qscale_table==NULL){ if (s->encoding) { CHECKED_ALLOCZ(pic->mb_var , mb_array_size * sizeof(int16_t)) CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t)) CHECKED_ALLOCZ(pic->mb_mean , mb_array_size * sizeof(int8_t)) } CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t)) CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(uint32_t)) pic->mb_type= pic->mb_type_base + s->mb_stride+1; if(s->out_format == FMT_H264){ for(i=0; i<2; i++){ CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t)) pic->motion_val[i]= pic->motion_val_base[i]+4; CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t)) } pic->motion_subsample_log2= 2; }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){ for(i=0; i<2; i++){ CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t)) pic->motion_val[i]= pic->motion_val_base[i]+4; CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t)) } pic->motion_subsample_log2= 3; } if(s->avctx->debug&FF_DEBUG_DCT_COEFF) { CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6) } pic->qstride= s->mb_stride; CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan)) } /* It might be nicer if the application would keep track of these * but it would require an API change. */ memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1); s->prev_pict_types[0]= s->pict_type; if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE) pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway. return 0; fail: //for the CHECKED_ALLOCZ macro return -1; }"} {"target": 1, "idx": 10671, "func": "static int decode_pic_hdr(IVI5DecContext *ctx, AVCodecContext *avctx) { if (get_bits(&ctx->gb, 5) != 0x1F) { av_log(avctx, AV_LOG_ERROR, \"Invalid picture start code!\\n\"); return -1; ctx->prev_frame_type = ctx->frame_type; ctx->frame_type = get_bits(&ctx->gb, 3); if (ctx->frame_type >= 5) { av_log(avctx, AV_LOG_ERROR, \"Invalid frame type: %d \\n\", ctx->frame_type); return -1; ctx->frame_num = get_bits(&ctx->gb, 8); if (ctx->frame_type == FRAMETYPE_INTRA) { ctx->gop_invalid = 1; if (decode_gop_header(ctx, avctx)) return -1; ctx->gop_invalid = 0; if (ctx->frame_type != FRAMETYPE_NULL) { ctx->frame_flags = get_bits(&ctx->gb, 8); ctx->pic_hdr_size = (ctx->frame_flags & 1) ? get_bits_long(&ctx->gb, 24) : 0; ctx->checksum = (ctx->frame_flags & 0x10) ? get_bits(&ctx->gb, 16) : 0; /* skip unknown extension if any */ if (ctx->frame_flags & 0x20) skip_hdr_extension(&ctx->gb); /* XXX: untested */ /* decode macroblock huffman codebook */ if (ff_ivi_dec_huff_desc(&ctx->gb, ctx->frame_flags & 0x40, IVI_MB_HUFF, &ctx->mb_vlc, avctx)) return -1; skip_bits(&ctx->gb, 3); /* FIXME: unknown meaning! 
*/ align_get_bits(&ctx->gb); return 0;"} {"target": 1, "idx": 10672, "func": "static int mpegts_write_header(AVFormatContext *s) { MpegTSWrite *ts = s->priv_data; MpegTSWriteStream *ts_st; MpegTSService *service; AVStream *st, *pcr_st = NULL; AVDictionaryEntry *title, *provider; int i, j; const char *service_name; const char *provider_name; int *pids; int ret; if (s->max_delay < 0) /* Not set by the caller */ s->max_delay = 0; // round up to a whole number of TS packets ts->pes_payload_size = (ts->pes_payload_size + 14 + 183) / 184 * 184 - 14; ts->tsid = ts->transport_stream_id; ts->onid = ts->original_network_id; /* allocate a single DVB service */ title = av_dict_get(s->metadata, \"service_name\", NULL, 0); if (!title) title = av_dict_get(s->metadata, \"title\", NULL, 0); service_name = title ? title->value : DEFAULT_SERVICE_NAME; provider = av_dict_get(s->metadata, \"service_provider\", NULL, 0); provider_name = provider ? provider->value : DEFAULT_PROVIDER_NAME; service = mpegts_add_service(ts, ts->service_id, provider_name, service_name); if (!service) return AVERROR(ENOMEM); service->pmt.write_packet = section_write_packet; service->pmt.opaque = s; service->pmt.cc = 15; ts->pat.pid = PAT_PID; /* Initialize at 15 so that it wraps and is equal to 0 for the * first packet we write. */ ts->pat.cc = 15; ts->pat.write_packet = section_write_packet; ts->pat.opaque = s; ts->sdt.pid = SDT_PID; ts->sdt.cc = 15; ts->sdt.write_packet = section_write_packet; ts->sdt.opaque = s; pids = av_malloc_array(s->nb_streams, sizeof(*pids)); if (!pids) { ret = AVERROR(ENOMEM); goto fail; } /* assign pids to each stream */ for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; ts_st = av_mallocz(sizeof(MpegTSWriteStream)); if (!ts_st) { ret = AVERROR(ENOMEM); goto fail; } st->priv_data = ts_st; ts_st->user_tb = st->time_base; avpriv_set_pts_info(st, 33, 1, 90000); ts_st->payload = av_mallocz(ts->pes_payload_size); if (!ts_st->payload) { ret = AVERROR(ENOMEM); goto fail; } ts_st->service = service; /* MPEG pid values < 16 are reserved. Applications which set st->id in * this range are assigned a calculated pid. */ if (st->id < 16) { ts_st->pid = ts->start_pid + i; } else if (st->id < 0x1FFF) { ts_st->pid = st->id; } else { av_log(s, AV_LOG_ERROR, \"Invalid stream id %d, must be less than 8191\\n\", st->id); ret = AVERROR(EINVAL); goto fail; } if (ts_st->pid == service->pmt.pid) { av_log(s, AV_LOG_ERROR, \"Duplicate stream id %d\\n\", ts_st->pid); ret = AVERROR(EINVAL); goto fail; } for (j = 0; j < i; j++) { if (pids[j] == ts_st->pid) { av_log(s, AV_LOG_ERROR, \"Duplicate stream id %d\\n\", ts_st->pid); ret = AVERROR(EINVAL); goto fail; } } pids[i] = ts_st->pid; ts_st->payload_pts = AV_NOPTS_VALUE; ts_st->payload_dts = AV_NOPTS_VALUE; ts_st->first_pts_check = 1; ts_st->cc = 15; /* update PCR pid by using the first video stream */ if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && service->pcr_pid == 0x1fff) { service->pcr_pid = ts_st->pid; pcr_st = st; } if (st->codec->codec_id == AV_CODEC_ID_AAC && st->codec->extradata_size > 0) { AVStream *ast; ts_st->amux = avformat_alloc_context(); if (!ts_st->amux) { ret = AVERROR(ENOMEM); goto fail; } ts_st->amux->oformat = av_guess_format((ts->flags & MPEGTS_FLAG_AAC_LATM) ? 
\"latm\" : \"adts\", NULL, NULL); if (!ts_st->amux->oformat) { ret = AVERROR(EINVAL); goto fail; } if (!(ast = avformat_new_stream(ts_st->amux, NULL))) { ret = AVERROR(ENOMEM); goto fail; } ret = avcodec_copy_context(ast->codec, st->codec); if (ret != 0) goto fail; ast->time_base = st->time_base; ret = avformat_write_header(ts_st->amux, NULL); if (ret < 0) goto fail; } if (st->codec->codec_id == AV_CODEC_ID_OPUS) { ts_st->opus_pending_trim_start = st->codec->initial_padding * 48000 / st->codec->sample_rate; } } av_freep(&pids); /* if no video stream, use the first stream as PCR */ if (service->pcr_pid == 0x1fff && s->nb_streams > 0) { pcr_st = s->streams[0]; ts_st = pcr_st->priv_data; service->pcr_pid = ts_st->pid; } else ts_st = pcr_st->priv_data; if (ts->mux_rate > 1) { service->pcr_packet_period = (ts->mux_rate * ts->pcr_period) / (TS_PACKET_SIZE * 8 * 1000); ts->sdt_packet_period = (ts->mux_rate * SDT_RETRANS_TIME) / (TS_PACKET_SIZE * 8 * 1000); ts->pat_packet_period = (ts->mux_rate * PAT_RETRANS_TIME) / (TS_PACKET_SIZE * 8 * 1000); if (ts->copyts < 1) ts->first_pcr = av_rescale(s->max_delay, PCR_TIME_BASE, AV_TIME_BASE); } else { /* Arbitrary values, PAT/PMT will also be written on video key frames */ ts->sdt_packet_period = 200; ts->pat_packet_period = 40; if (pcr_st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if (!pcr_st->codec->frame_size) { av_log(s, AV_LOG_WARNING, \"frame size not set\\n\"); service->pcr_packet_period = pcr_st->codec->sample_rate / (10 * 512); } else { service->pcr_packet_period = pcr_st->codec->sample_rate / (10 * pcr_st->codec->frame_size); } } else { // max delta PCR 0.1s // TODO: should be avg_frame_rate service->pcr_packet_period = ts_st->user_tb.den / (10 * ts_st->user_tb.num); } if (!service->pcr_packet_period) service->pcr_packet_period = 1; } ts->last_pat_ts = AV_NOPTS_VALUE; ts->last_sdt_ts = AV_NOPTS_VALUE; // The user specified a period, use only it if (ts->pat_period < INT_MAX/2) { ts->pat_packet_period = INT_MAX; } if (ts->sdt_period < INT_MAX/2) { ts->sdt_packet_period = INT_MAX; } // output a PCR as soon as possible service->pcr_packet_count = service->pcr_packet_period; ts->pat_packet_count = ts->pat_packet_period - 1; ts->sdt_packet_count = ts->sdt_packet_period - 1; if (ts->mux_rate == 1) av_log(s, AV_LOG_VERBOSE, \"muxrate VBR, \"); else av_log(s, AV_LOG_VERBOSE, \"muxrate %d, \", ts->mux_rate); av_log(s, AV_LOG_VERBOSE, \"pcr every %d pkts, sdt every %d, pat/pmt every %d pkts\\n\", service->pcr_packet_period, ts->sdt_packet_period, ts->pat_packet_period); if (ts->m2ts_mode == -1) { if (av_match_ext(s->filename, \"m2ts\")) { ts->m2ts_mode = 1; } else { ts->m2ts_mode = 0; } } return 0; fail: av_freep(&pids); for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; ts_st = st->priv_data; if (ts_st) { av_freep(&ts_st->payload); if (ts_st->amux) { avformat_free_context(ts_st->amux); ts_st->amux = NULL; } } av_freep(&st->priv_data); } for (i = 0; i < ts->nb_services; i++) { service = ts->services[i]; av_freep(&service->provider_name); av_freep(&service->name); av_freep(&service); } av_freep(&ts->services); return ret; }"} {"target": 1, "idx": 10699, "func": "void dct32(INTFLOAT *out, const INTFLOAT *tab) { INTFLOAT tmp0, tmp1; INTFLOAT val0 , val1 , val2 , val3 , val4 , val5 , val6 , val7 , val8 , val9 , val10, val11, val12, val13, val14, val15, val16, val17, val18, val19, val20, val21, val22, val23, val24, val25, val26, val27, val28, val29, val30, val31; /* pass 1 */ BF0( 0, 31, COS0_0 , 1); BF0(15, 16, COS0_15, 5); /* pass 2 */ BF( 0, 15, 
COS1_0 , 1); BF(16, 31,-COS1_0 , 1); /* pass 1 */ BF0( 7, 24, COS0_7 , 1); BF0( 8, 23, COS0_8 , 1); /* pass 2 */ BF( 7, 8, COS1_7 , 4); BF(23, 24,-COS1_7 , 4); /* pass 3 */ BF( 0, 7, COS2_0 , 1); BF( 8, 15,-COS2_0 , 1); BF(16, 23, COS2_0 , 1); BF(24, 31,-COS2_0 , 1); /* pass 1 */ BF0( 3, 28, COS0_3 , 1); BF0(12, 19, COS0_12, 2); /* pass 2 */ BF( 3, 12, COS1_3 , 1); BF(19, 28,-COS1_3 , 1); /* pass 1 */ BF0( 4, 27, COS0_4 , 1); BF0(11, 20, COS0_11, 2); /* pass 2 */ BF( 4, 11, COS1_4 , 1); BF(20, 27,-COS1_4 , 1); /* pass 3 */ BF( 3, 4, COS2_3 , 3); BF(11, 12,-COS2_3 , 3); BF(19, 20, COS2_3 , 3); BF(27, 28,-COS2_3 , 3); /* pass 4 */ BF( 0, 3, COS3_0 , 1); BF( 4, 7,-COS3_0 , 1); BF( 8, 11, COS3_0 , 1); BF(12, 15,-COS3_0 , 1); BF(16, 19, COS3_0 , 1); BF(20, 23,-COS3_0 , 1); BF(24, 27, COS3_0 , 1); BF(28, 31,-COS3_0 , 1); /* pass 1 */ BF0( 1, 30, COS0_1 , 1); BF0(14, 17, COS0_14, 3); /* pass 2 */ BF( 1, 14, COS1_1 , 1); BF(17, 30,-COS1_1 , 1); /* pass 1 */ BF0( 6, 25, COS0_6 , 1); BF0( 9, 22, COS0_9 , 1); /* pass 2 */ BF( 6, 9, COS1_6 , 2); BF(22, 25,-COS1_6 , 2); /* pass 3 */ BF( 1, 6, COS2_1 , 1); BF( 9, 14,-COS2_1 , 1); BF(17, 22, COS2_1 , 1); BF(25, 30,-COS2_1 , 1); /* pass 1 */ BF0( 2, 29, COS0_2 , 1); BF0(13, 18, COS0_13, 3); /* pass 2 */ BF( 2, 13, COS1_2 , 1); BF(18, 29,-COS1_2 , 1); /* pass 1 */ BF0( 5, 26, COS0_5 , 1); BF0(10, 21, COS0_10, 1); /* pass 2 */ BF( 5, 10, COS1_5 , 2); BF(21, 26,-COS1_5 , 2); /* pass 3 */ BF( 2, 5, COS2_2 , 1); BF(10, 13,-COS2_2 , 1); BF(18, 21, COS2_2 , 1); BF(26, 29,-COS2_2 , 1); /* pass 4 */ BF( 1, 2, COS3_1 , 2); BF( 5, 6,-COS3_1 , 2); BF( 9, 10, COS3_1 , 2); BF(13, 14,-COS3_1 , 2); BF(17, 18, COS3_1 , 2); BF(21, 22,-COS3_1 , 2); BF(25, 26, COS3_1 , 2); BF(29, 30,-COS3_1 , 2); /* pass 5 */ BF1( 0, 1, 2, 3); BF2( 4, 5, 6, 7); BF1( 8, 9, 10, 11); BF2(12, 13, 14, 15); BF1(16, 17, 18, 19); BF2(20, 21, 22, 23); BF1(24, 25, 26, 27); BF2(28, 29, 30, 31); /* pass 6 */ ADD( 8, 12); ADD(12, 10); ADD(10, 14); ADD(14, 9); ADD( 9, 13); ADD(13, 11); ADD(11, 15); out[ 0] = val0; out[16] = val1; out[ 8] = val2; out[24] = val3; out[ 4] = val4; out[20] = val5; out[12] = val6; out[28] = val7; out[ 2] = val8; out[18] = val9; out[10] = val10; out[26] = val11; out[ 6] = val12; out[22] = val13; out[14] = val14; out[30] = val15; ADD(24, 28); ADD(28, 26); ADD(26, 30); ADD(30, 25); ADD(25, 29); ADD(29, 27); ADD(27, 31); out[ 1] = val16 + val24; out[17] = val17 + val25; out[ 9] = val18 + val26; out[25] = val19 + val27; out[ 5] = val20 + val28; out[21] = val21 + val29; out[13] = val22 + val30; out[29] = val23 + val31; out[ 3] = val24 + val20; out[19] = val25 + val21; out[11] = val26 + val22; out[27] = val27 + val23; out[ 7] = val28 + val18; out[23] = val29 + val19; out[15] = val30 + val17; out[31] = val31; }"} {"target": 0, "idx": 10721, "func": "static inline void gen_op_movl_seg_T0_vm(int seg_reg) { tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[seg_reg].selector)); tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4); tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[seg_reg].base)); }"} {"target": 1, "idx": 10723, "func": "static void vc1_v_overlap_c(uint8_t* src, int stride) { int i; int a, b, c, d; int d1, d2; int rnd = 1; for(i = 0; i < 8; i++) { a = src[-2*stride]; b = src[-stride]; c = src[0]; d = src[stride]; d1 = (a - d + 3 + rnd) >> 3; d2 = (a - d + b - c + 4 - rnd) >> 3; src[-2*stride] = a - d1; src[-stride] = b - d2; src[0] = c + d2; src[stride] = d + d1; src++; rnd = !rnd; } }"} {"target": 1, "idx": 10727, "func": 
"void qvirtio_pci_set_msix_configuration_vector(QVirtioPCIDevice *d, QGuestAllocator *alloc, uint16_t entry) { uint16_t vector; uint32_t control; void *addr; g_assert(d->pdev->msix_enabled); addr = d->pdev->msix_table + (entry * 16); g_assert_cmpint(entry, >=, 0); g_assert_cmpint(entry, <, qpci_msix_table_size(d->pdev)); d->config_msix_entry = entry; d->config_msix_data = 0x12345678; d->config_msix_addr = guest_alloc(alloc, 4); qpci_io_writel(d->pdev, addr + PCI_MSIX_ENTRY_LOWER_ADDR, d->config_msix_addr & ~0UL); qpci_io_writel(d->pdev, addr + PCI_MSIX_ENTRY_UPPER_ADDR, (d->config_msix_addr >> 32) & ~0UL); qpci_io_writel(d->pdev, addr + PCI_MSIX_ENTRY_DATA, d->config_msix_data); control = qpci_io_readl(d->pdev, addr + PCI_MSIX_ENTRY_VECTOR_CTRL); qpci_io_writel(d->pdev, addr + PCI_MSIX_ENTRY_VECTOR_CTRL, control & ~PCI_MSIX_ENTRY_CTRL_MASKBIT); qpci_io_writew(d->pdev, d->addr + VIRTIO_MSI_CONFIG_VECTOR, entry); vector = qpci_io_readw(d->pdev, d->addr + VIRTIO_MSI_CONFIG_VECTOR); g_assert_cmphex(vector, !=, VIRTIO_MSI_NO_VECTOR); }"} {"target": 1, "idx": 10730, "func": "static void gen_rdhwr(DisasContext *ctx, int rt, int rd) { TCGv t0; #if !defined(CONFIG_USER_ONLY) /* The Linux kernel will emulate rdhwr if it's not supported natively. Therefore only check the ISA in system mode. */ check_insn(ctx, ISA_MIPS32R2); #endif t0 = tcg_temp_new(); switch (rd) { case 0: save_cpu_state(ctx, 1); gen_helper_rdhwr_cpunum(t0, cpu_env); gen_store_gpr(t0, rt); break; case 1: save_cpu_state(ctx, 1); gen_helper_rdhwr_synci_step(t0, cpu_env); gen_store_gpr(t0, rt); break; case 2: save_cpu_state(ctx, 1); gen_helper_rdhwr_cc(t0, cpu_env); gen_store_gpr(t0, rt); break; case 3: save_cpu_state(ctx, 1); gen_helper_rdhwr_ccres(t0, cpu_env); gen_store_gpr(t0, rt); break; case 29: #if defined(CONFIG_USER_ONLY) tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUMIPSState, tls_value)); gen_store_gpr(t0, rt); break; #else /* XXX: Some CPUs implement this in hardware. Not supported yet. 
*/ #endif default: /* Invalid */ MIPS_INVAL(\"rdhwr\"); generate_exception(ctx, EXCP_RI); break; } tcg_temp_free(t0); }"} {"target": 1, "idx": 10735, "func": "static void usb_msd_realize_bot(USBDevice *dev, Error **errp) { MSDState *s = DO_UPCAST(MSDState, dev, dev); usb_desc_create_serial(dev); usb_desc_init(dev); scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev), &usb_msd_scsi_info_bot, NULL); s->bus.qbus.allow_hotplug = 0; usb_msd_handle_reset(dev); }"} {"target": 0, "idx": 10738, "func": "static int decode_slice_header(FFV1Context *f, FFV1Context *fs) { RangeCoder *c = &fs->c; uint8_t state[CONTEXT_SIZE]; unsigned ps, i, context_count; memset(state, 128, sizeof(state)); if (fs->ac > 1) { for (i = 1; i < 256; i++) { fs->c.one_state[i] = f->state_transition[i]; fs->c.zero_state[256 - i] = 256 - fs->c.one_state[i]; } } fs->slice_x = get_symbol(c, state, 0) * f->width; fs->slice_y = get_symbol(c, state, 0) * f->height; fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x; fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y; fs->slice_x /= f->num_h_slices; fs->slice_y /= f->num_v_slices; fs->slice_width = fs->slice_width / f->num_h_slices - fs->slice_x; fs->slice_height = fs->slice_height / f->num_v_slices - fs->slice_y; if ((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height) return AVERROR_INVALIDDATA; if ((unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height) return AVERROR_INVALIDDATA; for (i = 0; i < f->plane_count; i++) { PlaneContext *const p = &fs->plane[i]; int idx = get_symbol(c, state, 0); if (idx > (unsigned)f->quant_table_count) { av_log(f->avctx, AV_LOG_ERROR, \"quant_table_index out of range\\n\"); return AVERROR_INVALIDDATA; } p->quant_table_index = idx; memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table)); context_count = f->context_count[idx]; if (p->context_count < context_count) { av_freep(&p->state); av_freep(&p->vlc_state); } p->context_count = context_count; } ps = get_symbol(c, state, 0); if (ps == 1) { f->cur->interlaced_frame = 1; f->cur->top_field_first = 1; } else if (ps == 2) { f->cur->interlaced_frame = 1; f->cur->top_field_first = 0; } else if (ps == 3) { f->cur->interlaced_frame = 0; } f->cur->sample_aspect_ratio.num = get_symbol(c, state, 0); f->cur->sample_aspect_ratio.den = get_symbol(c, state, 0); if (av_image_check_sar(f->width, f->height, f->cur->sample_aspect_ratio) < 0) { av_log(f->avctx, AV_LOG_WARNING, \"ignoring invalid SAR: %u/%u\\n\", f->cur->sample_aspect_ratio.num, f->cur->sample_aspect_ratio.den); f->cur->sample_aspect_ratio = (AVRational){ 0, 1 }; } return 0; }"} {"target": 1, "idx": 10741, "func": "int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) { if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, sizeof(diag_501), 0) || cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501, sizeof(diag_501), 1)) { return -EINVAL; } return 0; }"} {"target": 1, "idx": 10744, "func": "static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block, uint64_t i) { return (uint8_t *) (((uintptr_t) rdma_ram_block->local_host_addr) + (i << RDMA_REG_CHUNK_SHIFT)); }"} {"target": 0, "idx": 10750, "func": "static int decode_element(AVCodecContext *avctx, void *data, int ch_index, int channels) { ALACContext *alac = avctx->priv_data; int has_size, bps, is_compressed, decorr_shift, decorr_left_weight, ret; uint32_t output_samples; int i, ch; skip_bits(&alac->gb, 
4); /* element instance tag */ skip_bits(&alac->gb, 12); /* unused header bits */ /* the number of output samples is stored in the frame */ has_size = get_bits1(&alac->gb); alac->extra_bits = get_bits(&alac->gb, 2) << 3; bps = alac->sample_size - alac->extra_bits + channels - 1; if (bps > 32) { av_log(avctx, AV_LOG_ERROR, \"bps is unsupported: %d\\n\", bps); return AVERROR_PATCHWELCOME; } /* whether the frame is compressed */ is_compressed = !get_bits1(&alac->gb); if (has_size) output_samples = get_bits_long(&alac->gb, 32); else output_samples = alac->max_samples_per_frame; if (!output_samples || output_samples > alac->max_samples_per_frame) { av_log(avctx, AV_LOG_ERROR, \"invalid samples per frame: %d\\n\", output_samples); return AVERROR_INVALIDDATA; } if (!alac->nb_samples) { /* get output buffer */ alac->frame.nb_samples = output_samples; if ((ret = avctx->get_buffer(avctx, &alac->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; } } else if (output_samples != alac->nb_samples) { av_log(avctx, AV_LOG_ERROR, \"sample count mismatch: %u != %d\\n\", output_samples, alac->nb_samples); return AVERROR_INVALIDDATA; } alac->nb_samples = output_samples; if (alac->direct_output) { for (ch = 0; ch < channels; ch++) alac->output_samples_buffer[ch] = (int32_t *)alac->frame.extended_data[ch_index + ch]; } if (is_compressed) { int16_t lpc_coefs[2][32]; int lpc_order[2]; int prediction_type[2]; int lpc_quant[2]; int rice_history_mult[2]; decorr_shift = get_bits(&alac->gb, 8); decorr_left_weight = get_bits(&alac->gb, 8); for (ch = 0; ch < channels; ch++) { prediction_type[ch] = get_bits(&alac->gb, 4); lpc_quant[ch] = get_bits(&alac->gb, 4); rice_history_mult[ch] = get_bits(&alac->gb, 3); lpc_order[ch] = get_bits(&alac->gb, 5); /* read the predictor table */ for (i = lpc_order[ch] - 1; i >= 0; i--) lpc_coefs[ch][i] = get_sbits(&alac->gb, 16); } if (alac->extra_bits) { for (i = 0; i < alac->nb_samples; i++) { if(get_bits_left(&alac->gb) <= 0) return -1; for (ch = 0; ch < channels; ch++) alac->extra_bits_buffer[ch][i] = get_bits(&alac->gb, alac->extra_bits); } } for (ch = 0; ch < channels; ch++) { int ret=rice_decompress(alac, alac->predict_error_buffer[ch], alac->nb_samples, bps, rice_history_mult[ch] * alac->rice_history_mult / 4); if(ret<0) return ret; /* adaptive FIR filter */ if (prediction_type[ch] == 15) { /* Prediction type 15 runs the adaptive FIR twice. * The first pass uses the special-case coef_num = 31, while * the second pass uses the coefs from the bitstream. * * However, this prediction type is not currently used by the * reference encoder. 
*/ lpc_prediction(alac->predict_error_buffer[ch], alac->predict_error_buffer[ch], alac->nb_samples, bps, NULL, 31, 0); } else if (prediction_type[ch] > 0) { av_log(avctx, AV_LOG_WARNING, \"unknown prediction type: %i\\n\", prediction_type[ch]); } lpc_prediction(alac->predict_error_buffer[ch], alac->output_samples_buffer[ch], alac->nb_samples, bps, lpc_coefs[ch], lpc_order[ch], lpc_quant[ch]); } } else { /* not compressed, easy case */ for (i = 0; i < alac->nb_samples; i++) { if(get_bits_left(&alac->gb) <= 0) return -1; for (ch = 0; ch < channels; ch++) { alac->output_samples_buffer[ch][i] = get_sbits_long(&alac->gb, alac->sample_size); } } alac->extra_bits = 0; decorr_shift = 0; decorr_left_weight = 0; } if (channels == 2 && decorr_left_weight) { decorrelate_stereo(alac->output_samples_buffer, alac->nb_samples, decorr_shift, decorr_left_weight); } if (alac->extra_bits) { append_extra_bits(alac->output_samples_buffer, alac->extra_bits_buffer, alac->extra_bits, channels, alac->nb_samples); } if(av_sample_fmt_is_planar(avctx->sample_fmt)) { switch(alac->sample_size) { case 16: { for (ch = 0; ch < channels; ch++) { int16_t *outbuffer = (int16_t *)alac->frame.extended_data[ch_index + ch]; for (i = 0; i < alac->nb_samples; i++) *outbuffer++ = alac->output_samples_buffer[ch][i]; }} break; case 24: { for (ch = 0; ch < channels; ch++) { for (i = 0; i < alac->nb_samples; i++) alac->output_samples_buffer[ch][i] <<= 8; }} break; } }else{ switch(alac->sample_size) { case 16: { int16_t *outbuffer = ((int16_t *)alac->frame.extended_data[0]) + ch_index; for (i = 0; i < alac->nb_samples; i++) { for (ch = 0; ch < channels; ch++) *outbuffer++ = alac->output_samples_buffer[ch][i]; outbuffer += alac->channels - channels; } } break; case 24: { int32_t *outbuffer = ((int32_t *)alac->frame.extended_data[0]) + ch_index; for (i = 0; i < alac->nb_samples; i++) { for (ch = 0; ch < channels; ch++) *outbuffer++ = alac->output_samples_buffer[ch][i] << 8; outbuffer += alac->channels - channels; } } break; case 32: { int32_t *outbuffer = ((int32_t *)alac->frame.extended_data[0]) + ch_index; for (i = 0; i < alac->nb_samples; i++) { for (ch = 0; ch < channels; ch++) *outbuffer++ = alac->output_samples_buffer[ch][i]; outbuffer += alac->channels - channels; } } break; } } return 0; }"} {"target": 0, "idx": 10765, "func": "static av_always_inline float quantize_and_encode_band_cost_template( struct AACEncContext *s, PutBitContext *pb, const float *in, const float *scaled, int size, int scale_idx, int cb, const float lambda, const float uplim, int *bits, int BT_ZERO, int BT_UNSIGNED, int BT_PAIR, int BT_ESC) { const float IQ = ff_aac_pow2sf_tab[200 + scale_idx - SCALE_ONE_POS + SCALE_DIV_512]; const float Q = ff_aac_pow2sf_tab[200 - scale_idx + SCALE_ONE_POS - SCALE_DIV_512]; const float CLIPPED_ESCAPE = 165140.0f*IQ; int i, j, k; float cost = 0; const int dim = BT_PAIR ? 
2 : 4; int resbits = 0; const float Q34 = sqrtf(Q * sqrtf(Q)); const int range = aac_cb_range[cb]; const int maxval = aac_cb_maxval[cb]; int off; if (BT_ZERO) { for (i = 0; i < size; i++) cost += in[i]*in[i]; if (bits) *bits = 0; return cost * lambda; } if (!scaled) { abs_pow34_v(s->scoefs, in, size); scaled = s->scoefs; } quantize_bands(s->qcoefs, in, scaled, size, Q34, !BT_UNSIGNED, maxval); if (BT_UNSIGNED) { off = 0; } else { off = maxval; } for (i = 0; i < size; i += dim) { const float *vec; int *quants = s->qcoefs + i; int curidx = 0; int curbits; float rd = 0.0f; for (j = 0; j < dim; j++) { curidx *= range; curidx += quants[j] + off; } curbits = ff_aac_spectral_bits[cb-1][curidx]; vec = &ff_aac_codebook_vectors[cb-1][curidx*dim]; if (BT_UNSIGNED) { for (k = 0; k < dim; k++) { float t = fabsf(in[i+k]); float di; if (BT_ESC && vec[k] == 64.0f) { //FIXME: slow if (t >= CLIPPED_ESCAPE) { di = t - CLIPPED_ESCAPE; curbits += 21; } else { int c = av_clip(quant(t, Q), 0, 8191); di = t - c*cbrtf(c)*IQ; curbits += av_log2(c)*2 - 4 + 1; } } else { di = t - vec[k]*IQ; } if (vec[k] != 0.0f) curbits++; rd += di*di; } } else { for (k = 0; k < dim; k++) { float di = in[i+k] - vec[k]*IQ; rd += di*di; } } cost += rd * lambda + curbits; resbits += curbits; if (cost >= uplim) return uplim; if (pb) { put_bits(pb, ff_aac_spectral_bits[cb-1][curidx], ff_aac_spectral_codes[cb-1][curidx]); if (BT_UNSIGNED) for (j = 0; j < dim; j++) if (ff_aac_codebook_vectors[cb-1][curidx*dim+j] != 0.0f) put_bits(pb, 1, in[i+j] < 0.0f); if (BT_ESC) { for (j = 0; j < 2; j++) { if (ff_aac_codebook_vectors[cb-1][curidx*2+j] == 64.0f) { int coef = av_clip(quant(fabsf(in[i+j]), Q), 0, 8191); int len = av_log2(coef); put_bits(pb, len - 4 + 1, (1 << (len - 4 + 1)) - 2); put_bits(pb, len, coef & ((1 << len) - 1)); } } } } } if (bits) *bits = resbits; return cost; }"} {"target": 0, "idx": 10767, "func": "static void event_loop(VideoState *cur_stream) { SDL_Event event; double incr, pos, frac; for(;;) { double x; SDL_WaitEvent(&event); switch(event.type) { case SDL_KEYDOWN: if (exit_on_keydown) { do_exit(cur_stream); break; } switch(event.key.keysym.sym) { case SDLK_ESCAPE: case SDLK_q: do_exit(cur_stream); break; case SDLK_f: toggle_full_screen(cur_stream); break; case SDLK_p: case SDLK_SPACE: if (cur_stream) toggle_pause(cur_stream); break; case SDLK_s: //S: Step to next frame if (cur_stream) step_to_next_frame(cur_stream); break; case SDLK_a: if (cur_stream) stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO); break; case SDLK_v: if (cur_stream) stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO); break; case SDLK_t: if (cur_stream) stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE); break; case SDLK_w: if (cur_stream) toggle_audio_display(cur_stream); break; case SDLK_LEFT: incr = -10.0; goto do_seek; case SDLK_RIGHT: incr = 10.0; goto do_seek; case SDLK_UP: incr = 60.0; goto do_seek; case SDLK_DOWN: incr = -60.0; do_seek: if (cur_stream) { if (seek_by_bytes) { if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){ pos= cur_stream->video_current_pos; }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){ pos= cur_stream->audio_pkt.pos; }else pos = avio_tell(cur_stream->ic->pb); if (cur_stream->ic->bit_rate) incr *= cur_stream->ic->bit_rate / 8.0; else incr *= 180000.0; pos += incr; stream_seek(cur_stream, pos, incr, 1); } else { pos = get_master_clock(cur_stream); pos += incr; stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0); } } break; default: 
break; } break; case SDL_MOUSEBUTTONDOWN: if (exit_on_mousedown) { do_exit(cur_stream); break; } case SDL_MOUSEMOTION: if(event.type ==SDL_MOUSEBUTTONDOWN){ x= event.button.x; }else{ if(event.motion.state != SDL_PRESSED) break; x= event.motion.x; } if (cur_stream) { if(seek_by_bytes || cur_stream->ic->duration<=0){ uint64_t size= avio_size(cur_stream->ic->pb); stream_seek(cur_stream, size*x/cur_stream->width, 0, 1); }else{ int64_t ts; int ns, hh, mm, ss; int tns, thh, tmm, tss; tns = cur_stream->ic->duration/1000000LL; thh = tns/3600; tmm = (tns%3600)/60; tss = (tns%60); frac = x/cur_stream->width; ns = frac*tns; hh = ns/3600; mm = (ns%3600)/60; ss = (ns%60); fprintf(stderr, \"Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \\n\", frac*100, hh, mm, ss, thh, tmm, tss); ts = frac*cur_stream->ic->duration; if (cur_stream->ic->start_time != AV_NOPTS_VALUE) ts += cur_stream->ic->start_time; stream_seek(cur_stream, ts, 0, 0); } } break; case SDL_VIDEORESIZE: if (cur_stream) { screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0, SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL); screen_width = cur_stream->width = event.resize.w; screen_height= cur_stream->height= event.resize.h; } break; case SDL_QUIT: case FF_QUIT_EVENT: do_exit(cur_stream); break; case FF_ALLOC_EVENT: video_open(event.user.data1); alloc_picture(event.user.data1); break; case FF_REFRESH_EVENT: video_refresh(event.user.data1); cur_stream->refresh=0; break; default: break; } } }"} {"target": 1, "idx": 10772, "func": "static int sox_read_packet(AVFormatContext *s, AVPacket *pkt) { int ret, size; if (url_feof(s->pb)) return AVERROR_EOF; size = SOX_SAMPLES*s->streams[0]->codec->block_align; ret = av_get_packet(s->pb, pkt, size); if (ret < 0) return AVERROR(EIO); pkt->stream_index = 0; pkt->size = ret; return 0; }"} {"target": 1, "idx": 10773, "func": "static av_cold int svq1_encode_init(AVCodecContext *avctx) { SVQ1Context * const s = avctx->priv_data; dsputil_init(&s->dsp, avctx); avctx->coded_frame= (AVFrame*)&s->picture; s->frame_width = avctx->width; s->frame_height = avctx->height; s->y_block_width = (s->frame_width + 15) / 16; s->y_block_height = (s->frame_height + 15) / 16; s->c_block_width = (s->frame_width / 4 + 15) / 16; s->c_block_height = (s->frame_height / 4 + 15) / 16; s->avctx= avctx; s->m.avctx= avctx; s->m.me.scratchpad= av_mallocz((avctx->width+64)*2*16*2*sizeof(uint8_t)); s->m.me.map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t)); s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t)); s->mb_type = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int16_t)); s->dummy = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int32_t)); h263_encode_init(&s->m); //mv_penalty return 0; }"} {"target": 1, "idx": 10801, "func": "static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist, int offset) { AHCICmdHdr *cmd = ad->cur_cmd; uint32_t opts = le32_to_cpu(cmd->opts); uint64_t prdt_addr = le64_to_cpu(cmd->tbl_addr) + 0x80; int sglist_alloc_hint = opts >> AHCI_CMD_HDR_PRDT_LEN; dma_addr_t prdt_len = (sglist_alloc_hint * sizeof(AHCI_SG)); dma_addr_t real_prdt_len = prdt_len; uint8_t *prdt; int i; int r = 0; int sum = 0; int off_idx = -1; int off_pos = -1; int tbl_entry_size; IDEBus *bus = &ad->port; BusState *qbus = BUS(bus); if (!sglist_alloc_hint) { DPRINTF(ad->port_no, \"no sg list given by guest: 0x%08x\\n\", opts); return -1; } /* map PRDT */ if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len, DMA_DIRECTION_TO_DEVICE))){ DPRINTF(ad->port_no, \"map 
failed\\n\"); return -1; } if (prdt_len < real_prdt_len) { DPRINTF(ad->port_no, \"mapped less than expected\\n\"); r = -1; goto out; } /* Get entries in the PRDT, init a qemu sglist accordingly */ if (sglist_alloc_hint > 0) { AHCI_SG *tbl = (AHCI_SG *)prdt; sum = 0; for (i = 0; i < sglist_alloc_hint; i++) { /* flags_size is zero-based */ tbl_entry_size = (le32_to_cpu(tbl[i].flags_size) + 1); if (offset <= (sum + tbl_entry_size)) { off_idx = i; off_pos = offset - sum; break; } sum += tbl_entry_size; } if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) { DPRINTF(ad->port_no, \"%s: Incorrect offset! \" \"off_idx: %d, off_pos: %d\\n\", __func__, off_idx, off_pos); r = -1; goto out; } qemu_sglist_init(sglist, qbus->parent, (sglist_alloc_hint - off_idx), ad->hba->as); qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr + off_pos), le32_to_cpu(tbl[off_idx].flags_size) + 1 - off_pos); for (i = off_idx + 1; i < sglist_alloc_hint; i++) { /* flags_size is zero-based */ qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr), le32_to_cpu(tbl[i].flags_size) + 1); } } out: dma_memory_unmap(ad->hba->as, prdt, prdt_len, DMA_DIRECTION_TO_DEVICE, prdt_len); return r; }"} {"target": 1, "idx": 10836, "func": "static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y) { uint8_t *ptr_y, *ptr_cb, *ptr_cr; int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy; const int lowres = s->avctx->lowres; const int op_index = FFMIN(lowres, 2); const int block_s = 8>>lowres; const int s_mask = (2 << lowres) - 1; const int h_edge_pos = s->h_edge_pos >> lowres; const int v_edge_pos = s->v_edge_pos >> lowres; linesize = s->current_picture.f.linesize[0] << field_based; uvlinesize = s->current_picture.f.linesize[1] << field_based; // FIXME obviously not perfect but qpel will not work in lowres anyway if (s->quarter_sample) { motion_x /= 2; motion_y /= 2; } if (field_based) { motion_y += (bottom_field - field_select) * (1 << lowres - 1); } sx = motion_x & s_mask; sy = motion_y & s_mask; src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1); src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1); if (s->out_format == FMT_H263) { uvsx = ((motion_x >> 1) & s_mask) | (sx & 1); uvsy = ((motion_y >> 1) & s_mask) | (sy & 1); uvsrc_x = src_x >> 1; uvsrc_y = src_y >> 1; } else if (s->out_format == FMT_H261) { // even chroma mv's are full pel in H261 mx = motion_x / 4; my = motion_y / 4; uvsx = (2 * mx) & s_mask; uvsy = (2 * my) & s_mask; uvsrc_x = s->mb_x * block_s + (mx >> lowres); uvsrc_y = mb_y * block_s + (my >> lowres); } else { mx = motion_x / 2; my = motion_y / 2; uvsx = mx & s_mask; uvsy = my & s_mask; uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1); uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1); } ptr_y = ref_picture[0] + src_y * linesize + src_x; ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x; ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x; if ((unsigned) src_x > h_edge_pos - (!!sx) - 2 * block_s || (unsigned) src_y > (v_edge_pos >> field_based) - (!!sy) - h) { s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17 + field_based, src_x, src_y << field_based, h_edge_pos, v_edge_pos); ptr_y = s->edge_emu_buffer; if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) { uint8_t *uvbuf = s->edge_emu_buffer + 18 * 
s->linesize; s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9 + field_based, uvsrc_x, uvsrc_y << field_based, h_edge_pos >> 1, v_edge_pos >> 1); s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9, 9 + field_based, uvsrc_x, uvsrc_y << field_based, h_edge_pos >> 1, v_edge_pos >> 1); ptr_cb = uvbuf; ptr_cr = uvbuf + 16; } } // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data if (bottom_field) { dest_y += s->linesize; dest_cb += s->uvlinesize; dest_cr += s->uvlinesize; } if (field_select) { ptr_y += s->linesize; ptr_cb += s->uvlinesize; ptr_cr += s->uvlinesize; } sx = (sx << 2) >> lowres; sy = (sy << 2) >> lowres; pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy); if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) { uvsx = (uvsx << 2) >> lowres; uvsy = (uvsy << 2) >> lowres; pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy); pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy); } // FIXME h261 lowres loop filter }"} {"target": 1, "idx": 10842, "func": "static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; unsigned int buf_size = avpkt->size; const uint8_t *buf_end = buf + buf_size; const AVPixFmtDescriptor *desc; EXRContext *const s = avctx->priv_data; AVFrame *picture = data; AVFrame *const p = &s->picture; uint8_t *ptr; int i, x, y, stride, magic_number, version, flags, ret; int w = 0; int h = 0; unsigned int xmin = ~0; unsigned int xmax = ~0; unsigned int ymin = ~0; unsigned int ymax = ~0; unsigned int xdelta = ~0; int out_line_size; int bxmin, axmax; int scan_lines_per_block; unsigned long scan_line_size; unsigned long uncompressed_size; unsigned int current_channel_offset = 0; s->channel_offsets[0] = -1; s->channel_offsets[1] = -1; s->channel_offsets[2] = -1; s->channel_offsets[3] = -1; s->bits_per_color_id = -1; s->compr = -1; if (buf_size < 10) { av_log(avctx, AV_LOG_ERROR, \"Too short header to parse\\n\"); return AVERROR_INVALIDDATA; } magic_number = bytestream_get_le32(&buf); if (magic_number != 20000630) { // As per documentation of OpenEXR it's supposed to be int 20000630 little-endian av_log(avctx, AV_LOG_ERROR, \"Wrong magic number %d\\n\", magic_number); return AVERROR_INVALIDDATA; } version = bytestream_get_byte(&buf); if (version != 2) { av_log(avctx, AV_LOG_ERROR, \"Unsupported version %d\\n\", version); return AVERROR_PATCHWELCOME; } flags = bytestream_get_le24(&buf); if (flags & 0x2) { av_log(avctx, AV_LOG_ERROR, \"Tile based images are not supported\\n\"); return AVERROR_PATCHWELCOME; } // Parse the header while (buf < buf_end && buf[0]) { unsigned int variable_buffer_data_size; // Process the channel list if (check_header_variable(avctx, &buf, buf_end, \"channels\", \"chlist\", 38, &variable_buffer_data_size) >= 0) { const uint8_t *channel_list_end; if (!variable_buffer_data_size) return AVERROR_INVALIDDATA; channel_list_end = buf + variable_buffer_data_size; while (channel_list_end - buf >= 19) { int current_bits_per_color_id = -1; int channel_index = -1; if (!strcmp(buf, \"R\")) channel_index = 0; else if (!strcmp(buf, \"G\")) channel_index = 1; else if (!strcmp(buf, \"B\")) channel_index = 2; else if (!strcmp(buf, \"A\")) channel_index = 3; else av_log(avctx, AV_LOG_WARNING, \"Unsupported channel %.256s\\n\", buf); while (bytestream_get_byte(&buf) && buf < channel_list_end) continue; /* skip */ if (channel_list_end - * &buf < 4) { av_log(avctx, AV_LOG_ERROR, 
\"Incomplete header\\n\"); return AVERROR_INVALIDDATA; } current_bits_per_color_id = bytestream_get_le32(&buf); if (current_bits_per_color_id > 2) { av_log(avctx, AV_LOG_ERROR, \"Unknown color format\\n\"); return AVERROR_INVALIDDATA; } if (channel_index >= 0) { if (s->bits_per_color_id != -1 && s->bits_per_color_id != current_bits_per_color_id) { av_log(avctx, AV_LOG_ERROR, \"RGB channels not of the same depth\\n\"); return AVERROR_INVALIDDATA; } s->bits_per_color_id = current_bits_per_color_id; s->channel_offsets[channel_index] = current_channel_offset; } current_channel_offset += 1 << current_bits_per_color_id; buf += 12; } /* Check if all channels are set with an offset or if the channels * are causing an overflow */ if (FFMIN3(s->channel_offsets[0], s->channel_offsets[1], s->channel_offsets[2]) < 0) { if (s->channel_offsets[0] < 0) av_log(avctx, AV_LOG_ERROR, \"Missing red channel\\n\"); if (s->channel_offsets[1] < 0) av_log(avctx, AV_LOG_ERROR, \"Missing green channel\\n\"); if (s->channel_offsets[2] < 0) av_log(avctx, AV_LOG_ERROR, \"Missing blue channel\\n\"); return AVERROR_INVALIDDATA; } buf = channel_list_end; continue; } else if (check_header_variable(avctx, &buf, buf_end, \"dataWindow\", \"box2i\", 31, &variable_buffer_data_size) >= 0) { if (!variable_buffer_data_size) return AVERROR_INVALIDDATA; xmin = AV_RL32(buf); ymin = AV_RL32(buf + 4); xmax = AV_RL32(buf + 8); ymax = AV_RL32(buf + 12); xdelta = (xmax-xmin) + 1; buf += variable_buffer_data_size; continue; } else if (check_header_variable(avctx, &buf, buf_end, \"displayWindow\", \"box2i\", 34, &variable_buffer_data_size) >= 0) { if (!variable_buffer_data_size) return AVERROR_INVALIDDATA; w = AV_RL32(buf + 8) + 1; h = AV_RL32(buf + 12) + 1; buf += variable_buffer_data_size; continue; } else if (check_header_variable(avctx, &buf, buf_end, \"lineOrder\", \"lineOrder\", 25, &variable_buffer_data_size) >= 0) { if (!variable_buffer_data_size) return AVERROR_INVALIDDATA; if (*buf) { av_log(avctx, AV_LOG_ERROR, \"Doesn't support this line order : %d\\n\", *buf); return AVERROR_PATCHWELCOME; } buf += variable_buffer_data_size; continue; } else if (check_header_variable(avctx, &buf, buf_end, \"pixelAspectRatio\", \"float\", 31, &variable_buffer_data_size) >= 0) { if (!variable_buffer_data_size) return AVERROR_INVALIDDATA; avctx->sample_aspect_ratio = av_d2q(av_int2float(AV_RL32(buf)), 255); buf += variable_buffer_data_size; continue; } else if (check_header_variable(avctx, &buf, buf_end, \"compression\", \"compression\", 29, &variable_buffer_data_size) >= 0) { if (!variable_buffer_data_size) return AVERROR_INVALIDDATA; if (s->compr == -1) s->compr = *buf; else av_log(avctx, AV_LOG_WARNING, \"Found more than one compression attribute\\n\"); buf += variable_buffer_data_size; continue; } // Check if there is enough bytes for a header if (buf_end - buf <= 9) { av_log(avctx, AV_LOG_ERROR, \"Incomplete header\\n\"); return AVERROR_INVALIDDATA; } // Process unknown variables for (i = 0; i < 2; i++) { // Skip variable name/type while (++buf < buf_end) if (buf[0] == 0x0) break; } buf++; // Skip variable length if (buf_end - buf >= 5) { variable_buffer_data_size = get_header_variable_length(&buf, buf_end); if (!variable_buffer_data_size) { av_log(avctx, AV_LOG_ERROR, \"Incomplete header\\n\"); return AVERROR_INVALIDDATA; } buf += variable_buffer_data_size; } } if (s->compr == -1) { av_log(avctx, AV_LOG_ERROR, \"Missing compression attribute\\n\"); return AVERROR_INVALIDDATA; } if (buf >= buf_end) { av_log(avctx, AV_LOG_ERROR, \"Incomplete 
frame\\n\"); return AVERROR_INVALIDDATA; } buf++; switch (s->bits_per_color_id) { case 2: // 32-bit case 1: // 16-bit if (s->channel_offsets[3] >= 0) avctx->pix_fmt = AV_PIX_FMT_RGBA64; else avctx->pix_fmt = AV_PIX_FMT_RGB48; break; // 8-bit case 0: av_log_missing_feature(avctx, \"8-bit OpenEXR\", 1); return AVERROR_PATCHWELCOME; default: av_log(avctx, AV_LOG_ERROR, \"Unknown color format : %d\\n\", s->bits_per_color_id); return AVERROR_INVALIDDATA; } switch (s->compr) { case EXR_RAW: case EXR_RLE: case EXR_ZIP1: scan_lines_per_block = 1; break; case EXR_ZIP16: scan_lines_per_block = 16; break; default: av_log(avctx, AV_LOG_ERROR, \"Compression type %d is not supported\\n\", s->compr); return AVERROR_PATCHWELCOME; } if (s->picture.data[0]) ff_thread_release_buffer(avctx, &s->picture); if (av_image_check_size(w, h, 0, avctx)) return AVERROR_INVALIDDATA; // Verify the xmin, xmax, ymin, ymax and xdelta before setting the actual image size if (xmin > xmax || ymin > ymax || xdelta != xmax - xmin + 1 || xmax >= w || ymax >= h) { av_log(avctx, AV_LOG_ERROR, \"Wrong sizing or missing size information\\n\"); return AVERROR_INVALIDDATA; } if (w != avctx->width || h != avctx->height) { avcodec_set_dimensions(avctx, w, h); } desc = av_pix_fmt_desc_get(avctx->pix_fmt); bxmin = xmin * 2 * desc->nb_components; axmax = (avctx->width - (xmax + 1)) * 2 * desc->nb_components; out_line_size = avctx->width * 2 * desc->nb_components; scan_line_size = xdelta * current_channel_offset; uncompressed_size = scan_line_size * scan_lines_per_block; if (s->compr != EXR_RAW) { av_fast_padded_malloc(&s->uncompressed_data, &s->uncompressed_size, uncompressed_size); av_fast_padded_malloc(&s->tmp, &s->tmp_size, uncompressed_size); if (!s->uncompressed_data || !s->tmp) return AVERROR(ENOMEM); } if ((ret = ff_thread_get_buffer(avctx, p)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; } ptr = p->data[0]; stride = p->linesize[0]; // Zero out the start if ymin is not 0 for (y = 0; y < ymin; y++) { memset(ptr, 0, out_line_size); ptr += stride; } // Process the actual scan line blocks for (y = ymin; y <= ymax; y += scan_lines_per_block) { uint16_t *ptr_x = (uint16_t *)ptr; if (buf_end - buf > 8) { /* Read the lineoffset from the line offset table and add 8 bytes to skip the coordinates and data size fields */ const uint64_t line_offset = bytestream_get_le64(&buf) + 8; int32_t data_size; // Check if the buffer has the required bytes needed from the offset if ((line_offset > buf_size) || (s->compr == EXR_RAW && line_offset > avpkt->size - xdelta * current_channel_offset) || (s->compr != EXR_RAW && line_offset > buf_size - (data_size = AV_RL32(avpkt->data + line_offset - 4)))) { // Line offset is probably wrong and not inside the buffer av_log(avctx, AV_LOG_WARNING, \"Line offset for line %d is out of reach setting it to black\\n\", y); for (i = 0; i < scan_lines_per_block && y + i <= ymax; i++, ptr += stride) { ptr_x = (uint16_t *)ptr; memset(ptr_x, 0, out_line_size); } } else { const uint8_t *red_channel_buffer, *green_channel_buffer, *blue_channel_buffer, *alpha_channel_buffer = 0; if (scan_lines_per_block > 1) uncompressed_size = scan_line_size * FFMIN(scan_lines_per_block, ymax - y + 1); if ((s->compr == EXR_ZIP1 || s->compr == EXR_ZIP16) && data_size < uncompressed_size) { unsigned long dest_len = uncompressed_size; if (uncompress(s->tmp, &dest_len, avpkt->data + line_offset, data_size) != Z_OK || dest_len != uncompressed_size) { av_log(avctx, AV_LOG_ERROR, \"error during zlib decompression\\n\"); 
return AVERROR(EINVAL); } } else if (s->compr == EXR_RLE && data_size < uncompressed_size) { if (rle_uncompress(avpkt->data + line_offset, data_size, s->tmp, uncompressed_size)) { av_log(avctx, AV_LOG_ERROR, \"error during rle decompression\\n\"); return AVERROR(EINVAL); } } if (s->compr != EXR_RAW && data_size < uncompressed_size) { predictor(s->tmp, uncompressed_size); reorder_pixels(s->tmp, s->uncompressed_data, uncompressed_size); red_channel_buffer = s->uncompressed_data + xdelta * s->channel_offsets[0]; green_channel_buffer = s->uncompressed_data + xdelta * s->channel_offsets[1]; blue_channel_buffer = s->uncompressed_data + xdelta * s->channel_offsets[2]; if (s->channel_offsets[3] >= 0) alpha_channel_buffer = s->uncompressed_data + xdelta * s->channel_offsets[3]; } else { red_channel_buffer = avpkt->data + line_offset + xdelta * s->channel_offsets[0]; green_channel_buffer = avpkt->data + line_offset + xdelta * s->channel_offsets[1]; blue_channel_buffer = avpkt->data + line_offset + xdelta * s->channel_offsets[2]; if (s->channel_offsets[3] >= 0) alpha_channel_buffer = avpkt->data + line_offset + xdelta * s->channel_offsets[3]; } for (i = 0; i < scan_lines_per_block && y + i <= ymax; i++, ptr += stride) { const uint8_t *r, *g, *b, *a; r = red_channel_buffer; g = green_channel_buffer; b = blue_channel_buffer; if (alpha_channel_buffer) a = alpha_channel_buffer; ptr_x = (uint16_t *)ptr; // Zero out the start if xmin is not 0 memset(ptr_x, 0, bxmin); ptr_x += xmin * desc->nb_components; if (s->bits_per_color_id == 2) { // 32-bit for (x = 0; x < xdelta; x++) { *ptr_x++ = exr_flt2uint(bytestream_get_le32(&r)); *ptr_x++ = exr_flt2uint(bytestream_get_le32(&g)); *ptr_x++ = exr_flt2uint(bytestream_get_le32(&b)); if (alpha_channel_buffer) *ptr_x++ = exr_flt2uint(bytestream_get_le32(&a)); } } else { // 16-bit for (x = 0; x < xdelta; x++) { *ptr_x++ = exr_halflt2uint(bytestream_get_le16(&r)); *ptr_x++ = exr_halflt2uint(bytestream_get_le16(&g)); *ptr_x++ = exr_halflt2uint(bytestream_get_le16(&b)); if (alpha_channel_buffer) *ptr_x++ = exr_halflt2uint(bytestream_get_le16(&a)); } } // Zero out the end if xmax+1 is not w memset(ptr_x, 0, axmax); red_channel_buffer += scan_line_size; green_channel_buffer += scan_line_size; blue_channel_buffer += scan_line_size; if (alpha_channel_buffer) alpha_channel_buffer += scan_line_size; } } } } // Zero out the end if ymax+1 is not h for (y = ymax + 1; y < avctx->height; y++) { memset(ptr, 0, out_line_size); ptr += stride; } *picture = s->picture; *got_frame = 1; return buf_size; }"} {"target": 1, "idx": 10847, "func": "static void csrhci_reset(struct csrhci_s *s) { s->out_len = 0; s->out_size = FIFO_LEN; s->in_len = 0; s->baud_delay = NANOSECONDS_PER_SECOND; s->enable = 0; s->in_hdr = INT_MAX; s->in_data = INT_MAX; s->modem_state = 0; /* After a while... 
(but sooner than 10ms) */ s->modem_state |= CHR_TIOCM_CTS; memset(&s->bd_addr, 0, sizeof(bdaddr_t)); }"} {"target": 1, "idx": 10849, "func": "void rgb15tobgr15(const uint8_t *src, uint8_t *dst, unsigned int src_size) { unsigned i; unsigned num_pixels = src_size >> 1; for(i=0; i>5; b = (rgb&0x7C00)>>10; dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10); } }"} {"target": 1, "idx": 10857, "func": "int net_init_vhost_user(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { int queues; const NetdevVhostUserOptions *vhost_user_opts; CharDriverState *chr; assert(netdev->type == NET_CLIENT_DRIVER_VHOST_USER); vhost_user_opts = &netdev->u.vhost_user; chr = net_vhost_parse_chardev(vhost_user_opts, errp); if (!chr) { return -1; } /* verify net frontend */ if (qemu_opts_foreach(qemu_find_opts(\"device\"), net_vhost_check_net, (char *)name, errp)) { return -1; } queues = vhost_user_opts->has_queues ? vhost_user_opts->queues : 1; if (queues < 1 || queues > MAX_QUEUE_NUM) { error_setg(errp, \"vhost-user number of queues must be in range [1, %d]\", MAX_QUEUE_NUM); return -1; } return net_vhost_user_init(peer, \"vhost_user\", name, chr, queues); }"} {"target": 1, "idx": 10868, "func": "static int lag_decode_prob(GetBitContext *gb, uint32_t *value) { static const uint8_t series[] = { 1, 2, 3, 5, 8, 13, 21 }; int i; int bit = 0; int bits = 0; int prevbit = 0; unsigned val; for (i = 0; i < 7; i++) { if (prevbit && bit) break; prevbit = bit; bit = get_bits1(gb); if (bit && !prevbit) bits += series[i]; } bits--; if (bits < 0 || bits > 31) { *value = 0; return -1; } else if (bits == 0) { *value = 0; return 0; } val = get_bits_long(gb, bits); val |= 1 << bits; *value = val - 1; return 0; }"} {"target": 1, "idx": 10869, "func": "static void check_add_res(HEVCDSPContext h, int bit_depth) { int i; LOCAL_ALIGNED_32(int16_t, res0, [32 * 32]); LOCAL_ALIGNED_32(int16_t, res1, [32 * 32]); LOCAL_ALIGNED_32(uint8_t, dst0, [32 * 32 * 2]); LOCAL_ALIGNED_32(uint8_t, dst1, [32 * 32 * 2]); for (i = 2; i <= 5; i++) { int block_size = 1 << i; int size = block_size * block_size; ptrdiff_t stride = block_size << (bit_depth > 8); declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, int16_t *res, ptrdiff_t stride); randomize_buffers(res0, size); randomize_buffers2(dst0, size); memcpy(res1, res0, sizeof(*res0) * size); memcpy(dst1, dst0, size); if (check_func(h.add_residual[i - 2], \"add_res_%dx%d_%d\", block_size, block_size, bit_depth)) { call_ref(dst0, res0, stride); call_new(dst1, res1, stride); if (memcmp(dst0, dst1, size)) fail(); bench_new(dst1, res1, stride); } } }"} {"target": 0, "idx": 10875, "func": "static void test_validate_fail_union_flat(TestInputVisitorData *data, const void *unused) { UserDefFlatUnion *tmp = NULL; Error *errp = NULL; Visitor *v; v = validate_test_init(data, \"{ 'string': 'c', 'integer': 41, 'boolean': true }\"); visit_type_UserDefFlatUnion(v, &tmp, NULL, &errp); g_assert(error_is_set(&errp)); qapi_free_UserDefFlatUnion(tmp); }"} {"target": 0, "idx": 10877, "func": "void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) { #if defined(DEBUG_MMU) printf(\"CR4 update: CR4=%08x\\n\", (uint32_t)env->cr[4]); #endif if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) != (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) { tlb_flush(env, 1); } /* SSE handling */ if (!(env->cpuid_features & CPUID_SSE)) new_cr4 &= ~CR4_OSFXSR_MASK; if (new_cr4 & CR4_OSFXSR_MASK) env->hflags |= HF_OSFXSR_MASK; else env->hflags &= ~HF_OSFXSR_MASK; env->cr[4] = new_cr4; }"} 
{"target": 0, "idx": 10899, "func": "int drive_init(struct drive_opt *arg, int snapshot, void *opaque) { char buf[128]; char file[1024]; char devname[128]; char serial[21]; const char *mediastr = \"\"; BlockInterfaceType type; enum { MEDIA_DISK, MEDIA_CDROM } media; int bus_id, unit_id; int cyls, heads, secs, translation; BlockDriverState *bdrv; BlockDriver *drv = NULL; QEMUMachine *machine = opaque; int max_devs; int index; int cache; int bdrv_flags, onerror; int drives_table_idx; char *str = arg->opt; static const char * const params[] = { \"bus\", \"unit\", \"if\", \"index\", \"cyls\", \"heads\", \"secs\", \"trans\", \"media\", \"snapshot\", \"file\", \"cache\", \"format\", \"serial\", \"werror\", NULL }; if (check_params(buf, sizeof(buf), params, str) < 0) { fprintf(stderr, \"qemu: unknown parameter '%s' in '%s'\\n\", buf, str); return -1; } file[0] = 0; cyls = heads = secs = 0; bus_id = 0; unit_id = -1; translation = BIOS_ATA_TRANSLATION_AUTO; index = -1; cache = 3; if (machine->use_scsi) { type = IF_SCSI; max_devs = MAX_SCSI_DEVS; pstrcpy(devname, sizeof(devname), \"scsi\"); } else { type = IF_IDE; max_devs = MAX_IDE_DEVS; pstrcpy(devname, sizeof(devname), \"ide\"); } media = MEDIA_DISK; /* extract parameters */ if (get_param_value(buf, sizeof(buf), \"bus\", str)) { bus_id = strtol(buf, NULL, 0); if (bus_id < 0) { fprintf(stderr, \"qemu: '%s' invalid bus id\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"unit\", str)) { unit_id = strtol(buf, NULL, 0); if (unit_id < 0) { fprintf(stderr, \"qemu: '%s' invalid unit id\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"if\", str)) { pstrcpy(devname, sizeof(devname), buf); if (!strcmp(buf, \"ide\")) { type = IF_IDE; max_devs = MAX_IDE_DEVS; } else if (!strcmp(buf, \"scsi\")) { type = IF_SCSI; max_devs = MAX_SCSI_DEVS; } else if (!strcmp(buf, \"floppy\")) { type = IF_FLOPPY; max_devs = 0; } else if (!strcmp(buf, \"pflash\")) { type = IF_PFLASH; max_devs = 0; } else if (!strcmp(buf, \"mtd\")) { type = IF_MTD; max_devs = 0; } else if (!strcmp(buf, \"sd\")) { type = IF_SD; max_devs = 0; } else if (!strcmp(buf, \"virtio\")) { type = IF_VIRTIO; max_devs = 0; } else if (!strcmp(buf, \"xen\")) { type = IF_XEN; max_devs = 0; } else { fprintf(stderr, \"qemu: '%s' unsupported bus type '%s'\\n\", str, buf); return -1; } } if (get_param_value(buf, sizeof(buf), \"index\", str)) { index = strtol(buf, NULL, 0); if (index < 0) { fprintf(stderr, \"qemu: '%s' invalid index\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"cyls\", str)) { cyls = strtol(buf, NULL, 0); } if (get_param_value(buf, sizeof(buf), \"heads\", str)) { heads = strtol(buf, NULL, 0); } if (get_param_value(buf, sizeof(buf), \"secs\", str)) { secs = strtol(buf, NULL, 0); } if (cyls || heads || secs) { if (cyls < 1 || cyls > 16383) { fprintf(stderr, \"qemu: '%s' invalid physical cyls number\\n\", str); return -1; } if (heads < 1 || heads > 16) { fprintf(stderr, \"qemu: '%s' invalid physical heads number\\n\", str); return -1; } if (secs < 1 || secs > 63) { fprintf(stderr, \"qemu: '%s' invalid physical secs number\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"trans\", str)) { if (!cyls) { fprintf(stderr, \"qemu: '%s' trans must be used with cyls,heads and secs\\n\", str); return -1; } if (!strcmp(buf, \"none\")) translation = BIOS_ATA_TRANSLATION_NONE; else if (!strcmp(buf, \"lba\")) translation = BIOS_ATA_TRANSLATION_LBA; else if (!strcmp(buf, \"auto\")) translation = BIOS_ATA_TRANSLATION_AUTO; else { fprintf(stderr, 
\"qemu: '%s' invalid translation type\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"media\", str)) { if (!strcmp(buf, \"disk\")) { media = MEDIA_DISK; } else if (!strcmp(buf, \"cdrom\")) { if (cyls || secs || heads) { fprintf(stderr, \"qemu: '%s' invalid physical CHS format\\n\", str); return -1; } media = MEDIA_CDROM; } else { fprintf(stderr, \"qemu: '%s' invalid media\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"snapshot\", str)) { if (!strcmp(buf, \"on\")) snapshot = 1; else if (!strcmp(buf, \"off\")) snapshot = 0; else { fprintf(stderr, \"qemu: '%s' invalid snapshot option\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"cache\", str)) { if (!strcmp(buf, \"off\") || !strcmp(buf, \"none\")) cache = 0; else if (!strcmp(buf, \"writethrough\")) cache = 1; else if (!strcmp(buf, \"writeback\")) cache = 2; else { fprintf(stderr, \"qemu: invalid cache option\\n\"); return -1; } } if (get_param_value(buf, sizeof(buf), \"format\", str)) { if (strcmp(buf, \"?\") == 0) { fprintf(stderr, \"qemu: Supported formats:\"); bdrv_iterate_format(bdrv_format_print, NULL); fprintf(stderr, \"\\n\"); return -1; } drv = bdrv_find_format(buf); if (!drv) { fprintf(stderr, \"qemu: '%s' invalid format\\n\", buf); return -1; } } if (arg->file == NULL) get_param_value(file, sizeof(file), \"file\", str); else pstrcpy(file, sizeof(file), arg->file); if (!get_param_value(serial, sizeof(serial), \"serial\", str)) memset(serial, 0, sizeof(serial)); onerror = BLOCK_ERR_STOP_ENOSPC; if (get_param_value(buf, sizeof(serial), \"werror\", str)) { if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO) { fprintf(stderr, \"werror is no supported by this format\\n\"); return -1; } if (!strcmp(buf, \"ignore\")) onerror = BLOCK_ERR_IGNORE; else if (!strcmp(buf, \"enospc\")) onerror = BLOCK_ERR_STOP_ENOSPC; else if (!strcmp(buf, \"stop\")) onerror = BLOCK_ERR_STOP_ANY; else if (!strcmp(buf, \"report\")) onerror = BLOCK_ERR_REPORT; else { fprintf(stderr, \"qemu: '%s' invalid write error action\\n\", buf); return -1; } } /* compute bus and unit according index */ if (index != -1) { if (bus_id != 0 || unit_id != -1) { fprintf(stderr, \"qemu: '%s' index cannot be used with bus and unit\\n\", str); return -1; } if (max_devs == 0) { unit_id = index; bus_id = 0; } else { unit_id = index % max_devs; bus_id = index / max_devs; } } /* if user doesn't specify a unit_id, * try to find the first free */ if (unit_id == -1) { unit_id = 0; while (drive_get_index(type, bus_id, unit_id) != -1) { unit_id++; if (max_devs && unit_id >= max_devs) { unit_id -= max_devs; bus_id++; } } } /* check unit id */ if (max_devs && unit_id >= max_devs) { fprintf(stderr, \"qemu: '%s' unit %d too big (max is %d)\\n\", str, unit_id, max_devs - 1); return -1; } /* * ignore multiple definitions */ if (drive_get_index(type, bus_id, unit_id) != -1) return -2; /* init */ if (type == IF_IDE || type == IF_SCSI) mediastr = (media == MEDIA_CDROM) ? 
\"-cd\" : \"-hd\"; if (max_devs) snprintf(buf, sizeof(buf), \"%s%i%s%i\", devname, bus_id, mediastr, unit_id); else snprintf(buf, sizeof(buf), \"%s%s%i\", devname, mediastr, unit_id); bdrv = bdrv_new(buf); drives_table_idx = drive_get_free_idx(); drives_table[drives_table_idx].bdrv = bdrv; drives_table[drives_table_idx].type = type; drives_table[drives_table_idx].bus = bus_id; drives_table[drives_table_idx].unit = unit_id; drives_table[drives_table_idx].onerror = onerror; drives_table[drives_table_idx].drive_opt_idx = arg - drives_opt; strncpy(drives_table[nb_drives].serial, serial, sizeof(serial)); nb_drives++; switch(type) { case IF_IDE: case IF_SCSI: case IF_XEN: switch(media) { case MEDIA_DISK: if (cyls != 0) { bdrv_set_geometry_hint(bdrv, cyls, heads, secs); bdrv_set_translation_hint(bdrv, translation); } break; case MEDIA_CDROM: bdrv_set_type_hint(bdrv, BDRV_TYPE_CDROM); break; } break; case IF_SD: /* FIXME: This isn't really a floppy, but it's a reasonable approximation. */ case IF_FLOPPY: bdrv_set_type_hint(bdrv, BDRV_TYPE_FLOPPY); break; case IF_PFLASH: case IF_MTD: case IF_VIRTIO: break; } if (!file[0]) return -2; bdrv_flags = 0; if (snapshot) { bdrv_flags |= BDRV_O_SNAPSHOT; cache = 2; /* always use write-back with snapshot */ } if (cache == 0) /* no caching */ bdrv_flags |= BDRV_O_NOCACHE; else if (cache == 2) /* write-back */ bdrv_flags |= BDRV_O_CACHE_WB; else if (cache == 3) /* not specified */ bdrv_flags |= BDRV_O_CACHE_DEF; if (bdrv_open2(bdrv, file, bdrv_flags, drv) < 0) { fprintf(stderr, \"qemu: could not open disk image %s\\n\", file); return -1; } if (bdrv_key_required(bdrv)) autostart = 0; return drives_table_idx; }"} {"target": 1, "idx": 10914, "func": "static void unix_wait_for_connect(int fd, Error *err, void *opaque) { MigrationState *s = opaque; if (fd < 0) { DPRINTF(\"migrate connect error: %s\\n\", error_get_pretty(err)); s->file = NULL; migrate_fd_error(s); } else { DPRINTF(\"migrate connect success\\n\"); s->file = qemu_fopen_socket(fd, \"wb\"); migrate_fd_connect(s); } }"} {"target": 0, "idx": 10950, "func": "static int ljpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet) { LJpegEncContext *s = avctx->priv_data; PutBitContext pb; const int width = avctx->width; const int height = avctx->height; const int mb_width = (width + s->hsample[0] - 1) / s->hsample[0]; const int mb_height = (height + s->vsample[0] - 1) / s->vsample[0]; int max_pkt_size = AV_INPUT_BUFFER_MIN_SIZE; int ret, header_bits; if (avctx->pix_fmt == AV_PIX_FMT_BGR24) max_pkt_size += width * height * 3 * 3; else { max_pkt_size += mb_width * mb_height * 3 * 4 * s->hsample[0] * s->vsample[0]; } if ((ret = ff_alloc_packet(pkt, max_pkt_size)) < 0) { av_log(avctx, AV_LOG_ERROR, \"Error getting output packet of size %d.\\n\", max_pkt_size); return ret; } init_put_bits(&pb, pkt->data, pkt->size); ff_mjpeg_encode_picture_header(avctx, &pb, &s->scantable, s->matrix); header_bits = put_bits_count(&pb); if (avctx->pix_fmt == AV_PIX_FMT_BGR24) ret = ljpeg_encode_bgr(avctx, &pb, pict); else ret = ljpeg_encode_yuv(avctx, &pb, pict); if (ret < 0) return ret; emms_c(); ff_mjpeg_encode_picture_trailer(&pb, header_bits); flush_put_bits(&pb); pkt->size = put_bits_ptr(&pb) - pb.buf; pkt->flags |= AV_PKT_FLAG_KEY; *got_packet = 1; return 0; }"} {"target": 1, "idx": 10962, "func": "int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter, AVFilterBufferRef *picref) { BufferSourceContext *c = buffer_filter->priv; AVFilterLink *outlink = buffer_filter->outputs[0]; 
int ret; if (c->picref) { av_log(buffer_filter, AV_LOG_ERROR, \"Buffering several frames is not supported. \" \"Please consume all available frames before adding a new one.\\n\" ); //return -1; } if (picref->video->w != c->w || picref->video->h != c->h || picref->format != c->pix_fmt) { AVFilterContext *scale = buffer_filter->outputs[0]->dst; AVFilterLink *link; char scale_param[1024]; av_log(buffer_filter, AV_LOG_INFO, \"Buffer video input changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\\n\", c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name, picref->video->w, picref->video->h, av_pix_fmt_descriptors[picref->format].name); if (!scale || strcmp(scale->filter->name, \"scale\")) { AVFilter *f = avfilter_get_by_name(\"scale\"); av_log(buffer_filter, AV_LOG_INFO, \"Inserting scaler filter\\n\"); if ((ret = avfilter_open(&scale, f, \"Input equalizer\")) < 0) return ret; snprintf(scale_param, sizeof(scale_param)-1, \"%d:%d:%s\", c->w, c->h, c->sws_param); if ((ret = avfilter_init_filter(scale, scale_param, NULL)) < 0) { avfilter_free(scale); return ret; } if ((ret = avfilter_insert_filter(buffer_filter->outputs[0], scale, 0, 0)) < 0) { avfilter_free(scale); return ret; } scale->outputs[0]->time_base = scale->inputs[0]->time_base; scale->outputs[0]->format= c->pix_fmt; } else if (!strcmp(scale->filter->name, \"scale\")) { snprintf(scale_param, sizeof(scale_param)-1, \"%d:%d:%s\", scale->outputs[0]->w, scale->outputs[0]->h, c->sws_param); scale->filter->init(scale, scale_param, NULL); } c->pix_fmt = scale->inputs[0]->format = picref->format; c->w = scale->inputs[0]->w = picref->video->w; c->h = scale->inputs[0]->h = picref->video->h; link = scale->outputs[0]; if ((ret = link->srcpad->config_props(link)) < 0) return ret; } c->picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, picref->video->w, picref->video->h); av_image_copy(c->picref->data, c->picref->linesize, picref->data, picref->linesize, picref->format, picref->video->w, picref->video->h); avfilter_copy_buffer_ref_props(c->picref, picref); return 0; }"} {"target": 1, "idx": 10971, "func": "int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps, int bit_length) { AVBufferRef *pps_buf; const SPS *sps; unsigned int pps_id = get_ue_golomb(gb); PPS *pps; int qp_bd_offset; int bits_left; int ret; if (pps_id >= MAX_PPS_COUNT) { av_log(avctx, AV_LOG_ERROR, \"pps_id %u out of range\\n\", pps_id); return AVERROR_INVALIDDATA; pps_buf = av_buffer_allocz(sizeof(*pps)); if (!pps_buf) return AVERROR(ENOMEM); pps = (PPS*)pps_buf->data; pps->data_size = gb->buffer_end - gb->buffer; if (pps->data_size > sizeof(pps->data)) { av_log(avctx, AV_LOG_WARNING, \"Truncating likely oversized PPS \" \"(%\"SIZE_SPECIFIER\" > %\"SIZE_SPECIFIER\")\\n\", pps->data_size, sizeof(pps->data)); pps->data_size = sizeof(pps->data); memcpy(pps->data, gb->buffer, pps->data_size); pps->sps_id = get_ue_golomb_31(gb); if ((unsigned)pps->sps_id >= MAX_SPS_COUNT || !ps->sps_list[pps->sps_id]) { av_log(avctx, AV_LOG_ERROR, \"sps_id %u out of range\\n\", pps->sps_id); sps = (const SPS*)ps->sps_list[pps->sps_id]->data; if (sps->bit_depth_luma > 14) { av_log(avctx, AV_LOG_ERROR, \"Invalid luma bit depth=%d\\n\", sps->bit_depth_luma); } else if (sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13) { av_log(avctx, AV_LOG_ERROR, \"Unimplemented luma bit depth=%d\\n\", sps->bit_depth_luma); ret = AVERROR_PATCHWELCOME; pps->cabac = get_bits1(gb); pps->pic_order_present = get_bits1(gb); pps->slice_group_count = get_ue_golomb(gb) + 1; 
if (pps->slice_group_count > 1) { pps->mb_slice_group_map_type = get_ue_golomb(gb); av_log(avctx, AV_LOG_ERROR, \"FMO not supported\\n\"); switch (pps->mb_slice_group_map_type) { case 0: #if 0 | for (i = 0; i <= num_slice_groups_minus1; i++) | | | | run_length[i] |1 |ue(v) | #endif break; case 2: #if 0 | for (i = 0; i < num_slice_groups_minus1; i++) { | | | | top_left_mb[i] |1 |ue(v) | | bottom_right_mb[i] |1 |ue(v) | | } | | | #endif break; case 3: case 4: case 5: #if 0 | slice_group_change_direction_flag |1 |u(1) | | slice_group_change_rate_minus1 |1 |ue(v) | #endif break; case 6: #if 0 | slice_group_id_cnt_minus1 |1 |ue(v) | | for (i = 0; i <= slice_group_id_cnt_minus1; i++)| | | | slice_group_id[i] |1 |u(v) | #endif break; pps->ref_count[0] = get_ue_golomb(gb) + 1; pps->ref_count[1] = get_ue_golomb(gb) + 1; if (pps->ref_count[0] - 1 > 32 - 1 || pps->ref_count[1] - 1 > 32 - 1) { av_log(avctx, AV_LOG_ERROR, \"reference overflow (pps)\\n\"); qp_bd_offset = 6 * (sps->bit_depth_luma - 8); pps->weighted_pred = get_bits1(gb); pps->weighted_bipred_idc = get_bits(gb, 2); pps->init_qp = get_se_golomb(gb) + 26 + qp_bd_offset; pps->init_qs = get_se_golomb(gb) + 26 + qp_bd_offset; pps->chroma_qp_index_offset[0] = get_se_golomb(gb); pps->deblocking_filter_parameters_present = get_bits1(gb); pps->constrained_intra_pred = get_bits1(gb); pps->redundant_pic_cnt_present = get_bits1(gb); pps->transform_8x8_mode = 0; memcpy(pps->scaling_matrix4, sps->scaling_matrix4, sizeof(pps->scaling_matrix4)); memcpy(pps->scaling_matrix8, sps->scaling_matrix8, sizeof(pps->scaling_matrix8)); bits_left = bit_length - get_bits_count(gb); if (bits_left > 0 && more_rbsp_data_in_pps(sps, avctx)) { pps->transform_8x8_mode = get_bits1(gb); decode_scaling_matrices(gb, sps, pps, 0, pps->scaling_matrix4, pps->scaling_matrix8); // second_chroma_qp_index_offset pps->chroma_qp_index_offset[1] = get_se_golomb(gb); if (pps->chroma_qp_index_offset[1] < -12 || pps->chroma_qp_index_offset[1] > 12) { } else { pps->chroma_qp_index_offset[1] = pps->chroma_qp_index_offset[0]; build_qp_table(pps, 0, pps->chroma_qp_index_offset[0], sps->bit_depth_luma); build_qp_table(pps, 1, pps->chroma_qp_index_offset[1], sps->bit_depth_luma); init_dequant_tables(pps, sps); if (pps->chroma_qp_index_offset[0] != pps->chroma_qp_index_offset[1]) pps->chroma_qp_diff = 1; if (avctx->debug & FF_DEBUG_PICT_INFO) { av_log(avctx, AV_LOG_DEBUG, \"pps:%u sps:%u %s slice_groups:%d ref:%u/%u %s qp:%d/%d/%d/%d %s %s %s %s\\n\", pps_id, pps->sps_id, pps->cabac ? \"CABAC\" : \"CAVLC\", pps->slice_group_count, pps->ref_count[0], pps->ref_count[1], pps->weighted_pred ? \"weighted\" : \"\", pps->init_qp, pps->init_qs, pps->chroma_qp_index_offset[0], pps->chroma_qp_index_offset[1], pps->deblocking_filter_parameters_present ? \"LPAR\" : \"\", pps->constrained_intra_pred ? \"CONSTR\" : \"\", pps->redundant_pic_cnt_present ? \"REDU\" : \"\", pps->transform_8x8_mode ? \"8x8DCT\" : \"\"); remove_pps(ps, pps_id); ps->pps_list[pps_id] = pps_buf; return 0; fail: av_buffer_unref(&pps_buf); return ret;"} {"target": 1, "idx": 10974, "func": "static void qxl_spice_monitors_config_async(PCIQXLDevice *qxl, int replay) { trace_qxl_spice_monitors_config(qxl->id); if (replay) { /* * don't use QXL_COOKIE_TYPE_IO: * - we are not running yet (post_load), we will assert * in send_events * - this is not a guest io, but a reply, so async_io isn't set. 
*/ spice_qxl_monitors_config_async(&qxl->ssd.qxl, qxl->guest_monitors_config, MEMSLOT_GROUP_GUEST, (uintptr_t)qxl_cookie_new( QXL_COOKIE_TYPE_POST_LOAD_MONITORS_CONFIG, 0)); } else { #if SPICE_SERVER_VERSION >= 0x000c06 /* release 0.12.6 */ if (qxl->max_outputs) { spice_qxl_set_monitors_config_limit(&qxl->ssd.qxl, qxl->max_outputs); } #endif qxl->guest_monitors_config = qxl->ram->monitors_config; spice_qxl_monitors_config_async(&qxl->ssd.qxl, qxl->ram->monitors_config, MEMSLOT_GROUP_GUEST, (uintptr_t)qxl_cookie_new(QXL_COOKIE_TYPE_IO, QXL_IO_MONITORS_CONFIG_ASYNC)); } }"} {"target": 0, "idx": 10982, "func": "static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) { VirtIODevice *vdev = VIRTIO_DEVICE(n); NetClientState *nc = qemu_get_queue(n->nic); int queues = n->multiqueue ? n->max_queues : 1; if (!get_vhost_net(nc->peer)) { return; } if (!!n->vhost_started == (virtio_net_started(n, status) && !nc->peer->link_down)) { return; } if (!n->vhost_started) { int r; if (!vhost_net_query(get_vhost_net(nc->peer), vdev)) { return; } n->vhost_started = 1; r = vhost_net_start(vdev, n->nic->ncs, queues); if (r < 0) { error_report(\"unable to start vhost net: %d: \" \"falling back on userspace virtio\", -r); n->vhost_started = 0; } } else { vhost_net_stop(vdev, n->nic->ncs, queues); n->vhost_started = 0; } }"} {"target": 0, "idx": 10984, "func": "static void spapr_cpu_core_host_initfn(Object *obj) { sPAPRCPUCore *core = SPAPR_CPU_CORE(obj); char *name = g_strdup_printf(\"%s-\" TYPE_POWERPC_CPU, \"host\"); ObjectClass *oc = object_class_by_name(name); g_assert(oc); g_free((void *)name); core->cpu_class = oc; }"} {"target": 0, "idx": 10992, "func": "static void init_block_mapping(Vp3DecodeContext *s) { int i, j; signed int hilbert_walk_y[16]; signed int hilbert_walk_c[16]; signed int hilbert_walk_mb[4]; int current_fragment = 0; int current_width = 0; int current_height = 0; int right_edge = 0; int bottom_edge = 0; int superblock_row_inc = 0; int *hilbert = NULL; int mapping_index = 0; int current_macroblock; int c_fragment; signed char travel_width[16] = { 1, 1, 0, -1, 0, 0, 1, 0, 1, 0, 1, 0, 0, -1, 0, 1 }; signed char travel_height[16] = { 0, 0, 1, 0, 1, 1, 0, -1, 0, 1, 0, -1, -1, 0, -1, 0 }; signed char travel_width_mb[4] = { 1, 0, 1, 0 }; signed char travel_height_mb[4] = { 0, 1, 0, -1 }; debug_vp3(\" vp3: initialize block mapping tables\\n\"); /* figure out hilbert pattern per these frame dimensions */ hilbert_walk_y[0] = 1; hilbert_walk_y[1] = 1; hilbert_walk_y[2] = s->fragment_width; hilbert_walk_y[3] = -1; hilbert_walk_y[4] = s->fragment_width; hilbert_walk_y[5] = s->fragment_width; hilbert_walk_y[6] = 1; hilbert_walk_y[7] = -s->fragment_width; hilbert_walk_y[8] = 1; hilbert_walk_y[9] = s->fragment_width; hilbert_walk_y[10] = 1; hilbert_walk_y[11] = -s->fragment_width; hilbert_walk_y[12] = -s->fragment_width; hilbert_walk_y[13] = -1; hilbert_walk_y[14] = -s->fragment_width; hilbert_walk_y[15] = 1; hilbert_walk_c[0] = 1; hilbert_walk_c[1] = 1; hilbert_walk_c[2] = s->fragment_width / 2; hilbert_walk_c[3] = -1; hilbert_walk_c[4] = s->fragment_width / 2; hilbert_walk_c[5] = s->fragment_width / 2; hilbert_walk_c[6] = 1; hilbert_walk_c[7] = -s->fragment_width / 2; hilbert_walk_c[8] = 1; hilbert_walk_c[9] = s->fragment_width / 2; hilbert_walk_c[10] = 1; hilbert_walk_c[11] = -s->fragment_width / 2; hilbert_walk_c[12] = -s->fragment_width / 2; hilbert_walk_c[13] = -1; hilbert_walk_c[14] = -s->fragment_width / 2; hilbert_walk_c[15] = 1; hilbert_walk_mb[0] = 1; hilbert_walk_mb[1] = 
s->macroblock_width; hilbert_walk_mb[2] = 1; hilbert_walk_mb[3] = -s->macroblock_width; /* iterate through each superblock (all planes) and map the fragments */ for (i = 0; i < s->superblock_count; i++) { debug_init(\" superblock %d (u starts @ %d, v starts @ %d)\\n\", i, s->u_superblock_start, s->v_superblock_start); /* time to re-assign the limits? */ if (i == 0) { /* start of Y superblocks */ right_edge = s->fragment_width; bottom_edge = s->fragment_height; current_width = 0; current_height = 0; superblock_row_inc = 3 * s->fragment_width; hilbert = hilbert_walk_y; /* the first operation for this variable is to advance by 1 */ current_fragment = -1; } else if (i == s->u_superblock_start) { /* start of U superblocks */ right_edge = s->fragment_width / 2; bottom_edge = s->fragment_height / 2; current_width = 0; current_height = 0; superblock_row_inc = 3 * (s->fragment_width / 2); hilbert = hilbert_walk_c; /* the first operation for this variable is to advance by 1 */ current_fragment = s->u_fragment_start - 1; } else if (i == s->v_superblock_start) { /* start of V superblocks */ right_edge = s->fragment_width / 2; bottom_edge = s->fragment_height / 2; current_width = 0; current_height = 0; superblock_row_inc = 3 * (s->fragment_width / 2); hilbert = hilbert_walk_c; /* the first operation for this variable is to advance by 1 */ current_fragment = s->v_fragment_start - 1; } if (current_width >= right_edge) { /* reset width and move to next superblock row */ current_width = 0; current_height += 4; /* fragment is now at the start of a new superblock row */ current_fragment += superblock_row_inc; } /* iterate through all 16 fragments in a superblock */ for (j = 0; j < 16; j++) { current_fragment += hilbert[j]; current_height += travel_height[j]; /* check if the fragment is in bounds */ if ((current_width <= right_edge) && (current_height < bottom_edge)) { s->superblock_fragments[mapping_index] = current_fragment; debug_init(\" mapping fragment %d to superblock %d, position %d\\n\", s->superblock_fragments[mapping_index], i, j); } else { s->superblock_fragments[mapping_index] = -1; debug_init(\" superblock %d, position %d has no fragment\\n\", i, j); } current_width += travel_width[j]; mapping_index++; } } /* initialize the superblock <-> macroblock mapping; iterate through * all of the Y plane superblocks to build this mapping */ right_edge = s->macroblock_width; bottom_edge = s->macroblock_height; current_width = 0; current_height = 0; superblock_row_inc = s->macroblock_width; hilbert = hilbert_walk_mb; mapping_index = 0; current_macroblock = -1; for (i = 0; i < s->u_superblock_start; i++) { if (current_width >= right_edge) { /* reset width and move to next superblock row */ current_width = 0; current_height += 2; /* macroblock is now at the start of a new superblock row */ current_macroblock += superblock_row_inc; } /* iterate through each potential macroblock in the superblock */ for (j = 0; j < 4; j++) { current_macroblock += hilbert_walk_mb[j]; current_height += travel_height_mb[j]; /* check if the macroblock is in bounds */ if ((current_width <= right_edge) && (current_height < bottom_edge)) { s->superblock_macroblocks[mapping_index] = current_macroblock; debug_init(\" mapping macroblock %d to superblock %d, position %d\\n\", s->superblock_macroblocks[mapping_index], i, j); } else { s->superblock_macroblocks[mapping_index] = -1; debug_init(\" superblock %d, position %d has no macroblock\\n\", i, j); } current_width += travel_width_mb[j]; mapping_index++; } } /* initialize the macroblock 
<-> fragment mapping */ current_fragment = 0; current_macroblock = 0; mapping_index = 0; for (i = 0; i < s->fragment_height; i += 2) { for (j = 0; j < s->fragment_width; j += 2) { debug_init(\" macroblock %d contains fragments: \", current_macroblock); s->all_fragments[current_fragment].macroblock = current_macroblock; s->macroblock_fragments[mapping_index++] = current_fragment; debug_init(\"%d \", current_fragment); if (j + 1 < s->fragment_width) { s->all_fragments[current_fragment + 1].macroblock = current_macroblock; s->macroblock_fragments[mapping_index++] = current_fragment + 1; debug_init(\"%d \", current_fragment + 1); } else s->macroblock_fragments[mapping_index++] = -1; if (i + 1 < s->fragment_height) { s->all_fragments[current_fragment + s->fragment_width].macroblock = current_macroblock; s->macroblock_fragments[mapping_index++] = current_fragment + s->fragment_width; debug_init(\"%d \", current_fragment + s->fragment_width); } else s->macroblock_fragments[mapping_index++] = -1; if ((j + 1 < s->fragment_width) && (i + 1 < s->fragment_height)) { s->all_fragments[current_fragment + s->fragment_width + 1].macroblock = current_macroblock; s->macroblock_fragments[mapping_index++] = current_fragment + s->fragment_width + 1; debug_init(\"%d \", current_fragment + s->fragment_width + 1); } else s->macroblock_fragments[mapping_index++] = -1; /* C planes */ c_fragment = s->u_fragment_start + (i * s->fragment_width / 4) + (j / 2); s->all_fragments[c_fragment].macroblock = s->macroblock_count; s->macroblock_fragments[mapping_index++] = c_fragment; debug_init(\"%d \", c_fragment); c_fragment = s->v_fragment_start + (i * s->fragment_width / 4) + (j / 2); s->all_fragments[c_fragment].macroblock = s->macroblock_count; s->macroblock_fragments[mapping_index++] = c_fragment; debug_init(\"%d \", c_fragment); debug_init(\"\\n\"); if (j + 2 <= s->fragment_width) current_fragment += 2; else current_fragment++; current_macroblock++; } current_fragment += s->fragment_width; } }"} {"target": 0, "idx": 10999, "func": "void m68k_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) { unsigned int i; for (i = 0; m68k_cpu_defs[i].name; i++) { (*cpu_fprintf)(f, \"%s\\n\", m68k_cpu_defs[i].name); } }"} {"target": 0, "idx": 11016, "func": "int acpi_pcihp_device_hotplug(AcpiPciHpState *s, PCIDevice *dev, PCIHotplugState state) { int slot = PCI_SLOT(dev->devfn); int bsel = acpi_pcihp_get_bsel(dev->bus); if (bsel < 0) { return -1; } /* Don't send event when device is enabled during qemu machine creation: * it is present on boot, no hotplug event is necessary. We do send an * event when the device is disabled later. 
*/ if (state == PCI_COLDPLUG_ENABLED) { s->acpi_pcihp_pci_status[bsel].device_present |= (1U << slot); return 0; } if (state == PCI_HOTPLUG_ENABLED) { enable_device(s, bsel, slot); } else { disable_device(s, bsel, slot); } return 0; }"} {"target": 0, "idx": 11026, "func": "static void frame_start(H264Context *h){ MpegEncContext * const s = &h->s; int i; MPV_frame_start(s, s->avctx); ff_er_frame_start(s); assert(s->linesize && s->uvlinesize); for(i=0; i<16; i++){ h->block_offset[i]= 4*((scan8[i] - scan8[0])&7) + 4*s->linesize*((scan8[i] - scan8[0])>>3); h->block_offset[24+i]= 4*((scan8[i] - scan8[0])&7) + 8*s->linesize*((scan8[i] - scan8[0])>>3); } for(i=0; i<4; i++){ h->block_offset[16+i]= h->block_offset[20+i]= 4*((scan8[i] - scan8[0])&7) + 4*s->uvlinesize*((scan8[i] - scan8[0])>>3); h->block_offset[24+16+i]= h->block_offset[24+20+i]= 4*((scan8[i] - scan8[0])&7) + 8*s->uvlinesize*((scan8[i] - scan8[0])>>3); } /* can't be in alloc_tables because linesize isn't known there. * FIXME: redo bipred weight to not require extra buffer? */ if(!s->obmc_scratchpad) s->obmc_scratchpad = av_malloc(16*s->linesize + 2*8*s->uvlinesize); // s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1; }"} {"target": 1, "idx": 11046, "func": "static void test_flush_nodev(void) { QPCIDevice *dev; QPCIBar bmdma_bar, ide_bar; ide_test_start(\"\"); dev = get_pci_device(&bmdma_bar, &ide_bar); /* FLUSH CACHE command on device 0*/ qpci_io_writeb(dev, ide_bar, reg_device, 0); qpci_io_writeb(dev, ide_bar, reg_command, CMD_FLUSH_CACHE); /* Just testing that qemu doesn't crash... */ ide_test_quit(); }"} {"target": 1, "idx": 11070, "func": "void qmp_guest_file_flush(int64_t handle, Error **errp) { GuestFileHandle *gfh = guest_file_handle_find(handle, errp); FILE *fh; int ret; if (!gfh) { return; } fh = gfh->fh; ret = fflush(fh); if (ret == EOF) { error_setg_errno(errp, errno, \"failed to flush file\"); } }"} {"target": 0, "idx": 11080, "func": "static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field, int colfield, int mbafi){ MpegEncContext * const s = &h->s; Picture * const ref1 = &h->ref_list[1][0]; int j, old_ref, rfield; int start= mbafi ? 16 : 0; int end = mbafi ? 16+2*h->ref_count[0] : h->ref_count[0]; int interl= mbafi || s->picture_structure != PICT_FRAME; /* bogus; fills in for missing frames */ memset(map[list], 0, sizeof(map[list])); for(rfield=0; rfield<2; rfield++){ for(old_ref=0; old_ref<ref1->ref_count[colfield][list]; old_ref++){ int poc = ref1->ref_poc[colfield][list][old_ref]; if (!interl) poc |= 3; else if( interl && (poc&3) == 3) //FIXME store all MBAFF references so this isnt needed poc= (poc&~3) + rfield + 1; for(j=start; j<end; j++){ if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) { int cur_ref= mbafi ? 
(j-16)^field : j; map[list][2*old_ref + (rfield^field) + 16] = cur_ref; if(rfield == field || !interl) map[list][old_ref] = cur_ref; break; } } } } }"} {"target": 0, "idx": 11084, "func": "static void compute_frame_duration(int *pnum, int *pden, AVFormatContext *s, AVStream *st, AVCodecParserContext *pc, AVPacket *pkt) { int frame_size; *pnum = 0; *pden = 0; switch(st->codec.codec_type) { case CODEC_TYPE_VIDEO: *pnum = st->codec.frame_rate_base; *pden = st->codec.frame_rate; if (pc && pc->repeat_pict) { *pden *= 2; *pnum = (*pnum) * (2 + pc->repeat_pict); } break; case CODEC_TYPE_AUDIO: frame_size = get_audio_frame_size(&st->codec, pkt->size); if (frame_size < 0) break; *pnum = frame_size; *pden = st->codec.sample_rate; break; default: break; } }"} {"target": 1, "idx": 11113, "func": "static int os_host_main_loop_wait(int64_t timeout) { GMainContext *context = g_main_context_default(); GPollFD poll_fds[1024 * 2]; /* this is probably overkill */ int select_ret = 0; int g_poll_ret, ret, i, n_poll_fds; PollingEntry *pe; WaitObjects *w = &wait_objects; gint poll_timeout; int64_t poll_timeout_ns; static struct timeval tv0; fd_set rfds, wfds, xfds; int nfds; /* XXX: need to suppress polling by better using win32 events */ ret = 0; for (pe = first_polling_entry; pe != NULL; pe = pe->next) { ret |= pe->func(pe->opaque); } if (ret != 0) { return ret; } FD_ZERO(&rfds); FD_ZERO(&wfds); FD_ZERO(&xfds); nfds = pollfds_fill(gpollfds, &rfds, &wfds, &xfds); if (nfds >= 0) { select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0); if (select_ret != 0) { timeout = 0; } if (select_ret > 0) { pollfds_poll(gpollfds, nfds, &rfds, &wfds, &xfds); } } g_main_context_prepare(context, &max_priority); n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout, poll_fds, ARRAY_SIZE(poll_fds)); g_assert(n_poll_fds <= ARRAY_SIZE(poll_fds)); for (i = 0; i < w->num; i++) { poll_fds[n_poll_fds + i].fd = (DWORD_PTR)w->events[i]; poll_fds[n_poll_fds + i].events = G_IO_IN; } if (poll_timeout < 0) { poll_timeout_ns = -1; } else { poll_timeout_ns = (int64_t)poll_timeout * (int64_t)SCALE_MS; } poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout); qemu_mutex_unlock_iothread(); g_poll_ret = qemu_poll_ns(poll_fds, n_poll_fds + w->num, poll_timeout_ns); qemu_mutex_lock_iothread(); if (g_poll_ret > 0) { for (i = 0; i < w->num; i++) { w->revents[i] = poll_fds[n_poll_fds + i].revents; } for (i = 0; i < w->num; i++) { if (w->revents[i] && w->func[i]) { w->func[i](w->opaque[i]); } } } if (g_main_context_check(context, max_priority, poll_fds, n_poll_fds)) { g_main_context_dispatch(context); } return select_ret || g_poll_ret; }"} {"target": 1, "idx": 11117, "func": "int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id) { BlockDriver *drv = bs->drv; if (!drv) { return -ENOMEDIUM; } if (drv->bdrv_snapshot_delete) { return drv->bdrv_snapshot_delete(bs, snapshot_id); } if (bs->file) { return bdrv_snapshot_delete(bs->file, snapshot_id); } return -ENOTSUP; }"} {"target": 0, "idx": 11126, "func": "static uint32_t pci_apb_ioreadl (void *opaque, target_phys_addr_t addr) { uint32_t val; val = bswap32(cpu_inl(addr & IOPORTS_MASK)); return val; }"} {"target": 0, "idx": 11127, "func": "static void qemu_fill_buffer(QEMUFile *f) { int len; if (f->is_writable) return; if (f->is_file) { fseek(f->outfile, f->buf_offset, SEEK_SET); len = fread(f->buf, 1, IO_BUF_SIZE, f->outfile); if (len < 0) len = 0; } else { len = bdrv_pread(f->bs, f->base_offset + f->buf_offset, f->buf, IO_BUF_SIZE); if (len < 0) len = 0; } 
f->buf_index = 0; f->buf_size = len; f->buf_offset += len; }"} {"target": 0, "idx": 11128, "func": "void net_hub_check_clients(void) { NetHub *hub; NetHubPort *port; NetClientState *peer; QLIST_FOREACH(hub, &hubs, next) { int has_nic = 0, has_host_dev = 0; QLIST_FOREACH(port, &hub->ports, next) { peer = port->nc.peer; if (!peer) { fprintf(stderr, \"Warning: hub port %s has no peer\\n\", port->nc.name); continue; } switch (peer->info->type) { case NET_CLIENT_DRIVER_NIC: has_nic = 1; break; case NET_CLIENT_DRIVER_USER: case NET_CLIENT_DRIVER_TAP: case NET_CLIENT_DRIVER_SOCKET: case NET_CLIENT_DRIVER_VDE: case NET_CLIENT_DRIVER_VHOST_USER: has_host_dev = 1; break; default: break; } } if (has_host_dev && !has_nic) { warn_report(\"vlan %d with no nics\", hub->id); } if (has_nic && !has_host_dev) { fprintf(stderr, \"Warning: vlan %d is not connected to host network\\n\", hub->id); } } }"} {"target": 0, "idx": 11140, "func": "DVDemuxContext* dv_init_demux(AVFormatContext *s) { DVDemuxContext *c; c = av_mallocz(sizeof(DVDemuxContext)); if (!c) return NULL; c->vst = av_new_stream(s, 0); c->ast[0] = av_new_stream(s, 0); if (!c->vst || !c->ast[0]) goto fail; av_set_pts_info(c->vst, 64, 1, 30000); av_set_pts_info(c->ast[0], 64, 1, 30000); c->fctx = s; c->ast[1] = NULL; c->ach = 0; c->frames = 0; c->abytes = 0; c->audio_pkt[0].size = 0; c->audio_pkt[1].size = 0; c->vst->codec.codec_type = CODEC_TYPE_VIDEO; c->vst->codec.codec_id = CODEC_ID_DVVIDEO; c->vst->codec.bit_rate = 25000000; c->ast[0]->codec.codec_type = CODEC_TYPE_AUDIO; c->ast[0]->codec.codec_id = CODEC_ID_PCM_S16LE; s->ctx_flags |= AVFMTCTX_NOHEADER; return c; fail: if (c->vst) av_free(c->vst); if (c->ast[0]) av_free(c->ast[0]); av_free(c); return NULL; }"} {"target": 0, "idx": 11141, "func": "static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, BdrvRequestFlags flags) { BlockDriver *drv = bs->drv; QEMUIOVector qiov; struct iovec iov = {0}; int ret = 0; bool need_flush = false; int head = 0; int tail = 0; int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX); int alignment = MAX(bs->bl.pwrite_zeroes_alignment, bs->bl.request_alignment); int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_WRITE_ZEROES_BOUNCE_BUFFER); assert(alignment % bs->bl.request_alignment == 0); head = offset % alignment; tail = (offset + bytes) % alignment; max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment); assert(max_write_zeroes >= bs->bl.request_alignment); while (bytes > 0 && !ret) { int num = bytes; /* Align request. Block drivers can expect the \"bulk\" of the request * to be aligned, and that unaligned requests do not cross cluster * boundaries. */ if (head) { /* Make a small request up to the first aligned sector. For * convenience, limit this request to max_transfer even if * we don't need to fall back to writes. */ num = MIN(MIN(bytes, max_transfer), alignment - head); head = (head + num) % alignment; assert(num < max_write_zeroes); } else if (tail && num > alignment) { /* Shorten the request to the last aligned sector. 
*/ num -= tail; } /* limit request size */ if (num > max_write_zeroes) { num = max_write_zeroes; } ret = -ENOTSUP; /* First try the efficient write zeroes operation */ if (drv->bdrv_co_pwrite_zeroes) { ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, flags & bs->supported_zero_flags); if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && !(bs->supported_zero_flags & BDRV_REQ_FUA)) { need_flush = true; } } else { assert(!bs->supported_zero_flags); } if (ret == -ENOTSUP) { /* Fall back to bounce buffer if write zeroes is unsupported */ BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE; if ((flags & BDRV_REQ_FUA) && !(bs->supported_write_flags & BDRV_REQ_FUA)) { /* No need for bdrv_driver_pwrite() to do a fallback * flush on each chunk; use just one at the end */ write_flags &= ~BDRV_REQ_FUA; need_flush = true; } num = MIN(num, max_transfer); iov.iov_len = num; if (iov.iov_base == NULL) { iov.iov_base = qemu_try_blockalign(bs, num); if (iov.iov_base == NULL) { ret = -ENOMEM; goto fail; } memset(iov.iov_base, 0, num); } qemu_iovec_init_external(&qiov, &iov, 1); ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags); /* Keep bounce buffer around if it is big enough for all * all future requests. */ if (num < max_transfer) { qemu_vfree(iov.iov_base); iov.iov_base = NULL; } } offset += num; bytes -= num; } fail: if (ret == 0 && need_flush) { ret = bdrv_co_flush(bs); } qemu_vfree(iov.iov_base); return ret; }"} {"target": 0, "idx": 11142, "func": "static void openpic_save_IRQ_queue(QEMUFile* f, IRQQueue *q) { unsigned int i; for (i = 0; i < BF_WIDTH(MAX_IRQ); i++) qemu_put_be32s(f, &q->queue[i]); qemu_put_sbe32s(f, &q->next); qemu_put_sbe32s(f, &q->priority); }"} {"target": 0, "idx": 11143, "func": "static EHCIQueue *ehci_state_fetchqh(EHCIState *ehci, int async) { uint32_t entry; EHCIQueue *q; int reload; entry = ehci_get_fetch_addr(ehci, async); q = ehci_find_queue_by_qh(ehci, entry); if (NULL == q) { q = ehci_alloc_queue(ehci, async); } q->qhaddr = entry; q->seen++; if (q->seen > 1) { /* we are going in circles -- stop processing */ ehci_set_state(ehci, async, EST_ACTIVE); q = NULL; goto out; } get_dwords(NLPTR_GET(q->qhaddr), (uint32_t *) &q->qh, sizeof(EHCIqh) >> 2); ehci_trace_qh(q, NLPTR_GET(q->qhaddr), &q->qh); if (q->async == EHCI_ASYNC_INFLIGHT) { /* I/O still in progress -- skip queue */ ehci_set_state(ehci, async, EST_HORIZONTALQH); goto out; } if (q->async == EHCI_ASYNC_FINISHED) { /* I/O finished -- continue processing queue */ trace_usb_ehci_queue_action(q, \"resume\"); ehci_set_state(ehci, async, EST_EXECUTING); goto out; } if (async && (q->qh.epchar & QH_EPCHAR_H)) { /* EHCI spec version 1.0 Section 4.8.3 & 4.10.1 */ if (ehci->usbsts & USBSTS_REC) { ehci_clear_usbsts(ehci, USBSTS_REC); } else { DPRINTF(\"FETCHQH: QH 0x%08x. 
H-bit set, reclamation status reset\" \" - done processing\\n\", q->qhaddr); ehci_set_state(ehci, async, EST_ACTIVE); q = NULL; goto out; } } #if EHCI_DEBUG if (q->qhaddr != q->qh.next) { DPRINTF(\"FETCHQH: QH 0x%08x (h %x halt %x active %x) next 0x%08x\\n\", q->qhaddr, q->qh.epchar & QH_EPCHAR_H, q->qh.token & QTD_TOKEN_HALT, q->qh.token & QTD_TOKEN_ACTIVE, q->qh.next); } #endif reload = get_field(q->qh.epchar, QH_EPCHAR_RL); if (reload) { set_field(&q->qh.altnext_qtd, reload, QH_ALTNEXT_NAKCNT); } if (q->qh.token & QTD_TOKEN_HALT) { ehci_set_state(ehci, async, EST_HORIZONTALQH); } else if ((q->qh.token & QTD_TOKEN_ACTIVE) && (q->qh.current_qtd > 0x1000)) { q->qtdaddr = q->qh.current_qtd; ehci_set_state(ehci, async, EST_FETCHQTD); } else { /* EHCI spec version 1.0 Section 4.10.2 */ ehci_set_state(ehci, async, EST_ADVANCEQUEUE); } out: return q; }"} {"target": 0, "idx": 11167, "func": "static void omap_rtc_reset(struct omap_rtc_s *s) { struct tm tm; s->interrupts = 0; s->comp_reg = 0; s->running = 0; s->pm_am = 0; s->auto_comp = 0; s->round = 0; s->tick = qemu_get_clock(rt_clock); memset(&s->alarm_tm, 0, sizeof(s->alarm_tm)); s->alarm_tm.tm_mday = 0x01; s->status = 1 << 7; qemu_get_timedate(&tm, 0); s->ti = mktimegm(&tm); omap_rtc_alarm_update(s); omap_rtc_tick(s); }"} {"target": 1, "idx": 11193, "func": "static void tosa_init(MachineState *machine) { const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *rom = g_new(MemoryRegion, 1); PXA2xxState *mpu; TC6393xbState *tmio; DeviceState *scp0, *scp1; if (!cpu_model) cpu_model = \"pxa255\"; mpu = pxa255_init(address_space_mem, tosa_binfo.ram_size); memory_region_init_ram(rom, NULL, \"tosa.rom\", TOSA_ROM, &error_abort); vmstate_register_ram_global(rom); memory_region_set_readonly(rom, true); memory_region_add_subregion(address_space_mem, 0, rom); tmio = tc6393xb_init(address_space_mem, 0x10000000, qdev_get_gpio_in(mpu->gpio, TOSA_GPIO_TC6393XB_INT)); scp0 = sysbus_create_simple(\"scoop\", 0x08800000, NULL); scp1 = sysbus_create_simple(\"scoop\", 0x14800040, NULL); tosa_gpio_setup(mpu, scp0, scp1, tmio); tosa_microdrive_attach(mpu); tosa_tg_init(mpu); tosa_binfo.kernel_filename = kernel_filename; tosa_binfo.kernel_cmdline = kernel_cmdline; tosa_binfo.initrd_filename = initrd_filename; tosa_binfo.board_id = 0x208; arm_load_kernel(mpu->cpu, &tosa_binfo); sl_bootparam_write(SL_PXA_PARAM_BASE); }"} {"target": 1, "idx": 11202, "func": "static void get_sensor_evt_enable(IPMIBmcSim *ibs, uint8_t *cmd, unsigned int cmd_len, uint8_t *rsp, unsigned int *rsp_len, unsigned int max_rsp_len) { IPMISensor *sens; IPMI_CHECK_CMD_LEN(3); if ((cmd[2] > MAX_SENSORS) || !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) { rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT; return; } sens = ibs->sensors + cmd[2]; IPMI_ADD_RSP_DATA(IPMI_SENSOR_GET_RET_STATUS(sens)); IPMI_ADD_RSP_DATA(sens->assert_enable & 0xff); IPMI_ADD_RSP_DATA((sens->assert_enable >> 8) & 0xff); IPMI_ADD_RSP_DATA(sens->deassert_enable & 0xff); IPMI_ADD_RSP_DATA((sens->deassert_enable >> 8) & 0xff); }"} {"target": 1, "idx": 11235, "func": "struct omap_uart_s *omap_uart_init(hwaddr base, qemu_irq irq, omap_clk fclk, omap_clk iclk, qemu_irq txdma, qemu_irq rxdma, const char *label, CharDriverState *chr) { struct omap_uart_s *s = (struct omap_uart_s *) g_malloc0(sizeof(struct omap_uart_s)); 
s->base = base; s->fclk = fclk; s->irq = irq; s->serial = serial_mm_init(get_system_memory(), base, 2, irq, omap_clk_getrate(fclk)/16, chr ?: qemu_chr_new(label, \"null\", NULL), DEVICE_NATIVE_ENDIAN); return s; }"} {"target": 0, "idx": 11268, "func": "static int create_dynamic_disk(int fd, uint8_t *buf, int64_t total_sectors) { VHDDynDiskHeader *dyndisk_header = (VHDDynDiskHeader *) buf; size_t block_size, num_bat_entries; int i; int ret = -EIO; // Write the footer (twice: at the beginning and at the end) block_size = 0x200000; num_bat_entries = (total_sectors + block_size / 512) / (block_size / 512); if (write(fd, buf, HEADER_SIZE) != HEADER_SIZE) { goto fail; } if (lseek(fd, 1536 + ((num_bat_entries * 4 + 511) & ~511), SEEK_SET) < 0) { goto fail; } if (write(fd, buf, HEADER_SIZE) != HEADER_SIZE) { goto fail; } // Write the initial BAT if (lseek(fd, 3 * 512, SEEK_SET) < 0) { goto fail; } memset(buf, 0xFF, 512); for (i = 0; i < (num_bat_entries * 4 + 511) / 512; i++) { if (write(fd, buf, 512) != 512) { goto fail; } } // Prepare the Dynamic Disk Header memset(buf, 0, 1024); memcpy(dyndisk_header->magic, \"cxsparse\", 8); /* * Note: The spec is actually wrong here for data_offset, it says * 0xFFFFFFFF, but MS tools expect all 64 bits to be set. */ dyndisk_header->data_offset = be64_to_cpu(0xFFFFFFFFFFFFFFFFULL); dyndisk_header->table_offset = be64_to_cpu(3 * 512); dyndisk_header->version = be32_to_cpu(0x00010000); dyndisk_header->block_size = be32_to_cpu(block_size); dyndisk_header->max_table_entries = be32_to_cpu(num_bat_entries); dyndisk_header->checksum = be32_to_cpu(vpc_checksum(buf, 1024)); // Write the header if (lseek(fd, 512, SEEK_SET) < 0) { goto fail; } if (write(fd, buf, 1024) != 1024) { goto fail; } ret = 0; fail: return ret; }"} {"target": 0, "idx": 11303, "func": "static int qemu_rbd_snap_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab) { BDRVRBDState *s = bs->opaque; QEMUSnapshotInfo *sn_info, *sn_tab = NULL; int i, snap_count; rbd_snap_info_t *snaps; int max_snaps = RBD_MAX_SNAPS; do { snaps = g_malloc(sizeof(*snaps) * max_snaps); snap_count = rbd_snap_list(s->image, snaps, &max_snaps); if (snap_count < 0) { g_free(snaps); } } while (snap_count == -ERANGE); if (snap_count <= 0) { return snap_count; } sn_tab = g_malloc0(snap_count * sizeof(QEMUSnapshotInfo)); for (i = 0; i < snap_count; i++) { const char *snap_name = snaps[i].name; sn_info = sn_tab + i; pstrcpy(sn_info->id_str, sizeof(sn_info->id_str), snap_name); pstrcpy(sn_info->name, sizeof(sn_info->name), snap_name); sn_info->vm_state_size = snaps[i].size; sn_info->date_sec = 0; sn_info->date_nsec = 0; sn_info->vm_clock_nsec = 0; } rbd_snap_list_end(snaps); *psn_tab = sn_tab; return snap_count; }"} {"target": 0, "idx": 11316, "func": "static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, int *prot, uint32_t *fsr) { ARMCPU *cpu = arm_env_get_cpu(env); bool is_user = regime_is_user(env, mmu_idx); int n; int matchregion = -1; bool hit = false; *phys_ptr = address; *prot = 0; /* Unlike the ARM ARM pseudocode, we don't need to check whether this * was an exception vector read from the vector table (which is always * done using the default system address map), because those accesses * are done in arm_v7m_load_vector(), which always does a direct * read using address_space_ldl(), rather than going via this function. 
*/ if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ hit = true; } else if (m_is_ppb_region(env, address)) { hit = true; } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { hit = true; } else { for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { /* region search */ /* Note that the base address is bits [31:5] from the register * with bits [4:0] all zeroes, but the limit address is bits * [31:5] from the register with bits [4:0] all ones. */ uint32_t base = env->pmsav8.rbar[n] & ~0x1f; uint32_t limit = env->pmsav8.rlar[n] | 0x1f; if (!(env->pmsav8.rlar[n] & 0x1)) { /* Region disabled */ continue; } if (address < base || address > limit) { continue; } if (hit) { /* Multiple regions match -- always a failure (unlike * PMSAv7 where highest-numbered-region wins) */ *fsr = 0x00d; /* permission fault */ return true; } matchregion = n; hit = true; if (base & ~TARGET_PAGE_MASK) { qemu_log_mask(LOG_UNIMP, \"MPU_RBAR[%d]: No support for MPU region base\" \"address of 0x%\" PRIx32 \". Minimum alignment is \" \"%d\\n\", n, base, TARGET_PAGE_BITS); continue; } if ((limit + 1) & ~TARGET_PAGE_MASK) { qemu_log_mask(LOG_UNIMP, \"MPU_RBAR[%d]: No support for MPU region limit\" \"address of 0x%\" PRIx32 \". Minimum alignment is \" \"%d\\n\", n, limit, TARGET_PAGE_BITS); continue; } } } if (!hit) { /* background fault */ *fsr = 0; return true; } if (matchregion == -1) { /* hit using the background region */ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); } else { uint32_t ap = extract32(env->pmsav8.rbar[matchregion], 1, 2); uint32_t xn = extract32(env->pmsav8.rbar[matchregion], 0, 1); if (m_is_system_region(env, address)) { /* System space is always execute never */ xn = 1; } *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); if (*prot && !xn) { *prot |= PAGE_EXEC; } /* We don't need to look the attribute up in the MAIR0/MAIR1 * registers because that only tells us about cacheability. 
*/ } *fsr = 0x00d; /* Permission fault */ return !(*prot & (1 << access_type)); }"} {"target": 1, "idx": 11328, "func": "static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp, uint64_t off, uint32_t max_count) { ssize_t err; size_t offset = 7; int read_count; int64_t xattr_len; V9fsVirtioState *v = container_of(s, V9fsVirtioState, state); VirtQueueElement *elem = v->elems[pdu->idx]; xattr_len = fidp->fs.xattr.len; read_count = xattr_len - off; if (read_count > max_count) { read_count = max_count; } else if (read_count < 0) { /* * read beyond XATTR value */ read_count = 0; } err = pdu_marshal(pdu, offset, \"d\", read_count); if (err < 0) { return err; } offset += err; err = v9fs_pack(elem->in_sg, elem->in_num, offset, ((char *)fidp->fs.xattr.value) + off, read_count); if (err < 0) { return err; } offset += err; return offset; }"} {"target": 1, "idx": 11334, "func": "static coroutine_fn int vmdk_co_write(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors) { int ret; BDRVVmdkState *s = bs->opaque; qemu_co_mutex_lock(&s->lock); ret = vmdk_write(bs, sector_num, buf, nb_sectors); qemu_co_mutex_unlock(&s->lock); return ret; }"} {"target": 1, "idx": 11359, "func": "int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen, uint8_t *dst, int dlen) { uint32_t zrun_len = 0, nzrun_len = 0; int d = 0, i = 0; long res, xor; uint8_t *nzrun_start = NULL; g_assert(!(((uintptr_t)old_buf | (uintptr_t)new_buf | slen) % sizeof(long))); while (i < slen) { /* overflow */ if (d + 2 > dlen) { return -1; } /* not aligned to sizeof(long) */ res = (slen - i) % sizeof(long); while (res && old_buf[i] == new_buf[i]) { zrun_len++; i++; res--; } /* word at a time for speed */ if (!res) { while (i < slen && (*(long *)(old_buf + i)) == (*(long *)(new_buf + i))) { i += sizeof(long); zrun_len += sizeof(long); } /* go over the rest */ while (i < slen && old_buf[i] == new_buf[i]) { zrun_len++; i++; } } /* buffer unchanged */ if (zrun_len == slen) { return 0; } /* skip last zero run */ if (i == slen) { return d; } d += uleb128_encode_small(dst + d, zrun_len); zrun_len = 0; nzrun_start = new_buf + i; /* overflow */ if (d + 2 > dlen) { return -1; } /* not aligned to sizeof(long) */ res = (slen - i) % sizeof(long); while (res && old_buf[i] != new_buf[i]) { i++; nzrun_len++; res--; } /* word at a time for speed, use of 32-bit long okay */ if (!res) { /* truncation to 32-bit long okay */ long mask = (long)0x0101010101010101ULL; while (i < slen) { xor = *(long *)(old_buf + i) ^ *(long *)(new_buf + i); if ((xor - mask) & ~xor & (mask << 7)) { /* found the end of an nzrun within the current long */ while (old_buf[i] != new_buf[i]) { nzrun_len++; i++; } break; } else { i += sizeof(long); nzrun_len += sizeof(long); } } } d += uleb128_encode_small(dst + d, nzrun_len); /* overflow */ if (d + nzrun_len > dlen) { return -1; } memcpy(dst + d, nzrun_start, nzrun_len); d += nzrun_len; nzrun_len = 0; } return d; }"} {"target": 1, "idx": 11361, "func": "static av_cold void init_atrac3_transforms(ATRAC3Context *q) { float enc_window[256]; int i; /* Generate the mdct window, for details see * http://wiki.multimedia.cx/index.php?title=RealAudio_atrc#Windows */ for (i=0 ; i<256; i++) enc_window[i] = (sin(((i + 0.5) / 256.0 - 0.5) * M_PI) + 1.0) * 0.5; if (!mdct_window[0]) for (i=0 ; i<256; i++) { mdct_window[i] = enc_window[i]/(enc_window[i]*enc_window[i] + enc_window[255-i]*enc_window[255-i]); mdct_window[511-i] = mdct_window[i]; } /* Initialize the MDCT transform. 
*/ ff_mdct_init(&mdct_ctx, 9, 1, 1.0); }"} {"target": 1, "idx": 11368, "func": "static void gd_resize(DisplayChangeListener *dcl, DisplayState *ds) { GtkDisplayState *s = ds->opaque; cairo_format_t kind; int stride; DPRINTF(\"resize(width=%d, height=%d)\\n\", ds_get_width(ds), ds_get_height(ds)); if (s->surface) { cairo_surface_destroy(s->surface); } switch (ds->surface->pf.bits_per_pixel) { case 8: kind = CAIRO_FORMAT_A8; break; case 16: kind = CAIRO_FORMAT_RGB16_565; break; case 32: kind = CAIRO_FORMAT_RGB24; break; default: g_assert_not_reached(); break; } stride = cairo_format_stride_for_width(kind, ds_get_width(ds)); g_assert(ds_get_linesize(ds) == stride); s->surface = cairo_image_surface_create_for_data(ds_get_data(ds), kind, ds_get_width(ds), ds_get_height(ds), ds_get_linesize(ds)); if (!s->full_screen) { GtkRequisition req; double sx, sy; if (s->free_scale) { sx = s->scale_x; sy = s->scale_y; s->scale_y = 1.0; s->scale_x = 1.0; } else { sx = 1.0; sy = 1.0; } gtk_widget_set_size_request(s->drawing_area, ds_get_width(ds) * s->scale_x, ds_get_height(ds) * s->scale_y); #if GTK_CHECK_VERSION(3, 0, 0) gtk_widget_get_preferred_size(s->vbox, NULL, &req); #else gtk_widget_size_request(s->vbox, &req); #endif gtk_window_resize(GTK_WINDOW(s->window), req.width * sx, req.height * sy); } }"} {"target": 1, "idx": 11381, "func": "void lm32_juart_set_jtx(DeviceState *d, uint32_t jtx) { LM32JuartState *s = LM32_JUART(d); unsigned char ch = jtx & 0xff; trace_lm32_juart_set_jtx(s->jtx); s->jtx = jtx; if (s->chr) { qemu_chr_fe_write_all(s->chr, &ch, 1); } }"} {"target": 1, "idx": 11383, "func": "static void matroska_fix_ass_packet(MatroskaDemuxContext *matroska, AVPacket *pkt, uint64_t display_duration) { char *line, *layer, *ptr = pkt->data, *end = ptr+pkt->size; for (; *ptr!=',' && ptr<end-1; ptr++); if (*ptr == ',') ptr++; layer = ptr; for (; *ptr!=',' && ptr<end-1; ptr++); if (*ptr == ',') { int64_t end_pts = pkt->pts + display_duration; int sc = matroska->time_scale * pkt->pts / 10000000; int ec = matroska->time_scale * end_pts / 10000000; int sh, sm, ss, eh, em, es, len; sh = sc/360000; sc -= 360000*sh; sm = sc/ 6000; sc -= 6000*sm; ss = sc/ 100; sc -= 100*ss; eh = ec/360000; ec -= 360000*eh; em = ec/ 6000; ec -= 6000*em; es = ec/ 100; ec -= 100*es; *ptr++ = '\\0'; len = 50 + end-ptr + FF_INPUT_BUFFER_PADDING_SIZE; if (!(line = av_malloc(len))) return; snprintf(line,len,\"Dialogue: %s,%d:%02d:%02d.%02d,%d:%02d:%02d.%02d,%s\", layer, sh, sm, ss, sc, eh, em, es, ec, ptr); av_free(pkt->data); pkt->data = line; pkt->size = strlen(line); } }"} {"target": 0, "idx": 11400, "func": "send_msg( VSCMsgType type, uint32_t reader_id, const void *msg, unsigned int length ) { VSCMsgHeader mhHeader; qemu_mutex_lock(&socket_to_send_lock); if (verbose > 10) { printf(\"sending type=%d id=%u, len =%u (0x%x)\\n\", type, reader_id, length, length); } mhHeader.type = htonl(type); mhHeader.reader_id = 0; mhHeader.length = htonl(length); g_byte_array_append(socket_to_send, (guint8 *)&mhHeader, sizeof(mhHeader)); g_byte_array_append(socket_to_send, (guint8 *)msg, length); g_idle_add(socket_prepare_sending, NULL); qemu_mutex_unlock(&socket_to_send_lock); return 0; }"} {"target": 0, "idx": 11423, "func": "void FUNCC(ff_h264_idct_dc_add)(uint8_t *_dst, int16_t *block, int stride){ int i, j; int dc = (((dctcoef*)block)[0] + 32) >> 6; pixel *dst = (pixel*)_dst; stride >>= sizeof(pixel)-1; for( j = 0; j < 4; j++ ) { for( i = 0; i < 4; i++ ) dst[i] = av_clip_pixel( dst[i] + dc ); dst += stride; } }"} {"target": 0, "idx": 11427, "func": "static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet) { const AVFrame 
*const p = pict; const int near = avctx->prediction_method; PutBitContext pb, pb2; GetBitContext gb; uint8_t *buf2 = NULL; uint8_t *zero = NULL; uint8_t *cur = NULL; uint8_t *last = NULL; JLSState *state; int i, size, ret; int comps; if (avctx->pix_fmt == AV_PIX_FMT_GRAY8 || avctx->pix_fmt == AV_PIX_FMT_GRAY16) comps = 1; else comps = 3; if ((ret = ff_alloc_packet(pkt, avctx->width * avctx->height * comps * 4 + AV_INPUT_BUFFER_MIN_SIZE)) < 0) { av_log(avctx, AV_LOG_ERROR, \"Error getting output packet.\\n\"); return ret; } buf2 = av_malloc(pkt->size); if (!buf2) goto memfail; init_put_bits(&pb, pkt->data, pkt->size); init_put_bits(&pb2, buf2, pkt->size); /* write our own JPEG header, can't use mjpeg_picture_header */ put_marker(&pb, SOI); put_marker(&pb, SOF48); put_bits(&pb, 16, 8 + comps * 3); // header size depends on components put_bits(&pb, 8, (avctx->pix_fmt == AV_PIX_FMT_GRAY16) ? 16 : 8); // bpp put_bits(&pb, 16, avctx->height); put_bits(&pb, 16, avctx->width); put_bits(&pb, 8, comps); // components for (i = 1; i <= comps; i++) { put_bits(&pb, 8, i); // component ID put_bits(&pb, 8, 0x11); // subsampling: none put_bits(&pb, 8, 0); // Tiq, used by JPEG-LS ext } put_marker(&pb, SOS); put_bits(&pb, 16, 6 + comps * 2); put_bits(&pb, 8, comps); for (i = 1; i <= comps; i++) { put_bits(&pb, 8, i); // component ID put_bits(&pb, 8, 0); // mapping index: none } put_bits(&pb, 8, near); put_bits(&pb, 8, (comps > 1) ? 1 : 0); // interleaving: 0 - plane, 1 - line put_bits(&pb, 8, 0); // point transform: none state = av_mallocz(sizeof(JLSState)); if (!state) goto memfail; /* initialize JPEG-LS state from JPEG parameters */ state->near = near; state->bpp = (avctx->pix_fmt == AV_PIX_FMT_GRAY16) ? 16 : 8; ff_jpegls_reset_coding_parameters(state, 0); ff_jpegls_init_state(state); ls_store_lse(state, &pb); zero = last = av_mallocz(p->linesize[0]); if (!zero) goto memfail; cur = p->data[0]; if (avctx->pix_fmt == AV_PIX_FMT_GRAY8) { int t = 0; for (i = 0; i < avctx->height; i++) { ls_encode_line(state, &pb2, last, cur, t, avctx->width, 1, 0, 8); t = last[0]; last = cur; cur += p->linesize[0]; } } else if (avctx->pix_fmt == AV_PIX_FMT_GRAY16) { int t = 0; for (i = 0; i < avctx->height; i++) { ls_encode_line(state, &pb2, last, cur, t, avctx->width, 1, 0, 16); t = *((uint16_t *)last); last = cur; cur += p->linesize[0]; } } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) { int j, width; int Rc[3] = { 0, 0, 0 }; width = avctx->width * 3; for (i = 0; i < avctx->height; i++) { for (j = 0; j < 3; j++) { ls_encode_line(state, &pb2, last + j, cur + j, Rc[j], width, 3, j, 8); Rc[j] = last[j]; } last = cur; cur += p->linesize[0]; } } else if (avctx->pix_fmt == AV_PIX_FMT_BGR24) { int j, width; int Rc[3] = { 0, 0, 0 }; width = avctx->width * 3; for (i = 0; i < avctx->height; i++) { for (j = 2; j >= 0; j--) { ls_encode_line(state, &pb2, last + j, cur + j, Rc[j], width, 3, j, 8); Rc[j] = last[j]; } last = cur; cur += p->linesize[0]; } } av_freep(&zero); av_freep(&state); /* the specification says that after doing 0xff escaping unused bits in * the last byte must be set to 0, so just append 7 \"optional\" zero-bits * to avoid special-casing. 
*/ put_bits(&pb2, 7, 0); size = put_bits_count(&pb2); flush_put_bits(&pb2); /* do escape coding */ init_get_bits(&gb, buf2, size); size -= 7; while (get_bits_count(&gb) < size) { int v; v = get_bits(&gb, 8); put_bits(&pb, 8, v); if (v == 0xFF) { v = get_bits(&gb, 7); put_bits(&pb, 8, v); } } avpriv_align_put_bits(&pb); av_freep(&buf2); /* End of image */ put_marker(&pb, EOI); flush_put_bits(&pb); emms_c(); pkt->size = put_bits_count(&pb) >> 3; pkt->flags |= AV_PKT_FLAG_KEY; *got_packet = 1; return 0; memfail: av_packet_unref(pkt); av_freep(&buf2); av_freep(&state); av_freep(&zero); return AVERROR(ENOMEM); }"} {"target": 0, "idx": 11428, "func": "void ff_vc1_decode_blocks(VC1Context *v) { v->s.esc3_level_length = 0; if (v->x8_type) { ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer); ff_er_add_slice(&v->s.er, 0, 0, (v->s.mb_x >> 1) - 1, (v->s.mb_y >> 1) - 1, ER_MB_END); } else { v->cur_blk_idx = 0; v->left_blk_idx = -1; v->topleft_blk_idx = 1; v->top_blk_idx = 2; switch (v->s.pict_type) { case AV_PICTURE_TYPE_I: if (v->profile == PROFILE_ADVANCED) vc1_decode_i_blocks_adv(v); else vc1_decode_i_blocks(v); break; case AV_PICTURE_TYPE_P: if (v->p_frame_skipped) vc1_decode_skip_blocks(v); else vc1_decode_p_blocks(v); break; case AV_PICTURE_TYPE_B: if (v->bi_type) { if (v->profile == PROFILE_ADVANCED) vc1_decode_i_blocks_adv(v); else vc1_decode_i_blocks(v); } else vc1_decode_b_blocks(v); break; } } }"} {"target": 1, "idx": 11433, "func": "paint_mouse_pointer(XImage *image, struct x11_grab *s) { int x_off = s->x_off; int y_off = s->y_off; int width = s->width; int height = s->height; Display *dpy = s->dpy; XFixesCursorImage *xcim; int x, y; int line, column; int to_line, to_column; int image_addr, xcim_addr; xcim = XFixesGetCursorImage(dpy); x = xcim->x - xcim->xhot; y = xcim->y - xcim->yhot; to_line = FFMIN((y + xcim->height), (height + y_off)); to_column = FFMIN((x + xcim->width), (width + x_off)); for (line = FFMAX(y, y_off); line < to_line; line++) { for (column = FFMAX(x, x_off); column < to_column; column++) { xcim_addr = (line - y) * xcim->width + column - x; if ((unsigned char)(xcim->pixels[xcim_addr] >> 24) != 0) { // skip fully transparent pixel image_addr = ((line - y_off) * width + column - x_off) * 4; image->data[image_addr] = (unsigned char)(xcim->pixels[xcim_addr] >> 0); image->data[image_addr+1] = (unsigned char)(xcim->pixels[xcim_addr] >> 8); image->data[image_addr+2] = (unsigned char)(xcim->pixels[xcim_addr] >> 16); } } } XFree(xcim); xcim = NULL; }"} {"target": 1, "idx": 11439, "func": "static uint64_t boston_platreg_read(void *opaque, hwaddr addr, unsigned size) { BostonState *s = opaque; uint32_t gic_freq, val; if (size != 4) { qemu_log_mask(LOG_UNIMP, \"%uB platform register read\", size); return 0; } switch (addr & 0xffff) { case PLAT_FPGA_BUILD: case PLAT_CORE_CL: case PLAT_WRAPPER_CL: return 0; case PLAT_DDR3_STATUS: return PLAT_DDR3_STATUS_LOCKED | PLAT_DDR3_STATUS_CALIBRATED; case PLAT_MMCM_DIV: gic_freq = mips_gictimer_get_freq(s->cps->gic.gic_timer) / 1000000; val = gic_freq << PLAT_MMCM_DIV_INPUT_SHIFT; val |= 1 << PLAT_MMCM_DIV_MUL_SHIFT; val |= 1 << PLAT_MMCM_DIV_CLK0DIV_SHIFT; val |= 1 << PLAT_MMCM_DIV_CLK1DIV_SHIFT; return val; case PLAT_BUILD_CFG: val = PLAT_BUILD_CFG_PCIE0_EN; val |= PLAT_BUILD_CFG_PCIE1_EN; val |= PLAT_BUILD_CFG_PCIE2_EN; return val; case PLAT_DDR_CFG: val = s->mach->ram_size / G_BYTE; assert(!(val & ~PLAT_DDR_CFG_SIZE)); val |= PLAT_DDR_CFG_MHZ; return val; default: qemu_log_mask(LOG_UNIMP, \"Read platform register 
0x%\" HWADDR_PRIx, addr & 0xffff); return 0; } }"} {"target": 0, "idx": 11444, "func": "int avfilter_register(AVFilter *filter) { if (next_registered_avfilter_idx == MAX_REGISTERED_AVFILTERS_NB) return -1; registered_avfilters[next_registered_avfilter_idx++] = filter; return 0; }"} {"target": 1, "idx": 11449, "func": "void qmp_migrate_cancel(Error **errp) { migrate_fd_cancel(migrate_get_current()); }"} {"target": 1, "idx": 11455, "func": "int load_snapshot(const char *name, Error **errp) { BlockDriverState *bs, *bs_vm_state; QEMUSnapshotInfo sn; QEMUFile *f; int ret; AioContext *aio_context; MigrationIncomingState *mis = migration_incoming_get_current(); if (!bdrv_all_can_snapshot(&bs)) { error_setg(errp, \"Device '%s' is writable but does not support snapshots\", bdrv_get_device_name(bs)); return -ENOTSUP; } ret = bdrv_all_find_snapshot(name, &bs); if (ret < 0) { error_setg(errp, \"Device '%s' does not have the requested snapshot '%s'\", bdrv_get_device_name(bs), name); return ret; } bs_vm_state = bdrv_all_find_vmstate_bs(); if (!bs_vm_state) { error_setg(errp, \"No block device supports snapshots\"); return -ENOTSUP; } aio_context = bdrv_get_aio_context(bs_vm_state); /* Don't even try to load empty VM states */ aio_context_acquire(aio_context); ret = bdrv_snapshot_find(bs_vm_state, &sn, name); aio_context_release(aio_context); if (ret < 0) { return ret; } else if (sn.vm_state_size == 0) { error_setg(errp, \"This is a disk-only snapshot. Revert to it \" \" offline using qemu-img\"); return -EINVAL; } /* Flush all IO requests so they don't interfere with the new state. */ bdrv_drain_all(); ret = bdrv_all_goto_snapshot(name, &bs); if (ret < 0) { error_setg(errp, \"Error %d while activating snapshot '%s' on '%s'\", ret, name, bdrv_get_device_name(bs)); return ret; } /* restore the VM state */ f = qemu_fopen_bdrv(bs_vm_state, 0); if (!f) { error_setg(errp, \"Could not open VM state file\"); return -EINVAL; } qemu_system_reset(SHUTDOWN_CAUSE_NONE); mis->from_src_file = f; aio_context_acquire(aio_context); ret = qemu_loadvm_state(f); qemu_fclose(f); aio_context_release(aio_context); migration_incoming_state_destroy(); if (ret < 0) { error_setg(errp, \"Error %d while loading VM state\", ret); return ret; } return 0; }"} {"target": 0, "idx": 11468, "func": "static av_noinline void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src, ptrdiff_t buf_stride, ptrdiff_t src_stride, int block_w, int block_h, int src_x, int src_y, int w, int h) { emulated_edge_mc(buf, src, buf_stride, src_stride, block_w, block_h, src_x, src_y, w, h, vfixtbl_sse, &ff_emu_edge_vvar_sse, hfixtbl_mmxext, &ff_emu_edge_hvar_mmxext); }"} {"target": 1, "idx": 11488, "func": "static int mov_text_tx3g(AVCodecContext *avctx, MovTextContext *m) { char *tx3g_ptr = avctx->extradata; int i, box_size, font_length; int8_t v_align, h_align; int style_fontID; StyleBox s_default; m->count_f = 0; m->ftab_entries = 0; box_size = BOX_SIZE_INITIAL; /* Size till ftab_entries */ if (avctx->extradata_size < box_size) return -1; // Display Flags tx3g_ptr += 4; // Alignment h_align = *tx3g_ptr++; v_align = *tx3g_ptr++; if (h_align == 0) { if (v_align == 0) m->d.alignment = TOP_LEFT; if (v_align == 1) m->d.alignment = MIDDLE_LEFT; if (v_align == -1) m->d.alignment = BOTTOM_LEFT; } if (h_align == 1) { if (v_align == 0) m->d.alignment = TOP_CENTER; if (v_align == 1) m->d.alignment = MIDDLE_CENTER; if (v_align == -1) m->d.alignment = BOTTOM_CENTER; } if (h_align == -1) { if (v_align == 0) m->d.alignment = TOP_RIGHT; if (v_align == 1) m->d.alignment = 
MIDDLE_RIGHT; if (v_align == -1) m->d.alignment = BOTTOM_RIGHT; } // Background Color m->d.back_color = AV_RB24(tx3g_ptr); tx3g_ptr += 4; // BoxRecord tx3g_ptr += 8; // StyleRecord tx3g_ptr += 4; // fontID style_fontID = AV_RB16(tx3g_ptr); tx3g_ptr += 2; // face-style-flags s_default.style_flag = *tx3g_ptr++; m->d.bold = s_default.style_flag & STYLE_FLAG_BOLD; m->d.italic = s_default.style_flag & STYLE_FLAG_ITALIC; m->d.underline = s_default.style_flag & STYLE_FLAG_UNDERLINE; // fontsize m->d.fontsize = *tx3g_ptr++; // Primary color m->d.color = AV_RB24(tx3g_ptr); tx3g_ptr += 4; // FontRecord // FontRecord Size tx3g_ptr += 4; // ftab tx3g_ptr += 4; m->ftab_entries = AV_RB16(tx3g_ptr); tx3g_ptr += 2; for (i = 0; i < m->ftab_entries; i++) { box_size += 3; if (avctx->extradata_size < box_size) { mov_text_cleanup_ftab(m); m->ftab_entries = 0; return -1; } m->ftab_temp = av_malloc(sizeof(*m->ftab_temp)); if (!m->ftab_temp) { mov_text_cleanup_ftab(m); return AVERROR(ENOMEM); } m->ftab_temp->fontID = AV_RB16(tx3g_ptr); tx3g_ptr += 2; font_length = *tx3g_ptr++; box_size = box_size + font_length; if (avctx->extradata_size < box_size) { mov_text_cleanup_ftab(m); m->ftab_entries = 0; return -1; } m->ftab_temp->font = av_malloc(font_length + 1); if (!m->ftab_temp->font) { mov_text_cleanup_ftab(m); return AVERROR(ENOMEM); } memcpy(m->ftab_temp->font, tx3g_ptr, font_length); m->ftab_temp->font[font_length] = '\\0'; av_dynarray_add(&m->ftab, &m->count_f, m->ftab_temp); if (!m->ftab) { mov_text_cleanup_ftab(m); return AVERROR(ENOMEM); } tx3g_ptr = tx3g_ptr + font_length; } for (i = 0; i < m->ftab_entries; i++) { if (style_fontID == m->ftab[i]->fontID) m->d.font = m->ftab[i]->font; } return 0; }"} {"target": 1, "idx": 11501, "func": "static void msmouse_chr_close (struct CharDriverState *chr) { MouseState *mouse = chr->opaque; qemu_input_handler_unregister(mouse->hs); g_free(mouse); g_free(chr); }"} {"target": 1, "idx": 11516, "func": "int qemu_init_main_loop(void) { int ret; ret = qemu_signal_init(); if (ret) { return ret; } qemu_init_sigbus(); return qemu_event_init(); }"} {"target": 1, "idx": 11524, "func": "envlist_parse(envlist_t *envlist, const char *env, int (*callback)(envlist_t *, const char *)) { char *tmpenv, *envvar; char *envsave = NULL; assert(callback != NULL); if ((envlist == NULL) || (env == NULL)) return (EINVAL); /* * We need to make temporary copy of the env string * as strtok_r(3) modifies it while it tokenizes. 
*/ if ((tmpenv = strdup(env)) == NULL) return (errno); envvar = strtok_r(tmpenv, \",\", &envsave); while (envvar != NULL) { if ((*callback)(envlist, envvar) != 0) { free(tmpenv); return (errno); } envvar = strtok_r(NULL, \",\", &envsave); } free(tmpenv); return (0); }"} {"target": 0, "idx": 11527, "func": "static int avi_write_packet(AVFormatContext *s, AVPacket *pkt) { unsigned char tag[5]; unsigned int flags = 0; const int stream_index = pkt->stream_index; int size = pkt->size; AVIContext *avi = s->priv_data; AVIOContext *pb = s->pb; AVIStream *avist = s->streams[stream_index]->priv_data; AVCodecParameters *par = s->streams[stream_index]->codecpar; while (par->block_align == 0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avist->packet_count) { AVPacket empty_packet; av_init_packet(&empty_packet); empty_packet.size = 0; empty_packet.data = NULL; empty_packet.stream_index = stream_index; avi_write_packet(s, &empty_packet); } avist->packet_count++; // Make sure to put an OpenDML chunk when the file size exceeds the limits if (pb->seekable && (avio_tell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) { avi_write_ix(s); ff_end_tag(pb, avi->movi_list); if (avi->riff_id == 1) avi_write_idx1(s); ff_end_tag(pb, avi->riff_start); avi->movi_list = avi_start_new_riff(s, pb, \"AVIX\", \"movi\"); } avi_stream2fourcc(tag, stream_index, par->codec_type); if (pkt->flags & AV_PKT_FLAG_KEY) flags = 0x10; if (par->codec_type == AVMEDIA_TYPE_AUDIO) avist->audio_strm_length += size; if (s->pb->seekable) { int err; AVIIndex *idx = &avist->indexes; int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE; int id = idx->entry % AVI_INDEX_CLUSTER_SIZE; if (idx->ents_allocated <= idx->entry) { if ((err = av_reallocp(&idx->cluster, (cl + 1) * sizeof(*idx->cluster))) < 0) { idx->ents_allocated = 0; idx->entry = 0; return err; } idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE * sizeof(AVIIentry)); if (!idx->cluster[cl]) return -1; idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE; } idx->cluster[cl][id].flags = flags; idx->cluster[cl][id].pos = avio_tell(pb) - avi->movi_list; idx->cluster[cl][id].len = size; idx->entry++; } avio_write(pb, tag, 4); avio_wl32(pb, size); avio_write(pb, pkt->data, size); if (size & 1) avio_w8(pb, 0); return 0; }"} {"target": 0, "idx": 11529, "func": "static av_cold int check_cuda_errors(AVCodecContext *avctx, CUresult err, const char *func) { if (err != CUDA_SUCCESS) { av_log(avctx, AV_LOG_FATAL, \">> %s - failed with error code 0x%x\\n\", func, err); return 0; } return 1; }"} {"target": 0, "idx": 11535, "func": "static void show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_idx, int in_program) { AVStream *stream = fmt_ctx->streams[stream_idx]; AVCodecContext *dec_ctx; const AVCodec *dec; char val_str[128]; const char *s; AVRational sar, dar; AVBPrint pbuf; av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED); writer_print_section_header(w, in_program ? 
SECTION_ID_PROGRAM_STREAM : SECTION_ID_STREAM); print_int(\"index\", stream->index); if ((dec_ctx = stream->codec)) { const char *profile = NULL; dec = dec_ctx->codec; if (dec) { print_str(\"codec_name\", dec->name); if (!do_bitexact) { if (dec->long_name) print_str (\"codec_long_name\", dec->long_name); else print_str_opt(\"codec_long_name\", \"unknown\"); } } else { print_str_opt(\"codec_name\", \"unknown\"); if (!do_bitexact) { print_str_opt(\"codec_long_name\", \"unknown\"); } } if (dec && (profile = av_get_profile_name(dec, dec_ctx->profile))) print_str(\"profile\", profile); else print_str_opt(\"profile\", \"unknown\"); s = av_get_media_type_string(dec_ctx->codec_type); if (s) print_str (\"codec_type\", s); else print_str_opt(\"codec_type\", \"unknown\"); print_q(\"codec_time_base\", dec_ctx->time_base, '/'); /* print AVI/FourCC tag */ av_get_codec_tag_string(val_str, sizeof(val_str), dec_ctx->codec_tag); print_str(\"codec_tag_string\", val_str); print_fmt(\"codec_tag\", \"0x%04x\", dec_ctx->codec_tag); switch (dec_ctx->codec_type) { case AVMEDIA_TYPE_VIDEO: print_int(\"width\", dec_ctx->width); print_int(\"height\", dec_ctx->height); print_int(\"has_b_frames\", dec_ctx->has_b_frames); sar = av_guess_sample_aspect_ratio(fmt_ctx, stream, NULL); if (sar.den) { print_q(\"sample_aspect_ratio\", sar, ':'); av_reduce(&dar.num, &dar.den, dec_ctx->width * sar.num, dec_ctx->height * sar.den, 1024*1024); print_q(\"display_aspect_ratio\", dar, ':'); } else { print_str_opt(\"sample_aspect_ratio\", \"N/A\"); print_str_opt(\"display_aspect_ratio\", \"N/A\"); } s = av_get_pix_fmt_name(dec_ctx->pix_fmt); if (s) print_str (\"pix_fmt\", s); else print_str_opt(\"pix_fmt\", \"unknown\"); print_int(\"level\", dec_ctx->level); if (dec_ctx->timecode_frame_start >= 0) { char tcbuf[AV_TIMECODE_STR_SIZE]; av_timecode_make_mpeg_tc_string(tcbuf, dec_ctx->timecode_frame_start); print_str(\"timecode\", tcbuf); } else { print_str_opt(\"timecode\", \"N/A\"); } break; case AVMEDIA_TYPE_AUDIO: s = av_get_sample_fmt_name(dec_ctx->sample_fmt); if (s) print_str (\"sample_fmt\", s); else print_str_opt(\"sample_fmt\", \"unknown\"); print_val(\"sample_rate\", dec_ctx->sample_rate, unit_hertz_str); print_int(\"channels\", dec_ctx->channels); if (dec_ctx->channel_layout) { av_bprint_clear(&pbuf); av_bprint_channel_layout(&pbuf, dec_ctx->channels, dec_ctx->channel_layout); print_str (\"channel_layout\", pbuf.str); } else { print_str_opt(\"channel_layout\", \"unknown\"); } print_int(\"bits_per_sample\", av_get_bits_per_sample(dec_ctx->codec_id)); break; case AVMEDIA_TYPE_SUBTITLE: if (dec_ctx->width) print_int(\"width\", dec_ctx->width); else print_str_opt(\"width\", \"N/A\"); if (dec_ctx->height) print_int(\"height\", dec_ctx->height); else print_str_opt(\"height\", \"N/A\"); break; } } else { print_str_opt(\"codec_type\", \"unknown\"); } if (dec_ctx->codec && dec_ctx->codec->priv_class && show_private_data) { const AVOption *opt = NULL; while (opt = av_opt_next(dec_ctx->priv_data,opt)) { uint8_t *str; if (opt->flags) continue; if (av_opt_get(dec_ctx->priv_data, opt->name, 0, &str) >= 0) { print_str(opt->name, str); av_free(str); } } } if (fmt_ctx->iformat->flags & AVFMT_SHOW_IDS) print_fmt (\"id\", \"0x%x\", stream->id); else print_str_opt(\"id\", \"N/A\"); print_q(\"r_frame_rate\", stream->r_frame_rate, '/'); print_q(\"avg_frame_rate\", stream->avg_frame_rate, '/'); print_q(\"time_base\", stream->time_base, '/'); print_ts (\"start_pts\", stream->start_time); print_time(\"start_time\", stream->start_time, 
&stream->time_base); print_ts (\"duration_ts\", stream->duration); print_time(\"duration\", stream->duration, &stream->time_base); if (dec_ctx->bit_rate > 0) print_val (\"bit_rate\", dec_ctx->bit_rate, unit_bit_per_second_str); else print_str_opt(\"bit_rate\", \"N/A\"); if (stream->nb_frames) print_fmt (\"nb_frames\", \"%\"PRId64, stream->nb_frames); else print_str_opt(\"nb_frames\", \"N/A\"); if (nb_streams_frames[stream_idx]) print_fmt (\"nb_read_frames\", \"%\"PRIu64, nb_streams_frames[stream_idx]); else print_str_opt(\"nb_read_frames\", \"N/A\"); if (nb_streams_packets[stream_idx]) print_fmt (\"nb_read_packets\", \"%\"PRIu64, nb_streams_packets[stream_idx]); else print_str_opt(\"nb_read_packets\", \"N/A\"); if (do_show_data) writer_print_data(w, \"extradata\", dec_ctx->extradata, dec_ctx->extradata_size); /* Print disposition information */ #define PRINT_DISPOSITION(flagname, name) do { \\ print_int(name, !!(stream->disposition & AV_DISPOSITION_##flagname)); \\ } while (0) if (do_show_stream_disposition) { writer_print_section_header(w, in_program ? SECTION_ID_PROGRAM_STREAM_DISPOSITION : SECTION_ID_STREAM_DISPOSITION); PRINT_DISPOSITION(DEFAULT, \"default\"); PRINT_DISPOSITION(DUB, \"dub\"); PRINT_DISPOSITION(ORIGINAL, \"original\"); PRINT_DISPOSITION(COMMENT, \"comment\"); PRINT_DISPOSITION(LYRICS, \"lyrics\"); PRINT_DISPOSITION(KARAOKE, \"karaoke\"); PRINT_DISPOSITION(FORCED, \"forced\"); PRINT_DISPOSITION(HEARING_IMPAIRED, \"hearing_impaired\"); PRINT_DISPOSITION(VISUAL_IMPAIRED, \"visual_impaired\"); PRINT_DISPOSITION(CLEAN_EFFECTS, \"clean_effects\"); PRINT_DISPOSITION(ATTACHED_PIC, \"attached_pic\"); writer_print_section_footer(w); } show_tags(w, stream->metadata, in_program ? SECTION_ID_PROGRAM_STREAM_TAGS : SECTION_ID_STREAM_TAGS); writer_print_section_footer(w); av_bprint_finalize(&pbuf, NULL); fflush(stdout); }"} {"target": 1, "idx": 11543, "func": "static inline int get_block(GetBitContext *gb, DCTELEM *block, const uint8_t *scan, const uint32_t *quant) { int coeff, i, n; int8_t ac; uint8_t dc = get_bits(gb, 8); // block not coded if (dc == 255) // number of non-zero coefficients coeff = get_bits(gb, 6); if (get_bits_count(gb) + (coeff << 1) >= gb->size_in_bits) // normally we would only need to clear the (63 - coeff) last values, // but since we do not know where they are we just clear the whole block memset(block, 0, 64 * sizeof(DCTELEM)); // 2 bits per coefficient while (coeff) { ac = get_sbits(gb, 2); if (ac == -2) break; // continue with more bits PUT_COEFF(ac); } // 4 bits per coefficient ALIGN(4); while (coeff) { ac = get_sbits(gb, 4); if (ac == -8) break; // continue with more bits PUT_COEFF(ac); } // 8 bits per coefficient ALIGN(8); if (get_bits_count(gb) + (coeff << 3) >= gb->size_in_bits) while (coeff) { ac = get_sbits(gb, 8); PUT_COEFF(ac); } PUT_COEFF(dc); return 1; }"} {"target": 1, "idx": 11546, "func": "static void gen_neon_trn_u16(TCGv t0, TCGv t1) { TCGv rd, tmp; rd = new_tmp(); tmp = new_tmp(); tcg_gen_shli_i32(rd, t0, 16); tcg_gen_andi_i32(tmp, t1, 0xffff); tcg_gen_or_i32(rd, rd, tmp); tcg_gen_shri_i32(t1, t1, 16); tcg_gen_andi_i32(tmp, t0, 0xffff0000); tcg_gen_or_i32(t1, t1, tmp); tcg_gen_mov_i32(t0, rd); dead_tmp(tmp); dead_tmp(rd); }"} {"target": 0, "idx": 11553, "func": "static int parse_header(OutputStream *os, const uint8_t *buf, int buf_size) { if (buf_size < 13) return AVERROR_INVALIDDATA; if (memcmp(buf, \"FLV\", 3)) return AVERROR_INVALIDDATA; buf += 13; buf_size -= 13; while (buf_size >= 11 + 4) { int type = buf[0]; int size = AV_RB24(&buf[1]) 
+ 11 + 4; if (size > buf_size) return AVERROR_INVALIDDATA; if (type == 8 || type == 9) { if (os->nb_extra_packets > FF_ARRAY_ELEMS(os->extra_packets)) return AVERROR_INVALIDDATA; os->extra_packet_sizes[os->nb_extra_packets] = size; os->extra_packets[os->nb_extra_packets] = av_malloc(size); if (!os->extra_packets[os->nb_extra_packets]) return AVERROR(ENOMEM); memcpy(os->extra_packets[os->nb_extra_packets], buf, size); os->nb_extra_packets++; } else if (type == 0x12) { if (os->metadata) return AVERROR_INVALIDDATA; os->metadata_size = size - 11 - 4; os->metadata = av_malloc(os->metadata_size); if (!os->metadata) return AVERROR(ENOMEM); memcpy(os->metadata, buf + 11, os->metadata_size); } buf += size; buf_size -= size; } if (!os->metadata) return AVERROR_INVALIDDATA; return 0; }"} {"target": 1, "idx": 11566, "func": "restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) { int err = 0; int i; __get_user(regs->CP0_EPC, &sc->sc_pc); __get_user(regs->active_tc.HI[0], &sc->sc_mdhi); __get_user(regs->active_tc.LO[0], &sc->sc_mdlo); for (i = 1; i < 32; ++i) { __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); } __get_user(regs->active_tc.HI[1], &sc->sc_hi1); __get_user(regs->active_tc.HI[2], &sc->sc_hi2); __get_user(regs->active_tc.HI[3], &sc->sc_hi3); __get_user(regs->active_tc.LO[1], &sc->sc_lo1); __get_user(regs->active_tc.LO[2], &sc->sc_lo2); __get_user(regs->active_tc.LO[3], &sc->sc_lo3); { uint32_t dsp; __get_user(dsp, &sc->sc_dsp); cpu_wrdsp(dsp, 0x3ff, regs); } for (i = 0; i < 32; ++i) { __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); } return err; }"} {"target": 0, "idx": 11592, "func": "static void sdhci_write_block_to_card(SDHCIState *s) { int index = 0; if (s->prnsts & SDHC_SPACE_AVAILABLE) { if (s->norintstsen & SDHC_NISEN_WBUFRDY) { s->norintsts |= SDHC_NIS_WBUFRDY; } sdhci_update_irq(s); return; } if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { if (s->blkcnt == 0) { return; } else { s->blkcnt--; } } for (index = 0; index < (s->blksize & 0x0fff); index++) { sd_write_data(s->card, s->fifo_buffer[index]); } /* Next data can be written through BUFFER DATORT register */ s->prnsts |= SDHC_SPACE_AVAILABLE; /* Finish transfer if that was the last block of data */ if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || ((s->trnmod & SDHC_TRNS_MULTI) && (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0))) { SDHCI_GET_CLASS(s)->end_data_transfer(s); } else if (s->norintstsen & SDHC_NISEN_WBUFRDY) { s->norintsts |= SDHC_NIS_WBUFRDY; } /* Generate Block Gap Event if requested and if not the last block */ if (s->stopped_state == sdhc_gap_write && (s->trnmod & SDHC_TRNS_MULTI) && s->blkcnt > 0) { s->prnsts &= ~SDHC_DOING_WRITE; if (s->norintstsen & SDHC_EISEN_BLKGAP) { s->norintsts |= SDHC_EIS_BLKGAP; } SDHCI_GET_CLASS(s)->end_data_transfer(s); } sdhci_update_irq(s); }"} {"target": 0, "idx": 11608, "func": "static AHCIQState *ahci_boot(void) { AHCIQState *s; const char *cli; s = g_malloc0(sizeof(AHCIQState)); cli = \"-drive if=none,id=drive0,file=%s,cache=writeback,serial=%s\" \",format=qcow2\" \" -M q35 \" \"-device ide-hd,drive=drive0 \" \"-global ide-hd.ver=%s\"; s->parent = qtest_pc_boot(cli, tmp_path, \"testdisk\", \"version\"); alloc_set_flags(s->parent->alloc, ALLOC_LEAK_ASSERT); /* Verify that we have an AHCI device present. 
*/ s->dev = get_ahci_device(&s->fingerprint); return s; }"} {"target": 0, "idx": 11624, "func": "void address_space_destroy_dispatch(AddressSpace *as) { AddressSpaceDispatch *d = as->dispatch; memory_listener_unregister(&d->listener); g_free(d); as->dispatch = NULL; }"} {"target": 0, "idx": 11631, "func": "mst_fpga_readb(void *opaque, target_phys_addr_t addr) { mst_irq_state *s = (mst_irq_state *) opaque; switch (addr) { case MST_LEDDAT1: return s->leddat1; case MST_LEDDAT2: return s->leddat2; case MST_LEDCTRL: return s->ledctrl; case MST_GPSWR: return s->gpswr; case MST_MSCWR1: return s->mscwr1; case MST_MSCWR2: return s->mscwr2; case MST_MSCWR3: return s->mscwr3; case MST_MSCRD: return s->mscrd; case MST_INTMSKENA: return s->intmskena; case MST_INTSETCLR: return s->intsetclr; case MST_PCMCIA0: return s->pcmcia0; case MST_PCMCIA1: return s->pcmcia1; default: printf(\"Mainstone - mst_fpga_readb: Bad register offset \" \"0x\" TARGET_FMT_plx \" \\n\", addr); } return 0; }"} {"target": 0, "idx": 11635, "func": "static int pci_piix_ide_initfn(PCIIDEState *d) { uint8_t *pci_conf = d->dev.config; pci_conf[PCI_CLASS_PROG] = 0x80; // legacy ATA mode pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_IDE); qemu_register_reset(piix3_reset, d); pci_register_bar(&d->dev, 4, 0x10, PCI_BASE_ADDRESS_SPACE_IO, bmdma_map); vmstate_register(&d->dev.qdev, 0, &vmstate_ide_pci, d); pci_piix_init_ports(d); return 0; }"} {"target": 0, "idx": 11641, "func": "static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { DeviceState *qdev = DEVICE(vpci_dev); VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev); DeviceState *vdev = DEVICE(&dev->vdev); virtio_net_set_config_size(&dev->vdev, vpci_dev->host_features); virtio_net_set_netclient_name(&dev->vdev, qdev->id, object_get_typename(OBJECT(qdev))); qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus)); object_property_set_bool(OBJECT(vdev), true, \"realized\", errp); }"} {"target": 0, "idx": 11642, "func": "static void spapr_nvram_realize(VIOsPAPRDevice *dev, Error **errp) { sPAPRNVRAM *nvram = VIO_SPAPR_NVRAM(dev); int ret; if (nvram->blk) { nvram->size = blk_getlength(nvram->blk); ret = blk_set_perm(nvram->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, BLK_PERM_ALL, errp); if (ret < 0) { return; } } else { nvram->size = DEFAULT_NVRAM_SIZE; } nvram->buf = g_malloc0(nvram->size); if ((nvram->size < MIN_NVRAM_SIZE) || (nvram->size > MAX_NVRAM_SIZE)) { error_setg(errp, \"spapr-nvram must be between %d and %d bytes in size\", MIN_NVRAM_SIZE, MAX_NVRAM_SIZE); return; } if (nvram->blk) { int alen = blk_pread(nvram->blk, 0, nvram->buf, nvram->size); if (alen != nvram->size) { error_setg(errp, \"can't read spapr-nvram contents\"); return; } } else if (nb_prom_envs > 0) { /* Create a system partition to pass the -prom-env variables */ chrp_nvram_create_system_partition(nvram->buf, MIN_NVRAM_SIZE / 4); chrp_nvram_create_free_partition(&nvram->buf[MIN_NVRAM_SIZE / 4], nvram->size - MIN_NVRAM_SIZE / 4); } spapr_rtas_register(RTAS_NVRAM_FETCH, \"nvram-fetch\", rtas_nvram_fetch); spapr_rtas_register(RTAS_NVRAM_STORE, \"nvram-store\", rtas_nvram_store); }"} {"target": 1, "idx": 11646, "func": "static void ehci_free_packet(EHCIPacket *p) { trace_usb_ehci_packet_action(p->queue, p, \"free\"); if (p->async == EHCI_ASYNC_INFLIGHT) { usb_cancel_packet(&p->packet); usb_packet_unmap(&p->packet, &p->sgl); qemu_sglist_destroy(&p->sgl); QTAILQ_REMOVE(&p->queue->packets, p, next); usb_packet_cleanup(&p->packet); g_free(p);"} {"target": 0, "idx": 11662, "func": "static inline void 
RENAME(nv12ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused) { RENAME(nvXXtoUV)(dstU, dstV, src1, width); }"} {"target": 0, "idx": 11665, "func": "int ff_thread_init(AVCodecContext *avctx) { if (avctx->thread_opaque) { av_log(avctx, AV_LOG_ERROR, \"avcodec_thread_init is ignored after avcodec_open\\n\"); return -1; } #if HAVE_W32THREADS w32thread_init(); #endif if (avctx->codec) { validate_thread_parameters(avctx); if (avctx->active_thread_type&FF_THREAD_SLICE) return thread_init(avctx); else if (avctx->active_thread_type&FF_THREAD_FRAME) return frame_thread_init(avctx); } return 0; }"} {"target": 0, "idx": 11674, "func": "static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) { if (memory_region_is_ram(mr)) { return !(is_write && mr->readonly); } if (memory_region_is_romd(mr)) { return !is_write; } return false; }"} {"target": 0, "idx": 11685, "func": "VirtIODevice *virtio_blk_init(DeviceState *dev, VirtIOBlkConf *blk) { VirtIOBlock *s; int cylinders, heads, secs; static int virtio_blk_id; DriveInfo *dinfo; if (!blk->conf.bs) { error_report(\"drive property not set\"); return NULL; } if (!bdrv_is_inserted(blk->conf.bs)) { error_report(\"Device needs media, but drive is empty\"); return NULL; } if (!blk->serial) { /* try to fall back to value set with legacy -drive serial=... */ dinfo = drive_get_by_blockdev(blk->conf.bs); if (*dinfo->serial) { blk->serial = strdup(dinfo->serial); } } s = (VirtIOBlock *)virtio_common_init(\"virtio-blk\", VIRTIO_ID_BLOCK, sizeof(struct virtio_blk_config), sizeof(VirtIOBlock)); s->vdev.get_config = virtio_blk_update_config; s->vdev.get_features = virtio_blk_get_features; s->vdev.reset = virtio_blk_reset; s->bs = blk->conf.bs; s->conf = &blk->conf; s->blk = blk; s->rq = NULL; s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1; bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs); s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output); qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s); s->qdev = dev; register_savevm(dev, \"virtio-blk\", virtio_blk_id++, 2, virtio_blk_save, virtio_blk_load, s); bdrv_set_dev_ops(s->bs, &virtio_block_ops, s); bdrv_set_buffer_alignment(s->bs, s->conf->logical_block_size); bdrv_iostatus_enable(s->bs); add_boot_device_path(s->conf->bootindex, dev, \"/disk@0,0\"); return &s->vdev; }"} {"target": 1, "idx": 11712, "func": "static int pcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; PCMDecode *s = avctx->priv_data; int sample_size, c, n, i; short *samples; const uint8_t *src, *src8, *src2[MAX_CHANNELS]; uint8_t *dstu8; int16_t *dst_int16_t; int32_t *dst_int32_t; int64_t *dst_int64_t; uint16_t *dst_uint16_t; uint32_t *dst_uint32_t; samples = data; src = buf; if (avctx->sample_fmt!=avctx->codec->sample_fmts[0]) { av_log(avctx, AV_LOG_ERROR, \"invalid sample_fmt\\n\"); return -1; if(avctx->channels <= 0 || avctx->channels > MAX_CHANNELS){ av_log(avctx, AV_LOG_ERROR, \"PCM channels out of bounds\\n\"); return -1; sample_size = av_get_bits_per_sample(avctx->codec_id)/8; /* av_get_bits_per_sample returns 0 for CODEC_ID_PCM_DVD */ if (CODEC_ID_PCM_DVD == avctx->codec_id) /* 2 samples are interleaved per block in PCM_DVD */ sample_size = avctx->bits_per_coded_sample * 2 / 8; else if (avctx->codec_id == CODEC_ID_PCM_LXF) /* we process 40-bit blocks per channel for LXF */ sample_size = 5; n = avctx->channels * sample_size; if(n 
&& buf_size % n){ if (buf_size < n) { av_log(avctx, AV_LOG_ERROR, \"invalid PCM packet\\n\"); return -1; }else buf_size -= buf_size % n; buf_size= FFMIN(buf_size, *data_size/2); *data_size=0; n = buf_size/sample_size; switch(avctx->codec->id) { case CODEC_ID_PCM_U32LE: DECODE(uint32_t, le32, src, samples, n, 0, 0x80000000) break; case CODEC_ID_PCM_U32BE: DECODE(uint32_t, be32, src, samples, n, 0, 0x80000000) break; case CODEC_ID_PCM_S24LE: DECODE(int32_t, le24, src, samples, n, 8, 0) break; case CODEC_ID_PCM_S24BE: DECODE(int32_t, be24, src, samples, n, 8, 0) break; case CODEC_ID_PCM_U24LE: DECODE(uint32_t, le24, src, samples, n, 8, 0x800000) break; case CODEC_ID_PCM_U24BE: DECODE(uint32_t, be24, src, samples, n, 8, 0x800000) break; case CODEC_ID_PCM_S24DAUD: for(;n>0;n--) { uint32_t v = bytestream_get_be24(&src); v >>= 4; // sync flags are here *samples++ = av_reverse[(v >> 8) & 0xff] + (av_reverse[v & 0xff] << 8); break; case CODEC_ID_PCM_S16LE_PLANAR: n /= avctx->channels; for(c=0;cchannels;c++) src2[c] = &src[c*n*2]; for(;n>0;n--) for(c=0;cchannels;c++) *samples++ = bytestream_get_le16(&src2[c]); src = src2[avctx->channels-1]; break; case CODEC_ID_PCM_U16LE: DECODE(uint16_t, le16, src, samples, n, 0, 0x8000) break; case CODEC_ID_PCM_U16BE: DECODE(uint16_t, be16, src, samples, n, 0, 0x8000) break; case CODEC_ID_PCM_S8: dstu8= (uint8_t*)samples; for(;n>0;n--) { *dstu8++ = *src++ + 128; samples= (short*)dstu8; break; #if HAVE_BIGENDIAN case CODEC_ID_PCM_F64LE: DECODE(int64_t, le64, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_F32LE: DECODE(int32_t, le32, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S16LE: DECODE(int16_t, le16, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F64BE: case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_S32BE: case CODEC_ID_PCM_S16BE: #else case CODEC_ID_PCM_F64BE: DECODE(int64_t, be64, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_S32BE: DECODE(int32_t, be32, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S16BE: DECODE(int16_t, be16, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F64LE: case CODEC_ID_PCM_F32LE: case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_S16LE: #endif /* HAVE_BIGENDIAN */ case CODEC_ID_PCM_U8: memcpy(samples, src, n*sample_size); src += n*sample_size; samples = (short*)((uint8_t*)data + n*sample_size); break; case CODEC_ID_PCM_ZORK: for(;n>0;n--) { int x= *src++; if(x&128) x-= 128; else x = -x; *samples++ = x << 8; break; case CODEC_ID_PCM_ALAW: case CODEC_ID_PCM_MULAW: for(;n>0;n--) { *samples++ = s->table[*src++]; break; case CODEC_ID_PCM_DVD: dst_int32_t = data; n /= avctx->channels; switch (avctx->bits_per_coded_sample) { case 20: while (n--) { c = avctx->channels; src8 = src + 4*c; while (c--) { *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8 &0xf0) << 8); *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++ &0x0f) << 12); src = src8; break; case 24: while (n--) { c = avctx->channels; src8 = src + 4*c; while (c--) { *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++) << 8); *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++) << 8); src = src8; break; default: av_log(avctx, AV_LOG_ERROR, \"PCM DVD unsupported sample depth\\n\"); return -1; break; samples = (short *) dst_int32_t; break; case CODEC_ID_PCM_LXF: dst_int32_t = data; n /= avctx->channels; //unpack and de-planerize for (i = 0; i < n; i++) { for (c = 0, src8 = src + i*5; c < avctx->channels; c++, src8 += n*5) { //extract low 20 bits and expand to 32 bits *dst_int32_t++ = (src8[2] << 
28) | (src8[1] << 20) | (src8[0] << 12) | ((src8[2] & 0xF) << 8) | src8[1]; for (c = 0, src8 = src + i*5; c < avctx->channels; c++, src8 += n*5) { //extract high 20 bits and expand to 32 bits *dst_int32_t++ = (src8[4] << 24) | (src8[3] << 16) | ((src8[2] & 0xF0) << 8) | (src8[4] << 4) | (src8[3] >> 4); src += n * avctx->channels * 5; samples = (short *) dst_int32_t; break; default: return -1; *data_size = (uint8_t *)samples - (uint8_t *)data; return src - buf;"} {"target": 1, "idx": 11714, "func": "static int arm946_prbs_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { if (ri->crm > 8) { return EXCP_UDEF; } env->cp15.c6_region[ri->crm] = value; return 0; }"} {"target": 1, "idx": 11742, "func": "static int usb_host_open(USBHostDevice *dev, int bus_num, int addr, const char *port, const char *prod_name, int speed) { int fd = -1, ret; trace_usb_host_open_started(bus_num, addr); if (dev->fd != -1) { goto fail; } fd = usb_host_open_device(bus_num, addr); if (fd < 0) { goto fail; } DPRINTF(\"husb: opened %s\\n\", buf); dev->bus_num = bus_num; dev->addr = addr; strcpy(dev->port, port); dev->fd = fd; /* read the device description */ dev->descr_len = read(fd, dev->descr, sizeof(dev->descr)); if (dev->descr_len <= 0) { perror(\"husb: reading device data failed\"); goto fail; } #ifdef DEBUG { int x; printf(\"=== begin dumping device descriptor data ===\\n\"); for (x = 0; x < dev->descr_len; x++) { printf(\"%02x \", dev->descr[x]); } printf(\"\\n=== end dumping device descriptor data ===\\n\"); } #endif /* start unconfigured -- we'll wait for the guest to set a configuration */ if (!usb_host_claim_interfaces(dev, 0)) { goto fail; } usb_ep_init(&dev->dev); usb_linux_update_endp_table(dev); if (speed == -1) { struct usbdevfs_connectinfo ci; ret = ioctl(fd, USBDEVFS_CONNECTINFO, &ci); if (ret < 0) { perror(\"usb_host_device_open: USBDEVFS_CONNECTINFO\"); goto fail; } if (ci.slow) { speed = USB_SPEED_LOW; } else { speed = USB_SPEED_HIGH; } } dev->dev.speed = speed; dev->dev.speedmask = (1 << speed); if (dev->dev.speed == USB_SPEED_HIGH && usb_linux_full_speed_compat(dev)) { dev->dev.speedmask |= USB_SPEED_MASK_FULL; } trace_usb_host_open_success(bus_num, addr); if (!prod_name || prod_name[0] == '\\0') { snprintf(dev->dev.product_desc, sizeof(dev->dev.product_desc), \"host:%d.%d\", bus_num, addr); } else { pstrcpy(dev->dev.product_desc, sizeof(dev->dev.product_desc), prod_name); } ret = usb_device_attach(&dev->dev); if (ret) { goto fail; } /* USB devio uses 'write' flag to check for async completions */ qemu_set_fd_handler(dev->fd, NULL, async_complete, dev); return 0; fail: trace_usb_host_open_failure(bus_num, addr); if (dev->fd != -1) { close(dev->fd); dev->fd = -1; } return -1; }"} {"target": 1, "idx": 11748, "func": "static int esp_pci_scsi_init(PCIDevice *dev) { PCIESPState *pci = PCI_ESP(dev); DeviceState *d = DEVICE(dev); ESPState *s = &pci->esp; uint8_t *pci_conf; Error *err = NULL; pci_conf = dev->config; /* Interrupt pin A */ pci_conf[PCI_INTERRUPT_PIN] = 0x01; s->dma_memory_read = esp_pci_dma_memory_read; s->dma_memory_write = esp_pci_dma_memory_write; s->dma_opaque = pci; s->chip_id = TCHI_AM53C974; memory_region_init_io(&pci->io, OBJECT(pci), &esp_pci_io_ops, pci, \"esp-io\", 0x80); pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io); s->irq = pci_allocate_irq(dev); scsi_bus_new(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info, NULL); if (!d->hotplugged) { scsi_bus_legacy_handle_cmdline(&s->bus, &err); if (err != NULL) { error_free(err); return -1; } } return 0; }"} 
{"target": 1, "idx": 11749, "func": "static void copy_frame(Jpeg2000EncoderContext *s) { int tileno, compno, i, y, x; uint8_t *line; for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){ Jpeg2000Tile *tile = s->tile + tileno; if (s->planar){ for (compno = 0; compno < s->ncomponents; compno++){ Jpeg2000Component *comp = tile->comp + compno; int *dst = comp->data; line = s->picture.data[compno] + comp->coord[1][0] * s->picture.linesize[compno] + comp->coord[0][0]; for (y = comp->coord[1][0]; y < comp->coord[1][1]; y++){ uint8_t *ptr = line; for (x = comp->coord[0][0]; x < comp->coord[0][1]; x++) *dst++ = *ptr++ - (1 << 7); line += s->picture.linesize[compno]; } } } else{ line = s->picture.data[0] + tile->comp[0].coord[1][0] * s->picture.linesize[0] + tile->comp[0].coord[0][0] * s->ncomponents; i = 0; for (y = tile->comp[0].coord[1][0]; y < tile->comp[0].coord[1][1]; y++){ uint8_t *ptr = line; for (x = tile->comp[0].coord[0][0]; x < tile->comp[0].coord[0][1]; x++, i++){ for (compno = 0; compno < s->ncomponents; compno++){ tile->comp[compno].data[i] = *ptr++ - (1 << 7); } } line += s->picture.linesize[0]; } } } }"} {"target": 0, "idx": 11781, "func": "static off_t proxy_telldir(FsContext *ctx, V9fsFidOpenState *fs) { return telldir(fs->dir); }"} {"target": 0, "idx": 11793, "func": "static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref) { AResampleContext *aresample = inlink->dst->priv; const int n_in = insamplesref->audio->nb_samples; int n_out = FFMAX(n_in * aresample->ratio * 2, 1); AVFilterLink *const outlink = inlink->dst->outputs[0]; AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out); int ret; if(!outsamplesref) return AVERROR(ENOMEM); avfilter_copy_buffer_ref_props(outsamplesref, insamplesref); outsamplesref->format = outlink->format; outsamplesref->audio->channel_layout = outlink->channel_layout; outsamplesref->audio->sample_rate = outlink->sample_rate; if(insamplesref->pts != AV_NOPTS_VALUE) { int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den); int64_t outpts= swr_next_pts(aresample->swr, inpts); aresample->next_pts = outsamplesref->pts = (outpts + inlink->sample_rate/2) / inlink->sample_rate; } else { outsamplesref->pts = AV_NOPTS_VALUE; } n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, (void *)insamplesref->extended_data, n_in); if (n_out <= 0) { avfilter_unref_buffer(outsamplesref); avfilter_unref_buffer(insamplesref); return 0; } outsamplesref->audio->nb_samples = n_out; ret = ff_filter_samples(outlink, outsamplesref); aresample->req_fullfilled= 1; avfilter_unref_buffer(insamplesref); return ret; }"} {"target": 0, "idx": 11795, "func": "static int mov_read_ctts(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom) { AVStream *st = c->fc->streams[c->fc->nb_streams-1]; MOVStreamContext *sc = st->priv_data; unsigned int i, entries; get_byte(pb); /* version */ get_be24(pb); /* flags */ entries = get_be32(pb); if(entries >= UINT_MAX / sizeof(MOV_stts_t)) return -1; sc->ctts_count = entries; sc->ctts_data = av_malloc(entries * sizeof(MOV_stts_t)); if (!sc->ctts_data) return -1; dprintf(c->fc, \"track[%i].ctts.entries = %i\\n\", c->fc->nb_streams-1, entries); for(i=0; ifc, AV_LOG_ERROR, \"negative ctts, ignoring\\n\"); sc->ctts_count = 0; url_fskip(pb, 8 * (entries - i - 1)); break; } sc->ctts_data[i].count = count; sc->ctts_data[i].duration= duration; sc->time_rate= ff_gcd(sc->time_rate, duration); } 
return 0; }"} {"target": 1, "idx": 11806, "func": "int64_t av_get_int(void *obj, const char *name, const AVOption **o_out) { int64_t intnum=1; double num=1; int den=1; av_get_number(obj, name, o_out, &num, &den, &intnum); return num*intnum/den; }"} {"target": 1, "idx": 11810, "func": "void commit_active_start(BlockDriverState *bs, BlockDriverState *base, int64_t speed, BlockdevOnError on_error, BlockDriverCompletionFunc *cb, void *opaque, Error **errp) { int64_t length, base_length; int orig_base_flags; orig_base_flags = bdrv_get_flags(base); if (bdrv_reopen(base, bs->open_flags, errp)) { return; } length = bdrv_getlength(bs); if (length < 0) { error_setg(errp, \"Unable to determine length of %s\", bs->filename); goto error_restore_flags; } base_length = bdrv_getlength(base); if (base_length < 0) { error_setg(errp, \"Unable to determine length of %s\", base->filename); goto error_restore_flags; } if (length > base_length) { if (bdrv_truncate(base, length) < 0) { error_setg(errp, \"Top image %s is larger than base image %s, and \" \"resize of base image failed\", bs->filename, base->filename); goto error_restore_flags; } } bdrv_ref(base); mirror_start_job(bs, base, speed, 0, 0, on_error, on_error, cb, opaque, errp, &commit_active_job_driver, false, base); if (error_is_set(errp)) { goto error_restore_flags; } return; error_restore_flags: /* ignore error and errp for bdrv_reopen, because we want to propagate * the original error */ bdrv_reopen(base, orig_base_flags, NULL); return; }"} {"target": 1, "idx": 11813, "func": "static int matroska_decode_buffer(uint8_t** buf, int* buf_size, MatroskaTrack *track) { MatroskaTrackEncoding *encodings = track->encodings.elem; uint8_t* data = *buf; int isize = *buf_size; uint8_t* pkt_data = NULL; int pkt_size = isize; int result = 0; int olen; switch (encodings[0].compression.algo) { case MATROSKA_TRACK_ENCODING_COMP_HEADERSTRIP: return encodings[0].compression.settings.size; case MATROSKA_TRACK_ENCODING_COMP_LZO: do { olen = pkt_size *= 3; pkt_data = av_realloc(pkt_data, pkt_size+AV_LZO_OUTPUT_PADDING); result = av_lzo1x_decode(pkt_data, &olen, data, &isize); } while (result==AV_LZO_OUTPUT_FULL && pkt_size<10000000); if (result) goto failed; pkt_size -= olen; break; #if CONFIG_ZLIB case MATROSKA_TRACK_ENCODING_COMP_ZLIB: { z_stream zstream = {0}; if (inflateInit(&zstream) != Z_OK) zstream.next_in = data; zstream.avail_in = isize; do { pkt_size *= 3; pkt_data = av_realloc(pkt_data, pkt_size); zstream.avail_out = pkt_size - zstream.total_out; zstream.next_out = pkt_data + zstream.total_out; result = inflate(&zstream, Z_NO_FLUSH); } while (result==Z_OK && pkt_size<10000000); pkt_size = zstream.total_out; inflateEnd(&zstream); if (result != Z_STREAM_END) goto failed; break; } #endif #if CONFIG_BZLIB case MATROSKA_TRACK_ENCODING_COMP_BZLIB: { bz_stream bzstream = {0}; if (BZ2_bzDecompressInit(&bzstream, 0, 0) != BZ_OK) bzstream.next_in = data; bzstream.avail_in = isize; do { pkt_size *= 3; pkt_data = av_realloc(pkt_data, pkt_size); bzstream.avail_out = pkt_size - bzstream.total_out_lo32; bzstream.next_out = pkt_data + bzstream.total_out_lo32; result = BZ2_bzDecompress(&bzstream); } while (result==BZ_OK && pkt_size<10000000); pkt_size = bzstream.total_out_lo32; BZ2_bzDecompressEnd(&bzstream); if (result != BZ_STREAM_END) goto failed; break; } #endif default: } *buf = pkt_data; *buf_size = pkt_size; return 0; failed: av_free(pkt_data); }"} {"target": 0, "idx": 11833, "func": "void alpha_cpu_unassigned_access(CPUState *cs, hwaddr addr, bool is_write, bool 
is_exec, int unused, unsigned size) { AlphaCPU *cpu = ALPHA_CPU(cs); CPUAlphaState *env = &cpu->env; env->trap_arg0 = addr; env->trap_arg1 = is_write ? 1 : 0; dynamic_excp(env, 0, EXCP_MCHK, 0); }"} {"target": 0, "idx": 11834, "func": "static int virtio_serial_init_pci(PCIDevice *pci_dev) { VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev); VirtIODevice *vdev; if (proxy->class_code != PCI_CLASS_COMMUNICATION_OTHER && proxy->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */ proxy->class_code != PCI_CLASS_OTHERS) /* qemu-kvm */ proxy->class_code = PCI_CLASS_COMMUNICATION_OTHER; vdev = virtio_serial_init(&pci_dev->qdev, &proxy->serial); if (!vdev) { return -1; } vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED ? proxy->serial.max_virtserial_ports + 1 : proxy->nvectors; virtio_init_pci(proxy, vdev, PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_DEVICE_ID_VIRTIO_CONSOLE, proxy->class_code, 0x00); proxy->nvectors = vdev->nvectors; return 0; }"} {"target": 0, "idx": 11840, "func": "static CharDriverState *qmp_chardev_open_serial(const char *id, ChardevBackend *backend, ChardevReturn *ret, Error **errp) { ChardevHostdev *serial = backend->serial; int fd; fd = qmp_chardev_open_file_source(serial->device, O_RDWR, errp); if (fd < 0) { return NULL; } qemu_set_nonblock(fd); return qemu_chr_open_tty_fd(fd); }"} {"target": 0, "idx": 11843, "func": "void alpha_pci_vga_setup(PCIBus *pci_bus) { switch (vga_interface_type) { #ifdef CONFIG_SPICE case VGA_QXL: pci_create_simple(pci_bus, -1, \"qxl-vga\"); return; #endif case VGA_CIRRUS: pci_cirrus_vga_init(pci_bus); return; case VGA_VMWARE: if (pci_vmsvga_init(pci_bus)) { return; } break; } /* If VGA is enabled at all, and one of the above didn't work, then fallback to Standard VGA. */ if (vga_interface_type != VGA_NONE) { pci_vga_init(pci_bus); } }"} {"target": 0, "idx": 11844, "func": "struct omap_uart_s *omap2_uart_init(struct omap_target_agent_s *ta, qemu_irq irq, omap_clk fclk, omap_clk iclk, qemu_irq txdma, qemu_irq rxdma, CharDriverState *chr) { target_phys_addr_t base = omap_l4_attach(ta, 0, 0); struct omap_uart_s *s = omap_uart_init(base, irq, fclk, iclk, txdma, rxdma, chr); int iomemtype = cpu_register_io_memory(0, omap_uart_readfn, omap_uart_writefn, s); s->ta = ta; s->base = base; cpu_register_physical_memory(s->base + 0x20, 0x100, iomemtype); return s; }"} {"target": 0, "idx": 11852, "func": "static void QEMU_NORETURN help(void) { const char *help_msg = QEMU_IMG_VERSION \"usage: qemu-img [standard options] command [command options]\\n\" \"QEMU disk image utility\\n\" \"\\n\" \" '-h', '--help' display this help and exit\\n\" \" '-V', '--version' output version information and exit\\n\" \" '-T', '--trace' [[enable=]][,events=][,file=]\\n\" \" specify tracing options\\n\" \"\\n\" \"Command syntax:\\n\" #define DEF(option, callback, arg_string) \\ \" \" arg_string \"\\n\" #include \"qemu-img-cmds.h\" #undef DEF #undef GEN_DOCS \"\\n\" \"Command parameters:\\n\" \" 'filename' is a disk image filename\\n\" \" 'objectdef' is a QEMU user creatable object definition. See the qemu(1)\\n\" \" manual page for a description of the object properties. The most common\\n\" \" object type is a 'secret', which is used to supply passwords and/or\\n\" \" encryption keys.\\n\" \" 'fmt' is the disk image format. 
It is guessed automatically in most cases\\n\" \" 'cache' is the cache mode used to write the output disk image, the valid\\n\" \" options are: 'none', 'writeback' (default, except for convert), 'writethrough',\\n\" \" 'directsync' and 'unsafe' (default for convert)\\n\" \" 'src_cache' is the cache mode used to read input disk images, the valid\\n\" \" options are the same as for the 'cache' option\\n\" \" 'size' is the disk image size in bytes. Optional suffixes\\n\" \" 'k' or 'K' (kilobyte, 1024), 'M' (megabyte, 1024k), 'G' (gigabyte, 1024M),\\n\" \" 'T' (terabyte, 1024G), 'P' (petabyte, 1024T) and 'E' (exabyte, 1024P) are\\n\" \" supported. 'b' is ignored.\\n\" \" 'output_filename' is the destination disk image filename\\n\" \" 'output_fmt' is the destination format\\n\" \" 'options' is a comma separated list of format specific options in a\\n\" \" name=value format. Use -o ? for an overview of the options supported by the\\n\" \" used format\\n\" \" 'snapshot_param' is param used for internal snapshot, format\\n\" \" is 'snapshot.id=[ID],snapshot.name=[NAME]', or\\n\" \" '[ID_OR_NAME]'\\n\" \" 'snapshot_id_or_name' is deprecated, use 'snapshot_param'\\n\" \" instead\\n\" \" '-c' indicates that target image must be compressed (qcow format only)\\n\" \" '-u' enables unsafe rebasing. It is assumed that old and new backing file\\n\" \" match exactly. The image doesn't need a working backing file before\\n\" \" rebasing in this case (useful for renaming the backing file)\\n\" \" '-h' with or without a command shows this help and lists the supported formats\\n\" \" '-p' show progress of command (only certain commands)\\n\" \" '-q' use Quiet mode - do not print any output (except errors)\\n\" \" '-S' indicates the consecutive number of bytes (defaults to 4k) that must\\n\" \" contain only zeros for qemu-img to create a sparse image during\\n\" \" conversion. 
If the number of bytes is 0, the source will not be scanned for\\n\" \" unallocated or zero sectors, and the destination image will always be\\n\" \" fully allocated\\n\" \" '--output' takes the format in which the output must be done (human or json)\\n\" \" '-n' skips the target volume creation (useful if the volume is created\\n\" \" prior to running qemu-img)\\n\" \"\\n\" \"Parameters to check subcommand:\\n\" \" '-r' tries to repair any inconsistencies that are found during the check.\\n\" \" '-r leaks' repairs only cluster leaks, whereas '-r all' fixes all\\n\" \" kinds of errors, with a higher risk of choosing the wrong fix or\\n\" \" hiding corruption that has already occurred.\\n\" \"\\n\" \"Parameters to convert subcommand:\\n\" \" '-m' specifies how many coroutines work in parallel during the convert\\n\" \" process (defaults to 8)\\n\" \" '-W' allow to write to the target out of order rather than sequential\\n\" \"\\n\" \"Parameters to snapshot subcommand:\\n\" \" 'snapshot' is the name of the snapshot to create, apply or delete\\n\" \" '-a' applies a snapshot (revert disk to saved state)\\n\" \" '-c' creates a snapshot\\n\" \" '-d' deletes a snapshot\\n\" \" '-l' lists all snapshots in the given image\\n\" \"\\n\" \"Parameters to compare subcommand:\\n\" \" '-f' first image format\\n\" \" '-F' second image format\\n\" \" '-s' run in Strict mode - fail on different image size or sector allocation\\n\" \"\\n\" \"Parameters to dd subcommand:\\n\" \" 'bs=BYTES' read and write up to BYTES bytes at a time \" \"(default: 512)\\n\" \" 'count=N' copy only N input blocks\\n\" \" 'if=FILE' read from FILE\\n\" \" 'of=FILE' write to FILE\\n\" \" 'skip=N' skip N bs-sized blocks at the start of input\\n\"; printf(\"%s\\nSupported formats:\", help_msg); bdrv_iterate_format(format_print, NULL); printf(\"\\n\"); exit(EXIT_SUCCESS); }"} {"target": 0, "idx": 11853, "func": "static void coroutine_fn bdrv_create_co_entry(void *opaque) { Error *local_err = NULL; int ret; CreateCo *cco = opaque; assert(cco->drv); ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err); if (local_err) { error_propagate(&cco->err, local_err); } cco->ret = ret; }"} {"target": 0, "idx": 11854, "func": "void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1) { target_ulong old, val, mask; mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask; if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) { mask |= 1 << CP0EnHi_EHINV; } /* 1k pages not implemented */ #if defined(TARGET_MIPS64) if (env->insn_flags & ISA_MIPS32R6) { int entryhi_r = extract64(arg1, 62, 2); int config0_at = extract32(env->CP0_Config0, 13, 2); bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0; if ((entryhi_r == 2) || (entryhi_r == 1 && (no_supervisor || config0_at == 1))) { /* skip EntryHi.R field if new value is reserved */ mask &= ~(0x3ull << 62); } } mask &= env->SEGMask; #endif old = env->CP0_EntryHi; val = (arg1 & mask) | (old & ~mask); env->CP0_EntryHi = val; if (env->CP0_Config3 & (1 << CP0C3_MT)) { sync_c0_entryhi(env, env->current_tc); } /* If the ASID changes, flush qemu's TLB. 
*/ if ((old & env->CP0_EntryHi_ASID_mask) != (val & env->CP0_EntryHi_ASID_mask)) { cpu_mips_tlb_flush(env); } }"} {"target": 1, "idx": 11861, "func": "static bool vmxnet3_verify_driver_magic(hwaddr dshmem) { return (VMXNET3_READ_DRV_SHARED32(dshmem, magic) == VMXNET3_REV1_MAGIC); }"} {"target": 1, "idx": 11865, "func": "ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) { ram_addr_t ram_addr; if (qemu_ram_addr_from_host(ptr, &ram_addr)) { fprintf(stderr, \"Bad ram pointer %p\\n\", ptr); abort(); } return ram_addr; }"} {"target": 1, "idx": 11877, "func": "static int decode_format80(VqaContext *s, int src_size, unsigned char *dest, int dest_size, int check_size) { int dest_index = 0; int count, opcode, start; int src_pos; unsigned char color; int i; start = bytestream2_tell(&s->gb); while (bytestream2_tell(&s->gb) - start < src_size) { opcode = bytestream2_get_byte(&s->gb); av_dlog(s->avctx, \"opcode %02X: \", opcode); /* 0x80 means that frame is finished */ if (opcode == 0x80) break; if (dest_index >= dest_size) { av_log(s->avctx, AV_LOG_ERROR, \"decode_format80 problem: dest_index (%d) exceeded dest_size (%d)\\n\", dest_index, dest_size); return AVERROR_INVALIDDATA; } if (opcode == 0xFF) { count = bytestream2_get_le16(&s->gb); src_pos = bytestream2_get_le16(&s->gb); av_dlog(s->avctx, \"(1) copy %X bytes from absolute pos %X\\n\", count, src_pos); CHECK_COUNT(); CHECK_COPY(src_pos); for (i = 0; i < count; i++) dest[dest_index + i] = dest[src_pos + i]; dest_index += count; } else if (opcode == 0xFE) { count = bytestream2_get_le16(&s->gb); color = bytestream2_get_byte(&s->gb); av_dlog(s->avctx, \"(2) set %X bytes to %02X\\n\", count, color); CHECK_COUNT(); memset(&dest[dest_index], color, count); dest_index += count; } else if ((opcode & 0xC0) == 0xC0) { count = (opcode & 0x3F) + 3; src_pos = bytestream2_get_le16(&s->gb); av_dlog(s->avctx, \"(3) copy %X bytes from absolute pos %X\\n\", count, src_pos); CHECK_COUNT(); CHECK_COPY(src_pos); for (i = 0; i < count; i++) dest[dest_index + i] = dest[src_pos + i]; dest_index += count; } else if (opcode > 0x80) { count = opcode & 0x3F; av_dlog(s->avctx, \"(4) copy %X bytes from source to dest\\n\", count); CHECK_COUNT(); bytestream2_get_buffer(&s->gb, &dest[dest_index], count); dest_index += count; } else { count = ((opcode & 0x70) >> 4) + 3; src_pos = bytestream2_get_byte(&s->gb) | ((opcode & 0x0F) << 8); av_dlog(s->avctx, \"(5) copy %X bytes from relpos %X\\n\", count, src_pos); CHECK_COUNT(); CHECK_COPY(dest_index - src_pos); for (i = 0; i < count; i++) dest[dest_index + i] = dest[dest_index - src_pos + i]; dest_index += count; } } /* validate that the entire destination buffer was filled; this is * important for decoding frame maps since each vector needs to have a * codebook entry; it is not important for compressed codebooks because * not every entry needs to be filled */ if (check_size) if (dest_index < dest_size) av_log(s->avctx, AV_LOG_ERROR, \"decode_format80 problem: decode finished with dest_index (%d) < dest_size (%d)\\n\", dest_index, dest_size); return 0; // let's display what we decoded anyway }"} {"target": 1, "idx": 11885, "func": "static int zero12v_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { int line = 0, ret; const int width = avctx->width; AVFrame *pic = data; uint16_t *y, *u, *v; const uint8_t *line_end, *src = avpkt->data; int stride = avctx->width * 8 / 3; if (width == 1) { av_log(avctx, AV_LOG_ERROR, \"Width 1 not supported.\\n\"); return AVERROR_INVALIDDATA; } if ( avctx->codec_tag == 
MKTAG('0', '1', '2', 'v') && avpkt->size % avctx->height == 0 && avpkt->size / avctx->height * 3 >= width * 8) stride = avpkt->size / avctx->height; if (avpkt->size < avctx->height * stride) { av_log(avctx, AV_LOG_ERROR, \"Packet too small: %d instead of %d\\n\", avpkt->size, avctx->height * stride); return AVERROR_INVALIDDATA; } if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) return ret; pic->pict_type = AV_PICTURE_TYPE_I; pic->key_frame = 1; y = (uint16_t *)pic->data[0]; u = (uint16_t *)pic->data[1]; v = (uint16_t *)pic->data[2]; line_end = avpkt->data + stride; while (line++ < avctx->height) { while (1) { uint32_t t = AV_RL32(src); src += 4; *u++ = t << 6 & 0xFFC0; *y++ = t >> 4 & 0xFFC0; *v++ = t >> 14 & 0xFFC0; if (src >= line_end - 1) { *y = 0x80; src++; line_end += stride; y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]); u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]); v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]); break; } t = AV_RL32(src); src += 4; *y++ = t << 6 & 0xFFC0; *u++ = t >> 4 & 0xFFC0; *y++ = t >> 14 & 0xFFC0; if (src >= line_end - 2) { if (!(width & 1)) { *y = 0x80; src += 2; } line_end += stride; y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]); u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]); v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]); break; } t = AV_RL32(src); src += 4; *v++ = t << 6 & 0xFFC0; *y++ = t >> 4 & 0xFFC0; *u++ = t >> 14 & 0xFFC0; if (src >= line_end - 1) { *y = 0x80; src++; line_end += stride; y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]); u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]); v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]); break; } t = AV_RL32(src); src += 4; *y++ = t << 6 & 0xFFC0; *v++ = t >> 4 & 0xFFC0; *y++ = t >> 14 & 0xFFC0; if (src >= line_end - 2) { if (width & 1) { *y = 0x80; src += 2; } line_end += stride; y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]); u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]); v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]); break; } } } *got_frame = 1; return avpkt->size; }"} {"target": 0, "idx": 11911, "func": "static inline int test_bit(uint32_t *field, int bit) { return (field[bit >> 5] & 1 << (bit & 0x1F)) != 0; }"} {"target": 0, "idx": 11919, "func": "static void test_qemu_strtoul_decimal(void) { const char *str = \"0123\"; char f = 'X'; const char *endptr = &f; unsigned long res = 999; int err; err = qemu_strtoul(str, &endptr, 10, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 123); g_assert(endptr == str + strlen(str)); str = \"123\"; res = 999; endptr = &f; err = qemu_strtoul(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 123); g_assert(endptr == str + strlen(str)); }"} {"target": 1, "idx": 11933, "func": "static void piix3_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); dc->desc = \"ISA bridge\"; dc->vmsd = &vmstate_piix3; dc->no_user = 1, k->no_hotplug = 1; k->init = piix3_initfn; k->config_write = piix3_write_config; k->vendor_id = PCI_VENDOR_ID_INTEL; /* 82371SB PIIX3 PCI-to-ISA bridge (Step A1) */ k->device_id = PCI_DEVICE_ID_INTEL_82371SB_0; k->class_id = PCI_CLASS_BRIDGE_ISA; }"} {"target": 1, "idx": 11936, "func": "qio_channel_websock_source_dispatch(GSource *source, GSourceFunc callback, gpointer user_data) { QIOChannelFunc func = (QIOChannelFunc)callback; QIOChannelWebsockSource *wsource = (QIOChannelWebsockSource *)source; GIOCondition cond = 0; if 
(wsource->wioc->rawinput.offset) { cond |= G_IO_IN; } if (wsource->wioc->rawoutput.offset < QIO_CHANNEL_WEBSOCK_MAX_BUFFER) { cond |= G_IO_OUT; } return (*func)(QIO_CHANNEL(wsource->wioc), (cond & wsource->condition), user_data); }"} {"target": 0, "idx": 11957, "func": "static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap, uint32_t irq, bool use_highmem) { Aml *method, *crs, *ifctx, *UUID, *ifctx1, *elsectx, *buf; int i, bus_no; hwaddr base_mmio = memmap[VIRT_PCIE_MMIO].base; hwaddr size_mmio = memmap[VIRT_PCIE_MMIO].size; hwaddr base_pio = memmap[VIRT_PCIE_PIO].base; hwaddr size_pio = memmap[VIRT_PCIE_PIO].size; hwaddr base_ecam = memmap[VIRT_PCIE_ECAM].base; hwaddr size_ecam = memmap[VIRT_PCIE_ECAM].size; int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN; Aml *dev = aml_device(\"%s\", \"PCI0\"); aml_append(dev, aml_name_decl(\"_HID\", aml_string(\"PNP0A08\"))); aml_append(dev, aml_name_decl(\"_CID\", aml_string(\"PNP0A03\"))); aml_append(dev, aml_name_decl(\"_SEG\", aml_int(0))); aml_append(dev, aml_name_decl(\"_BBN\", aml_int(0))); aml_append(dev, aml_name_decl(\"_ADR\", aml_int(0))); aml_append(dev, aml_name_decl(\"_UID\", aml_string(\"PCI0\"))); aml_append(dev, aml_name_decl(\"_STR\", aml_unicode(\"PCIe 0 Device\"))); aml_append(dev, aml_name_decl(\"_CCA\", aml_int(1))); /* Declare the PCI Routing Table. */ Aml *rt_pkg = aml_package(nr_pcie_buses * PCI_NUM_PINS); for (bus_no = 0; bus_no < nr_pcie_buses; bus_no++) { for (i = 0; i < PCI_NUM_PINS; i++) { int gsi = (i + bus_no) % PCI_NUM_PINS; Aml *pkg = aml_package(4); aml_append(pkg, aml_int((bus_no << 16) | 0xFFFF)); aml_append(pkg, aml_int(i)); aml_append(pkg, aml_name(\"GSI%d\", gsi)); aml_append(pkg, aml_int(0)); aml_append(rt_pkg, pkg); } } aml_append(dev, aml_name_decl(\"_PRT\", rt_pkg)); /* Create GSI link device */ for (i = 0; i < PCI_NUM_PINS; i++) { uint32_t irqs = irq + i; Aml *dev_gsi = aml_device(\"GSI%d\", i); aml_append(dev_gsi, aml_name_decl(\"_HID\", aml_string(\"PNP0C0F\"))); aml_append(dev_gsi, aml_name_decl(\"_UID\", aml_int(0))); crs = aml_resource_template(); aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, AML_EXCLUSIVE, &irqs, 1)); aml_append(dev_gsi, aml_name_decl(\"_PRS\", crs)); crs = aml_resource_template(); aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, AML_EXCLUSIVE, &irqs, 1)); aml_append(dev_gsi, aml_name_decl(\"_CRS\", crs)); method = aml_method(\"_SRS\", 1, AML_NOTSERIALIZED); aml_append(dev_gsi, method); aml_append(dev, dev_gsi); } method = aml_method(\"_CBA\", 0, AML_NOTSERIALIZED); aml_append(method, aml_return(aml_int(base_ecam))); aml_append(dev, method); method = aml_method(\"_CRS\", 0, AML_NOTSERIALIZED); Aml *rbuf = aml_resource_template(); aml_append(rbuf, aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, 0x0000, 0x0000, nr_pcie_buses - 1, 0x0000, nr_pcie_buses)); aml_append(rbuf, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_mmio, base_mmio + size_mmio - 1, 0x0000, size_mmio)); aml_append(rbuf, aml_dword_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, 0x0000, size_pio - 1, base_pio, size_pio)); if (use_highmem) { hwaddr base_mmio_high = memmap[VIRT_PCIE_MMIO_HIGH].base; hwaddr size_mmio_high = memmap[VIRT_PCIE_MMIO_HIGH].size; aml_append(rbuf, aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_mmio_high, base_mmio_high, 0x0000, size_mmio_high)); } 
aml_append(method, aml_name_decl(\"RBUF\", rbuf)); aml_append(method, aml_return(rbuf)); aml_append(dev, method); /* Declare an _OSC (OS Control Handoff) method */ aml_append(dev, aml_name_decl(\"SUPP\", aml_int(0))); aml_append(dev, aml_name_decl(\"CTRL\", aml_int(0))); method = aml_method(\"_OSC\", 4, AML_NOTSERIALIZED); aml_append(method, aml_create_dword_field(aml_arg(3), aml_int(0), \"CDW1\")); /* PCI Firmware Specification 3.0 * 4.5.1. _OSC Interface for PCI Host Bridge Devices * The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is * identified by the Universal Unique IDentifier (UUID) * 33DB4D5B-1FF7-401C-9657-7441C03DD766 */ UUID = aml_touuid(\"33DB4D5B-1FF7-401C-9657-7441C03DD766\"); ifctx = aml_if(aml_equal(aml_arg(0), UUID)); aml_append(ifctx, aml_create_dword_field(aml_arg(3), aml_int(4), \"CDW2\")); aml_append(ifctx, aml_create_dword_field(aml_arg(3), aml_int(8), \"CDW3\")); aml_append(ifctx, aml_store(aml_name(\"CDW2\"), aml_name(\"SUPP\"))); aml_append(ifctx, aml_store(aml_name(\"CDW3\"), aml_name(\"CTRL\"))); aml_append(ifctx, aml_store(aml_and(aml_name(\"CTRL\"), aml_int(0x1D), NULL), aml_name(\"CTRL\"))); ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1)))); aml_append(ifctx1, aml_store(aml_or(aml_name(\"CDW1\"), aml_int(0x08), NULL), aml_name(\"CDW1\"))); aml_append(ifctx, ifctx1); ifctx1 = aml_if(aml_lnot(aml_equal(aml_name(\"CDW3\"), aml_name(\"CTRL\")))); aml_append(ifctx1, aml_store(aml_or(aml_name(\"CDW1\"), aml_int(0x10), NULL), aml_name(\"CDW1\"))); aml_append(ifctx, ifctx1); aml_append(ifctx, aml_store(aml_name(\"CTRL\"), aml_name(\"CDW3\"))); aml_append(ifctx, aml_return(aml_arg(3))); aml_append(method, ifctx); elsectx = aml_else(); aml_append(elsectx, aml_store(aml_or(aml_name(\"CDW1\"), aml_int(4), NULL), aml_name(\"CDW1\"))); aml_append(elsectx, aml_return(aml_arg(3))); aml_append(method, elsectx); aml_append(dev, method); method = aml_method(\"_DSM\", 4, AML_NOTSERIALIZED); /* PCI Firmware Specification 3.0 * 4.6.1. 
_DSM for PCI Express Slot Information * The UUID in _DSM in this context is * {E5C937D0-3553-4D7A-9117-EA4D19C3434D} */ UUID = aml_touuid(\"E5C937D0-3553-4D7A-9117-EA4D19C3434D\"); ifctx = aml_if(aml_equal(aml_arg(0), UUID)); ifctx1 = aml_if(aml_equal(aml_arg(2), aml_int(0))); uint8_t byte_list[1] = {1}; buf = aml_buffer(1, byte_list); aml_append(ifctx1, aml_return(buf)); aml_append(ifctx, ifctx1); aml_append(method, ifctx); byte_list[0] = 0; buf = aml_buffer(1, byte_list); aml_append(method, aml_return(buf)); aml_append(dev, method); Aml *dev_rp0 = aml_device(\"%s\", \"RP0\"); aml_append(dev_rp0, aml_name_decl(\"_ADR\", aml_int(0))); aml_append(dev, dev_rp0); aml_append(scope, dev); }"} {"target": 0, "idx": 11958, "func": "static int vmdk_parent_open(BlockDriverState *bs) { char *p_name; char desc[DESC_SIZE + 1]; BDRVVmdkState *s = bs->opaque; desc[DESC_SIZE] = '\\0'; if (bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE) != DESC_SIZE) { return -1; } if ((p_name = strstr(desc,\"parentFileNameHint\")) != NULL) { char *end_name; p_name += sizeof(\"parentFileNameHint\") + 1; if ((end_name = strchr(p_name,'\\\"')) == NULL) return -1; if ((end_name - p_name) > sizeof (bs->backing_file) - 1) return -1; pstrcpy(bs->backing_file, end_name - p_name + 1, p_name); } return 0; }"} {"target": 0, "idx": 11969, "func": "static MemoryRegionSection address_space_do_translate(AddressSpace *as, hwaddr addr, hwaddr *xlat, hwaddr *plen, bool is_write, bool is_mmio) { IOMMUTLBEntry iotlb; MemoryRegionSection *section; MemoryRegion *mr; for (;;) { AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch); section = address_space_translate_internal(d, addr, &addr, plen, is_mmio); mr = section->mr; if (!mr->iommu_ops) { break; } iotlb = mr->iommu_ops->translate(mr, addr, is_write); addr = ((iotlb.translated_addr & ~iotlb.addr_mask) | (addr & iotlb.addr_mask)); *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1); if (!(iotlb.perm & (1 << is_write))) { goto translate_fail; } as = iotlb.target_as; } *xlat = addr; return *section; translate_fail: return (MemoryRegionSection) { .mr = &io_mem_unassigned }; }"} {"target": 0, "idx": 11975, "func": "static void read_guest_mem(void) { uint32_t *guest_mem; gint64 end_time; int i, j; size_t size; g_mutex_lock(data_mutex); end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND; while (!fds_num) { if (!_cond_wait_until(data_cond, data_mutex, end_time)) { /* timeout has passed */ g_assert(fds_num); break; } } /* check for sanity */ g_assert_cmpint(fds_num, >, 0); g_assert_cmpint(fds_num, ==, memory.nregions); /* iterate all regions */ for (i = 0; i < fds_num; i++) { /* We'll check only the region statring at 0x0*/ if (memory.regions[i].guest_phys_addr != 0x0) { continue; } g_assert_cmpint(memory.regions[i].memory_size, >, 1024); size = memory.regions[i].memory_size + memory.regions[i].mmap_offset; guest_mem = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fds[i], 0); g_assert(guest_mem != MAP_FAILED); guest_mem += (memory.regions[i].mmap_offset / sizeof(*guest_mem)); for (j = 0; j < 256; j++) { uint32_t a = readl(memory.regions[i].guest_phys_addr + j*4); uint32_t b = guest_mem[j]; g_assert_cmpint(a, ==, b); } munmap(guest_mem, memory.regions[i].memory_size); } g_assert_cmpint(1, ==, 1); g_mutex_unlock(data_mutex); }"} {"target": 1, "idx": 11978, "func": "static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out, uint32_t *byte_len) { int ret; struct ibv_wc wc; uint64_t wr_id; ret = ibv_poll_cq(rdma->cq, 1, &wc); if (!ret) { *wr_id_out = RDMA_WRID_NONE; 
return 0; } if (ret < 0) { fprintf(stderr, \"ibv_poll_cq return %d!\\n\", ret); return ret; } wr_id = wc.wr_id & RDMA_WRID_TYPE_MASK; if (wc.status != IBV_WC_SUCCESS) { fprintf(stderr, \"ibv_poll_cq wc.status=%d %s!\\n\", wc.status, ibv_wc_status_str(wc.status)); fprintf(stderr, \"ibv_poll_cq wrid=%s!\\n\", wrid_desc[wr_id]); return -1; } if (rdma->control_ready_expected && (wr_id >= RDMA_WRID_RECV_CONTROL)) { DDDPRINTF(\"completion %s #%\" PRId64 \" received (%\" PRId64 \")\" \" left %d\\n\", wrid_desc[RDMA_WRID_RECV_CONTROL], wr_id - RDMA_WRID_RECV_CONTROL, wr_id, rdma->nb_sent); rdma->control_ready_expected = 0; } if (wr_id == RDMA_WRID_RDMA_WRITE) { uint64_t chunk = (wc.wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT; uint64_t index = (wc.wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT; RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]); DDDPRINTF(\"completions %s (%\" PRId64 \") left %d, \" \"block %\" PRIu64 \", chunk: %\" PRIu64 \" %p %p\\n\", print_wrid(wr_id), wr_id, rdma->nb_sent, index, chunk, block->local_host_addr, (void *)block->remote_host_addr); clear_bit(chunk, block->transit_bitmap); if (rdma->nb_sent > 0) { rdma->nb_sent--; } if (!rdma->pin_all) { /* * FYI: If one wanted to signal a specific chunk to be unregistered * using LRU or workload-specific information, this is the function * you would call to do so. That chunk would then get asynchronously * unregistered later. */ #ifdef RDMA_UNREGISTRATION_EXAMPLE qemu_rdma_signal_unregister(rdma, index, chunk, wc.wr_id); #endif } } else { DDDPRINTF(\"other completion %s (%\" PRId64 \") received left %d\\n\", print_wrid(wr_id), wr_id, rdma->nb_sent); } *wr_id_out = wc.wr_id; if (byte_len) { *byte_len = wc.byte_len; } return 0; }"} {"target": 1, "idx": 11979, "func": "static int mov_read_ftyp(MOVContext *c, AVIOContext *pb, MOVAtom atom) { uint32_t minor_ver; int comp_brand_size; char minor_ver_str[11]; /* 32 bit integer -> 10 digits + null */ char* comp_brands_str; uint8_t type[5] = {0}; avio_read(pb, type, 4); if (strcmp(type, \"qt \")) c->isom = 1; av_log(c->fc, AV_LOG_DEBUG, \"ISO: File Type Major Brand: %.4s\\n\",(char *)&type); av_dict_set(&c->fc->metadata, \"major_brand\", type, 0); minor_ver = avio_rb32(pb); /* minor version */ snprintf(minor_ver_str, sizeof(minor_ver_str), \"%\"PRIu32\"\", minor_ver); av_dict_set(&c->fc->metadata, \"minor_version\", minor_ver_str, 0); comp_brand_size = atom.size - 8; if (comp_brand_size < 0) return AVERROR_INVALIDDATA; comp_brands_str = av_malloc(comp_brand_size + 1); /* Add null terminator */ if (!comp_brands_str) return AVERROR(ENOMEM); avio_read(pb, comp_brands_str, comp_brand_size); comp_brands_str[comp_brand_size] = 0; av_dict_set(&c->fc->metadata, \"compatible_brands\", comp_brands_str, 0); av_freep(&comp_brands_str); return 0; }"} {"target": 1, "idx": 11980, "func": "static void qpci_pc_config_writew(QPCIBus *bus, int devfn, uint8_t offset, uint16_t value) { outl(0xcf8, (1 << 31) | (devfn << 8) | offset); outw(0xcfc, value); }"} {"target": 0, "idx": 11983, "func": "static int decode_stream_header(NUTContext *nut){ AVFormatContext *s= nut->avf; ByteIOContext *bc = &s->pb; StreamContext *stc; int class, stream_id; uint64_t tmp, end; AVStream *st; end= get_packetheader(nut, bc, 1); end += url_ftell(bc); GET_V(stream_id, tmp < s->nb_streams && !nut->stream[tmp].time_base); stc= &nut->stream[stream_id]; st = s->streams[stream_id]; if (!st) return AVERROR(ENOMEM); class = get_v(bc); tmp = get_fourcc(bc); st->codec->codec_tag= tmp; switch(class) { case 0: 
st->codec->codec_type = CODEC_TYPE_VIDEO; st->codec->codec_id = codec_get_id(codec_bmp_tags, tmp); if (st->codec->codec_id == CODEC_ID_NONE) av_log(s, AV_LOG_ERROR, \"Unknown codec?!\\n\"); break; case 1: st->codec->codec_type = CODEC_TYPE_AUDIO; st->codec->codec_id = codec_get_id(codec_wav_tags, tmp); if (st->codec->codec_id == CODEC_ID_NONE) av_log(s, AV_LOG_ERROR, \"Unknown codec?!\\n\"); break; case 2: // st->codec->codec_type = CODEC_TYPE_TEXT; // break; case 3: st->codec->codec_type = CODEC_TYPE_DATA; break; default: av_log(s, AV_LOG_ERROR, \"Unknown stream class (%d)\\n\", class); return -1; } GET_V(stc->time_base_id , tmp < nut->time_base_count); GET_V(stc->msb_pts_shift , tmp < 16); stc->max_pts_distance= get_v(bc); GET_V(stc->decode_delay , tmp < 1000); //sanity limit, raise this if moors law is true st->codec->has_b_frames= stc->decode_delay; get_v(bc); //stream flags GET_V(st->codec->extradata_size, tmp < (1<<30)); if(st->codec->extradata_size){ st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); get_buffer(bc, st->codec->extradata, st->codec->extradata_size); } if (st->codec->codec_type == CODEC_TYPE_VIDEO){ GET_V(st->codec->width , tmp > 0) GET_V(st->codec->height, tmp > 0) st->codec->sample_aspect_ratio.num= get_v(bc); st->codec->sample_aspect_ratio.den= get_v(bc); if((!st->codec->sample_aspect_ratio.num) != (!st->codec->sample_aspect_ratio.den)){ av_log(s, AV_LOG_ERROR, \"invalid aspect ratio\\n\"); return -1; } get_v(bc); /* csp type */ }else if (st->codec->codec_type == CODEC_TYPE_AUDIO){ GET_V(st->codec->sample_rate , tmp > 0) tmp= get_v(bc); // samplerate_den if(tmp > st->codec->sample_rate){ av_log(s, AV_LOG_ERROR, \"bleh, libnut muxed this ;)\\n\"); st->codec->sample_rate= tmp; } GET_V(st->codec->channels, tmp > 0) } if(skip_reserved(bc, end) || get_checksum(bc)){ av_log(s, AV_LOG_ERROR, \"Stream header %d checksum mismatch\\n\", stream_id); return -1; } stc->time_base= &nut->time_base[stc->time_base_id]; av_set_pts_info(s->streams[stream_id], 63, stc->time_base->num, stc->time_base->den); return 0; }"} {"target": 1, "idx": 11985, "func": "static void lz_unpack(const unsigned char *src, unsigned char *dest, int dest_len) { const unsigned char *s; unsigned char *d; unsigned char *d_end; unsigned char queue[QUEUE_SIZE]; unsigned int qpos; unsigned int dataleft; unsigned int chainofs; unsigned int chainlen; unsigned int speclen; unsigned char tag; unsigned int i, j; s = src; d = dest; d_end = d + dest_len; dataleft = AV_RL32(s); s += 4; memset(queue, 0x20, QUEUE_SIZE); if (AV_RL32(s) == 0x56781234) { s += 4; qpos = 0x111; speclen = 0xF + 3; } else { qpos = 0xFEE; speclen = 100; /* no speclen */ } while (dataleft > 0) { tag = *s++; if ((tag == 0xFF) && (dataleft > 8)) { if (d + 8 > d_end) return; for (i = 0; i < 8; i++) { queue[qpos++] = *d++ = *s++; qpos &= QUEUE_MASK; } dataleft -= 8; } else { for (i = 0; i < 8; i++) { if (dataleft == 0) break; if (tag & 0x01) { if (d + 1 > d_end) return; queue[qpos++] = *d++ = *s++; qpos &= QUEUE_MASK; dataleft--; } else { chainofs = *s++; chainofs |= ((*s & 0xF0) << 4); chainlen = (*s++ & 0x0F) + 3; if (chainlen == speclen) chainlen = *s++ + 0xF + 3; if (d + chainlen > d_end) return; for (j = 0; j < chainlen; j++) { *d = queue[chainofs++ & QUEUE_MASK]; queue[qpos++] = *d++; qpos &= QUEUE_MASK; } dataleft -= chainlen; } tag >>= 1; } } } }"} {"target": 1, "idx": 11988, "func": "void scsi_req_cancel(SCSIRequest *req) { trace_scsi_req_cancel(req->dev->id, req->lun, req->tag); if (!req->enqueued) { 
return; } scsi_req_ref(req); scsi_req_dequeue(req); req->io_canceled = true; if (req->aiocb) { blk_aio_cancel(req->aiocb); } else { scsi_req_cancel_complete(req); } }"} {"target": 1, "idx": 11998, "func": "static void gen_exception_return(DisasContext *s, TCGv_i32 pc) { TCGv_i32 tmp; store_reg(s, 15, pc); tmp = load_cpu_field(spsr); gen_set_cpsr(tmp, CPSR_ERET_MASK); tcg_temp_free_i32(tmp); s->is_jmp = DISAS_UPDATE; }"} {"target": 0, "idx": 12023, "func": "static void qio_channel_websock_write_close(QIOChannelWebsock *ioc, uint16_t code, const char *reason) { struct iovec iov; buffer_reserve(&ioc->rawoutput, 2 + (reason ? strlen(reason) : 0)); *(uint16_t *)(ioc->rawoutput.buffer + ioc->rawoutput.offset) = cpu_to_be16(code); ioc->rawoutput.offset += 2; if (reason) { buffer_append(&ioc->rawoutput, reason, strlen(reason)); } iov.iov_base = ioc->rawoutput.buffer; iov.iov_len = ioc->rawoutput.offset; qio_channel_websock_encode(ioc, QIO_CHANNEL_WEBSOCK_OPCODE_CLOSE, &iov, 1, iov.iov_len); buffer_reset(&ioc->rawoutput); qio_channel_websock_write_wire(ioc, NULL); qio_channel_shutdown(ioc->master, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); }"} {"target": 0, "idx": 12026, "func": "static void fadt_setup(AcpiFadtDescriptorRev1 *fadt, AcpiPmInfo *pm) { fadt->model = 1; fadt->reserved1 = 0; fadt->sci_int = cpu_to_le16(pm->sci_int); fadt->smi_cmd = cpu_to_le32(ACPI_PORT_SMI_CMD); fadt->acpi_enable = pm->acpi_enable_cmd; fadt->acpi_disable = pm->acpi_disable_cmd; /* EVT, CNT, TMR offset matches hw/acpi/core.c */ fadt->pm1a_evt_blk = cpu_to_le32(pm->io_base); fadt->pm1a_cnt_blk = cpu_to_le32(pm->io_base + 0x04); fadt->pm_tmr_blk = cpu_to_le32(pm->io_base + 0x08); fadt->gpe0_blk = cpu_to_le32(pm->gpe0_blk); /* EVT, CNT, TMR length matches hw/acpi/core.c */ fadt->pm1_evt_len = 4; fadt->pm1_cnt_len = 2; fadt->pm_tmr_len = 4; fadt->gpe0_blk_len = pm->gpe0_blk_len; fadt->plvl2_lat = cpu_to_le16(0xfff); /* C2 state not supported */ fadt->plvl3_lat = cpu_to_le16(0xfff); /* C3 state not supported */ fadt->flags = cpu_to_le32((1 << ACPI_FADT_F_WBINVD) | (1 << ACPI_FADT_F_PROC_C1) | (1 << ACPI_FADT_F_SLP_BUTTON) | (1 << ACPI_FADT_F_RTC_S4)); fadt->flags |= cpu_to_le32(1 << ACPI_FADT_F_USE_PLATFORM_CLOCK); /* APIC destination mode (\"Flat Logical\") has an upper limit of 8 CPUs * For more than 8 CPUs, \"Clustered Logical\" mode has to be used */ if (max_cpus > 8) { fadt->flags |= cpu_to_le32(1 << ACPI_FADT_F_FORCE_APIC_CLUSTER_MODEL); } fadt->century = RTC_CENTURY; }"} {"target": 0, "idx": 12036, "func": "void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val) { ar->pm1.cnt.cnt = val & ~(ACPI_BITMASK_SLEEP_ENABLE); if (val & ACPI_BITMASK_SLEEP_ENABLE) { /* change suspend type */ uint16_t sus_typ = (val >> 10) & 7; switch(sus_typ) { case 0: /* soft power off */ qemu_system_shutdown_request(); break; case 1: /* ACPI_BITMASK_WAKE_STATUS should be set on resume. 
Pretend that resume was caused by power button */ ar->pm1.evt.sts |= (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_POWER_BUTTON_STATUS); qemu_system_reset_request(); qemu_irq_raise(ar->pm1.cnt.cmos_s3); default: break; } } }"} {"target": 0, "idx": 12038, "func": "static void win32_rearm_timer(struct qemu_alarm_timer *t) { struct qemu_alarm_win32 *data = t->priv; uint64_t nearest_delta_us; if (!active_timers[QEMU_TIMER_REALTIME] && !active_timers[QEMU_TIMER_VIRTUAL]) return; nearest_delta_us = qemu_next_deadline_dyntick(); nearest_delta_us /= 1000; timeKillEvent(data->timerId); data->timerId = timeSetEvent(1, data->period, host_alarm_handler, (DWORD)t, TIME_ONESHOT | TIME_PERIODIC); if (!data->timerId) { fprintf(stderr, \"Failed to re-arm win32 alarm timer %ld\\n\", GetLastError()); timeEndPeriod(data->period); exit(1); } }"} {"target": 1, "idx": 12075, "func": "static void qapi_dealloc_end_struct(Visitor *v, Error **errp) { QapiDeallocVisitor *qov = to_qov(v); void **obj = qapi_dealloc_pop(qov); if (obj) { g_free(*obj); } }"} {"target": 1, "idx": 12079, "func": "static void file_completion(const char *input) { DIR *ffs; struct dirent *d; char path[1024]; char file[1024], file_prefix[1024]; int input_path_len; const char *p; p = strrchr(input, '/'); if (!p) { input_path_len = 0; pstrcpy(file_prefix, sizeof(file_prefix), input); pstrcpy(path, sizeof(path), \".\"); } else { input_path_len = p - input + 1; memcpy(path, input, input_path_len); if (input_path_len > sizeof(path) - 1) input_path_len = sizeof(path) - 1; path[input_path_len] = '\\0'; pstrcpy(file_prefix, sizeof(file_prefix), p + 1); } #ifdef DEBUG_COMPLETION monitor_printf(cur_mon, \"input='%s' path='%s' prefix='%s'\\n\", input, path, file_prefix); #endif ffs = opendir(path); if (!ffs) return; for(;;) { struct stat sb; d = readdir(ffs); if (!d) break; if (strcmp(d->d_name, \".\") == 0 || strcmp(d->d_name, \"..\") == 0) { continue; } if (strstart(d->d_name, file_prefix, NULL)) { memcpy(file, input, input_path_len); if (input_path_len < sizeof(file)) pstrcpy(file + input_path_len, sizeof(file) - input_path_len, d->d_name); /* stat the file to find out if it's a directory. 
* In that case add a slash to speed up typing long paths */ stat(file, &sb); if(S_ISDIR(sb.st_mode)) pstrcat(file, sizeof(file), \"/\"); readline_add_completion(cur_mon->rs, file); } } closedir(ffs); }"} {"target": 1, "idx": 12083, "func": "static void set_int8(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { DeviceState *dev = DEVICE(obj); Property *prop = opaque; int8_t *ptr = qdev_get_prop_ptr(dev, prop); Error *local_err = NULL; int64_t value; if (dev->state != DEV_STATE_CREATED) { error_set(errp, QERR_PERMISSION_DENIED); return; } visit_type_int(v, &value, name, &local_err); if (local_err) { error_propagate(errp, local_err); return; } if (value > prop->info->min && value <= prop->info->max) { *ptr = value; } else { error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, dev->id?:\"\", name, value, prop->info->min, prop->info->max); } }"} {"target": 1, "idx": 12086, "func": "static void enter_migration_coroutine(void *opaque) { Coroutine *co = opaque; qemu_coroutine_enter(co, NULL); }"} {"target": 1, "idx": 12095, "func": "static int decode_channel_residues(WmallDecodeCtx *s, int ch, int tile_size) { int i = 0; unsigned int ave_mean; s->transient[ch] = get_bits1(&s->gb); if (s->transient[ch]) { s->transient_pos[ch] = get_bits(&s->gb, av_log2(tile_size)); if (s->transient_pos[ch]) s->transient[ch] = 0; s->channel[ch].transient_counter = FFMAX(s->channel[ch].transient_counter, s->samples_per_frame / 2); } else if (s->channel[ch].transient_counter) s->transient[ch] = 1; if (s->seekable_tile) { ave_mean = get_bits(&s->gb, s->bits_per_sample); s->ave_sum[ch] = ave_mean << (s->movave_scaling + 1); } if (s->seekable_tile) { if (s->do_inter_ch_decorr) s->channel_residues[ch][0] = get_sbits(&s->gb, s->bits_per_sample + 1); else s->channel_residues[ch][0] = get_sbits(&s->gb, s->bits_per_sample); i++; } for (; i < tile_size; i++) { int quo = 0, rem, rem_bits, residue; while(get_bits1(&s->gb)) { quo++; if (get_bits_left(&s->gb) <= 0) return -1; } if (quo >= 32) quo += get_bits_long(&s->gb, get_bits(&s->gb, 5) + 1); ave_mean = (s->ave_sum[ch] + (1 << s->movave_scaling)) >> (s->movave_scaling + 1); if (ave_mean <= 1) residue = quo; else { rem_bits = av_ceil_log2(ave_mean); rem = rem_bits ? 
get_bits(&s->gb, rem_bits) : 0; residue = (quo << rem_bits) + rem; } s->ave_sum[ch] = residue + s->ave_sum[ch] - (s->ave_sum[ch] >> s->movave_scaling); if (residue & 1) residue = -(residue >> 1) - 1; else residue = residue >> 1; s->channel_residues[ch][i] = residue; } return 0; }"} {"target": 0, "idx": 12099, "func": "static int cinaudio_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; CinAudioContext *cin = avctx->priv_data; const uint8_t *src = buf; int16_t *samples = data; int delta; buf_size = FFMIN(buf_size, *data_size/2); delta = cin->delta; if (cin->initial_decode_frame) { cin->initial_decode_frame = 0; delta = (int16_t)AV_RL16(src); src += 2; *samples++ = delta; buf_size -= 2; } while (buf_size > 0) { delta += cinaudio_delta16_table[*src++]; delta = av_clip_int16(delta); *samples++ = delta; --buf_size; } cin->delta = delta; *data_size = (uint8_t *)samples - (uint8_t *)data; return src - buf; }"} {"target": 0, "idx": 12106, "func": "static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir) { MpegEncContext *s = &r->s; int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride; int A[2] = {0}, B[2], C[2]; int i, j, k; int mx, my; int avail_index = avail_indexes[0]; if(r->avail_cache[avail_index - 1]){ A[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - 1][0]; A[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - 1][1]; } if(r->avail_cache[avail_index - 4]){ B[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride][0]; B[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride][1]; }else{ B[0] = A[0]; B[1] = A[1]; } if(!r->avail_cache[avail_index - 4 + 2]){ if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1])){ C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride - 1][0]; C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride - 1][1]; }else{ C[0] = A[0]; C[1] = A[1]; } }else{ C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride + 2][0]; C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride + 2][1]; } mx = mid_pred(A[0], B[0], C[0]); my = mid_pred(A[1], B[1], C[1]); mx += r->dmv[0][0]; my += r->dmv[0][1]; for(j = 0; j < 2; j++){ for(i = 0; i < 2; i++){ for(k = 0; k < 2; k++){ s->current_picture_ptr->f.motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx; s->current_picture_ptr->f.motion_val[k][mv_pos + i + j*s->b8_stride][1] = my; } } } }"} {"target": 0, "idx": 12111, "func": "int qemu_paio_init(struct qemu_paioinit *aioinit) { int ret; ret = pthread_attr_init(&attr); if (ret) die2(ret, \"pthread_attr_init\"); ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); if (ret) die2(ret, \"pthread_attr_setdetachstate\"); TAILQ_INIT(&request_list); return 0; }"} {"target": 0, "idx": 12130, "func": "dprint(int level, const char *fmt, ...) 
{ va_list args; if (level <= debug) { va_start(args, fmt); vfprintf(stderr, fmt, args); va_end(args); } }"} {"target": 0, "idx": 12133, "func": "static void s390_pcihost_hot_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { PCIDevice *pci_dev = PCI_DEVICE(dev); S390PCIBusDevice *pbdev; S390pciState *s = S390_PCI_HOST_BRIDGE(pci_device_root_bus(pci_dev) ->qbus.parent); pbdev = &s->pbdev[PCI_SLOT(pci_dev->devfn)]; pbdev->fid = s390_pci_get_pfid(pci_dev); pbdev->pdev = pci_dev; pbdev->configured = true; pbdev->fh = s390_pci_get_pfh(pci_dev); s390_pcihost_setup_msix(pbdev); if (dev->hotplugged) { s390_pci_generate_plug_event(HP_EVENT_RESERVED_TO_STANDBY, pbdev->fh, pbdev->fid); s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED, pbdev->fh, pbdev->fid); } }"} {"target": 0, "idx": 12134, "func": "static uint64_t pxa2xx_mm_read(void *opaque, hwaddr addr, unsigned size) { PXA2xxState *s = (PXA2xxState *) opaque; switch (addr) { case MDCNFG ... SA1110: if ((addr & 3) == 0) return s->mm_regs[addr >> 2]; default: printf(\"%s: Bad register \" REG_FMT \"\\n\", __FUNCTION__, addr); break; } return 0; }"} {"target": 0, "idx": 12142, "func": "static int vhost_verify_ring_mappings(struct vhost_dev *dev, uint64_t start_addr, uint64_t size) { int i, j; int r = 0; const char *part_name[] = { \"descriptor table\", \"available ring\", \"used ring\" }; for (i = 0; i < dev->nvqs; ++i) { struct vhost_virtqueue *vq = dev->vqs + i; j = 0; r = vhost_verify_ring_part_mapping(vq->desc, vq->desc_phys, vq->desc_size, start_addr, size); if (!r) { break; } j++; r = vhost_verify_ring_part_mapping(vq->avail, vq->avail_phys, vq->avail_size, start_addr, size); if (!r) { break; } j++; r = vhost_verify_ring_part_mapping(vq->used, vq->used_phys, vq->used_size, start_addr, size); if (!r) { break; } } if (r == -ENOMEM) { error_report(\"Unable to map %s for ring %d\", part_name[j], i); } else if (r == -EBUSY) { error_report(\"%s relocated for ring %d\", part_name[j], i); } return r; }"} {"target": 0, "idx": 12145, "func": "void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) { RAMBlock *block; ram_addr_t offset; int flags; void *area, *vaddr; QTAILQ_FOREACH(block, &ram_list.blocks, next) { offset = addr - block->offset; if (offset < block->length) { vaddr = block->host + offset; if (block->flags & RAM_PREALLOC_MASK) { ; } else if (xen_enabled()) { abort(); } else { flags = MAP_FIXED; munmap(vaddr, length); if (mem_path) { #if defined(__linux__) && !defined(TARGET_S390X) if (block->fd) { #ifdef MAP_POPULATE flags |= mem_prealloc ? 
MAP_POPULATE | MAP_SHARED : MAP_PRIVATE; #else flags |= MAP_PRIVATE; #endif area = mmap(vaddr, length, PROT_READ | PROT_WRITE, flags, block->fd, offset); } else { flags |= MAP_PRIVATE | MAP_ANONYMOUS; area = mmap(vaddr, length, PROT_READ | PROT_WRITE, flags, -1, 0); } #else abort(); #endif } else { #if defined(TARGET_S390X) && defined(CONFIG_KVM) flags |= MAP_SHARED | MAP_ANONYMOUS; area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE, flags, -1, 0); #else flags |= MAP_PRIVATE | MAP_ANONYMOUS; area = mmap(vaddr, length, PROT_READ | PROT_WRITE, flags, -1, 0); #endif } if (area != vaddr) { fprintf(stderr, \"Could not remap addr: \" RAM_ADDR_FMT \"@\" RAM_ADDR_FMT \"\\n\", length, addr); exit(1); } memory_try_enable_merging(vaddr, length); qemu_ram_setup_dump(vaddr, length); } return; } } }"} {"target": 1, "idx": 12182, "func": "const char *object_get_typename(Object *obj) { return obj->class->type->name; }"} {"target": 0, "idx": 12190, "func": "static int vaapi_build_decoder_config(VAAPIDecoderContext *ctx, AVCodecContext *avctx, int fallback_allowed) { AVVAAPIDeviceContext *hwctx = ctx->device->hwctx; AVVAAPIHWConfig *hwconfig = NULL; AVHWFramesConstraints *constraints = NULL; VAStatus vas; int err, i, j; int loglevel = fallback_allowed ? AV_LOG_VERBOSE : AV_LOG_ERROR; const AVCodecDescriptor *codec_desc; const AVPixFmtDescriptor *pix_desc; enum AVPixelFormat pix_fmt; VAProfile profile, *profile_list = NULL; int profile_count, exact_match, alt_profile; codec_desc = avcodec_descriptor_get(avctx->codec_id); if (!codec_desc) { err = AVERROR(EINVAL); goto fail; } profile_count = vaMaxNumProfiles(hwctx->display); profile_list = av_malloc(profile_count * sizeof(VAProfile)); if (!profile_list) { err = AVERROR(ENOMEM); goto fail; } vas = vaQueryConfigProfiles(hwctx->display, profile_list, &profile_count); if (vas != VA_STATUS_SUCCESS) { av_log(ctx, loglevel, \"Failed to query profiles: %d (%s).\\n\", vas, vaErrorStr(vas)); err = AVERROR(EIO); goto fail; } profile = VAProfileNone; exact_match = 0; for (i = 0; i < FF_ARRAY_ELEMS(vaapi_profile_map); i++) { int profile_match = 0; if (avctx->codec_id != vaapi_profile_map[i].codec_id) continue; if (avctx->profile == vaapi_profile_map[i].codec_profile) profile_match = 1; profile = vaapi_profile_map[i].va_profile; for (j = 0; j < profile_count; j++) { if (profile == profile_list[j]) { exact_match = profile_match; break; } } if (j < profile_count) { if (exact_match) break; alt_profile = vaapi_profile_map[i].codec_profile; } } av_freep(&profile_list); if (profile == VAProfileNone) { av_log(ctx, loglevel, \"No VAAPI support for codec %s.\\n\", codec_desc->name); err = AVERROR(ENOSYS); goto fail; } if (!exact_match) { if (fallback_allowed || !hwaccel_lax_profile_check) { av_log(ctx, loglevel, \"No VAAPI support for codec %s \" \"profile %d.\\n\", codec_desc->name, avctx->profile); if (!fallback_allowed) { av_log(ctx, AV_LOG_WARNING, \"If you want attempt decoding \" \"anyway with a possibly-incompatible profile, add \" \"the option -hwaccel_lax_profile_check.\\n\"); } err = AVERROR(EINVAL); goto fail; } else { av_log(ctx, AV_LOG_WARNING, \"No VAAPI support for codec %s \" \"profile %d: trying instead with profile %d.\\n\", codec_desc->name, avctx->profile, alt_profile); av_log(ctx, AV_LOG_WARNING, \"This may fail or give \" \"incorrect results, depending on your hardware.\\n\"); } } ctx->va_profile = profile; ctx->va_entrypoint = VAEntrypointVLD; vas = vaCreateConfig(hwctx->display, ctx->va_profile, ctx->va_entrypoint, 0, 0, &ctx->va_config); if (vas != 
VA_STATUS_SUCCESS) { av_log(ctx, AV_LOG_ERROR, \"Failed to create decode pipeline \" \"configuration: %d (%s).\\n\", vas, vaErrorStr(vas)); err = AVERROR(EIO); goto fail; } hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref); if (!hwconfig) { err = AVERROR(ENOMEM); goto fail; } hwconfig->config_id = ctx->va_config; constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref, hwconfig); if (!constraints) goto fail; // Decide on the decoder target format. // If the user specified something with -hwaccel_output_format then // try to use that to minimise conversions later. ctx->decode_format = AV_PIX_FMT_NONE; if (ctx->output_format != AV_PIX_FMT_NONE && ctx->output_format != AV_PIX_FMT_VAAPI) { for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) { if (constraints->valid_sw_formats[i] == ctx->decode_format) { ctx->decode_format = ctx->output_format; av_log(ctx, AV_LOG_DEBUG, \"Using decode format %s (output \" \"format).\\n\", av_get_pix_fmt_name(ctx->decode_format)); break; } } } // Otherwise, we would like to try to choose something which matches the // decoder output, but there isn't enough information available here to // do so. Assume for now that we are always dealing with YUV 4:2:0, so // pick a format which does that. if (ctx->decode_format == AV_PIX_FMT_NONE) { for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) { pix_fmt = constraints->valid_sw_formats[i]; pix_desc = av_pix_fmt_desc_get(pix_fmt); if (pix_desc->nb_components == 3 && pix_desc->log2_chroma_w == 1 && pix_desc->log2_chroma_h == 1) { ctx->decode_format = pix_fmt; av_log(ctx, AV_LOG_DEBUG, \"Using decode format %s (format \" \"matched).\\n\", av_get_pix_fmt_name(ctx->decode_format)); break; } } } // Otherwise pick the first in the list and hope for the best. if (ctx->decode_format == AV_PIX_FMT_NONE) { ctx->decode_format = constraints->valid_sw_formats[0]; av_log(ctx, AV_LOG_DEBUG, \"Using decode format %s (first in list).\\n\", av_get_pix_fmt_name(ctx->decode_format)); if (i > 1) { // There was a choice, and we picked randomly. Warn the user // that they might want to choose intelligently instead. av_log(ctx, AV_LOG_WARNING, \"Using randomly chosen decode \" \"format %s.\\n\", av_get_pix_fmt_name(ctx->decode_format)); } } // Ensure the picture size is supported by the hardware. ctx->decode_width = avctx->coded_width; ctx->decode_height = avctx->coded_height; if (ctx->decode_width < constraints->min_width || ctx->decode_height < constraints->min_height || ctx->decode_width > constraints->max_width || ctx->decode_height >constraints->max_height) { av_log(ctx, AV_LOG_ERROR, \"VAAPI hardware does not support image \" \"size %dx%d (constraints: width %d-%d height %d-%d).\\n\", ctx->decode_width, ctx->decode_height, constraints->min_width, constraints->max_width, constraints->min_height, constraints->max_height); err = AVERROR(EINVAL); goto fail; } av_hwframe_constraints_free(&constraints); av_freep(&hwconfig); // Decide how many reference frames we need. This might be doable more // nicely based on the codec and input stream? ctx->decode_surfaces = DEFAULT_SURFACES; // For frame-threaded decoding, one additional surfaces is needed for // each thread. 
if (avctx->active_thread_type & FF_THREAD_FRAME) ctx->decode_surfaces += avctx->thread_count; return 0; fail: av_hwframe_constraints_free(&constraints); av_freep(&hwconfig); vaDestroyConfig(hwctx->display, ctx->va_config); av_freep(&profile_list); return err; }"} {"target": 0, "idx": 12192, "func": "static int parse(AVCodecParserContext *ctx, AVCodecContext *avctx, const uint8_t **out_data, int *out_size, const uint8_t *data, int size) { VP9ParseContext *s = ctx->priv_data; int marker; if (size <= 0) { *out_size = 0; *out_data = data; return 0; } if (s->n_frames > 0) { *out_data = data; *out_size = s->size[--s->n_frames]; parse_frame(ctx, *out_data, *out_size); return s->n_frames > 0 ? *out_size : size /* i.e. include idx tail */; } marker = data[size - 1]; if ((marker & 0xe0) == 0xc0) { int nbytes = 1 + ((marker >> 3) & 0x3); int n_frames = 1 + (marker & 0x7), idx_sz = 2 + n_frames * nbytes; if (size >= idx_sz && data[size - idx_sz] == marker) { const uint8_t *idx = data + size + 1 - idx_sz; int first = 1; switch (nbytes) { #define case_n(a, rd) \\ case a: \\ while (n_frames--) { \\ int sz = rd; \\ idx += a; \\ if (sz > size) { \\ s->n_frames = 0; \\ av_log(avctx, AV_LOG_ERROR, \\ \"Superframe packet size too big: %d > %d\\n\", \\ sz, size); \\ return AVERROR_INVALIDDATA; \\ } \\ if (first) { \\ first = 0; \\ *out_data = data; \\ *out_size = sz; \\ s->n_frames = n_frames; \\ } else { \\ s->size[n_frames] = sz; \\ } \\ data += sz; \\ size -= sz; \\ } \\ parse_frame(ctx, *out_data, *out_size); \\ return *out_size case_n(1, *idx); case_n(2, AV_RL16(idx)); case_n(3, AV_RL24(idx)); case_n(4, AV_RL32(idx)); } } } *out_data = data; *out_size = size; parse_frame(ctx, data, size); return size; }"} {"target": 1, "idx": 12200, "func": "static int ram_init1(SysBusDevice *dev) { RamDevice *d = SUN4U_RAM(dev); memory_region_init_ram(&d->ram, OBJECT(d), \"sun4u.ram\", d->size, &error_abort); vmstate_register_ram_global(&d->ram); sysbus_init_mmio(dev, &d->ram); return 0; }"} {"target": 1, "idx": 12205, "func": "static void stop_tco(const TestData *d) { uint32_t val; val = qpci_io_readw(d->dev, d->tco_io_base + TCO1_CNT); val |= TCO_TMR_HLT; qpci_io_writew(d->dev, d->tco_io_base + TCO1_CNT, val); }"} {"target": 1, "idx": 12215, "func": "void qemu_put_be32(QEMUFile *f, unsigned int v) { qemu_put_byte(f, v >> 24); qemu_put_byte(f, v >> 16); qemu_put_byte(f, v >> 8); qemu_put_byte(f, v); }"} {"target": 1, "idx": 12216, "func": "static const unsigned char *seq_unpack_rle_block(const unsigned char *src, unsigned char *dst, int dst_size) { int i, len, sz; GetBitContext gb; int code_table[64]; /* get the rle codes (at most 64 bytes) */ init_get_bits(&gb, src, 64 * 8); for (i = 0, sz = 0; i < 64 && sz < dst_size; i++) { code_table[i] = get_sbits(&gb, 4); sz += FFABS(code_table[i]); } src += (get_bits_count(&gb) + 7) / 8; /* do the rle unpacking */ for (i = 0; i < 64 && dst_size > 0; i++) { len = code_table[i]; if (len < 0) { len = -len; memset(dst, *src++, FFMIN(len, dst_size)); } else { memcpy(dst, src, FFMIN(len, dst_size)); src += len; } dst += len; dst_size -= len; } return src; }"} {"target": 1, "idx": 12243, "func": "bool bdrv_is_first_non_filter(BlockDriverState *candidate) { BlockDriverState *bs; BdrvNextIterator *it = NULL; /* walk down the bs forest recursively */ while ((it = bdrv_next(it, &bs)) != NULL) { bool perm; /* try to recurse in this top level bs */ perm = bdrv_recurse_is_first_non_filter(bs, candidate); /* candidate is the first non filter */ if (perm) { return true; } } return false; }"} 
{"target": 1, "idx": 12245, "func": "static int vhost_user_set_mem_table(struct vhost_dev *dev, struct vhost_memory *mem) { int fds[VHOST_MEMORY_MAX_NREGIONS]; int i, fd; size_t fd_num = 0; bool reply_supported = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_REPLY_ACK); VhostUserMsg msg = { .hdr.request = VHOST_USER_SET_MEM_TABLE, .hdr.flags = VHOST_USER_VERSION, }; if (reply_supported) { msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; } for (i = 0; i < dev->mem->nregions; ++i) { struct vhost_memory_region *reg = dev->mem->regions + i; ram_addr_t offset; MemoryRegion *mr; assert((uintptr_t)reg->userspace_addr == reg->userspace_addr); mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr, &offset); fd = memory_region_get_fd(mr); if (fd > 0) { msg.payload.memory.regions[fd_num].userspace_addr = reg->userspace_addr; msg.payload.memory.regions[fd_num].memory_size = reg->memory_size; msg.payload.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr; msg.payload.memory.regions[fd_num].mmap_offset = offset; assert(fd_num < VHOST_MEMORY_MAX_NREGIONS); fds[fd_num++] = fd; } } msg.payload.memory.nregions = fd_num; if (!fd_num) { error_report(\"Failed initializing vhost-user memory map, \" \"consider using -object memory-backend-file share=on\"); return -1; } msg.hdr.size = sizeof(msg.payload.memory.nregions); msg.hdr.size += sizeof(msg.payload.memory.padding); msg.hdr.size += fd_num * sizeof(VhostUserMemoryRegion); if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { return -1; } if (reply_supported) { return process_message_reply(dev, &msg); } return 0; }"} {"target": 1, "idx": 12255, "func": "int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int64_t pts, int64_t dts, int64_t pos) { int index, i; uint8_t dummy_buf[AV_INPUT_BUFFER_PADDING_SIZE]; if (!(s->flags & PARSER_FLAG_FETCHED_OFFSET)) { s->next_frame_offset = s->cur_offset = pos; s->flags |= PARSER_FLAG_FETCHED_OFFSET; } if (buf_size == 0) { /* padding is always necessary even if EOF, so we add it here */ memset(dummy_buf, 0, sizeof(dummy_buf)); buf = dummy_buf; } else if (s->cur_offset + buf_size != s->cur_frame_end[s->cur_frame_start_index]) { /* skip remainder packets */ /* add a new packet descriptor */ i = (s->cur_frame_start_index + 1) & (AV_PARSER_PTS_NB - 1); s->cur_frame_start_index = i; s->cur_frame_offset[i] = s->cur_offset; s->cur_frame_end[i] = s->cur_offset + buf_size; s->cur_frame_pts[i] = pts; s->cur_frame_dts[i] = dts; s->cur_frame_pos[i] = pos; } if (s->fetch_timestamp) { s->fetch_timestamp = 0; s->last_pts = s->pts; s->last_dts = s->dts; s->last_pos = s->pos; ff_fetch_timestamp(s, 0, 0, 0); } /* WARNING: the returned index can be negative */ index = s->parser->parser_parse(s, avctx, (const uint8_t **) poutbuf, poutbuf_size, buf, buf_size); av_assert0(index > -0x20000000); // The API does not allow returning AVERROR codes /* update the file pointer */ if (*poutbuf_size) { /* fill the data for the current frame */ s->frame_offset = s->next_frame_offset; /* offset of the next frame */ s->next_frame_offset = s->cur_offset + index; s->fetch_timestamp = 1; } if (index < 0) index = 0; s->cur_offset += index; return index; }"} {"target": 1, "idx": 12257, "func": "static int nbd_co_readv_1(NbdClientSession *client, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int offset) { struct nbd_request request; struct nbd_reply reply; ssize_t ret; request.type = NBD_CMD_READ; request.from = sector_num * 512; 
request.len = nb_sectors * 512; nbd_coroutine_start(client, &request); ret = nbd_co_send_request(client, &request, NULL, 0); if (ret < 0) { reply.error = -ret; } else { nbd_co_receive_reply(client, &request, &reply, qiov, offset); } nbd_coroutine_end(client, &request); return -reply.error; }"} {"target": 1, "idx": 12273, "func": "static AVIOContext * wtvfile_open2(AVFormatContext *s, const uint8_t *buf, int buf_size, const uint8_t *filename, int filename_size) { const uint8_t *buf_end = buf + buf_size; while(buf + 48 <= buf_end) { int dir_length, name_size, first_sector, depth; uint64_t file_length; const uint8_t *name; if (ff_guidcmp(buf, dir_entry_guid)) { av_log(s, AV_LOG_ERROR, \"unknown guid \"FF_PRI_GUID\", expected dir_entry_guid; \" \"remaining directory entries ignored\\n\", FF_ARG_GUID(buf)); break; } dir_length = AV_RL16(buf + 16); file_length = AV_RL64(buf + 24); name_size = 2 * AV_RL32(buf + 32); if (buf + 48 + name_size > buf_end) { av_log(s, AV_LOG_ERROR, \"filename exceeds buffer size; remaining directory entries ignored\\n\"); break; } first_sector = AV_RL32(buf + 40 + name_size); depth = AV_RL32(buf + 44 + name_size); /* compare file name; test optional null terminator */ name = buf + 40; if (name_size >= filename_size && !memcmp(name, filename, filename_size) && (name_size < filename_size + 2 || !AV_RN16(name + filename_size))) return wtvfile_open_sector(first_sector, file_length, depth, s); buf += dir_length; } return 0; }"} {"target": 1, "idx": 12278, "func": "static void stereo_processing(PSContext *ps, INTFLOAT (*l)[32][2], INTFLOAT (*r)[32][2], int is34) { int e, b, k; INTFLOAT (*H11)[PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC] = ps->H11; INTFLOAT (*H12)[PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC] = ps->H12; INTFLOAT (*H21)[PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC] = ps->H21; INTFLOAT (*H22)[PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC] = ps->H22; int8_t *opd_hist = ps->opd_hist; int8_t *ipd_hist = ps->ipd_hist; int8_t iid_mapped_buf[PS_MAX_NUM_ENV][PS_MAX_NR_IIDICC]; int8_t icc_mapped_buf[PS_MAX_NUM_ENV][PS_MAX_NR_IIDICC]; int8_t ipd_mapped_buf[PS_MAX_NUM_ENV][PS_MAX_NR_IIDICC]; int8_t opd_mapped_buf[PS_MAX_NUM_ENV][PS_MAX_NR_IIDICC]; int8_t (*iid_mapped)[PS_MAX_NR_IIDICC] = iid_mapped_buf; int8_t (*icc_mapped)[PS_MAX_NR_IIDICC] = icc_mapped_buf; int8_t (*ipd_mapped)[PS_MAX_NR_IIDICC] = ipd_mapped_buf; int8_t (*opd_mapped)[PS_MAX_NR_IIDICC] = opd_mapped_buf; const int8_t *k_to_i = is34 ? k_to_i_34 : k_to_i_20; TABLE_CONST INTFLOAT (*H_LUT)[8][4] = (PS_BASELINE || ps->icc_mode < 3) ? 
HA : HB; //Remapping if (ps->num_env_old) { memcpy(H11[0][0], H11[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H11[0][0][0])); memcpy(H11[1][0], H11[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H11[1][0][0])); memcpy(H12[0][0], H12[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H12[0][0][0])); memcpy(H12[1][0], H12[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H12[1][0][0])); memcpy(H21[0][0], H21[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H21[0][0][0])); memcpy(H21[1][0], H21[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H21[1][0][0])); memcpy(H22[0][0], H22[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H22[0][0][0])); memcpy(H22[1][0], H22[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H22[1][0][0])); } if (is34) { remap34(&iid_mapped, ps->iid_par, ps->nr_iid_par, ps->num_env, 1); remap34(&icc_mapped, ps->icc_par, ps->nr_icc_par, ps->num_env, 1); if (ps->enable_ipdopd) { remap34(&ipd_mapped, ps->ipd_par, ps->nr_ipdopd_par, ps->num_env, 0); remap34(&opd_mapped, ps->opd_par, ps->nr_ipdopd_par, ps->num_env, 0); } if (!ps->is34bands_old) { map_val_20_to_34(H11[0][0]); map_val_20_to_34(H11[1][0]); map_val_20_to_34(H12[0][0]); map_val_20_to_34(H12[1][0]); map_val_20_to_34(H21[0][0]); map_val_20_to_34(H21[1][0]); map_val_20_to_34(H22[0][0]); map_val_20_to_34(H22[1][0]); ipdopd_reset(ipd_hist, opd_hist); } } else { remap20(&iid_mapped, ps->iid_par, ps->nr_iid_par, ps->num_env, 1); remap20(&icc_mapped, ps->icc_par, ps->nr_icc_par, ps->num_env, 1); if (ps->enable_ipdopd) { remap20(&ipd_mapped, ps->ipd_par, ps->nr_ipdopd_par, ps->num_env, 0); remap20(&opd_mapped, ps->opd_par, ps->nr_ipdopd_par, ps->num_env, 0); } if (ps->is34bands_old) { map_val_34_to_20(H11[0][0]); map_val_34_to_20(H11[1][0]); map_val_34_to_20(H12[0][0]); map_val_34_to_20(H12[1][0]); map_val_34_to_20(H21[0][0]); map_val_34_to_20(H21[1][0]); map_val_34_to_20(H22[0][0]); map_val_34_to_20(H22[1][0]); ipdopd_reset(ipd_hist, opd_hist); } } //Mixing for (e = 0; e < ps->num_env; e++) { for (b = 0; b < NR_PAR_BANDS[is34]; b++) { INTFLOAT h11, h12, h21, h22; h11 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][0]; h12 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][1]; h21 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][2]; h22 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][3]; if (!PS_BASELINE && ps->enable_ipdopd && b < NR_IPDOPD_BANDS[is34]) { //The spec say says to only run this smoother when enable_ipdopd //is set but the reference decoder appears to run it constantly INTFLOAT h11i, h12i, h21i, h22i; INTFLOAT ipd_adj_re, ipd_adj_im; int opd_idx = opd_hist[b] * 8 + opd_mapped[e][b]; int ipd_idx = ipd_hist[b] * 8 + ipd_mapped[e][b]; INTFLOAT opd_re = pd_re_smooth[opd_idx]; INTFLOAT opd_im = pd_im_smooth[opd_idx]; INTFLOAT ipd_re = pd_re_smooth[ipd_idx]; INTFLOAT ipd_im = pd_im_smooth[ipd_idx]; opd_hist[b] = opd_idx & 0x3F; ipd_hist[b] = ipd_idx & 0x3F; ipd_adj_re = AAC_MADD30(opd_re, ipd_re, opd_im, ipd_im); ipd_adj_im = AAC_MSUB30(opd_im, ipd_re, opd_re, ipd_im); h11i = AAC_MUL30(h11, opd_im); h11 = AAC_MUL30(h11, opd_re); h12i = AAC_MUL30(h12, ipd_adj_im); h12 = AAC_MUL30(h12, ipd_adj_re); h21i = AAC_MUL30(h21, opd_im); h21 = AAC_MUL30(h21, opd_re); h22i = AAC_MUL30(h22, ipd_adj_im); h22 = AAC_MUL30(h22, ipd_adj_re); H11[1][e+1][b] = h11i; H12[1][e+1][b] = h12i; H21[1][e+1][b] = h21i; H22[1][e+1][b] = h22i; } H11[0][e+1][b] = h11; H12[0][e+1][b] = h12; H21[0][e+1][b] = h21; H22[0][e+1][b] = h22; } for (k = 0; k < NR_BANDS[is34]; k++) { 
LOCAL_ALIGNED_16(INTFLOAT, h, [2], [4]); LOCAL_ALIGNED_16(INTFLOAT, h_step, [2], [4]); int start = ps->border_position[e]; int stop = ps->border_position[e+1]; INTFLOAT width = Q30(1.f) / ((stop - start) ? (stop - start) : 1); #if USE_FIXED width <<= 1; #endif b = k_to_i[k]; h[0][0] = H11[0][e][b]; h[0][1] = H12[0][e][b]; h[0][2] = H21[0][e][b]; h[0][3] = H22[0][e][b]; if (!PS_BASELINE && ps->enable_ipdopd) { //Is this necessary? ps_04_new seems unchanged if ((is34 && k <= 13 && k >= 9) || (!is34 && k <= 1)) { h[1][0] = -H11[1][e][b]; h[1][1] = -H12[1][e][b]; h[1][2] = -H21[1][e][b]; h[1][3] = -H22[1][e][b]; } else { h[1][0] = H11[1][e][b]; h[1][1] = H12[1][e][b]; h[1][2] = H21[1][e][b]; h[1][3] = H22[1][e][b]; } } //Interpolation h_step[0][0] = AAC_MSUB31_V3(H11[0][e+1][b], h[0][0], width); h_step[0][1] = AAC_MSUB31_V3(H12[0][e+1][b], h[0][1], width); h_step[0][2] = AAC_MSUB31_V3(H21[0][e+1][b], h[0][2], width); h_step[0][3] = AAC_MSUB31_V3(H22[0][e+1][b], h[0][3], width); if (!PS_BASELINE && ps->enable_ipdopd) { h_step[1][0] = AAC_MSUB31_V3(H11[1][e+1][b], h[1][0], width); h_step[1][1] = AAC_MSUB31_V3(H12[1][e+1][b], h[1][1], width); h_step[1][2] = AAC_MSUB31_V3(H21[1][e+1][b], h[1][2], width); h_step[1][3] = AAC_MSUB31_V3(H22[1][e+1][b], h[1][3], width); } ps->dsp.stereo_interpolate[!PS_BASELINE && ps->enable_ipdopd]( l[k] + start + 1, r[k] + start + 1, h, h_step, stop - start); } } }"} {"target": 1, "idx": 12281, "func": "static void handle_qmp_command(JSONMessageParser *parser, QList *tokens) { int err; QObject *obj; QDict *input, *args; const mon_cmd_t *cmd; Monitor *mon = cur_mon; const char *cmd_name, *info_item; args = NULL; obj = json_parser_parse(tokens, NULL); if (!obj) { // FIXME: should be triggered in json_parser_parse() qerror_report(QERR_JSON_PARSING); goto err_out; qerror_report(QERR_QMP_BAD_INPUT_OBJECT, \"object\"); qobject_decref(obj); goto err_out; } input = qobject_to_qdict(obj); mon->mc->id = qdict_get(input, \"id\"); qobject_incref(mon->mc->id); obj = qdict_get(input, \"execute\"); if (!obj) { qerror_report(QERR_QMP_BAD_INPUT_OBJECT, \"execute\"); } else if (qobject_type(obj) != QTYPE_QSTRING) { qerror_report(QERR_QMP_BAD_INPUT_OBJECT_MEMBER, \"execute\", \"string\"); } cmd_name = qstring_get_str(qobject_to_qstring(obj)); if (invalid_qmp_mode(mon, cmd_name)) { qerror_report(QERR_COMMAND_NOT_FOUND, cmd_name); } /* * XXX: We need this special case until we get info handlers * converted into 'query-' commands */ if (compare_cmd(cmd_name, \"info\")) { qerror_report(QERR_COMMAND_NOT_FOUND, cmd_name); } else if (strstart(cmd_name, \"query-\", &info_item)) { cmd = monitor_find_command(\"info\"); qdict_put_obj(input, \"arguments\", qobject_from_jsonf(\"{ 'item': %s }\", info_item)); } else { cmd = monitor_find_command(cmd_name); if (!cmd || !monitor_handler_ported(cmd)) { qerror_report(QERR_COMMAND_NOT_FOUND, cmd_name); } } obj = qdict_get(input, \"arguments\"); if (!obj) { args = qdict_new(); } else { args = qobject_to_qdict(obj); QINCREF(args); } QDECREF(input); err = monitor_check_qmp_args(cmd, args); if (err < 0) { goto err_out; } if (monitor_handler_is_async(cmd)) { qmp_async_cmd_handler(mon, cmd, args); } else { monitor_call_handler(mon, cmd, args); } goto out; err_input: QDECREF(input); err_out: monitor_protocol_emitter(mon, NULL); out: QDECREF(args); }"} {"target": 0, "idx": 12305, "func": "static int idcin_decode_init(AVCodecContext *avctx) { IdcinContext *s = avctx->priv_data; int i, j, histogram_index = 0; unsigned char *histograms; s->avctx = avctx; 
avctx->pix_fmt = PIX_FMT_PAL8; dsputil_init(&s->dsp, avctx); /* make sure the Huffman tables make it */ if (s->avctx->extradata_size != HUFFMAN_TABLE_SIZE) { av_log(s->avctx, AV_LOG_ERROR, \" Id CIN video: expected extradata size of %d\\n\", HUFFMAN_TABLE_SIZE); return -1; } /* build the 256 Huffman decode trees */ histograms = (unsigned char *)s->avctx->extradata; for (i = 0; i < 256; i++) { for(j = 0; j < HUF_TOKENS; j++) s->huff_nodes[i][j].count = histograms[histogram_index++]; huff_build_tree(s, i); } s->frame.data[0] = NULL; return 0; }"} {"target": 0, "idx": 12307, "func": "static void find_best_solid_area(VncState *vs, int x, int y, int w, int h, uint32_t color, int *w_ptr, int *h_ptr) { int dx, dy, dw, dh; int w_prev; int w_best = 0, h_best = 0; w_prev = w; for (dy = y; dy < y + h; dy += VNC_TIGHT_MAX_SPLIT_TILE_SIZE) { dh = MIN(VNC_TIGHT_MAX_SPLIT_TILE_SIZE, y + h - dy); dw = MIN(VNC_TIGHT_MAX_SPLIT_TILE_SIZE, w_prev); if (!check_solid_tile(vs, x, dy, dw, dh, &color, true)) { break; } for (dx = x + dw; dx < x + w_prev;) { dw = MIN(VNC_TIGHT_MAX_SPLIT_TILE_SIZE, x + w_prev - dx); if (!check_solid_tile(vs, dx, dy, dw, dh, &color, true)) { break; } dx += dw; } w_prev = dx - x; if (w_prev * (dy + dh - y) > w_best * h_best) { w_best = w_prev; h_best = dy + dh - y; } } *w_ptr = w_best; *h_ptr = h_best; }"} {"target": 0, "idx": 12314, "func": "static RAMBlock *qemu_get_ram_block(ram_addr_t addr) { RAMBlock *block; /* The list is protected by the iothread lock here. */ block = ram_list.mru_block; if (block && addr - block->offset < block->max_length) { goto found; } QTAILQ_FOREACH(block, &ram_list.blocks, next) { if (addr - block->offset < block->max_length) { goto found; } } fprintf(stderr, \"Bad ram offset %\" PRIx64 \"\\n\", (uint64_t)addr); abort(); found: ram_list.mru_block = block; return block; }"} {"target": 0, "idx": 12320, "func": "static int usbnet_can_receive(VLANClientState *nc) { USBNetState *s = DO_UPCAST(NICState, nc, nc)->opaque; if (s->rndis && !s->rndis_state == RNDIS_DATA_INITIALIZED) return 1; return !s->in_len; }"} {"target": 0, "idx": 12326, "func": "static int scsi_disk_emulate_mode_sense(SCSIRequest *req, uint8_t *outbuf) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); uint64_t nb_sectors; int page, dbd, buflen, page_control; uint8_t *p; uint8_t dev_specific_param; dbd = req->cmd.buf[1] & 0x8; page = req->cmd.buf[2] & 0x3f; page_control = (req->cmd.buf[2] & 0xc0) >> 6; DPRINTF(\"Mode Sense(%d) (page %d, len %d, page_control %d)\\n\", (req->cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, len, page_control); memset(outbuf, 0, req->cmd.xfer); p = outbuf; if (bdrv_is_read_only(s->bs)) { dev_specific_param = 0x80; /* Readonly. */ } else { dev_specific_param = 0x00; } if (req->cmd.buf[0] == MODE_SENSE) { p[1] = 0; /* Default media type. */ p[2] = dev_specific_param; p[3] = 0; /* Block descriptor length. */ p += 4; } else { /* MODE_SENSE_10 */ p[2] = 0; /* Default media type. */ p[3] = dev_specific_param; p[6] = p[7] = 0; /* Block descriptor length. 
*/ p += 8; } bdrv_get_geometry(s->bs, &nb_sectors); if ((~dbd) & nb_sectors) { if (req->cmd.buf[0] == MODE_SENSE) { outbuf[3] = 8; /* Block descriptor length */ } else { /* MODE_SENSE_10 */ outbuf[7] = 8; /* Block descriptor length */ } nb_sectors /= s->cluster_size; if (nb_sectors > 0xffffff) nb_sectors = 0; p[0] = 0; /* media density code */ p[1] = (nb_sectors >> 16) & 0xff; p[2] = (nb_sectors >> 8) & 0xff; p[3] = nb_sectors & 0xff; p[4] = 0; /* reserved */ p[5] = 0; /* bytes 5-7 are the sector size in bytes */ p[6] = s->cluster_size * 2; p[7] = 0; p += 8; } if (page_control == 3) { /* Saved Values */ return -1; /* ILLEGAL_REQUEST */ } switch (page) { case 0x04: case 0x05: case 0x08: case 0x2a: p += mode_sense_page(req, page, p, page_control); break; case 0x3f: p += mode_sense_page(req, 0x08, p, page_control); p += mode_sense_page(req, 0x2a, p, page_control); break; default: return -1; /* ILLEGAL_REQUEST */ } buflen = p - outbuf; /* * The mode data length field specifies the length in bytes of the * following data that is available to be transferred. The mode data * length does not include itself. */ if (req->cmd.buf[0] == MODE_SENSE) { outbuf[0] = buflen - 1; } else { /* MODE_SENSE_10 */ outbuf[0] = ((buflen - 2) >> 8) & 0xff; outbuf[1] = (buflen - 2) & 0xff; } if (buflen > req->cmd.xfer) buflen = req->cmd.xfer; return buflen; }"} {"target": 0, "idx": 12329, "func": "void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs) { TCGOpcode op; TCGOpDef *def; const char *ct_str; int i, nb_args; for(;;) { if (tdefs->op == (TCGOpcode)-1) break; op = tdefs->op; assert((unsigned)op < NB_OPS); def = &tcg_op_defs[op]; #if defined(CONFIG_DEBUG_TCG) /* Duplicate entry in op definitions? */ assert(!def->used); def->used = 1; #endif nb_args = def->nb_iargs + def->nb_oargs; for(i = 0; i < nb_args; i++) { ct_str = tdefs->args_ct_str[i]; /* Incomplete TCGTargetOpDef entry? */ assert(ct_str != NULL); tcg_regset_clear(def->args_ct[i].u.regs); def->args_ct[i].ct = 0; if (ct_str[0] >= '0' && ct_str[0] <= '9') { int oarg; oarg = ct_str[0] - '0'; assert(oarg < def->nb_oargs); assert(def->args_ct[oarg].ct & TCG_CT_REG); /* TCG_CT_ALIAS is for the output arguments. The input argument is tagged with TCG_CT_IALIAS. */ def->args_ct[i] = def->args_ct[oarg]; def->args_ct[oarg].ct = TCG_CT_ALIAS; def->args_ct[oarg].alias_index = i; def->args_ct[i].ct |= TCG_CT_IALIAS; def->args_ct[i].alias_index = oarg; } else { for(;;) { if (*ct_str == '\\0') break; switch(*ct_str) { case 'i': def->args_ct[i].ct |= TCG_CT_CONST; ct_str++; break; default: if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) { fprintf(stderr, \"Invalid constraint '%s' for arg %d of operation '%s'\\n\", ct_str, i, def->name); exit(1); } } } } } /* TCGTargetOpDef entry with too much information? */ assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL); /* sort the constraints (XXX: this is just an heuristic) */ sort_constraints(def, 0, def->nb_oargs); sort_constraints(def, def->nb_oargs, def->nb_iargs); #if 0 { int i; printf(\"%s: sorted=\", def->name); for(i = 0; i < def->nb_oargs + def->nb_iargs; i++) printf(\" %d\", def->sorted_args[i]); printf(\"\\n\"); } #endif tdefs++; } #if defined(CONFIG_DEBUG_TCG) i = 0; for (op = 0; op < tcg_op_defs_max; op++) { const TCGOpDef *def = &tcg_op_defs[op]; if (def->flags & TCG_OPF_NOT_PRESENT) { /* Wrong entry in op definitions? */ if (def->used) { fprintf(stderr, \"Invalid op definition for %s\\n\", def->name); i = 1; } } else { /* Missing entry in op definitions? 
*/ if (!def->used) { fprintf(stderr, \"Missing op definition for %s\\n\", def->name); i = 1; } } } if (i == 1) { tcg_abort(); } #endif }"} {"target": 1, "idx": 12347, "func": "void AUD_vlog (const char *cap, const char *fmt, va_list ap) { if (conf.log_to_monitor) { if (cap) { monitor_printf(default_mon, \"%s: \", cap); } monitor_vprintf(default_mon, fmt, ap); } else { if (cap) { fprintf (stderr, \"%s: \", cap); } vfprintf (stderr, fmt, ap); } }"} {"target": 0, "idx": 12353, "func": "static int ogg_build_flac_headers(const uint8_t *extradata, int extradata_size, OGGStreamContext *oggstream, int bitexact) { const char *vendor = bitexact ? \"ffmpeg\" : LIBAVFORMAT_IDENT; uint8_t *p; if (extradata_size != 34) return -1; oggstream->header_len[0] = 51; oggstream->header[0] = av_mallocz(51); // per ogg flac specs p = oggstream->header[0]; bytestream_put_byte(&p, 0x7F); bytestream_put_buffer(&p, \"FLAC\", 4); bytestream_put_byte(&p, 1); // major version bytestream_put_byte(&p, 0); // minor version bytestream_put_be16(&p, 1); // headers packets without this one bytestream_put_buffer(&p, \"fLaC\", 4); bytestream_put_byte(&p, 0x00); // streaminfo bytestream_put_be24(&p, 34); bytestream_put_buffer(&p, extradata, 34); oggstream->header_len[1] = 1+3+4+strlen(vendor)+4; oggstream->header[1] = av_mallocz(oggstream->header_len[1]); p = oggstream->header[1]; bytestream_put_byte(&p, 0x84); // last metadata block and vorbis comment bytestream_put_be24(&p, oggstream->header_len[1] - 4); bytestream_put_le32(&p, strlen(vendor)); bytestream_put_buffer(&p, vendor, strlen(vendor)); bytestream_put_le32(&p, 0); // user comment list length return 0; }"} {"target": 0, "idx": 12357, "func": "static void put_subframe(DCAEncContext *c, int subframe) { int i, band, ss, ch; /* Subsubframes count */ put_bits(&c->pb, 2, SUBSUBFRAMES -1); /* Partial subsubframe sample count: dummy */ put_bits(&c->pb, 3, 0); /* Prediction mode: no ADPCM, in each channel and subband */ for (ch = 0; ch < c->fullband_channels; ch++) for (band = 0; band < DCAENC_SUBBANDS; band++) put_bits(&c->pb, 1, 0); /* Prediction VQ address: not transmitted */ /* Bit allocation index */ for (ch = 0; ch < c->fullband_channels; ch++) for (band = 0; band < DCAENC_SUBBANDS; band++) put_bits(&c->pb, 5, c->abits[band][ch]); if (SUBSUBFRAMES > 1) { /* Transition mode: none for each channel and subband */ for (ch = 0; ch < c->fullband_channels; ch++) for (band = 0; band < DCAENC_SUBBANDS; band++) put_bits(&c->pb, 1, 0); /* codebook A4 */ } /* Scale factors */ for (ch = 0; ch < c->fullband_channels; ch++) for (band = 0; band < DCAENC_SUBBANDS; band++) put_bits(&c->pb, 7, c->scale_factor[band][ch]); /* Joint subband scale factor codebook select: not transmitted */ /* Scale factors for joint subband coding: not transmitted */ /* Stereo down-mix coefficients: not transmitted */ /* Dynamic range coefficient: not transmitted */ /* Stde information CRC check word: not transmitted */ /* VQ encoded high frequency subbands: not transmitted */ /* LFE data: 8 samples and scalefactor */ if (c->lfe_channel) { for (i = 0; i < DCA_LFE_SAMPLES; i++) put_bits(&c->pb, 8, quantize_value(c->downsampled_lfe[i], c->lfe_quant) & 0xff); put_bits(&c->pb, 8, c->lfe_scale_factor); } /* Audio data (subsubframes) */ for (ss = 0; ss < SUBSUBFRAMES ; ss++) for (ch = 0; ch < c->fullband_channels; ch++) for (band = 0; band < DCAENC_SUBBANDS; band++) put_subframe_samples(c, ss, band, ch); /* DSYNC */ put_bits(&c->pb, 16, 0xffff); }"} {"target": 0, "idx": 12362, "func": "static void 
decode_gray_bitstream(HYuvContext *s, int count) { int i; count/=2; if (count >= (get_bits_left(&s->gb)) / (31 * 2)) { for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) { READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0); } } else { for(i=0; itemp[0][2 * i], s->temp[0][2 * i + 1], 0); } } }"} {"target": 0, "idx": 12365, "func": "static int start_auth_vencrypt_subauth(VncState *vs) { switch (vs->vd->subauth) { case VNC_AUTH_VENCRYPT_TLSNONE: case VNC_AUTH_VENCRYPT_X509NONE: VNC_DEBUG(\"Accept TLS auth none\\n\"); vnc_write_u32(vs, 0); /* Accept auth completion */ vnc_read_when(vs, protocol_client_init, 1); break; case VNC_AUTH_VENCRYPT_TLSVNC: case VNC_AUTH_VENCRYPT_X509VNC: VNC_DEBUG(\"Start TLS auth VNC\\n\"); return start_auth_vnc(vs); default: /* Should not be possible, but just in case */ VNC_DEBUG(\"Reject auth %d\\n\", vs->vd->auth); vnc_write_u8(vs, 1); if (vs->minor >= 8) { static const char err[] = \"Unsupported authentication type\"; vnc_write_u32(vs, sizeof(err)); vnc_write(vs, err, sizeof(err)); } vnc_client_error(vs); } return 0; }"} {"target": 0, "idx": 12367, "func": "iscsi_unmap_cb(struct iscsi_context *iscsi, int status, void *command_data, void *opaque) { IscsiAIOCB *acb = opaque; if (acb->canceled != 0) { return; } acb->status = 0; if (status < 0) { error_report(\"Failed to unmap data on iSCSI lun. %s\", iscsi_get_error(iscsi)); acb->status = -EIO; } iscsi_schedule_bh(acb); }"} {"target": 0, "idx": 12376, "func": "static void virtio_notify(struct subchannel_id schid) { kvm_hypercall(KVM_S390_VIRTIO_CCW_NOTIFY, *(u32*)&schid, 0); }"} {"target": 0, "idx": 12377, "func": "static void kvm_mce_inj_srao_memscrub2(CPUState *env, target_phys_addr_t paddr) { struct kvm_x86_mce mce = { .bank = 9, .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S | 0xc0, .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV, .addr = paddr, .misc = (MCM_ADDR_PHYS << 6) | 0xc, }; kvm_inject_x86_mce_on(env, &mce, ABORT_ON_ERROR); kvm_mce_broadcast_rest(env); }"} {"target": 0, "idx": 12378, "func": "static void lm32_evr_init(QEMUMachineInitArgs *args) { const char *cpu_model = args->cpu_model; const char *kernel_filename = args->kernel_filename; LM32CPU *cpu; CPULM32State *env; DriveInfo *dinfo; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *phys_ram = g_new(MemoryRegion, 1); qemu_irq *cpu_irq, irq[32]; ResetInfo *reset_info; int i; /* memory map */ target_phys_addr_t flash_base = 0x04000000; size_t flash_sector_size = 256 * 1024; size_t flash_size = 32 * 1024 * 1024; target_phys_addr_t ram_base = 0x08000000; size_t ram_size = 64 * 1024 * 1024; target_phys_addr_t timer0_base = 0x80002000; target_phys_addr_t uart0_base = 0x80006000; target_phys_addr_t timer1_base = 0x8000a000; int uart0_irq = 0; int timer0_irq = 1; int timer1_irq = 3; reset_info = g_malloc0(sizeof(ResetInfo)); if (cpu_model == NULL) { cpu_model = \"lm32-full\"; } cpu = cpu_lm32_init(cpu_model); env = &cpu->env; reset_info->cpu = cpu; reset_info->flash_base = flash_base; memory_region_init_ram(phys_ram, \"lm32_evr.sdram\", ram_size); vmstate_register_ram_global(phys_ram); memory_region_add_subregion(address_space_mem, ram_base, phys_ram); dinfo = drive_get(IF_PFLASH, 0, 0); /* Spansion S29NS128P */ pflash_cfi02_register(flash_base, NULL, \"lm32_evr.flash\", flash_size, dinfo ? 
dinfo->bdrv : NULL, flash_sector_size, flash_size / flash_sector_size, 1, 2, 0x01, 0x7e, 0x43, 0x00, 0x555, 0x2aa, 1); /* create irq lines */ cpu_irq = qemu_allocate_irqs(cpu_irq_handler, env, 1); env->pic_state = lm32_pic_init(*cpu_irq); for (i = 0; i < 32; i++) { irq[i] = qdev_get_gpio_in(env->pic_state, i); } sysbus_create_simple(\"lm32-uart\", uart0_base, irq[uart0_irq]); sysbus_create_simple(\"lm32-timer\", timer0_base, irq[timer0_irq]); sysbus_create_simple(\"lm32-timer\", timer1_base, irq[timer1_irq]); /* make sure juart isn't the first chardev */ env->juart_state = lm32_juart_init(); reset_info->bootstrap_pc = flash_base; if (kernel_filename) { uint64_t entry; int kernel_size; kernel_size = load_elf(kernel_filename, NULL, NULL, &entry, NULL, NULL, 1, ELF_MACHINE, 0); reset_info->bootstrap_pc = entry; if (kernel_size < 0) { kernel_size = load_image_targphys(kernel_filename, ram_base, ram_size); reset_info->bootstrap_pc = ram_base; } if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } } qemu_register_reset(main_cpu_reset, reset_info); }"} {"target": 0, "idx": 12383, "func": "static void gen_msa_branch(CPUMIPSState *env, DisasContext *ctx, uint32_t op1) { uint8_t df = (ctx->opcode >> 21) & 0x3; uint8_t wt = (ctx->opcode >> 16) & 0x1f; int64_t s16 = (int16_t)ctx->opcode; check_msa_access(ctx); if (ctx->insn_flags & ISA_MIPS32R6 && ctx->hflags & MIPS_HFLAG_BMASK) { generate_exception_end(ctx, EXCP_RI); return; } switch (op1) { case OPC_BZ_V: case OPC_BNZ_V: { TCGv_i64 t0 = tcg_temp_new_i64(); tcg_gen_or_i64(t0, msa_wr_d[wt<<1], msa_wr_d[(wt<<1)+1]); tcg_gen_setcondi_i64((op1 == OPC_BZ_V) ? TCG_COND_EQ : TCG_COND_NE, t0, t0, 0); tcg_gen_trunc_i64_tl(bcond, t0); tcg_temp_free_i64(t0); } break; case OPC_BZ_B: case OPC_BZ_H: case OPC_BZ_W: case OPC_BZ_D: gen_check_zero_element(bcond, df, wt); break; case OPC_BNZ_B: case OPC_BNZ_H: case OPC_BNZ_W: case OPC_BNZ_D: gen_check_zero_element(bcond, df, wt); tcg_gen_setcondi_tl(TCG_COND_EQ, bcond, bcond, 0); break; } ctx->btarget = ctx->pc + (s16 << 2) + 4; ctx->hflags |= MIPS_HFLAG_BC; ctx->hflags |= MIPS_HFLAG_BDS32; }"} {"target": 0, "idx": 12384, "func": "static QObject *parse_keyword(JSONParserContext *ctxt, QList **tokens) { QObject *token, *ret; QList *working = qlist_copy(*tokens); token = qlist_pop(working); if (token == NULL) { goto out; } if (token_get_type(token) != JSON_KEYWORD) { goto out; } if (token_is_keyword(token, \"true\")) { ret = QOBJECT(qbool_from_int(true)); } else if (token_is_keyword(token, \"false\")) { ret = QOBJECT(qbool_from_int(false)); } else { parse_error(ctxt, token, \"invalid keyword `%s'\", token_get_value(token)); goto out; } qobject_decref(token); QDECREF(*tokens); *tokens = working; return ret; out: qobject_decref(token); QDECREF(working); return NULL; }"} {"target": 0, "idx": 12395, "func": "static av_cold int vaapi_encode_config_attributes(AVCodecContext *avctx) { VAAPIEncodeContext *ctx = avctx->priv_data; VAStatus vas; int i, n, err; VAProfile *profiles = NULL; VAEntrypoint *entrypoints = NULL; VAConfigAttrib attr[] = { { VAConfigAttribRTFormat }, { VAConfigAttribRateControl }, { VAConfigAttribEncMaxRefFrames }, }; n = vaMaxNumProfiles(ctx->hwctx->display); profiles = av_malloc_array(n, sizeof(VAProfile)); if (!profiles) { err = AVERROR(ENOMEM); goto fail; } vas = vaQueryConfigProfiles(ctx->hwctx->display, profiles, &n); if (vas != VA_STATUS_SUCCESS) { av_log(ctx, AV_LOG_ERROR, \"Failed to query profiles: %d (%s).\\n\", vas, vaErrorStr(vas)); err = 
AVERROR(ENOSYS); goto fail; } for (i = 0; i < n; i++) { if (profiles[i] == ctx->va_profile) break; } if (i >= n) { av_log(ctx, AV_LOG_ERROR, \"Encoding profile not found (%d).\\n\", ctx->va_profile); err = AVERROR(ENOSYS); goto fail; } n = vaMaxNumEntrypoints(ctx->hwctx->display); entrypoints = av_malloc_array(n, sizeof(VAEntrypoint)); if (!entrypoints) { err = AVERROR(ENOMEM); goto fail; } vas = vaQueryConfigEntrypoints(ctx->hwctx->display, ctx->va_profile, entrypoints, &n); if (vas != VA_STATUS_SUCCESS) { av_log(ctx, AV_LOG_ERROR, \"Failed to query entrypoints for \" \"profile %u: %d (%s).\\n\", ctx->va_profile, vas, vaErrorStr(vas)); err = AVERROR(ENOSYS); goto fail; } for (i = 0; i < n; i++) { if (entrypoints[i] == ctx->va_entrypoint) break; } if (i >= n) { av_log(ctx, AV_LOG_ERROR, \"Encoding entrypoint not found \" \"(%d / %d).\\n\", ctx->va_profile, ctx->va_entrypoint); err = AVERROR(ENOSYS); goto fail; } vas = vaGetConfigAttributes(ctx->hwctx->display, ctx->va_profile, ctx->va_entrypoint, attr, FF_ARRAY_ELEMS(attr)); if (vas != VA_STATUS_SUCCESS) { av_log(avctx, AV_LOG_ERROR, \"Failed to fetch config \" \"attributes: %d (%s).\\n\", vas, vaErrorStr(vas)); return AVERROR(EINVAL); } for (i = 0; i < FF_ARRAY_ELEMS(attr); i++) { if (attr[i].value == VA_ATTRIB_NOT_SUPPORTED) { // Unfortunately we have to treat this as \"don't know\" and hope // for the best, because the Intel MJPEG encoder returns this // for all the interesting attributes. continue; } switch (attr[i].type) { case VAConfigAttribRTFormat: if (!(ctx->va_rt_format & attr[i].value)) { av_log(avctx, AV_LOG_ERROR, \"Surface RT format %#x \" \"is not supported (mask %#x).\\n\", ctx->va_rt_format, attr[i].value); err = AVERROR(EINVAL); goto fail; } ctx->config_attributes[ctx->nb_config_attributes++] = (VAConfigAttrib) { .type = VAConfigAttribRTFormat, .value = ctx->va_rt_format, }; break; case VAConfigAttribRateControl: if (!(ctx->va_rc_mode & attr[i].value)) { av_log(avctx, AV_LOG_ERROR, \"Rate control mode %#x \" \"is not supported (mask: %#x).\\n\", ctx->va_rc_mode, attr[i].value); err = AVERROR(EINVAL); goto fail; } ctx->config_attributes[ctx->nb_config_attributes++] = (VAConfigAttrib) { .type = VAConfigAttribRateControl, .value = ctx->va_rc_mode, }; break; case VAConfigAttribEncMaxRefFrames: { unsigned int ref_l0 = attr[i].value & 0xffff; unsigned int ref_l1 = (attr[i].value >> 16) & 0xffff; if (avctx->gop_size > 1 && ref_l0 < 1) { av_log(avctx, AV_LOG_ERROR, \"P frames are not \" \"supported (%#x).\\n\", attr[i].value); err = AVERROR(EINVAL); goto fail; } if (avctx->max_b_frames > 0 && ref_l1 < 1) { av_log(avctx, AV_LOG_ERROR, \"B frames are not \" \"supported (%#x).\\n\", attr[i].value); err = AVERROR(EINVAL); goto fail; } } break; default: av_assert0(0 && \"Unexpected config attribute.\"); } } err = 0; fail: av_freep(&profiles); av_freep(&entrypoints); return err; }"} {"target": 0, "idx": 12402, "func": "static int targa_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *p, int *got_packet) { int bpp, picsize, datasize = -1, ret; uint8_t *out; if(avctx->width > 0xffff || avctx->height > 0xffff) { av_log(avctx, AV_LOG_ERROR, \"image dimensions too large\\n\"); return AVERROR(EINVAL); } picsize = av_image_get_buffer_size(avctx->pix_fmt, avctx->width, avctx->height, 1); if ((ret = ff_alloc_packet(pkt, picsize + 45)) < 0) { av_log(avctx, AV_LOG_ERROR, \"encoded frame too large\\n\"); return ret; } /* zero out the header and only set applicable fields */ memset(pkt->data, 0, 12); AV_WL16(pkt->data+12, 
avctx->width); AV_WL16(pkt->data+14, avctx->height); /* image descriptor byte: origin is always top-left, bits 0-3 specify alpha */ pkt->data[17] = 0x20 | (avctx->pix_fmt == AV_PIX_FMT_BGRA ? 8 : 0); switch(avctx->pix_fmt) { case AV_PIX_FMT_GRAY8: pkt->data[2] = TGA_BW; /* uncompressed grayscale image */ pkt->data[16] = 8; /* bpp */ break; case AV_PIX_FMT_RGB555LE: pkt->data[2] = TGA_RGB; /* uncompresses true-color image */ pkt->data[16] = 16; /* bpp */ break; case AV_PIX_FMT_BGR24: pkt->data[2] = TGA_RGB; /* uncompressed true-color image */ pkt->data[16] = 24; /* bpp */ break; case AV_PIX_FMT_BGRA: pkt->data[2] = TGA_RGB; /* uncompressed true-color image */ pkt->data[16] = 32; /* bpp */ break; default: av_log(avctx, AV_LOG_ERROR, \"Pixel format '%s' not supported.\\n\", av_get_pix_fmt_name(avctx->pix_fmt)); return AVERROR(EINVAL); } bpp = pkt->data[16] >> 3; out = pkt->data + 18; /* skip past the header we just output */ /* try RLE compression */ if (avctx->coder_type != FF_CODER_TYPE_RAW) datasize = targa_encode_rle(out, picsize, p, bpp, avctx->width, avctx->height); /* if that worked well, mark the picture as RLE compressed */ if(datasize >= 0) pkt->data[2] |= 8; /* if RLE didn't make it smaller, go back to no compression */ else datasize = targa_encode_normal(out, p, bpp, avctx->width, avctx->height); out += datasize; /* The standard recommends including this section, even if we don't use * any of the features it affords. TODO: take advantage of the pixel * aspect ratio and encoder ID fields available? */ memcpy(out, \"\\0\\0\\0\\0\\0\\0\\0\\0TRUEVISION-XFILE.\", 26); pkt->size = out + 26 - pkt->data; pkt->flags |= AV_PKT_FLAG_KEY; *got_packet = 1; return 0; }"} {"target": 1, "idx": 12409, "func": "static int scsi_req_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf) { cmd->xfer = scsi_cdb_length(buf); switch (buf[0]) { case TEST_UNIT_READY: case REWIND: case START_STOP: case SET_CAPACITY: case WRITE_FILEMARKS: case WRITE_FILEMARKS_16: case SPACE: case RESERVE: case RELEASE: case ERASE: case ALLOW_MEDIUM_REMOVAL: case SEEK_10: case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: case LOCATE_16: case LOCK_UNLOCK_CACHE: case SET_CD_SPEED: case SET_LIMITS: case WRITE_LONG_10: case UPDATE_BLOCK: case RESERVE_TRACK: case SET_READ_AHEAD: case PRE_FETCH: case PRE_FETCH_16: case ALLOW_OVERWRITE: cmd->xfer = 0; break; case VERIFY_10: case VERIFY_12: case VERIFY_16: if ((buf[1] & 2) == 0) { cmd->xfer = 0; } else if ((buf[1] & 4) == 1) { cmd->xfer = 1; } cmd->xfer *= dev->blocksize; break; case MODE_SENSE: break; case WRITE_SAME_10: case WRITE_SAME_16: cmd->xfer = dev->blocksize; break; case READ_CAPACITY_10: cmd->xfer = 8; break; case READ_BLOCK_LIMITS: cmd->xfer = 6; break; case SEND_VOLUME_TAG: /* GPCMD_SET_STREAMING from multimedia commands. */ if (dev->type == TYPE_ROM) { cmd->xfer = buf[10] | (buf[9] << 8); } else { cmd->xfer = buf[9] | (buf[8] << 8); } break; case WRITE_6: /* length 0 means 256 blocks */ if (cmd->xfer == 0) { cmd->xfer = 256; } case WRITE_10: case WRITE_VERIFY_10: case WRITE_12: case WRITE_VERIFY_12: case WRITE_16: case WRITE_VERIFY_16: cmd->xfer *= dev->blocksize; break; case READ_6: case READ_REVERSE: /* length 0 means 256 blocks */ if (cmd->xfer == 0) { cmd->xfer = 256; } case READ_10: case RECOVER_BUFFERED_DATA: case READ_12: case READ_16: cmd->xfer *= dev->blocksize; break; case FORMAT_UNIT: /* MMC mandates the parameter list to be 12-bytes long. Parameters * for block devices are restricted to the header right now. 
*/ if (dev->type == TYPE_ROM && (buf[1] & 16)) { cmd->xfer = 12; } else { cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4); } break; case INQUIRY: case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: cmd->xfer = buf[4] | (buf[3] << 8); break; case READ_CD: case READ_BUFFER: case WRITE_BUFFER: case SEND_CUE_SHEET: cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16); break; case PERSISTENT_RESERVE_OUT: cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL; break; case ERASE_12: if (dev->type == TYPE_ROM) { /* MMC command GET PERFORMANCE. */ cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8), buf[10], buf[1] & 0x1f); } break; case MECHANISM_STATUS: case READ_DVD_STRUCTURE: case SEND_DVD_STRUCTURE: case MAINTENANCE_OUT: case MAINTENANCE_IN: if (dev->type == TYPE_ROM) { /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */ cmd->xfer = buf[9] | (buf[8] << 8); } break; case ATA_PASSTHROUGH_12: if (dev->type == TYPE_ROM) { /* BLANK command of MMC */ cmd->xfer = 0; } else { cmd->xfer = ata_passthrough_12_xfer_size(dev, buf); } break; case ATA_PASSTHROUGH_16: cmd->xfer = ata_passthrough_16_xfer_size(dev, buf); break; } return 0; }"} {"target": 0, "idx": 12412, "func": "static void qdm2_init(QDM2Context *q) { static int inited = 0; if (inited != 0) return; inited = 1; qdm2_init_vlc(); ff_mpa_synth_init(mpa_window); softclip_table_init(); rnd_table_init(); init_noise_samples(); av_log(NULL, AV_LOG_DEBUG, \"init done\\n\"); }"} {"target": 1, "idx": 12429, "func": "static int encode_frame(FlacEncodeContext *s) { int ch, count; count = count_frame_header(s); for (ch = 0; ch < s->channels; ch++) count += encode_residual_ch(s, ch); count += (8 - (count & 7)) & 7; // byte alignment count += 16; // CRC-16 return count >> 3; }"} {"target": 0, "idx": 12434, "func": "static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos) { PCIDevice *pdev = &vdev->pdev; uint8_t cap_id, next, size; int ret; cap_id = pdev->config[pos]; next = pdev->config[pos + PCI_CAP_LIST_NEXT]; /* * If it becomes important to configure capabilities to their actual * size, use this as the default when it's something we don't recognize. * Since QEMU doesn't actually handle many of the config accesses, * exact size doesn't seem worthwhile. */ size = vfio_std_cap_max_size(pdev, pos); /* * pci_add_capability always inserts the new capability at the head * of the chain. Therefore to end up with a chain that matches the * physical device, we insert from the end by making this recursive. * This is also why we pre-calculate size above as cached config space * will be changed as we unwind the stack. 
*/ if (next) { ret = vfio_add_std_cap(vdev, next); if (ret) { return ret; } } else { /* Begin the rebuild, use QEMU emulated list bits */ pdev->config[PCI_CAPABILITY_LIST] = 0; vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff; vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST; } /* Use emulated next pointer to allow dropping caps */ pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff); switch (cap_id) { case PCI_CAP_ID_MSI: ret = vfio_msi_setup(vdev, pos); break; case PCI_CAP_ID_EXP: vfio_check_pcie_flr(vdev, pos); ret = vfio_setup_pcie_cap(vdev, pos, size); break; case PCI_CAP_ID_MSIX: ret = vfio_msix_setup(vdev, pos); break; case PCI_CAP_ID_PM: vfio_check_pm_reset(vdev, pos); vdev->pm_cap = pos; ret = pci_add_capability(pdev, cap_id, pos, size); break; case PCI_CAP_ID_AF: vfio_check_af_flr(vdev, pos); ret = pci_add_capability(pdev, cap_id, pos, size); break; default: ret = pci_add_capability(pdev, cap_id, pos, size); break; } if (ret < 0) { error_report(\"vfio: %04x:%02x:%02x.%x Error adding PCI capability \" \"0x%x[0x%x]@0x%x: %d\", vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function, cap_id, size, pos, ret); return ret; } return 0; }"} {"target": 0, "idx": 12451, "func": "int cpu_exec(CPUState *env1) { #define DECLARE_HOST_REGS 1 #include \"hostregs_helper.h\" int ret, interrupt_request; TranslationBlock *tb; uint8_t *tc_ptr; unsigned long next_tb; if (cpu_halted(env1) == EXCP_HALTED) return EXCP_HALTED; cpu_single_env = env1; /* first we save global registers */ #define SAVE_HOST_REGS 1 #include \"hostregs_helper.h\" env = env1; #if defined(TARGET_I386) /* put eflags in CPU temporary format */ CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); DF = 1 - (2 * ((env->eflags >> 10) & 1)); CC_OP = CC_OP_EFLAGS; env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); #elif defined(TARGET_SPARC) #elif defined(TARGET_M68K) env->cc_op = CC_OP_FLAGS; env->cc_dest = env->sr & 0xf; env->cc_x = (env->sr >> 4) & 1; #elif defined(TARGET_ALPHA) #elif defined(TARGET_ARM) #elif defined(TARGET_PPC) #elif defined(TARGET_MICROBLAZE) #elif defined(TARGET_MIPS) #elif defined(TARGET_SH4) #elif defined(TARGET_CRIS) #elif defined(TARGET_S390X) /* XXXXX */ #else #error unsupported target CPU #endif env->exception_index = -1; /* prepare setjmp context for exception handling */ for(;;) { if (setjmp(env->jmp_env) == 0) { #if defined(__sparc__) && !defined(CONFIG_SOLARIS) #undef env env = cpu_single_env; #define env cpu_single_env #endif /* if an exception is pending, we execute it here */ if (env->exception_index >= 0) { if (env->exception_index >= EXCP_INTERRUPT) { /* exit request from the cpu execution loop */ ret = env->exception_index; if (ret == EXCP_DEBUG) cpu_handle_debug_exception(env); break; } else { #if defined(CONFIG_USER_ONLY) /* if user mode only, we simulate a fake exception which will be handled outside the cpu execution loop */ #if defined(TARGET_I386) do_interrupt_user(env->exception_index, env->exception_is_int, env->error_code, env->exception_next_eip); /* successfully delivered */ env->old_exception = -1; #endif ret = env->exception_index; break; #else #if defined(TARGET_I386) /* simulate a real cpu exception. On i386, it can trigger new exceptions, but we do not handle double or triple faults yet. 
*/ do_interrupt(env->exception_index, env->exception_is_int, env->error_code, env->exception_next_eip, 0); /* successfully delivered */ env->old_exception = -1; #elif defined(TARGET_PPC) do_interrupt(env); #elif defined(TARGET_MICROBLAZE) do_interrupt(env); #elif defined(TARGET_MIPS) do_interrupt(env); #elif defined(TARGET_SPARC) do_interrupt(env); #elif defined(TARGET_ARM) do_interrupt(env); #elif defined(TARGET_SH4) do_interrupt(env); #elif defined(TARGET_ALPHA) do_interrupt(env); #elif defined(TARGET_CRIS) do_interrupt(env); #elif defined(TARGET_M68K) do_interrupt(0); #endif env->exception_index = -1; #endif } } if (kvm_enabled()) { kvm_cpu_exec(env); longjmp(env->jmp_env, 1); } next_tb = 0; /* force lookup of first TB */ for(;;) { interrupt_request = env->interrupt_request; if (unlikely(interrupt_request)) { if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) { /* Mask out external interrupts for this step. */ interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ | CPU_INTERRUPT_SMI | CPU_INTERRUPT_NMI); } if (interrupt_request & CPU_INTERRUPT_DEBUG) { env->interrupt_request &= ~CPU_INTERRUPT_DEBUG; env->exception_index = EXCP_DEBUG; cpu_loop_exit(); } #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \\ defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \\ defined(TARGET_MICROBLAZE) if (interrupt_request & CPU_INTERRUPT_HALT) { env->interrupt_request &= ~CPU_INTERRUPT_HALT; env->halted = 1; env->exception_index = EXCP_HLT; cpu_loop_exit(); } #endif #if defined(TARGET_I386) if (interrupt_request & CPU_INTERRUPT_INIT) { svm_check_intercept(SVM_EXIT_INIT); do_cpu_init(env); env->exception_index = EXCP_HALTED; cpu_loop_exit(); } else if (interrupt_request & CPU_INTERRUPT_SIPI) { do_cpu_sipi(env); } else if (env->hflags2 & HF2_GIF_MASK) { if ((interrupt_request & CPU_INTERRUPT_SMI) && !(env->hflags & HF_SMM_MASK)) { svm_check_intercept(SVM_EXIT_SMI); env->interrupt_request &= ~CPU_INTERRUPT_SMI; do_smm_enter(); next_tb = 0; } else if ((interrupt_request & CPU_INTERRUPT_NMI) && !(env->hflags2 & HF2_NMI_MASK)) { env->interrupt_request &= ~CPU_INTERRUPT_NMI; env->hflags2 |= HF2_NMI_MASK; do_interrupt(EXCP02_NMI, 0, 0, 0, 1); next_tb = 0; } else if (interrupt_request & CPU_INTERRUPT_MCE) { env->interrupt_request &= ~CPU_INTERRUPT_MCE; do_interrupt(EXCP12_MCHK, 0, 0, 0, 0); next_tb = 0; } else if ((interrupt_request & CPU_INTERRUPT_HARD) && (((env->hflags2 & HF2_VINTR_MASK) && (env->hflags2 & HF2_HIF_MASK)) || (!(env->hflags2 & HF2_VINTR_MASK) && (env->eflags & IF_MASK && !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { int intno; svm_check_intercept(SVM_EXIT_INTR); env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ); intno = cpu_get_pic_interrupt(env); qemu_log_mask(CPU_LOG_TB_IN_ASM, \"Servicing hardware INT=0x%02x\\n\", intno); #if defined(__sparc__) && !defined(CONFIG_SOLARIS) #undef env env = cpu_single_env; #define env cpu_single_env #endif do_interrupt(intno, 0, 0, 0, 1); /* ensure that no TB jump will be modified as the program flow was changed */ next_tb = 0; #if !defined(CONFIG_USER_ONLY) } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) { int intno; /* FIXME: this should respect TPR */ svm_check_intercept(SVM_EXIT_VINTR); intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector)); qemu_log_mask(CPU_LOG_TB_IN_ASM, \"Servicing virtual hardware INT=0x%02x\\n\", intno); do_interrupt(intno, 0, 0, 0, 1); env->interrupt_request &= 
~CPU_INTERRUPT_VIRQ; next_tb = 0; #endif } } #elif defined(TARGET_PPC) #if 0 if ((interrupt_request & CPU_INTERRUPT_RESET)) { cpu_reset(env); } #endif if (interrupt_request & CPU_INTERRUPT_HARD) { ppc_hw_interrupt(env); if (env->pending_interrupts == 0) env->interrupt_request &= ~CPU_INTERRUPT_HARD; next_tb = 0; } #elif defined(TARGET_MICROBLAZE) if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->sregs[SR_MSR] & MSR_IE) && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP)) && !(env->iflags & (D_FLAG | IMM_FLAG))) { env->exception_index = EXCP_IRQ; do_interrupt(env); next_tb = 0; } #elif defined(TARGET_MIPS) if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) && (env->CP0_Status & (1 << CP0St_IE)) && !(env->CP0_Status & (1 << CP0St_EXL)) && !(env->CP0_Status & (1 << CP0St_ERL)) && !(env->hflags & MIPS_HFLAG_DM)) { /* Raise it */ env->exception_index = EXCP_EXT_INTERRUPT; env->error_code = 0; do_interrupt(env); next_tb = 0; } #elif defined(TARGET_SPARC) if (interrupt_request & CPU_INTERRUPT_HARD) { if (cpu_interrupts_enabled(env) && env->interrupt_index > 0) { int pil = env->interrupt_index & 0xf; int type = env->interrupt_index & 0xf0; if (((type == TT_EXTINT) && cpu_pil_allowed(env, pil)) || type != TT_EXTINT) { env->exception_index = env->interrupt_index; do_interrupt(env); next_tb = 0; } } } else if (interrupt_request & CPU_INTERRUPT_TIMER) { //do_interrupt(0, 0, 0, 0, 0); env->interrupt_request &= ~CPU_INTERRUPT_TIMER; } #elif defined(TARGET_ARM) if (interrupt_request & CPU_INTERRUPT_FIQ && !(env->uncached_cpsr & CPSR_F)) { env->exception_index = EXCP_FIQ; do_interrupt(env); next_tb = 0; } /* ARMv7-M interrupt return works by loading a magic value into the PC. On real hardware the load causes the return to occur. The qemu implementation performs the jump normally, then does the exception return when the CPU tries to execute code at the magic address. This will cause the magic PC value to be pushed to the stack if an interrupt occured at the wrong time. We avoid this by disabling interrupts when pc contains a magic address. */ if (interrupt_request & CPU_INTERRUPT_HARD && ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->uncached_cpsr & CPSR_I))) { env->exception_index = EXCP_IRQ; do_interrupt(env); next_tb = 0; } #elif defined(TARGET_SH4) if (interrupt_request & CPU_INTERRUPT_HARD) { do_interrupt(env); next_tb = 0; } #elif defined(TARGET_ALPHA) if (interrupt_request & CPU_INTERRUPT_HARD) { do_interrupt(env); next_tb = 0; } #elif defined(TARGET_CRIS) if (interrupt_request & CPU_INTERRUPT_HARD && (env->pregs[PR_CCS] & I_FLAG)) { env->exception_index = EXCP_IRQ; do_interrupt(env); next_tb = 0; } if (interrupt_request & CPU_INTERRUPT_NMI && (env->pregs[PR_CCS] & M_FLAG)) { env->exception_index = EXCP_NMI; do_interrupt(env); next_tb = 0; } #elif defined(TARGET_M68K) if (interrupt_request & CPU_INTERRUPT_HARD && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) { /* Real hardware gets the interrupt vector via an IACK cycle at this point. Current emulated hardware doesn't rely on this, so we provide/save the vector when the interrupt is first signalled. */ env->exception_index = env->pending_vector; do_interrupt(1); next_tb = 0; } #endif /* Don't use the cached interupt_request value, do_interrupt may have updated the EXITTB flag. 
*/ if (env->interrupt_request & CPU_INTERRUPT_EXITTB) { env->interrupt_request &= ~CPU_INTERRUPT_EXITTB; /* ensure that no TB jump will be modified as the program flow was changed */ next_tb = 0; } } if (unlikely(env->exit_request)) { env->exit_request = 0; env->exception_index = EXCP_INTERRUPT; cpu_loop_exit(); } #ifdef CONFIG_DEBUG_EXEC if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) { /* restore flags in standard format */ #if defined(TARGET_I386) env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK); log_cpu_state(env, X86_DUMP_CCOP); env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); #elif defined(TARGET_ARM) log_cpu_state(env, 0); #elif defined(TARGET_SPARC) log_cpu_state(env, 0); #elif defined(TARGET_PPC) log_cpu_state(env, 0); #elif defined(TARGET_M68K) cpu_m68k_flush_flags(env, env->cc_op); env->cc_op = CC_OP_FLAGS; env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4); log_cpu_state(env, 0); #elif defined(TARGET_MICROBLAZE) log_cpu_state(env, 0); #elif defined(TARGET_MIPS) log_cpu_state(env, 0); #elif defined(TARGET_SH4) log_cpu_state(env, 0); #elif defined(TARGET_ALPHA) log_cpu_state(env, 0); #elif defined(TARGET_CRIS) log_cpu_state(env, 0); #else #error unsupported target CPU #endif } #endif spin_lock(&tb_lock); tb = tb_find_fast(); /* Note: we do it here to avoid a gcc bug on Mac OS X when doing it in tb_find_slow */ if (tb_invalidated_flag) { /* as some TB could have been invalidated because of memory exceptions while generating the code, we must recompute the hash index here */ next_tb = 0; tb_invalidated_flag = 0; } #ifdef CONFIG_DEBUG_EXEC qemu_log_mask(CPU_LOG_EXEC, \"Trace 0x%08lx [\" TARGET_FMT_lx \"] %s\\n\", (long)tb->tc_ptr, tb->pc, lookup_symbol(tb->pc)); #endif /* see if we can patch the calling TB. When the TB spans two pages, we cannot safely do a direct jump. */ if (next_tb != 0 && tb->page_addr[1] == -1) { tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb); } spin_unlock(&tb_lock); /* cpu_interrupt might be called while translating the TB, but before it is linked into a potentially infinite loop and becomes env->current_tb. Avoid starting execution if there is a pending interrupt. */ if (!unlikely (env->exit_request)) { env->current_tb = tb; tc_ptr = tb->tc_ptr; /* execute the generated code */ #if defined(__sparc__) && !defined(CONFIG_SOLARIS) #undef env env = cpu_single_env; #define env cpu_single_env #endif next_tb = tcg_qemu_tb_exec(tc_ptr); env->current_tb = NULL; if ((next_tb & 3) == 2) { /* Instruction counter expired. */ int insns_left; tb = (TranslationBlock *)(long)(next_tb & ~3); /* Restore PC. */ cpu_pc_from_tb(env, tb); insns_left = env->icount_decr.u32; if (env->icount_extra && insns_left >= 0) { /* Refill decrementer and continue execution. */ env->icount_extra += insns_left; if (env->icount_extra > 0xffff) { insns_left = 0xffff; } else { insns_left = env->icount_extra; } env->icount_extra -= insns_left; env->icount_decr.u16.low = insns_left; } else { if (insns_left > 0) { /* Execute remaining instructions. */ cpu_exec_nocache(insns_left, tb); } env->exception_index = EXCP_INTERRUPT; next_tb = 0; cpu_loop_exit(); } } } /* reset soft MMU for next block (it can currently only be set by a memory fault) */ } /* for(;;) */ } } /* for(;;) */ #if defined(TARGET_I386) /* restore flags in standard format */ env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK); #elif defined(TARGET_ARM) /* XXX: Save/restore host fpu exception state?. 
*/ #elif defined(TARGET_SPARC) #elif defined(TARGET_PPC) #elif defined(TARGET_M68K) cpu_m68k_flush_flags(env, env->cc_op); env->cc_op = CC_OP_FLAGS; env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4); #elif defined(TARGET_MICROBLAZE) #elif defined(TARGET_MIPS) #elif defined(TARGET_SH4) #elif defined(TARGET_ALPHA) #elif defined(TARGET_CRIS) #elif defined(TARGET_S390X) /* XXXXX */ #else #error unsupported target CPU #endif /* restore global registers */ #include \"hostregs_helper.h\" /* fail safe : never use cpu_single_env outside cpu_exec() */ cpu_single_env = NULL; return ret; }"} {"target": 0, "idx": 12452, "func": "void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) { /* We're passed bits [11..0] of the instruction; extract * SYSm and the mask bits. * Invalid combinations of SYSm and mask are UNPREDICTABLE; * we choose to treat them as if the mask bits were valid. * NB that the pseudocode 'mask' variable is bits [11..10], * whereas ours is [11..8]. */ uint32_t mask = extract32(maskreg, 8, 4); uint32_t reg = extract32(maskreg, 0, 8); if (arm_current_el(env) == 0 && reg > 7) { /* only xPSR sub-fields may be written by unprivileged */ return; } switch (reg) { case 0 ... 7: /* xPSR sub-fields */ /* only APSR is actually writable */ if (!(reg & 4)) { uint32_t apsrmask = 0; if (mask & 8) { apsrmask |= XPSR_NZCV | XPSR_Q; } if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) { apsrmask |= XPSR_GE; } xpsr_write(env, val, apsrmask); } break; case 8: /* MSP */ if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) { env->v7m.other_sp = val; } else { env->regs[13] = val; } break; case 9: /* PSP */ if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) { env->regs[13] = val; } else { env->v7m.other_sp = val; } break; case 16: /* PRIMASK */ env->v7m.primask[env->v7m.secure] = val & 1; break; case 17: /* BASEPRI */ env->v7m.basepri[env->v7m.secure] = val & 0xff; break; case 18: /* BASEPRI_MAX */ val &= 0xff; if (val != 0 && (val < env->v7m.basepri[env->v7m.secure] || env->v7m.basepri[env->v7m.secure] == 0)) { env->v7m.basepri[env->v7m.secure] = val; } break; case 19: /* FAULTMASK */ env->v7m.faultmask = val & 1; break; case 20: /* CONTROL */ /* Writing to the SPSEL bit only has an effect if we are in * thread mode; other bits can be updated by any privileged code. * switch_v7m_sp() deals with updating the SPSEL bit in * env->v7m.control, so we only need update the others. 
*/ if (!arm_v7m_is_handler_mode(env)) { switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); } env->v7m.control &= ~R_V7M_CONTROL_NPRIV_MASK; env->v7m.control |= val & R_V7M_CONTROL_NPRIV_MASK; break; default: qemu_log_mask(LOG_GUEST_ERROR, \"Attempt to write unknown special\" \" register %d\\n\", reg); return; } }"} {"target": 0, "idx": 12470, "func": "static bool migrate_caps_check(bool *cap_list, MigrationCapabilityStatusList *params, Error **errp) { MigrationCapabilityStatusList *cap; bool old_postcopy_cap; old_postcopy_cap = cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]; for (cap = params; cap; cap = cap->next) { cap_list[cap->value->capability] = cap->value->state; } #ifndef CONFIG_LIVE_BLOCK_MIGRATION if (cap_list[MIGRATION_CAPABILITY_BLOCK]) { error_setg(errp, \"QEMU compiled without old-style (blk/-b, inc/-i) \" \"block migration\"); error_append_hint(errp, \"Use drive_mirror+NBD instead.\\n\"); return false; } #endif if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) { if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) { /* The decompression threads asynchronously write into RAM * rather than use the atomic copies needed to avoid * userfaulting. It should be possible to fix the decompression * threads for compatibility in future. */ error_setg(errp, \"Postcopy is not currently compatible \" \"with compression\"); return false; } /* This check is reasonably expensive, so only when it's being * set the first time, also it's only the destination that needs * special support. */ if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) && !postcopy_ram_supported_by_host()) { /* postcopy_ram_supported_by_host will have emitted a more * detailed message */ error_setg(errp, \"Postcopy is not supported\"); return false; } } return true; }"} {"target": 0, "idx": 12477, "func": "static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, bool is_read) { BlockErrorAction action = bdrv_get_error_action(req->dev->bs, is_read, error); VirtIOBlock *s = req->dev; if (action == BLOCK_ERROR_ACTION_STOP) { req->next = s->rq; s->rq = req; } else if (action == BLOCK_ERROR_ACTION_REPORT) { virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); block_acct_done(bdrv_get_stats(s->bs), &req->acct); virtio_blk_free_request(req); } bdrv_error_action(s->bs, action, is_read, error); return action != BLOCK_ERROR_ACTION_IGNORE; }"} {"target": 1, "idx": 12493, "func": "static inline void FUNC(idctRowCondDC_extrashift)(int16_t *row, int extra_shift) #else static inline void FUNC(idctRowCondDC)(int16_t *row, int extra_shift) #endif { int a0, a1, a2, a3, b0, b1, b2, b3; #if HAVE_FAST_64BIT #define ROW0_MASK (0xffffLL << 48 * HAVE_BIGENDIAN) if (((AV_RN64A(row) & ~ROW0_MASK) | AV_RN64A(row+4)) == 0) { uint64_t temp; if (DC_SHIFT - extra_shift >= 0) { temp = (row[0] * (1 << (DC_SHIFT - extra_shift))) & 0xffff; } else { temp = ((row[0] + (1<<(extra_shift - DC_SHIFT-1))) >> (extra_shift - DC_SHIFT)) & 0xffff; } temp += temp * (1 << 16); temp += temp * ((uint64_t) 1 << 32); AV_WN64A(row, temp); AV_WN64A(row + 4, temp); return; } #else if (!(AV_RN32A(row+2) | AV_RN32A(row+4) | AV_RN32A(row+6) | row[1])) { uint32_t temp; if (DC_SHIFT - extra_shift >= 0) { temp = (row[0] * (1 << (DC_SHIFT - extra_shift))) & 0xffff; } else { temp = ((row[0] + (1<<(extra_shift - DC_SHIFT-1))) >> (extra_shift - DC_SHIFT)) & 0xffff; } temp += temp * (1 << 16); AV_WN32A(row, temp); AV_WN32A(row+2, temp); AV_WN32A(row+4, temp); AV_WN32A(row+6, temp); return; } #endif a0 = (W4 * row[0]) + (1 << (ROW_SHIFT + extra_shift - 1)); a1 = a0; a2 = 
a0; a3 = a0; a0 += W2 * row[2]; a1 += W6 * row[2]; a2 -= W6 * row[2]; a3 -= W2 * row[2]; b0 = MUL(W1, row[1]); MAC(b0, W3, row[3]); b1 = MUL(W3, row[1]); MAC(b1, -W7, row[3]); b2 = MUL(W5, row[1]); MAC(b2, -W1, row[3]); b3 = MUL(W7, row[1]); MAC(b3, -W5, row[3]); if (AV_RN64A(row + 4)) { a0 += W4*row[4] + W6*row[6]; a1 += - W4*row[4] - W2*row[6]; a2 += - W4*row[4] + W2*row[6]; a3 += W4*row[4] - W6*row[6]; MAC(b0, W5, row[5]); MAC(b0, W7, row[7]); MAC(b1, -W1, row[5]); MAC(b1, -W5, row[7]); MAC(b2, W7, row[5]); MAC(b2, W3, row[7]); MAC(b3, W3, row[5]); MAC(b3, -W1, row[7]); } row[0] = (a0 + b0) >> (ROW_SHIFT + extra_shift); row[7] = (a0 - b0) >> (ROW_SHIFT + extra_shift); row[1] = (a1 + b1) >> (ROW_SHIFT + extra_shift); row[6] = (a1 - b1) >> (ROW_SHIFT + extra_shift); row[2] = (a2 + b2) >> (ROW_SHIFT + extra_shift); row[5] = (a2 - b2) >> (ROW_SHIFT + extra_shift); row[3] = (a3 + b3) >> (ROW_SHIFT + extra_shift); row[4] = (a3 - b3) >> (ROW_SHIFT + extra_shift); }"} {"target": 1, "idx": 12499, "func": "struct omap_mmc_s *omap2_mmc_init(struct omap_target_agent_s *ta, BlockDriverState *bd, qemu_irq irq, qemu_irq dma[], omap_clk fclk, omap_clk iclk) { struct omap_mmc_s *s = (struct omap_mmc_s *) g_malloc0(sizeof(struct omap_mmc_s)); s->irq = irq; s->dma = dma; s->clk = fclk; s->lines = 4; s->rev = 2; omap_mmc_reset(s); memory_region_init_io(&s->iomem, NULL, &omap_mmc_ops, s, \"omap.mmc\", omap_l4_region_size(ta, 0)); omap_l4_attach(ta, 0, &s->iomem); /* Instantiate the storage */ s->card = sd_init(bd, false); if (s->card == NULL) { exit(1); } s->cdet = qemu_allocate_irqs(omap_mmc_cover_cb, s, 1)[0]; sd_set_cb(s->card, NULL, s->cdet); return s; }"} {"target": 0, "idx": 12544, "func": "static void calxeda_init(MachineState *machine, enum cxmachines machine_id) { ram_addr_t ram_size = machine->ram_size; const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; DeviceState *dev = NULL; SysBusDevice *busdev; qemu_irq pic[128]; int n; qemu_irq cpu_irq[4]; qemu_irq cpu_fiq[4]; MemoryRegion *sysram; MemoryRegion *dram; MemoryRegion *sysmem; char *sysboot_filename; if (!cpu_model) { switch (machine_id) { case CALXEDA_HIGHBANK: cpu_model = \"cortex-a9\"; break; case CALXEDA_MIDWAY: cpu_model = \"cortex-a15\"; break; } } for (n = 0; n < smp_cpus; n++) { ObjectClass *oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model); Object *cpuobj; ARMCPU *cpu; Error *err = NULL; if (!oc) { error_report(\"Unable to find CPU definition\"); exit(1); } cpuobj = object_new(object_class_get_name(oc)); cpu = ARM_CPU(cpuobj); /* By default A9 and A15 CPUs have EL3 enabled. This board does not * currently support EL3 so the CPU EL3 property is disabled before * realization. */ if (object_property_find(cpuobj, \"has_el3\", NULL)) { object_property_set_bool(cpuobj, false, \"has_el3\", &err); if (err) { error_report_err(err); exit(1); } } if (object_property_find(cpuobj, \"reset-cbar\", NULL)) { object_property_set_int(cpuobj, MPCORE_PERIPHBASE, \"reset-cbar\", &error_abort); } object_property_set_bool(cpuobj, true, \"realized\", &err); if (err) { error_report_err(err); exit(1); } cpu_irq[n] = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ); cpu_fiq[n] = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_FIQ); } sysmem = get_system_memory(); dram = g_new(MemoryRegion, 1); memory_region_allocate_system_memory(dram, NULL, \"highbank.dram\", ram_size); /* SDRAM at address zero. 
*/ memory_region_add_subregion(sysmem, 0, dram); sysram = g_new(MemoryRegion, 1); memory_region_init_ram(sysram, NULL, \"highbank.sysram\", 0x8000, &error_fatal); memory_region_add_subregion(sysmem, 0xfff88000, sysram); if (bios_name != NULL) { sysboot_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (sysboot_filename != NULL) { if (load_image_targphys(sysboot_filename, 0xfff88000, 0x8000) < 0) { hw_error(\"Unable to load %s\\n\", bios_name); } g_free(sysboot_filename); } else { hw_error(\"Unable to find %s\\n\", bios_name); } } switch (machine_id) { case CALXEDA_HIGHBANK: dev = qdev_create(NULL, \"l2x0\"); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, 0xfff12000); dev = qdev_create(NULL, \"a9mpcore_priv\"); break; case CALXEDA_MIDWAY: dev = qdev_create(NULL, \"a15mpcore_priv\"); break; } qdev_prop_set_uint32(dev, \"num-cpu\", smp_cpus); qdev_prop_set_uint32(dev, \"num-irq\", NIRQ_GIC); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE); for (n = 0; n < smp_cpus; n++) { sysbus_connect_irq(busdev, n, cpu_irq[n]); sysbus_connect_irq(busdev, n + smp_cpus, cpu_fiq[n]); } for (n = 0; n < 128; n++) { pic[n] = qdev_get_gpio_in(dev, n); } dev = qdev_create(NULL, \"sp804\"); qdev_prop_set_uint32(dev, \"freq0\", 150000000); qdev_prop_set_uint32(dev, \"freq1\", 150000000); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, 0xfff34000); sysbus_connect_irq(busdev, 0, pic[18]); sysbus_create_simple(\"pl011\", 0xfff36000, pic[20]); dev = qdev_create(NULL, \"highbank-regs\"); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, 0xfff3c000); sysbus_create_simple(\"pl061\", 0xfff30000, pic[14]); sysbus_create_simple(\"pl061\", 0xfff31000, pic[15]); sysbus_create_simple(\"pl061\", 0xfff32000, pic[16]); sysbus_create_simple(\"pl061\", 0xfff33000, pic[17]); sysbus_create_simple(\"pl031\", 0xfff35000, pic[19]); sysbus_create_simple(\"pl022\", 0xfff39000, pic[23]); sysbus_create_simple(\"sysbus-ahci\", 0xffe08000, pic[83]); if (nd_table[0].used) { qemu_check_nic_model(&nd_table[0], \"xgmac\"); dev = qdev_create(NULL, \"xgmac\"); qdev_set_nic_properties(dev, &nd_table[0]); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xfff50000); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[77]); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, pic[78]); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2, pic[79]); qemu_check_nic_model(&nd_table[1], \"xgmac\"); dev = qdev_create(NULL, \"xgmac\"); qdev_set_nic_properties(dev, &nd_table[1]); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xfff51000); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[80]); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, pic[81]); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2, pic[82]); } highbank_binfo.ram_size = ram_size; highbank_binfo.kernel_filename = kernel_filename; highbank_binfo.kernel_cmdline = kernel_cmdline; highbank_binfo.initrd_filename = initrd_filename; /* highbank requires a dtb in order to boot, and the dtb will override * the board ID. The following value is ignored, so set it to -1 to be * clear that the value is meaningless. 
*/ highbank_binfo.board_id = -1; highbank_binfo.nb_cpus = smp_cpus; highbank_binfo.loader_start = 0; highbank_binfo.write_secondary_boot = hb_write_secondary; highbank_binfo.secondary_cpu_reset_hook = hb_reset_secondary; arm_load_kernel(ARM_CPU(first_cpu), &highbank_binfo); }"} {"target": 0, "idx": 12545, "func": "static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type, int is_user, uint32_t *phys_ptr, int *prot) { int code; uint32_t table; uint32_t desc; int type; int ap; int domain; uint32_t phys_addr; /* Pagetable walk. */ /* Lookup l1 descriptor. */ table = get_level1_table_address(env, address); desc = ldl_phys(table); type = (desc & 3); domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3; if (type == 0) { /* Section translation fault. */ code = 5; goto do_fault; } if (domain == 0 || domain == 2) { if (type == 2) code = 9; /* Section domain fault. */ else code = 11; /* Page domain fault. */ goto do_fault; } if (type == 2) { /* 1Mb section. */ phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); ap = (desc >> 10) & 3; code = 13; } else { /* Lookup l2 entry. */ if (type == 1) { /* Coarse pagetable. */ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); } else { /* Fine pagetable. */ table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); } desc = ldl_phys(table); switch (desc & 3) { case 0: /* Page translation fault. */ code = 7; goto do_fault; case 1: /* 64k page. */ phys_addr = (desc & 0xffff0000) | (address & 0xffff); ap = (desc >> (4 + ((address >> 13) & 6))) & 3; break; case 2: /* 4k page. */ phys_addr = (desc & 0xfffff000) | (address & 0xfff); ap = (desc >> (4 + ((address >> 13) & 6))) & 3; break; case 3: /* 1k page. */ if (type == 1) { if (arm_feature(env, ARM_FEATURE_XSCALE)) { phys_addr = (desc & 0xfffff000) | (address & 0xfff); } else { /* Page translation fault. */ code = 7; goto do_fault; } } else { phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); } ap = (desc >> 4) & 3; break; default: /* Never happens, but compiler isn't smart enough to tell. */ abort(); } code = 15; } *prot = check_ap(env, ap, domain, access_type, is_user); if (!*prot) { /* Access permission fault. 
*/ goto do_fault; } *phys_ptr = phys_addr; return 0; do_fault: return code | (domain << 4); }"} {"target": 1, "idx": 12548, "func": "static void vmxnet3_update_mcast_filters(VMXNET3State *s) { uint16_t list_bytes = VMXNET3_READ_DRV_SHARED16(s->drv_shmem, devRead.rxFilterConf.mfTableLen); s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]); s->mcast_list = g_realloc(s->mcast_list, list_bytes); if (!s->mcast_list) { if (s->mcast_list_len == 0) { VMW_CFPRN(\"Current multicast list is empty\"); } else { VMW_ERPRN(\"Failed to allocate multicast list of %d elements\", s->mcast_list_len); } s->mcast_list_len = 0; } else { int i; hwaddr mcast_list_pa = VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.rxFilterConf.mfTablePA); pci_dma_read(PCI_DEVICE(s), mcast_list_pa, s->mcast_list, list_bytes); VMW_CFPRN(\"Current multicast list len is %d:\", s->mcast_list_len); for (i = 0; i < s->mcast_list_len; i++) { VMW_CFPRN(\"\\t\" MAC_FMT, MAC_ARG(s->mcast_list[i].a)); } } }"} {"target": 0, "idx": 12556, "func": "int ff_nvdec_decode_init(AVCodecContext *avctx, unsigned int dpb_size) { NVDECContext *ctx = avctx->internal->hwaccel_priv_data; NVDECFramePool *pool; AVHWFramesContext *frames_ctx; const AVPixFmtDescriptor *sw_desc; CUVIDDECODECREATEINFO params = { 0 }; int cuvid_codec_type, cuvid_chroma_format; int ret = 0; sw_desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt); if (!sw_desc) return AVERROR_BUG; cuvid_codec_type = map_avcodec_id(avctx->codec_id); if (cuvid_codec_type < 0) { av_log(avctx, AV_LOG_ERROR, \"Unsupported codec ID\\n\"); return AVERROR_BUG; } cuvid_chroma_format = map_chroma_format(avctx->sw_pix_fmt); if (cuvid_chroma_format < 0) { av_log(avctx, AV_LOG_ERROR, \"Unsupported chroma format\\n\"); return AVERROR(ENOSYS); } if (avctx->thread_type & FF_THREAD_FRAME) dpb_size += avctx->thread_count; if (!avctx->hw_frames_ctx) { AVHWFramesContext *frames_ctx; if (!avctx->hw_device_ctx) { av_log(avctx, AV_LOG_ERROR, \"A hardware device or frames context \" \"is required for CUVID decoding.\\n\"); return AVERROR(EINVAL); } avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx); if (!avctx->hw_frames_ctx) return AVERROR(ENOMEM); frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; frames_ctx->format = AV_PIX_FMT_CUDA; frames_ctx->width = avctx->coded_width; frames_ctx->height = avctx->coded_height; frames_ctx->sw_format = AV_PIX_FMT_NV12; frames_ctx->sw_format = sw_desc->comp[0].depth > 8 ? AV_PIX_FMT_P010 : AV_PIX_FMT_NV12; frames_ctx->initial_pool_size = dpb_size; ret = av_hwframe_ctx_init(avctx->hw_frames_ctx); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"Error initializing internal frames context\\n\"); return ret; } } frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; params.ulWidth = avctx->coded_width; params.ulHeight = avctx->coded_height; params.ulTargetWidth = avctx->coded_width; params.ulTargetHeight = avctx->coded_height; params.bitDepthMinus8 = sw_desc->comp[0].depth - 8; params.OutputFormat = params.bitDepthMinus8 ? 
cudaVideoSurfaceFormat_P016 : cudaVideoSurfaceFormat_NV12; params.CodecType = cuvid_codec_type; params.ChromaFormat = cuvid_chroma_format; params.ulNumDecodeSurfaces = dpb_size; params.ulNumOutputSurfaces = 1; ret = nvdec_decoder_create(&ctx->decoder_ref, frames_ctx->device_ref, ¶ms, avctx); if (ret < 0) return ret; pool = av_mallocz(sizeof(*pool)); if (!pool) { ret = AVERROR(ENOMEM); goto fail; } pool->dpb_size = dpb_size; ctx->decoder_pool = av_buffer_pool_init2(sizeof(int), pool, nvdec_decoder_frame_alloc, av_free); if (!ctx->decoder_pool) { ret = AVERROR(ENOMEM); goto fail; } return 0; fail: ff_nvdec_decode_uninit(avctx); return ret; }"} {"target": 1, "idx": 12558, "func": "void hmp_info_snapshots(Monitor *mon, const QDict *qdict) { BlockDriverState *bs, *bs1; QEMUSnapshotInfo *sn_tab, *sn, s, *sn_info = &s; int nb_sns, i, ret, available; int total; int *available_snapshots; bs = find_vmstate_bs(); if (!bs) { monitor_printf(mon, \"No available block device supports snapshots\\n\"); return; } nb_sns = bdrv_snapshot_list(bs, &sn_tab); if (nb_sns < 0) { monitor_printf(mon, \"bdrv_snapshot_list: error %d\\n\", nb_sns); return; } if (nb_sns == 0) { monitor_printf(mon, \"There is no snapshot available.\\n\"); return; } available_snapshots = g_malloc0(sizeof(int) * nb_sns); total = 0; for (i = 0; i < nb_sns; i++) { sn = &sn_tab[i]; available = 1; bs1 = NULL; while ((bs1 = bdrv_next(bs1))) { if (bdrv_can_snapshot(bs1) && bs1 != bs) { ret = bdrv_snapshot_find(bs1, sn_info, sn->id_str); if (ret < 0) { available = 0; break; } } } if (available) { available_snapshots[total] = i; total++; } } if (total > 0) { bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, NULL); monitor_printf(mon, \"\\n\"); for (i = 0; i < total; i++) { sn = &sn_tab[available_snapshots[i]]; bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, sn); monitor_printf(mon, \"\\n\"); } } else { monitor_printf(mon, \"There is no suitable snapshot available\\n\"); } g_free(sn_tab); g_free(available_snapshots); }"} {"target": 1, "idx": 12585, "func": "int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS - 1], int i, int zero_nhood, int16_t qmul[2]) { uint8_t *token_prob = probs[i][zero_nhood]; if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB return 0; return decode_block_coeffs_internal(c, block, probs, i, token_prob, qmul); }"} {"target": 1, "idx": 12596, "func": "static int cirrus_bitblt_solidfill(CirrusVGAState *s, int blt_rop) { cirrus_fill_t rop_func; if (blit_is_unsafe(s)) { return 0; } rop_func = cirrus_fill[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; rop_func(s, s->vga.vram_ptr + (s->cirrus_blt_dstaddr & s->cirrus_addr_mask), s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_bitblt_reset(s); return 1; }"} {"target": 1, "idx": 12607, "func": "static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb, AVPacket *pkt) { int chunk_type; if (s->audio_chunk_offset) { /* adjust for PCM audio by skipping chunk header */ if (s->audio_type != CODEC_ID_INTERPLAY_DPCM) { s->audio_chunk_offset += 6; s->audio_chunk_size -= 6; avio_seek(pb, s->audio_chunk_offset, SEEK_SET); s->audio_chunk_offset = 0; if (s->audio_chunk_size != av_get_packet(pb, pkt, s->audio_chunk_size)) return CHUNK_EOF; pkt->stream_index = s->audio_stream_index; pkt->pts = s->audio_frame_count; /* audio frame maintenance */ if (s->audio_type != 
CODEC_ID_INTERPLAY_DPCM) s->audio_frame_count += (s->audio_chunk_size / s->audio_channels / (s->audio_bits / 8)); else s->audio_frame_count += (s->audio_chunk_size - 6) / s->audio_channels; av_dlog(NULL, \"sending audio frame with pts %\"PRId64\" (%d audio frames)\\n\", pkt->pts, s->audio_frame_count); chunk_type = CHUNK_VIDEO; } else if (s->decode_map_chunk_offset) { /* send both the decode map and the video data together */ if (av_new_packet(pkt, s->decode_map_chunk_size + s->video_chunk_size)) return CHUNK_NOMEM; if (s->has_palette) { uint8_t *pal; pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE); if (pal) { memcpy(pal, s->palette, AVPALETTE_SIZE); s->has_palette = 0; pkt->pos= s->decode_map_chunk_offset; avio_seek(pb, s->decode_map_chunk_offset, SEEK_SET); s->decode_map_chunk_offset = 0; if (avio_read(pb, pkt->data, s->decode_map_chunk_size) != s->decode_map_chunk_size) { av_free_packet(pkt); return CHUNK_EOF; avio_seek(pb, s->video_chunk_offset, SEEK_SET); s->video_chunk_offset = 0; if (avio_read(pb, pkt->data + s->decode_map_chunk_size, s->video_chunk_size) != s->video_chunk_size) { av_free_packet(pkt); return CHUNK_EOF; pkt->stream_index = s->video_stream_index; pkt->pts = s->video_pts; av_dlog(NULL, \"sending video frame with pts %\"PRId64\"\\n\", pkt->pts); s->video_pts += s->frame_pts_inc; chunk_type = CHUNK_VIDEO; } else { avio_seek(pb, s->next_chunk_offset, SEEK_SET); chunk_type = CHUNK_DONE; return chunk_type;"} {"target": 0, "idx": 12616, "func": "static int encode_hq_slice(AVCodecContext *avctx, void *arg) { SliceArgs *slice_dat = arg; VC2EncContext *s = slice_dat->ctx; PutBitContext *pb = &slice_dat->pb; const int slice_x = slice_dat->x; const int slice_y = slice_dat->y; const int quant_idx = slice_dat->quant_idx; const int slice_bytes_max = slice_dat->bytes; uint8_t quants[MAX_DWT_LEVELS][4]; int p, level, orientation; avpriv_align_put_bits(pb); skip_put_bytes(pb, s->prefix_bytes); put_bits(pb, 8, quant_idx); /* Slice quantization (slice_quantizers() in the specs) */ for (level = 0; level < s->wavelet_depth; level++) for (orientation = !!level; orientation < 4; orientation++) quants[level][orientation] = FFMAX(quant_idx - s->quant[level][orientation], 0); /* Luma + 2 Chroma planes */ for (p = 0; p < 3; p++) { int bytes_start, bytes_len, pad_s, pad_c; bytes_start = put_bits_count(pb) >> 3; put_bits(pb, 8, 0); for (level = 0; level < s->wavelet_depth; level++) { for (orientation = !!level; orientation < 4; orientation++) { encode_subband(s, pb, slice_x, slice_y, &s->plane[p].band[level][orientation], quants[level][orientation]); } } avpriv_align_put_bits(pb); bytes_len = (put_bits_count(pb) >> 3) - bytes_start - 1; if (p == 2) { int len_diff = slice_bytes_max - (put_bits_count(pb) >> 3); pad_s = FFALIGN((bytes_len + len_diff), s->size_scaler)/s->size_scaler; pad_c = (pad_s*s->size_scaler) - bytes_len; } else { pad_s = FFALIGN(bytes_len, s->size_scaler)/s->size_scaler; pad_c = (pad_s*s->size_scaler) - bytes_len; } pb->buf[bytes_start] = pad_s; flush_put_bits(pb); skip_put_bytes(pb, pad_c); } return 0; }"} {"target": 0, "idx": 12624, "func": "static void do_info_commands(Monitor *mon, QObject **ret_data) { QList *cmd_list; const mon_cmd_t *cmd; cmd_list = qlist_new(); for (cmd = qmp_cmds; cmd->name != NULL; cmd++) { if (monitor_handler_ported(cmd) && !monitor_cmd_user_only(cmd) && !compare_cmd(cmd->name, \"info\")) { qlist_append_obj(cmd_list, get_cmd_dict(cmd->name)); } } for (cmd = qmp_query_cmds; cmd->name != NULL; cmd++) { if 
(monitor_handler_ported(cmd) && !monitor_cmd_user_only(cmd)) { char buf[128]; snprintf(buf, sizeof(buf), \"query-%s\", cmd->name); qlist_append_obj(cmd_list, get_cmd_dict(buf)); } } *ret_data = QOBJECT(cmd_list); }"} {"target": 0, "idx": 12635, "func": "static void spapr_msi_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) { sPAPRPHBState *phb = opaque; int ndev = addr >> 16; int vec = ((addr & 0xFFFF) >> 2) | data; uint32_t irq = phb->msi_table[ndev].irq + vec; trace_spapr_pci_msi_write(addr, data, irq); qemu_irq_pulse(xics_get_qirq(spapr->icp, irq)); }"} {"target": 0, "idx": 12642, "func": "static void pxa2xx_pm_write(void *opaque, target_phys_addr_t addr, uint32_t value) { struct pxa2xx_state_s *s = (struct pxa2xx_state_s *) opaque; if (addr > s->pm_base + PCMD31) { /* Special case: PWRI2C registers appear in the same range. */ pxa2xx_i2c_write(s->i2c[1], addr, value); return; } addr -= s->pm_base; switch (addr) { case PMCR: s->pm_regs[addr >> 2] &= 0x15 & ~(value & 0x2a); s->pm_regs[addr >> 2] |= value & 0x15; break; case PSSR: /* Read-clean registers */ case RCSR: case PKSR: s->pm_regs[addr >> 2] &= ~value; break; default: /* Read-write registers */ if (addr >= PMCR && addr <= PCMD31 && !(addr & 3)) { s->pm_regs[addr >> 2] = value; break; } printf(\"%s: Bad register \" REG_FMT \"\\n\", __FUNCTION__, addr); break; } }"} {"target": 0, "idx": 12645, "func": "CPUX86State *cpu_x86_init(void) { CPUX86State *env; int i; static int inited; cpu_x86_tblocks_init(); env = malloc(sizeof(CPUX86State)); if (!env) return NULL; memset(env, 0, sizeof(CPUX86State)); /* basic FPU init */ for(i = 0;i < 8; i++) env->fptags[i] = 1; env->fpuc = 0x37f; /* flags setup */ env->eflags = 0; /* init various static tables */ if (!inited) { inited = 1; optimize_flags_init(); } return env; }"} {"target": 0, "idx": 12663, "func": "void readline_show_prompt(ReadLineState *rs) { monitor_printf(rs->mon, \"%s\", rs->prompt); monitor_flush(rs->mon); rs->last_cmd_buf_index = 0; rs->last_cmd_buf_size = 0; rs->esc_state = IS_NORM; }"} {"target": 0, "idx": 12667, "func": "static int parallel_parse(const char *devname) { static int index = 0; char label[32]; if (strcmp(devname, \"none\") == 0) return 0; if (index == MAX_PARALLEL_PORTS) { fprintf(stderr, \"qemu: too many parallel ports\\n\"); exit(1); } snprintf(label, sizeof(label), \"parallel%d\", index); parallel_hds[index] = qemu_chr_new(label, devname, NULL); if (!parallel_hds[index]) { fprintf(stderr, \"qemu: could not connect parallel device\" \" to character backend '%s'\\n\", devname); return -1; } index++; return 0; }"} {"target": 0, "idx": 12668, "func": "START_TEST(qfloat_destroy_test) { QFloat *qf = qfloat_from_double(0.0); QDECREF(qf); }"} {"target": 0, "idx": 12672, "func": "static int local_lstat(FsContext *fs_ctx, V9fsPath *fs_path, struct stat *stbuf) { int err; char buffer[PATH_MAX]; char *path = fs_path->data; err = lstat(rpath(fs_ctx, path, buffer), stbuf); if (err) { return err; } if (fs_ctx->fs_sm == SM_MAPPED) { /* Actual credentials are part of extended attrs */ uid_t tmp_uid; gid_t tmp_gid; mode_t tmp_mode; dev_t tmp_dev; if (getxattr(rpath(fs_ctx, path, buffer), \"user.virtfs.uid\", &tmp_uid, sizeof(uid_t)) > 0) { stbuf->st_uid = tmp_uid; } if (getxattr(rpath(fs_ctx, path, buffer), \"user.virtfs.gid\", &tmp_gid, sizeof(gid_t)) > 0) { stbuf->st_gid = tmp_gid; } if (getxattr(rpath(fs_ctx, path, buffer), \"user.virtfs.mode\", &tmp_mode, sizeof(mode_t)) > 0) { stbuf->st_mode = tmp_mode; } if (getxattr(rpath(fs_ctx, path, buffer), 
\"user.virtfs.rdev\", &tmp_dev, sizeof(dev_t)) > 0) { stbuf->st_rdev = tmp_dev; } } return err; }"} {"target": 0, "idx": 12686, "func": "void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp, uintptr_t retaddr) { CPUState *cs = CPU(s390_env_get_cpu(env)); int t; cs->exception_index = EXCP_PGM; env->int_pgm_code = excp; /* Use the (ultimate) callers address to find the insn that trapped. */ cpu_restore_state(cs, retaddr); /* Advance past the insn. */ t = cpu_ldub_code(env, env->psw.addr); env->int_pgm_ilen = t = get_ilen(t); env->psw.addr += t; cpu_loop_exit(cs); }"} {"target": 0, "idx": 12701, "func": "static int usb_host_init(void) { const struct libusb_pollfd **poll; int i, rc; if (ctx) { return 0; } rc = libusb_init(&ctx); if (rc != 0) { return -1; } libusb_set_debug(ctx, loglevel); libusb_set_pollfd_notifiers(ctx, usb_host_add_fd, usb_host_del_fd, ctx); poll = libusb_get_pollfds(ctx); if (poll) { for (i = 0; poll[i] != NULL; i++) { usb_host_add_fd(poll[i]->fd, poll[i]->events, ctx); } } free(poll); return 0; }"} {"target": 0, "idx": 12704, "func": "SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, bool has_id, const char *id, bool has_name, const char *name, Error **errp) { BlockDriverState *bs; BlockBackend *blk; AioContext *aio_context; QEMUSnapshotInfo sn; Error *local_err = NULL; SnapshotInfo *info = NULL; int ret; blk = blk_by_name(device); if (!blk) { error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, \"Device '%s' not found\", device); return NULL; } aio_context = blk_get_aio_context(blk); aio_context_acquire(aio_context); if (!has_id) { id = NULL; } if (!has_name) { name = NULL; } if (!id && !name) { error_setg(errp, \"Name or id must be provided\"); goto out_aio_context; } if (!blk_is_available(blk)) { error_setg(errp, \"Device '%s' has no medium\", device); goto out_aio_context; } bs = blk_bs(blk); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) { goto out_aio_context; } ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err); if (local_err) { error_propagate(errp, local_err); goto out_aio_context; } if (!ret) { error_setg(errp, \"Snapshot with id '%s' and name '%s' does not exist on \" \"device '%s'\", STR_OR_NULL(id), STR_OR_NULL(name), device); goto out_aio_context; } bdrv_snapshot_delete(bs, id, name, &local_err); if (local_err) { error_propagate(errp, local_err); goto out_aio_context; } aio_context_release(aio_context); info = g_new0(SnapshotInfo, 1); info->id = g_strdup(sn.id_str); info->name = g_strdup(sn.name); info->date_nsec = sn.date_nsec; info->date_sec = sn.date_sec; info->vm_state_size = sn.vm_state_size; info->vm_clock_nsec = sn.vm_clock_nsec % 1000000000; info->vm_clock_sec = sn.vm_clock_nsec / 1000000000; return info; out_aio_context: aio_context_release(aio_context); return NULL; }"} {"target": 0, "idx": 12710, "func": "static void virtio_scsi_complete_req(VirtIOSCSIReq *req) { VirtIOSCSI *s = req->dev; VirtQueue *vq = req->vq; VirtIODevice *vdev = VIRTIO_DEVICE(s); virtqueue_push(vq, &req->elem, req->qsgl.size + req->elem.in_sg[0].iov_len); if (req->sreq) { req->sreq->hba_private = NULL; scsi_req_unref(req->sreq); } virtio_scsi_free_req(req); virtio_notify(vdev, vq); }"} {"target": 0, "idx": 12724, "func": "bool timer_pending(QEMUTimer *ts) { QEMUTimer *t; for (t = ts->timer_list->active_timers; t != NULL; t = t->next) { if (t == ts) { return true; } } return false; }"} {"target": 0, "idx": 12730, "func": "static int av_cold libopus_encode_init(AVCodecContext *avctx) { 
LibopusEncContext *opus = avctx->priv_data; const uint8_t *channel_mapping; OpusMSEncoder *enc; int ret = OPUS_OK; int coupled_stream_count, header_size, frame_size; coupled_stream_count = opus_coupled_streams[avctx->channels - 1]; opus->stream_count = avctx->channels - coupled_stream_count; channel_mapping = libav_libopus_channel_map[avctx->channels - 1]; /* FIXME: Opus can handle up to 255 channels. However, the mapping for * anything greater than 8 is undefined. */ if (avctx->channels > 8) av_log(avctx, AV_LOG_WARNING, \"Channel layout undefined for %d channels.\\n\", avctx->channels); if (!avctx->bit_rate) { /* Sane default copied from opusenc */ avctx->bit_rate = 64000 * opus->stream_count + 32000 * coupled_stream_count; av_log(avctx, AV_LOG_WARNING, \"No bit rate set. Defaulting to %d bps.\\n\", avctx->bit_rate); } if (avctx->bit_rate < 500 || avctx->bit_rate > 256000 * avctx->channels) { av_log(avctx, AV_LOG_ERROR, \"The bit rate %d bps is unsupported. \" \"Please choose a value between 500 and %d.\\n\", avctx->bit_rate, 256000 * avctx->channels); return AVERROR(EINVAL); } frame_size = opus->opts.frame_duration * 48000 / 1000; switch (frame_size) { case 120: case 240: if (opus->opts.application != OPUS_APPLICATION_RESTRICTED_LOWDELAY) av_log(avctx, AV_LOG_WARNING, \"LPC mode cannot be used with a frame duration of less \" \"than 10ms. Enabling restricted low-delay mode.\\n\" \"Use a longer frame duration if this is not what you want.\\n\"); /* Frame sizes less than 10 ms can only use MDCT mode, so switching to * RESTRICTED_LOWDELAY avoids an unnecessary extra 2.5ms lookahead. */ opus->opts.application = OPUS_APPLICATION_RESTRICTED_LOWDELAY; case 480: case 960: case 1920: case 2880: opus->opts.packet_size = avctx->frame_size = frame_size * avctx->sample_rate / 48000; break; default: av_log(avctx, AV_LOG_ERROR, \"Invalid frame duration: %g.\\n\" \"Frame duration must be exactly one of: 2.5, 5, 10, 20, 40 or 60.\\n\", opus->opts.frame_duration); return AVERROR(EINVAL); } if (avctx->compression_level < 0 || avctx->compression_level > 10) { av_log(avctx, AV_LOG_WARNING, \"Compression level must be in the range 0 to 10. \" \"Defaulting to 10.\\n\"); opus->opts.complexity = 10; } else { opus->opts.complexity = avctx->compression_level; } if (avctx->cutoff) { switch (avctx->cutoff) { case 4000: opus->opts.max_bandwidth = OPUS_BANDWIDTH_NARROWBAND; break; case 6000: opus->opts.max_bandwidth = OPUS_BANDWIDTH_MEDIUMBAND; break; case 8000: opus->opts.max_bandwidth = OPUS_BANDWIDTH_WIDEBAND; break; case 12000: opus->opts.max_bandwidth = OPUS_BANDWIDTH_SUPERWIDEBAND; break; case 20000: opus->opts.max_bandwidth = OPUS_BANDWIDTH_FULLBAND; break; default: av_log(avctx, AV_LOG_WARNING, \"Invalid frequency cutoff: %d. Using default maximum bandwidth.\\n\" \"Cutoff frequency must be exactly one of: 4000, 6000, 8000, 12000 or 20000.\\n\", avctx->cutoff); avctx->cutoff = 0; } } enc = opus_multistream_encoder_create(avctx->sample_rate, avctx->channels, opus->stream_count, coupled_stream_count, channel_mapping, opus->opts.application, &ret); if (ret != OPUS_OK) { av_log(avctx, AV_LOG_ERROR, \"Failed to create encoder: %s\\n\", opus_strerror(ret)); return ff_opus_error_to_averror(ret); } ret = libopus_configure_encoder(avctx, enc, &opus->opts); if (ret != OPUS_OK) { ret = ff_opus_error_to_averror(ret); goto fail; } header_size = 19 + (avctx->channels > 2 ? 
2 + avctx->channels : 0); avctx->extradata = av_malloc(header_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!avctx->extradata) { av_log(avctx, AV_LOG_ERROR, \"Failed to allocate extradata.\\n\"); ret = AVERROR(ENOMEM); goto fail; } avctx->extradata_size = header_size; opus->samples = av_mallocz(frame_size * avctx->channels * av_get_bytes_per_sample(avctx->sample_fmt)); if (!opus->samples) { av_log(avctx, AV_LOG_ERROR, \"Failed to allocate samples buffer.\\n\"); ret = AVERROR(ENOMEM); goto fail; } ret = opus_multistream_encoder_ctl(enc, OPUS_GET_LOOKAHEAD(&avctx->delay)); if (ret != OPUS_OK) av_log(avctx, AV_LOG_WARNING, \"Unable to get number of lookahead samples: %s\\n\", opus_strerror(ret)); libopus_write_header(avctx, opus->stream_count, coupled_stream_count, opus_vorbis_channel_map[avctx->channels - 1]); ff_af_queue_init(avctx, &opus->afq); opus->enc = enc; return 0; fail: opus_multistream_encoder_destroy(enc); av_freep(&avctx->extradata); return ret; }"} {"target": 0, "idx": 12741, "func": "static void setup_rt_frame(int sig, struct emulated_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUX86State *env) { struct rt_sigframe *frame; int err = 0; frame = get_sigframe(ka, env, sizeof(*frame)); #if 0 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; #endif err |= __put_user((/*current->exec_domain && current->exec_domain->signal_invmap && sig < 32 ? current->exec_domain->signal_invmap[sig] : */sig), &frame->sig); err |= __put_user((target_ulong)&frame->info, &frame->pinfo); err |= __put_user((target_ulong)&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, info); if (err) goto give_sigsegv; /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(/*current->sas_ss_sp*/ 0, &frame->uc.uc_stack.ss_sp); err |= __put_user(/* sas_ss_flags(regs->esp) */ 0, &frame->uc.uc_stack.ss_flags); err |= __put_user(/* current->sas_ss_size */ 0, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, env, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; /* Set up to return from userspace. If provided, use a stub already in userspace. 
*/ if (ka->sa.sa_flags & TARGET_SA_RESTORER) { err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); } else { err |= __put_user(frame->retcode, &frame->pretcode); /* This is movl $,%eax ; int $0x80 */ err |= __put_user(0xb8, (char *)(frame->retcode+0)); err |= __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1)); err |= __put_user(0x80cd, (short *)(frame->retcode+5)); } if (err) goto give_sigsegv; /* Set up registers for signal handler */ env->regs[R_ESP] = (unsigned long) frame; env->eip = (unsigned long) ka->sa._sa_handler; cpu_x86_load_seg(env, R_DS, __USER_DS); cpu_x86_load_seg(env, R_ES, __USER_DS); cpu_x86_load_seg(env, R_SS, __USER_DS); cpu_x86_load_seg(env, R_CS, __USER_CS); env->eflags &= ~TF_MASK; return; give_sigsegv: if (sig == TARGET_SIGSEGV) ka->sa._sa_handler = TARGET_SIG_DFL; force_sig(TARGET_SIGSEGV /* , current */); }"} {"target": 0, "idx": 12777, "func": "static void tcp_chr_telnet_init(QIOChannel *ioc) { char buf[3]; /* Send the telnet negotion to put telnet in binary, no echo, single char mode */ IACSET(buf, 0xff, 0xfb, 0x01); /* IAC WILL ECHO */ qio_channel_write(ioc, buf, 3, NULL); IACSET(buf, 0xff, 0xfb, 0x03); /* IAC WILL Suppress go ahead */ qio_channel_write(ioc, buf, 3, NULL); IACSET(buf, 0xff, 0xfb, 0x00); /* IAC WILL Binary */ qio_channel_write(ioc, buf, 3, NULL); IACSET(buf, 0xff, 0xfd, 0x00); /* IAC DO Binary */ qio_channel_write(ioc, buf, 3, NULL); }"} {"target": 1, "idx": 12780, "func": "static void blk_delete(BlockBackend *blk) { assert(!blk->refcnt); assert(!blk->name); assert(!blk->dev); if (blk->root) { blk_remove_bs(blk); assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers)); assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers)); QTAILQ_REMOVE(&block_backends, blk, link); drive_info_del(blk->legacy_dinfo); block_acct_cleanup(&blk->stats); g_free(blk);"} {"target": 1, "idx": 12785, "func": "static int gdbserver_open(int port) { struct sockaddr_in sockaddr; int fd, val, ret; fd = socket(PF_INET, SOCK_STREAM, 0); if (fd < 0) { perror(\"socket\"); return -1; } #ifndef _WIN32 fcntl(fd, F_SETFD, FD_CLOEXEC); #endif /* allow fast reuse */ val = 1; setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val)); sockaddr.sin_family = AF_INET; sockaddr.sin_port = htons(port); sockaddr.sin_addr.s_addr = 0; ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)); if (ret < 0) { perror(\"bind\"); return -1; } ret = listen(fd, 0); if (ret < 0) { perror(\"listen\"); return -1; } return fd; }"} {"target": 1, "idx": 12786, "func": "static void spapr_nvram_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VIOsPAPRDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass); k->realize = spapr_nvram_realize; k->devnode = spapr_nvram_devnode; k->dt_name = \"nvram\"; k->dt_type = \"nvram\"; k->dt_compatible = \"qemu,spapr-nvram\"; set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->props = spapr_nvram_properties; dc->vmsd = &vmstate_spapr_nvram; }"} {"target": 1, "idx": 12787, "func": "static void cpu_handle_ioreq(void *opaque) { XenIOState *state = opaque; ioreq_t *req = cpu_get_ioreq(state); handle_buffered_iopage(state); if (req) { ioreq_t copy = *req; xen_rmb(); handle_ioreq(state, &copy); req->data = copy.data; if (req->state != STATE_IOREQ_INPROCESS) { fprintf(stderr, \"Badness in I/O request ... 
not in service?!: \" \"%x, ptr: %x, port: %\"PRIx64\", \" \"data: %\"PRIx64\", count: %u, size: %u, type: %u\\n\", req->state, req->data_is_ptr, req->addr, req->data, req->count, req->size, req->type); destroy_hvm_domain(false); return; } xen_wmb(); /* Update ioreq contents /then/ update state. */ /* * We do this before we send the response so that the tools * have the opportunity to pick up on the reset before the * guest resumes and does a hlt with interrupts disabled which * causes Xen to powerdown the domain. */ if (runstate_is_running()) { if (qemu_shutdown_requested_get()) { destroy_hvm_domain(false); } if (qemu_reset_requested_get()) { qemu_system_reset(VMRESET_REPORT); destroy_hvm_domain(true); } } req->state = STATE_IORESP_READY; xenevtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]); } }"} {"target": 1, "idx": 12789, "func": "static bool scsi_target_emulate_inquiry(SCSITargetReq *r) { assert(r->req.dev->lun != r->req.lun); if (r->req.cmd.buf[1] & 0x2) { /* Command support data - optional, not implemented */ return false; } if (r->req.cmd.buf[1] & 0x1) { /* Vital product data */ uint8_t page_code = r->req.cmd.buf[2]; r->buf[r->len++] = page_code ; /* this page */ r->buf[r->len++] = 0x00; switch (page_code) { case 0x00: /* Supported page codes, mandatory */ { int pages; pages = r->len++; r->buf[r->len++] = 0x00; /* list of supported pages (this page) */ r->buf[pages] = r->len - pages - 1; /* number of pages */ break; } default: return false; } /* done with EVPD */ assert(r->len < sizeof(r->buf)); r->len = MIN(r->req.cmd.xfer, r->len); return true; } /* Standard INQUIRY data */ if (r->req.cmd.buf[2] != 0) { return false; } /* PAGE CODE == 0 */ r->len = MIN(r->req.cmd.xfer, 36); memset(r->buf, 0, r->len); if (r->req.lun != 0) { r->buf[0] = TYPE_NO_LUN; } else { r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE; r->buf[2] = 5; /* Version */ r->buf[3] = 2 | 0x10; /* HiSup, response data format */ r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */ r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ. */ memcpy(&r->buf[8], \"QEMU \", 8); memcpy(&r->buf[16], \"QEMU TARGET \", 16); pstrcpy((char *) &r->buf[32], 4, qemu_get_version()); } return true; }"} {"target": 1, "idx": 12799, "func": "int qcow2_expand_zero_clusters(BlockDriverState *bs) { BDRVQcowState *s = bs->opaque; uint64_t *l1_table = NULL; uint64_t nb_clusters; uint8_t *expanded_clusters; int ret; int i, j; nb_clusters = size_to_clusters(s, bs->file->total_sectors * BDRV_SECTOR_SIZE); expanded_clusters = g_malloc0((nb_clusters + 7) / 8); ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size, &expanded_clusters, &nb_clusters); if (ret < 0) { goto fail; } /* Inactive L1 tables may point to active L2 tables - therefore it is * necessary to flush the L2 table cache before trying to access the L2 * tables pointed to by inactive L1 entries (else we might try to expand * zero clusters that have already been expanded); furthermore, it is also * necessary to empty the L2 table cache, since it may contain tables which * are now going to be modified directly on disk, bypassing the cache. * qcow2_cache_empty() does both for us. 
*/ ret = qcow2_cache_empty(bs, s->l2_table_cache); if (ret < 0) { goto fail; } for (i = 0; i < s->nb_snapshots; i++) { int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) + BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE; l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE); ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors); if (ret < 0) { goto fail; } for (j = 0; j < s->snapshots[i].l1_size; j++) { be64_to_cpus(&l1_table[j]); } ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size, &expanded_clusters, &nb_clusters); if (ret < 0) { goto fail; } } ret = 0; fail: g_free(expanded_clusters); g_free(l1_table); return ret; }"} {"target": 1, "idx": 12805, "func": "static int local_open2(FsContext *fs_ctx, const char *path, int flags, FsCred *credp) { int fd = -1; int err = -1; int serrno = 0; /* Determine the security model */ if (fs_ctx->fs_sm == SM_MAPPED) { fd = open(rpath(fs_ctx, path), flags, SM_LOCAL_MODE_BITS); if (fd == -1) { return fd; } credp->fc_mode = credp->fc_mode|S_IFREG; /* Set cleint credentials in xattr */ err = local_set_xattr(rpath(fs_ctx, path), credp); if (err == -1) { serrno = errno; goto err_end; } } else if (fs_ctx->fs_sm == SM_PASSTHROUGH) { fd = open(rpath(fs_ctx, path), flags, credp->fc_mode); if (fd == -1) { return fd; } err = local_post_create_passthrough(fs_ctx, path, credp); if (err == -1) { serrno = errno; goto err_end; } } return fd; err_end: close(fd); remove(rpath(fs_ctx, path)); errno = serrno; return err; }"} {"target": 1, "idx": 12809, "func": "static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, uint8_t *dest, int dstW, int y) { int i; switch(c->dstFormat) { case PIX_FMT_BGR32: case PIX_FMT_RGB32: YSCALE_YUV_2_RGBX_C(uint32_t) ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1]; ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2]; } break; case PIX_FMT_RGB24: YSCALE_YUV_2_RGBX_C(uint8_t) ((uint8_t*)dest)[0]= r[Y1]; ((uint8_t*)dest)[1]= g[Y1]; ((uint8_t*)dest)[2]= b[Y1]; ((uint8_t*)dest)[3]= r[Y2]; ((uint8_t*)dest)[4]= g[Y2]; ((uint8_t*)dest)[5]= b[Y2]; dest+=6; }"} {"target": 1, "idx": 12810, "func": "static int write_packet(AVFormatContext *s, AVPacket *pkt) { int ret, did_split; if (s->output_ts_offset) { AVStream *st = s->streams[pkt->stream_index]; int64_t offset = av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base); if (pkt->dts != AV_NOPTS_VALUE) pkt->dts += offset; if (pkt->pts != AV_NOPTS_VALUE) pkt->pts += offset; } if (s->avoid_negative_ts > 0) { AVStream *st = s->streams[pkt->stream_index]; int64_t offset = st->mux_ts_offset; int64_t ts = s->internal->avoid_negative_ts_use_pts ? 
pkt->pts : pkt->dts; if (s->internal->offset == AV_NOPTS_VALUE && ts != AV_NOPTS_VALUE && (ts < 0 || s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO)) { s->internal->offset = -ts; s->internal->offset_timebase = st->time_base; } if (s->internal->offset != AV_NOPTS_VALUE && !offset) { offset = st->mux_ts_offset = av_rescale_q_rnd(s->internal->offset, s->internal->offset_timebase, st->time_base, AV_ROUND_UP); } if (pkt->dts != AV_NOPTS_VALUE) pkt->dts += offset; if (pkt->pts != AV_NOPTS_VALUE) pkt->pts += offset; if (s->internal->avoid_negative_ts_use_pts) { if (pkt->pts != AV_NOPTS_VALUE && pkt->pts < 0) { av_log(s, AV_LOG_WARNING, \"failed to avoid negative \" \"pts %s in stream %d.\\n\" \"Try -avoid_negative_ts 1 as a possible workaround.\\n\", av_ts2str(pkt->dts), pkt->stream_index ); } } else { av_assert2(pkt->dts == AV_NOPTS_VALUE || pkt->dts >= 0 || s->max_interleave_delta > 0); if (pkt->dts != AV_NOPTS_VALUE && pkt->dts < 0) { av_log(s, AV_LOG_WARNING, \"Packets poorly interleaved, failed to avoid negative \" \"timestamp %s in stream %d.\\n\" \"Try -max_interleave_delta 0 as a possible workaround.\\n\", av_ts2str(pkt->dts), pkt->stream_index ); } } } did_split = av_packet_split_side_data(pkt); if (!s->internal->header_written) { ret = s->internal->write_header_ret ? s->internal->write_header_ret : write_header_internal(s); if (ret < 0) goto fail; } if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) { AVFrame *frame = (AVFrame *)pkt->data; av_assert0(pkt->size == UNCODED_FRAME_PACKET_SIZE); ret = s->oformat->write_uncoded_frame(s, pkt->stream_index, &frame, 0); av_frame_free(&frame); } else { ret = s->oformat->write_packet(s, pkt); } if (s->pb && ret >= 0) { if (s->flush_packets && s->flags & AVFMT_FLAG_FLUSH_PACKETS) avio_flush(s->pb); if (s->pb->error < 0) ret = s->pb->error; } fail: if (did_split) av_packet_merge_side_data(pkt); if (ret < 0) { pkt->pts = pts_backup; pkt->dts = dts_backup; } return ret; }"} {"target": 1, "idx": 12814, "func": "static void read_sgi_header(ByteIOContext *f, SGIInfo *info) { info->magic = (unsigned short) get_be16(f); info->rle = get_byte(f); info->bytes_per_channel = get_byte(f); info->dimension = (unsigned short)get_be16(f); info->xsize = (unsigned short) get_be16(f); info->ysize = (unsigned short) get_be16(f); info->zsize = (unsigned short) get_be16(f); #ifdef DEBUG printf(\"sgi header fields:\\n\"); printf(\" magic: %d\\n\", info->magic); printf(\" rle: %d\\n\", info->rle); printf(\" bpc: %d\\n\", info->bytes_per_channel); printf(\" dim: %d\\n\", info->dimension); printf(\" xsize: %d\\n\", info->xsize); printf(\" ysize: %d\\n\", info->ysize); printf(\" zsize: %d\\n\", info->zsize); #endif return; }"} {"target": 1, "idx": 12828, "func": "static void pc_init1(ram_addr_t ram_size, int vga_ram_size, const char *boot_device, DisplayState *ds, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, int pci_enabled, const char *cpu_model) { char buf[1024]; int ret, linux_boot, i; ram_addr_t ram_addr, vga_ram_addr, bios_offset, vga_bios_offset; ram_addr_t below_4g_mem_size, above_4g_mem_size = 0; int bios_size, isa_bios_size, vga_bios_size; PCIBus *pci_bus; int piix3_devfn = -1; CPUState *env; qemu_irq *cpu_irq; qemu_irq *i8259; int index; BlockDriverState *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; BlockDriverState *fd[MAX_FD]; if (ram_size >= 0xe0000000 ) { above_4g_mem_size = ram_size - 0xe0000000; below_4g_mem_size = 0xe0000000; } else { below_4g_mem_size = ram_size; } linux_boot = (kernel_filename != NULL); /* init CPUs */ if 
(cpu_model == NULL) { #ifdef TARGET_X86_64 cpu_model = \"qemu64\"; #else cpu_model = \"qemu32\"; #endif } for(i = 0; i < smp_cpus; i++) { env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"Unable to find x86 CPU definition\\n\"); exit(1); } if (i != 0) env->halted = 1; if (smp_cpus > 1) { /* XXX: enable it in all cases */ env->cpuid_features |= CPUID_APIC; } qemu_register_reset(main_cpu_reset, env); if (pci_enabled) { apic_init(env); } } vmport_init(); /* allocate RAM */ ram_addr = qemu_ram_alloc(0xa0000); cpu_register_physical_memory(0, 0xa0000, ram_addr); /* Allocate, even though we won't register, so we don't break the * phys_ram_base + PA assumption. This range includes vga (0xa0000 - 0xc0000), * and some bios areas, which will be registered later */ ram_addr = qemu_ram_alloc(0x100000 - 0xa0000); ram_addr = qemu_ram_alloc(below_4g_mem_size - 0x100000); cpu_register_physical_memory(0x100000, below_4g_mem_size - 0x100000, ram_addr); /* above 4giga memory allocation */ if (above_4g_mem_size > 0) { ram_addr = qemu_ram_alloc(above_4g_mem_size); cpu_register_physical_memory(0x100000000ULL, above_4g_mem_size, ram_addr); } /* allocate VGA RAM */ vga_ram_addr = qemu_ram_alloc(vga_ram_size); /* BIOS load */ if (bios_name == NULL) bios_name = BIOS_FILENAME; snprintf(buf, sizeof(buf), \"%s/%s\", bios_dir, bios_name); bios_size = get_image_size(buf); if (bios_size <= 0 || (bios_size % 65536) != 0) { goto bios_error; } bios_offset = qemu_ram_alloc(bios_size); ret = load_image(buf, phys_ram_base + bios_offset); if (ret != bios_size) { bios_error: fprintf(stderr, \"qemu: could not load PC BIOS '%s'\\n\", buf); exit(1); } if (cirrus_vga_enabled || std_vga_enabled || vmsvga_enabled) { /* VGA BIOS load */ if (cirrus_vga_enabled) { snprintf(buf, sizeof(buf), \"%s/%s\", bios_dir, VGABIOS_CIRRUS_FILENAME); } else { snprintf(buf, sizeof(buf), \"%s/%s\", bios_dir, VGABIOS_FILENAME); } vga_bios_size = get_image_size(buf); if (vga_bios_size <= 0 || vga_bios_size > 65536) goto vga_bios_error; vga_bios_offset = qemu_ram_alloc(65536); ret = load_image(buf, phys_ram_base + vga_bios_offset); if (ret != vga_bios_size) { vga_bios_error: fprintf(stderr, \"qemu: could not load VGA BIOS '%s'\\n\", buf); exit(1); } } /* setup basic memory access */ cpu_register_physical_memory(0xc0000, 0x10000, vga_bios_offset | IO_MEM_ROM); /* map the last 128KB of the BIOS in ISA space */ isa_bios_size = bios_size; if (isa_bios_size > (128 * 1024)) isa_bios_size = 128 * 1024; cpu_register_physical_memory(0x100000 - isa_bios_size, isa_bios_size, (bios_offset + bios_size - isa_bios_size) | IO_MEM_ROM); { ram_addr_t option_rom_offset; int size, offset; offset = 0; if (linux_boot) { option_rom_offset = qemu_ram_alloc(TARGET_PAGE_SIZE); load_linux(phys_ram_base + option_rom_offset, kernel_filename, initrd_filename, kernel_cmdline); cpu_register_physical_memory(0xd0000, TARGET_PAGE_SIZE, option_rom_offset | IO_MEM_ROM); offset = TARGET_PAGE_SIZE; } for (i = 0; i < nb_option_roms; i++) { size = get_image_size(option_rom[i]); if (size < 0) { fprintf(stderr, \"Could not load option rom '%s'\\n\", option_rom[i]); exit(1); } if (size > (0x10000 - offset)) goto option_rom_error; option_rom_offset = qemu_ram_alloc(size); ret = load_image(option_rom[i], phys_ram_base + option_rom_offset); if (ret != size) { option_rom_error: fprintf(stderr, \"Too many option ROMS\\n\"); exit(1); } size = (size + 4095) & ~4095; cpu_register_physical_memory(0xd0000 + offset, size, option_rom_offset | IO_MEM_ROM); offset += size; } } /* map all the bios at the top 
of memory */ cpu_register_physical_memory((uint32_t)(-bios_size), bios_size, bios_offset | IO_MEM_ROM); bochs_bios_init(); cpu_irq = qemu_allocate_irqs(pic_irq_request, NULL, 1); i8259 = i8259_init(cpu_irq[0]); ferr_irq = i8259[13]; if (pci_enabled) { pci_bus = i440fx_init(&i440fx_state, i8259); piix3_devfn = piix3_init(pci_bus, -1); } else { pci_bus = NULL; } /* init basic PC hardware */ register_ioport_write(0x80, 1, 1, ioport80_write, NULL); register_ioport_write(0xf0, 1, 1, ioportF0_write, NULL); if (cirrus_vga_enabled) { if (pci_enabled) { pci_cirrus_vga_init(pci_bus, ds, phys_ram_base + vga_ram_addr, vga_ram_addr, vga_ram_size); } else { isa_cirrus_vga_init(ds, phys_ram_base + vga_ram_addr, vga_ram_addr, vga_ram_size); } } else if (vmsvga_enabled) { if (pci_enabled) pci_vmsvga_init(pci_bus, ds, phys_ram_base + vga_ram_addr, vga_ram_addr, vga_ram_size); else fprintf(stderr, \"%s: vmware_vga: no PCI bus\\n\", __FUNCTION__); } else if (std_vga_enabled) { if (pci_enabled) { pci_vga_init(pci_bus, ds, phys_ram_base + vga_ram_addr, vga_ram_addr, vga_ram_size, 0, 0); } else { isa_vga_init(ds, phys_ram_base + vga_ram_addr, vga_ram_addr, vga_ram_size); } } rtc_state = rtc_init(0x70, i8259[8]); qemu_register_boot_set(pc_boot_set, rtc_state); register_ioport_read(0x92, 1, 1, ioport92_read, NULL); register_ioport_write(0x92, 1, 1, ioport92_write, NULL); if (pci_enabled) { ioapic = ioapic_init(); } pit = pit_init(0x40, i8259[0]); pcspk_init(pit); if (!no_hpet) { hpet_init(i8259); } if (pci_enabled) { pic_set_alt_irq_func(isa_pic, ioapic_set_irq, ioapic); } for(i = 0; i < MAX_SERIAL_PORTS; i++) { if (serial_hds[i]) { serial_init(serial_io[i], i8259[serial_irq[i]], 115200, serial_hds[i]); } } for(i = 0; i < MAX_PARALLEL_PORTS; i++) { if (parallel_hds[i]) { parallel_init(parallel_io[i], i8259[parallel_irq[i]], parallel_hds[i]); } } for(i = 0; i < nb_nics; i++) { NICInfo *nd = &nd_table[i]; if (!pci_enabled || (nd->model && strcmp(nd->model, \"ne2k_isa\") == 0)) pc_init_ne2k_isa(nd, i8259); else pci_nic_init(pci_bus, nd, -1, \"ne2k_pci\"); } if (drive_get_max_bus(IF_IDE) >= MAX_IDE_BUS) { fprintf(stderr, \"qemu: too many IDE bus\\n\"); exit(1); } for(i = 0; i < MAX_IDE_BUS * MAX_IDE_DEVS; i++) { index = drive_get_index(IF_IDE, i / MAX_IDE_DEVS, i % MAX_IDE_DEVS); if (index != -1) hd[i] = drives_table[index].bdrv; else hd[i] = NULL; } if (pci_enabled) { pci_piix3_ide_init(pci_bus, hd, piix3_devfn + 1, i8259); } else { for(i = 0; i < MAX_IDE_BUS; i++) { isa_ide_init(ide_iobase[i], ide_iobase2[i], i8259[ide_irq[i]], hd[MAX_IDE_DEVS * i], hd[MAX_IDE_DEVS * i + 1]); } } i8042_init(i8259[1], i8259[12], 0x60); DMA_init(0); #ifdef HAS_AUDIO audio_init(pci_enabled ? pci_bus : NULL, i8259); #endif for(i = 0; i < MAX_FD; i++) { index = drive_get_index(IF_FLOPPY, 0, i); if (index != -1) fd[i] = drives_table[index].bdrv; else fd[i] = NULL; } floppy_controller = fdctrl_init(i8259[6], 2, 0, 0x3f0, fd); cmos_init(below_4g_mem_size, above_4g_mem_size, boot_device, hd); if (pci_enabled && usb_enabled) { usb_uhci_piix3_init(pci_bus, piix3_devfn + 2); } if (pci_enabled && acpi_enabled) { uint8_t *eeprom_buf = qemu_mallocz(8 * 256); /* XXX: make this persistent */ i2c_bus *smbus; /* TODO: Populate SPD eeprom data. 
*/ smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100, i8259[9]); for (i = 0; i < 8; i++) { smbus_eeprom_device_init(smbus, 0x50 + i, eeprom_buf + (i * 256)); } } if (i440fx_state) { i440fx_init_memory_mappings(i440fx_state); } if (pci_enabled) { int max_bus; int bus, unit; void *scsi; max_bus = drive_get_max_bus(IF_SCSI); for (bus = 0; bus <= max_bus; bus++) { scsi = lsi_scsi_init(pci_bus, -1); for (unit = 0; unit < LSI_MAX_DEVS; unit++) { index = drive_get_index(IF_SCSI, bus, unit); if (index == -1) continue; lsi_scsi_attach(scsi, drives_table[index].bdrv, unit); } } } /* Add virtio block devices */ if (pci_enabled) { int index; int unit_id = 0; while ((index = drive_get_index(IF_VIRTIO, 0, unit_id)) != -1) { virtio_blk_init(pci_bus, drives_table[index].bdrv); unit_id++; } } /* Add virtio balloon device */ if (pci_enabled) virtio_balloon_init(pci_bus); /* Add virtio console devices */ if (pci_enabled) { for(i = 0; i < MAX_VIRTIO_CONSOLES; i++) { if (virtcon_hds[i]) virtio_console_init(pci_bus, virtcon_hds[i]); } } }"} {"target": 0, "idx": 12846, "func": "static int slirp_hostfwd(SlirpState *s, const char *redir_str, int legacy_format) { struct in_addr host_addr = { .s_addr = INADDR_ANY }; struct in_addr guest_addr = { .s_addr = 0 }; int host_port, guest_port; const char *p; char buf[256]; int is_udp; char *end; p = redir_str; if (!p || get_str_sep(buf, sizeof(buf), &p, ':') < 0) { goto fail_syntax; } if (!strcmp(buf, \"tcp\") || buf[0] == '\\0') { is_udp = 0; } else if (!strcmp(buf, \"udp\")) { is_udp = 1; } else { goto fail_syntax; } if (!legacy_format) { if (get_str_sep(buf, sizeof(buf), &p, ':') < 0) { goto fail_syntax; } if (buf[0] != '\\0' && !inet_aton(buf, &host_addr)) { goto fail_syntax; } } if (get_str_sep(buf, sizeof(buf), &p, legacy_format ? ':' : '-') < 0) { goto fail_syntax; } host_port = strtol(buf, &end, 0); if (*end != '\\0' || host_port < 1 || host_port > 65535) { goto fail_syntax; } if (get_str_sep(buf, sizeof(buf), &p, ':') < 0) { goto fail_syntax; } if (buf[0] != '\\0' && !inet_aton(buf, &guest_addr)) { goto fail_syntax; } guest_port = strtol(p, &end, 0); if (*end != '\\0' || guest_port < 1 || guest_port > 65535) { goto fail_syntax; } if (slirp_add_hostfwd(s->slirp, is_udp, host_addr, host_port, guest_addr, guest_port) < 0) { error_report(\"could not set up host forwarding rule '%s'\", redir_str); return -1; } return 0; fail_syntax: error_report(\"invalid host forwarding rule '%s'\", redir_str); return -1; }"} {"target": 0, "idx": 12853, "func": "int kvm_arch_put_registers(CPUState *cs, int level) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; struct kvm_sregs sregs; struct kvm_regs regs; struct kvm_fpu fpu; int r; int i; /* always save the PSW and the GPRS*/ cs->kvm_run->psw_addr = env->psw.addr; cs->kvm_run->psw_mask = env->psw.mask; if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) { for (i = 0; i < 16; i++) { cs->kvm_run->s.regs.gprs[i] = env->regs[i]; cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS; } } else { for (i = 0; i < 16; i++) { regs.gprs[i] = env->regs[i]; } r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs); if (r < 0) { return r; } } /* Floating point */ for (i = 0; i < 16; i++) { fpu.fprs[i] = env->fregs[i].ll; } fpu.fpc = env->fpc; r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu); if (r < 0) { return r; } /* Do we need to save more than that? */ if (level == KVM_PUT_RUNTIME_STATE) { return 0; } /* * These ONE_REGS are not protected by a capability. 
As they are only * necessary for migration we just trace a possible error, but don't * return with an error return code. */ kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm); kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc); kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr); kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea); kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp); if (cap_async_pf) { r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token); if (r < 0) { return r; } r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare); if (r < 0) { return r; } r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select); if (r < 0) { return r; } } if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS && cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) { for (i = 0; i < 16; i++) { cs->kvm_run->s.regs.acrs[i] = env->aregs[i]; cs->kvm_run->s.regs.crs[i] = env->cregs[i]; } cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS; cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS; } else { for (i = 0; i < 16; i++) { sregs.acrs[i] = env->aregs[i]; sregs.crs[i] = env->cregs[i]; } r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs); if (r < 0) { return r; } } /* Finally the prefix */ if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) { cs->kvm_run->s.regs.prefix = env->psa; cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX; } else { /* prefix is only supported via sync regs */ } return 0; }"} {"target": 0, "idx": 12887, "func": "static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action, sPAPRDRConnectorType drc_type, uint32_t drc) { sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); struct hp_log_full *new_hp; struct rtas_error_log *hdr; struct rtas_event_log_v6 *v6hdr; struct rtas_event_log_v6_maina *maina; struct rtas_event_log_v6_mainb *mainb; struct rtas_event_log_v6_hp *hp; new_hp = g_malloc0(sizeof(struct hp_log_full)); hdr = &new_hp->hdr; v6hdr = &new_hp->v6hdr; maina = &new_hp->maina; mainb = &new_hp->mainb; hp = &new_hp->hp; hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6 | RTAS_LOG_SEVERITY_EVENT | RTAS_LOG_DISPOSITION_NOT_RECOVERED | RTAS_LOG_OPTIONAL_PART_PRESENT | RTAS_LOG_INITIATOR_HOTPLUG | RTAS_LOG_TYPE_HOTPLUG); hdr->extended_length = cpu_to_be32(sizeof(*new_hp) - sizeof(new_hp->hdr)); spapr_init_v6hdr(v6hdr); spapr_init_maina(maina, 3 /* Main-A, Main-B, HP */); mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB); mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb)); mainb->subsystem_id = 0x80; /* External environment */ mainb->event_severity = 0x00; /* Informational / non-error */ mainb->event_subtype = 0x00; /* Normal shutdown */ hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG); hp->hdr.section_length = cpu_to_be16(sizeof(*hp)); hp->hdr.section_version = 1; /* includes extended modifier */ hp->hotplug_action = hp_action; hp->hotplug_identifier = hp_id; switch (drc_type) { case SPAPR_DR_CONNECTOR_TYPE_PCI: hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI; if (hp->hotplug_action == RTAS_LOG_V6_HP_ACTION_ADD) { spapr_hotplug_set_signalled(drc); } break; case SPAPR_DR_CONNECTOR_TYPE_LMB: hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY; break; case SPAPR_DR_CONNECTOR_TYPE_CPU: hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_CPU; break; default: /* we shouldn't be signaling hotplug events for resources * that don't support them */ g_assert(false); return; } if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) { hp->drc.count = cpu_to_be32(drc); } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_INDEX) { hp->drc.index = 
cpu_to_be32(drc); } rtas_event_log_queue(RTAS_LOG_TYPE_HOTPLUG, new_hp, true); qemu_irq_pulse(xics_get_qirq(spapr->xics, spapr->check_exception_irq)); }"} {"target": 0, "idx": 12894, "func": "static int usb_hub_broadcast_packet(USBHubState *s, USBPacket *p) { USBHubPort *port; USBDevice *dev; int i, ret; for(i = 0; i < NUM_PORTS; i++) { port = &s->ports[i]; dev = port->port.dev; if (dev && (port->wPortStatus & PORT_STAT_ENABLE)) { ret = usb_handle_packet(dev, p); if (ret != USB_RET_NODEV) { return ret; } } } return USB_RET_NODEV; }"} {"target": 1, "idx": 12916, "func": "BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { BlockDriver *drv = bs->drv; BlockDriverAIOCB *ret; if (!drv) return NULL; if (bs->read_only) return NULL; if (bdrv_check_request(bs, sector_num, nb_sectors)) return NULL; if (bs->dirty_tracking) { set_dirty_bitmap(bs, sector_num, nb_sectors, 1); } ret = drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors, cb, opaque); if (ret) { /* Update stats even though technically transfer has not happened. */ bs->wr_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE; bs->wr_ops ++; } return ret; }"} {"target": 0, "idx": 12927, "func": "av_cold void ff_dct_init_x86(DCTContext *s) { int cpu_flags = av_get_cpu_flags(); if (EXTERNAL_SSE(cpu_flags)) s->dct32 = ff_dct32_float_sse; if (EXTERNAL_SSE2(cpu_flags)) s->dct32 = ff_dct32_float_sse2; if (EXTERNAL_AVX(cpu_flags)) s->dct32 = ff_dct32_float_avx; }"} {"target": 1, "idx": 12940, "func": "static int msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic, GetByteContext *gb) { unsigned char rle_code; unsigned char extra_byte, odd_pixel; unsigned char stream_byte; unsigned int pixel_ptr = 0; int row_dec = pic->linesize[0]; int row_ptr = (avctx->height - 1) * row_dec; int frame_size = row_dec * avctx->height; int i; while (row_ptr >= 0) { if (bytestream2_get_bytes_left(gb) <= 0) { av_log(avctx, AV_LOG_ERROR, \"MS RLE: bytestream overrun, %d rows left\\n\", row_ptr); return AVERROR_INVALIDDATA; } rle_code = stream_byte = bytestream2_get_byteu(gb); if (rle_code == 0) { /* fetch the next byte to see how to handle escape code */ stream_byte = bytestream2_get_byte(gb); if (stream_byte == 0) { /* line is done, goto the next one */ row_ptr -= row_dec; pixel_ptr = 0; } else if (stream_byte == 1) { /* decode is done */ return 0; } else if (stream_byte == 2) { /* reposition frame decode coordinates */ stream_byte = bytestream2_get_byte(gb); pixel_ptr += stream_byte; stream_byte = bytestream2_get_byte(gb); row_ptr -= stream_byte * row_dec; } else { // copy pixels from encoded stream odd_pixel = stream_byte & 1; rle_code = (stream_byte + 1) / 2; extra_byte = rle_code & 0x01; if (row_ptr + pixel_ptr + stream_byte > frame_size || bytestream2_get_bytes_left(gb) < rle_code) { av_log(avctx, AV_LOG_ERROR, \"MS RLE: frame/stream ptr just went out of bounds (copy)\\n\"); return AVERROR_INVALIDDATA; } for (i = 0; i < rle_code; i++) { if (pixel_ptr >= avctx->width) break; stream_byte = bytestream2_get_byteu(gb); pic->data[0][row_ptr + pixel_ptr] = stream_byte >> 4; pixel_ptr++; if (i + 1 == rle_code && odd_pixel) break; if (pixel_ptr >= avctx->width) break; pic->data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F; pixel_ptr++; } // if the RLE code is odd, skip a byte in the stream if (extra_byte) bytestream2_skip(gb, 1); } } else { // decode a run of data if (row_ptr + pixel_ptr + stream_byte > frame_size) { av_log(avctx, AV_LOG_ERROR, \"MS RLE: frame ptr 
just went out of bounds (run)\\n\"); return AVERROR_INVALIDDATA; } stream_byte = bytestream2_get_byte(gb); for (i = 0; i < rle_code; i++) { if (pixel_ptr >= avctx->width) break; if ((i & 1) == 0) pic->data[0][row_ptr + pixel_ptr] = stream_byte >> 4; else pic->data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F; pixel_ptr++; } } } /* one last sanity check on the way out */ if (bytestream2_get_bytes_left(gb)) { av_log(avctx, AV_LOG_ERROR, \"MS RLE: ended frame decode with %d bytes left over\\n\", bytestream2_get_bytes_left(gb)); return AVERROR_INVALIDDATA; } return 0; }"} {"target": 1, "idx": 12946, "func": "void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) { int i = 0; int x = 0; uint32_t l_64 = (l + 1) / 8; HELPER_LOG(\"%s l %d dest %\" PRIx64 \" src %\" PRIx64 \"\\n\", __func__, l, dest, src); #ifndef CONFIG_USER_ONLY if ((l > 32) && (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) && (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) { if (dest == (src + 1)) { mvc_fast_memset(env, l + 1, dest, cpu_ldub_data(env, src)); return; } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) { mvc_fast_memmove(env, l + 1, dest, src); return; } } #else if (dest == (src + 1)) { memset(g2h(dest), cpu_ldub_data(env, src), l + 1); return; } else { memmove(g2h(dest), g2h(src), l + 1); return; } #endif /* handle the parts that fit into 8-byte loads/stores */ if (dest != (src + 1)) { for (i = 0; i < l_64; i++) { cpu_stq_data(env, dest + x, cpu_ldq_data(env, src + x)); x += 8; } } /* slow version crossing pages with byte accesses */ for (i = x; i <= l; i++) { cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i)); } }"} {"target": 1, "idx": 12949, "func": "static void test_properties(const char *path, bool recurse) { char *child_path; QDict *response, *tuple; QList *list; QListEntry *entry; g_test_message(\"Obtaining properties of %s\", path); response = qmp(\"{ 'execute': 'qom-list',\" \" 'arguments': { 'path': %s } }\", path); g_assert(response); if (!recurse) { return; } g_assert(qdict_haskey(response, \"return\")); list = qobject_to_qlist(qdict_get(response, \"return\")); QLIST_FOREACH_ENTRY(list, entry) { tuple = qobject_to_qdict(qlist_entry_obj(entry)); bool is_child = strstart(qdict_get_str(tuple, \"type\"), \"child<\", NULL); bool is_link = strstart(qdict_get_str(tuple, \"type\"), \"link<\", NULL); if (is_child || is_link) { child_path = g_strdup_printf(\"%s/%s\", path, qdict_get_str(tuple, \"name\")); test_properties(child_path, is_child); g_free(child_path); } else { const char *prop = qdict_get_str(tuple, \"name\"); g_test_message(\"Testing property %s.%s\", path, prop); response = qmp(\"{ 'execute': 'qom-get',\" \" 'arguments': { 'path': %s,\" \" 'property': %s } }\", path, prop); /* qom-get may fail but should not, e.g., segfault. */ g_assert(response); } } }"} {"target": 1, "idx": 12951, "func": "static int thp_read_header(AVFormatContext *s, AVFormatParameters *ap) { ThpDemuxContext *thp = s->priv_data; AVStream *st; AVIOContext *pb = s->pb; int i; /* Read the file header. */ avio_rb32(pb); /* Skip Magic. */ thp->version = avio_rb32(pb); avio_rb32(pb); /* Max buf size. */ avio_rb32(pb); /* Max samples. */ thp->fps = av_d2q(av_int2float(avio_rb32(pb)), INT_MAX); thp->framecnt = avio_rb32(pb); thp->first_framesz = avio_rb32(pb); avio_rb32(pb); /* Data size. */ thp->compoff = avio_rb32(pb); avio_rb32(pb); /* offsetDataOffset. 
*/ thp->first_frame = avio_rb32(pb); thp->last_frame = avio_rb32(pb); thp->next_framesz = thp->first_framesz; thp->next_frame = thp->first_frame; /* Read the component structure. */ avio_seek (pb, thp->compoff, SEEK_SET); thp->compcount = avio_rb32(pb); /* Read the list of component types. */ avio_read(pb, thp->components, 16); for (i = 0; i < thp->compcount; i++) { if (thp->components[i] == 0) { if (thp->vst != 0) break; /* Video component. */ st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); /* The denominator and numerator are switched because 1/fps is required. */ avpriv_set_pts_info(st, 64, thp->fps.den, thp->fps.num); st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_THP; st->codec->codec_tag = 0; /* no fourcc */ st->codec->width = avio_rb32(pb); st->codec->height = avio_rb32(pb); st->codec->sample_rate = av_q2d(thp->fps); thp->vst = st; thp->video_stream_index = st->index; if (thp->version == 0x11000) avio_rb32(pb); /* Unknown. */ } else if (thp->components[i] == 1) { if (thp->has_audio != 0) break; /* Audio component. */ st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_ADPCM_THP; st->codec->codec_tag = 0; /* no fourcc */ st->codec->channels = avio_rb32(pb); /* numChannels. */ st->codec->sample_rate = avio_rb32(pb); /* Frequency. */ avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate); thp->audio_stream_index = st->index; thp->has_audio = 1; } } return 0; }"} {"target": 1, "idx": 12968, "func": "static int rdma_add_block(RDMAContext *rdma, const char *block_name, void *host_addr, ram_addr_t block_offset, uint64_t length) { RDMALocalBlocks *local = &rdma->local_ram_blocks; RDMALocalBlock *block; RDMALocalBlock *old = local->block; local->block = g_malloc0(sizeof(RDMALocalBlock) * (local->nb_blocks + 1)); if (local->nb_blocks) { int x; if (rdma->blockmap) { for (x = 0; x < local->nb_blocks; x++) { g_hash_table_remove(rdma->blockmap, (void *)(uintptr_t)old[x].offset); g_hash_table_insert(rdma->blockmap, (void *)(uintptr_t)old[x].offset, &local->block[x]); } } memcpy(local->block, old, sizeof(RDMALocalBlock) * local->nb_blocks); g_free(old); } block = &local->block[local->nb_blocks]; block->block_name = g_strdup(block_name); block->local_host_addr = host_addr; block->offset = block_offset; block->length = length; block->index = local->nb_blocks; block->src_index = ~0U; /* Filled in by the receipt of the block list */ block->nb_chunks = ram_chunk_index(host_addr, host_addr + length) + 1UL; block->transit_bitmap = bitmap_new(block->nb_chunks); bitmap_clear(block->transit_bitmap, 0, block->nb_chunks); block->unregister_bitmap = bitmap_new(block->nb_chunks); bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks); block->remote_keys = g_malloc0(block->nb_chunks * sizeof(uint32_t)); block->is_ram_block = local->init ? 
false : true; if (rdma->blockmap) { g_hash_table_insert(rdma->blockmap, (void *) block_offset, block); } trace_rdma_add_block(block_name, local->nb_blocks, (uintptr_t) block->local_host_addr, block->offset, block->length, (uintptr_t) (block->local_host_addr + block->length), BITS_TO_LONGS(block->nb_chunks) * sizeof(unsigned long) * 8, block->nb_chunks); local->nb_blocks++; return 0; }"} {"target": 1, "idx": 12984, "func": "process_tx_desc(E1000State *s, struct e1000_tx_desc *dp) { uint32_t txd_lower = le32_to_cpu(dp->lower.data); uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D); unsigned int split_size = txd_lower & 0xffff, bytes, sz, op; unsigned int msh = 0xfffff, hdr = 0; uint64_t addr; struct e1000_context_desc *xp = (struct e1000_context_desc *)dp; struct e1000_tx *tp = &s->tx; if (dtype == E1000_TXD_CMD_DEXT) { // context descriptor op = le32_to_cpu(xp->cmd_and_length); tp->ipcss = xp->lower_setup.ip_fields.ipcss; tp->ipcso = xp->lower_setup.ip_fields.ipcso; tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse); tp->tucss = xp->upper_setup.tcp_fields.tucss; tp->tucso = xp->upper_setup.tcp_fields.tucso; tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse); tp->paylen = op & 0xfffff; tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len; tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss); tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0; tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0; tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0; tp->tso_frames = 0; if (tp->tucso == 0) { // this is probably wrong DBGOUT(TXSUM, \"TCP/UDP: cso 0!\\n\"); tp->tucso = tp->tucss + (tp->tcp ? 16 : 6); } return; } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) { // data descriptor if (tp->size == 0) { tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8; } tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 
1 : 0; } else { // legacy descriptor tp->cptse = 0; } if (vlan_enabled(s) && is_vlan_txd(txd_lower) && (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) { tp->vlan_needed = 1; cpu_to_be16wu((uint16_t *)(tp->vlan_header), le16_to_cpup((uint16_t *)(s->mac_reg + VET))); cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2), le16_to_cpu(dp->upper.fields.special)); } addr = le64_to_cpu(dp->buffer_addr); if (tp->tse && tp->cptse) { hdr = tp->hdr_len; msh = hdr + tp->mss; do { bytes = split_size; if (tp->size + bytes > msh) bytes = msh - tp->size; bytes = MIN(sizeof(tp->data) - tp->size, bytes); pci_dma_read(&s->dev, addr, tp->data + tp->size, bytes); if ((sz = tp->size + bytes) >= hdr && tp->size < hdr) memmove(tp->header, tp->data, hdr); tp->size = sz; addr += bytes; if (sz == msh) { xmit_seg(s); memmove(tp->data, tp->header, hdr); tp->size = hdr; } } while (split_size -= bytes); } else if (!tp->tse && tp->cptse) { // context descriptor TSE is not set, while data descriptor TSE is set DBGOUT(TXERR, \"TCP segmentaion Error\\n\"); } else { pci_dma_read(&s->dev, addr, tp->data + tp->size, split_size); tp->size += split_size; } if (!(txd_lower & E1000_TXD_CMD_EOP)) return; if (!(tp->tse && tp->cptse && tp->size < hdr)) xmit_seg(s); tp->tso_frames = 0; tp->sum_needed = 0; tp->vlan_needed = 0; tp->size = 0; tp->cptse = 0; }"} {"target": 0, "idx": 13001, "func": "static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev); DeviceState *vdev = DEVICE(&vinput->vdev); qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus)); /* force virtio-1.0 */ vpci_dev->flags &= ~VIRTIO_PCI_FLAG_DISABLE_MODERN; vpci_dev->flags |= VIRTIO_PCI_FLAG_DISABLE_LEGACY; object_property_set_bool(OBJECT(vdev), true, \"realized\", errp); }"} {"target": 0, "idx": 13002, "func": "static void test_visitor_out_string(TestOutputVisitorData *data, const void *unused) { char *string = (char *) \"Q E M U\"; Error *err = NULL; QObject *obj; visit_type_str(data->ov, &string, NULL, &err); g_assert(!err); obj = qmp_output_get_qobject(data->qov); g_assert(obj != NULL); g_assert(qobject_type(obj) == QTYPE_QSTRING); g_assert_cmpstr(qstring_get_str(qobject_to_qstring(obj)), ==, string); qobject_decref(obj); }"} {"target": 0, "idx": 13012, "func": "static void pc_machine_set_vmport(Object *obj, bool value, Error **errp) { PCMachineState *pcms = PC_MACHINE(obj); pcms->vmport = value; }"} {"target": 1, "idx": 13024, "func": "static int mov_read_trak(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; MOVStreamContext *sc; int ret; st = avformat_new_stream(c->fc, NULL); if (!st) return AVERROR(ENOMEM); st->id = c->fc->nb_streams; sc = av_mallocz(sizeof(MOVStreamContext)); if (!sc) return AVERROR(ENOMEM); st->priv_data = sc; st->codec->codec_type = AVMEDIA_TYPE_DATA; sc->ffindex = st->index; if ((ret = mov_read_default(c, pb, atom)) < 0) return ret; /* sanity checks */ if (sc->chunk_count && (!sc->stts_count || !sc->stsc_count || (!sc->sample_size && !sc->sample_count))) { av_log(c->fc, AV_LOG_ERROR, \"stream %d, missing mandatory atoms, broken header\\n\", st->index); return 0; } fix_timescale(c, sc); avpriv_set_pts_info(st, 64, 1, sc->time_scale); mov_build_index(c, st); if (sc->dref_id-1 < sc->drefs_count && sc->drefs[sc->dref_id-1].path) { MOVDref *dref = &sc->drefs[sc->dref_id - 1]; if (mov_open_dref(&sc->pb, c->fc->filename, dref, &c->fc->interrupt_callback, c->use_absolute_path, c->fc) < 0) av_log(c->fc, AV_LOG_ERROR, \"stream %d, error opening alias: path='%s', dir='%s', \" 
\"filename='%s', volume='%s', nlvl_from=%d, nlvl_to=%d\\n\", st->index, dref->path, dref->dir, dref->filename, dref->volume, dref->nlvl_from, dref->nlvl_to); } else { sc->pb = c->fc->pb; sc->pb_is_copied = 1; } if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { if (!st->sample_aspect_ratio.num && (st->codec->width != sc->width || st->codec->height != sc->height)) { st->sample_aspect_ratio = av_d2q(((double)st->codec->height * sc->width) / ((double)st->codec->width * sc->height), INT_MAX); } #if FF_API_R_FRAME_RATE if (sc->stts_count == 1 || (sc->stts_count == 2 && sc->stts_data[1].count == 1)) av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, sc->time_scale, sc->stts_data[0].duration, INT_MAX); #endif } // done for ai5q, ai52, ai55, ai1q, ai12 and ai15. if (!st->codec->extradata_size && st->codec->codec_id == AV_CODEC_ID_H264 && TAG_IS_AVCI(st->codec->codec_tag)) { ret = ff_generate_avci_extradata(st); if (ret < 0) return ret; } switch (st->codec->codec_id) { #if CONFIG_H261_DECODER case AV_CODEC_ID_H261: #endif #if CONFIG_H263_DECODER case AV_CODEC_ID_H263: #endif #if CONFIG_MPEG4_DECODER case AV_CODEC_ID_MPEG4: #endif st->codec->width = 0; /* let decoder init width/height */ st->codec->height= 0; break; } /* Do not need those anymore. */ av_freep(&sc->chunk_offsets); av_freep(&sc->stsc_data); av_freep(&sc->sample_sizes); av_freep(&sc->keyframes); av_freep(&sc->stts_data); av_freep(&sc->stps_data); av_freep(&sc->elst_data); av_freep(&sc->rap_group); return 0; }"} {"target": 1, "idx": 13031, "func": "static int buffer_empty(Buffer *buffer) { return buffer->offset == 0; }"} {"target": 1, "idx": 13035, "func": "static gboolean pty_chr_timer(gpointer opaque) { struct CharDriverState *chr = opaque; PtyCharDriver *s = chr->opaque; if (s->connected) { goto out; } /* Next poll ... 
*/ pty_chr_update_read_handler(chr); out: s->timer_tag = 0; return FALSE; }"} {"target": 1, "idx": 13040, "func": "void h263_encode_init(MpegEncContext *s) { static int done = 0; if (!done) { done = 1; init_uni_dc_tab(); init_rl(&rl_inter); init_rl(&rl_intra); init_rl(&rl_intra_aic); init_uni_mpeg4_rl_tab(&rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len); init_uni_mpeg4_rl_tab(&rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len); init_uni_h263_rl_tab(&rl_intra_aic, NULL, uni_h263_intra_aic_rl_len); init_uni_h263_rl_tab(&rl_inter , NULL, uni_h263_inter_rl_len); init_mv_penalty_and_fcode(s); } s->me.mv_penalty= mv_penalty; //FIXME exact table for msmpeg4 & h263p s->intra_ac_vlc_length =s->inter_ac_vlc_length = uni_h263_inter_rl_len; s->intra_ac_vlc_last_length=s->inter_ac_vlc_last_length= uni_h263_inter_rl_len + 128*64; if(s->h263_aic){ s->intra_ac_vlc_length = uni_h263_intra_aic_rl_len; s->intra_ac_vlc_last_length= uni_h263_intra_aic_rl_len + 128*64; } s->ac_esc_length= 7+1+6+8; // use fcodes >1 only for mpeg4 & h263 & h263p FIXME switch(s->codec_id){ case CODEC_ID_MPEG4: s->fcode_tab= fcode_tab; s->min_qcoeff= -2048; s->max_qcoeff= 2047; s->intra_ac_vlc_length = uni_mpeg4_intra_rl_len; s->intra_ac_vlc_last_length= uni_mpeg4_intra_rl_len + 128*64; s->inter_ac_vlc_length = uni_mpeg4_inter_rl_len; s->inter_ac_vlc_last_length= uni_mpeg4_inter_rl_len + 128*64; s->luma_dc_vlc_length= uni_DCtab_lum_len; s->chroma_dc_vlc_length= uni_DCtab_chrom_len; s->ac_esc_length= 7+2+1+6+1+12+1; s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table; s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table; if(s->flags & CODEC_FLAG_GLOBAL_HEADER){ s->avctx->extradata= av_malloc(1024); init_put_bits(&s->pb, s->avctx->extradata, 1024); mpeg4_encode_visual_object_header(s); mpeg4_encode_vol_header(s, 0, 0); // ff_mpeg4_stuffing(&s->pb); ? flush_put_bits(&s->pb); s->avctx->extradata_size= (put_bits_count(&s->pb)+7)>>3; } break; case CODEC_ID_H263P: if(s->umvplus) s->fcode_tab= umv_fcode_tab; if(s->modified_quant){ s->min_qcoeff= -2047; s->max_qcoeff= 2047; }else{ s->min_qcoeff= -127; s->max_qcoeff= 127; } break; //Note for mpeg4 & h263 the dc-scale table will be set per frame as needed later case CODEC_ID_FLV1: if (s->h263_flv > 1) { s->min_qcoeff= -1023; s->max_qcoeff= 1023; } else { s->min_qcoeff= -127; s->max_qcoeff= 127; } s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; break; default: //nothing needed default table allready set in mpegvideo.c s->min_qcoeff= -127; s->max_qcoeff= 127; s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; } }"} {"target": 0, "idx": 13050, "func": "static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) { BDRVQcow2State *s = bs->opaque; uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size); int ret; uint64_t refcount; int i, j; for (i = 0; i < s->l1_size; i++) { uint64_t l1_entry = s->l1_table[i]; uint64_t l2_offset = l1_entry & L1E_OFFSET_MASK; bool l2_dirty = false; if (!l2_offset) { continue; } ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits, &refcount); if (ret < 0) { /* don't print message nor increment check_errors */ continue; } if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) { fprintf(stderr, \"%s OFLAG_COPIED L2 cluster: l1_index=%d \" \"l1_entry=%\" PRIx64 \" refcount=%\" PRIu64 \"\\n\", fix & BDRV_FIX_ERRORS ? \"Repairing\" : \"ERROR\", i, l1_entry, refcount); if (fix & BDRV_FIX_ERRORS) { s->l1_table[i] = refcount == 1 ? 
l1_entry | QCOW_OFLAG_COPIED : l1_entry & ~QCOW_OFLAG_COPIED; ret = qcow2_write_l1_entry(bs, i); if (ret < 0) { res->check_errors++; goto fail; } res->corruptions_fixed++; } else { res->corruptions++; } } ret = bdrv_pread(bs->file, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)); if (ret < 0) { fprintf(stderr, \"ERROR: Could not read L2 table: %s\\n\", strerror(-ret)); res->check_errors++; goto fail; } for (j = 0; j < s->l2_size; j++) { uint64_t l2_entry = be64_to_cpu(l2_table[j]); uint64_t data_offset = l2_entry & L2E_OFFSET_MASK; QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry); if ((cluster_type == QCOW2_CLUSTER_NORMAL) || ((cluster_type == QCOW2_CLUSTER_ZERO) && (data_offset != 0))) { ret = qcow2_get_refcount(bs, data_offset >> s->cluster_bits, &refcount); if (ret < 0) { /* don't print message nor increment check_errors */ continue; } if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) { fprintf(stderr, \"%s OFLAG_COPIED data cluster: \" \"l2_entry=%\" PRIx64 \" refcount=%\" PRIu64 \"\\n\", fix & BDRV_FIX_ERRORS ? \"Repairing\" : \"ERROR\", l2_entry, refcount); if (fix & BDRV_FIX_ERRORS) { l2_table[j] = cpu_to_be64(refcount == 1 ? l2_entry | QCOW_OFLAG_COPIED : l2_entry & ~QCOW_OFLAG_COPIED); l2_dirty = true; res->corruptions_fixed++; } else { res->corruptions++; } } } } if (l2_dirty) { ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2, l2_offset, s->cluster_size); if (ret < 0) { fprintf(stderr, \"ERROR: Could not write L2 table; metadata \" \"overlap check failed: %s\\n\", strerror(-ret)); res->check_errors++; goto fail; } ret = bdrv_pwrite(bs->file, l2_offset, l2_table, s->cluster_size); if (ret < 0) { fprintf(stderr, \"ERROR: Could not write L2 table: %s\\n\", strerror(-ret)); res->check_errors++; goto fail; } } } ret = 0; fail: qemu_vfree(l2_table); return ret; }"} {"target": 0, "idx": 13051, "func": "static void mainstone_common_init(MemoryRegion *address_space_mem, MachineState *machine, enum mainstone_model_e model, int arm_id) { uint32_t sector_len = 256 * 1024; hwaddr mainstone_flash_base[] = { MST_FLASH_0, MST_FLASH_1 }; PXA2xxState *mpu; DeviceState *mst_irq; DriveInfo *dinfo; int i; int be; MemoryRegion *rom = g_new(MemoryRegion, 1); const char *cpu_model = machine->cpu_model; if (!cpu_model) cpu_model = \"pxa270-c5\"; /* Setup CPU & memory */ mpu = pxa270_init(address_space_mem, mainstone_binfo.ram_size, cpu_model); memory_region_init_ram(rom, NULL, \"mainstone.rom\", MAINSTONE_ROM, &error_abort); vmstate_register_ram_global(rom); memory_region_set_readonly(rom, true); memory_region_add_subregion(address_space_mem, 0, rom); #ifdef TARGET_WORDS_BIGENDIAN be = 1; #else be = 0; #endif /* There are two 32MiB flash devices on the board */ for (i = 0; i < 2; i ++) { dinfo = drive_get(IF_PFLASH, 0, i); if (!dinfo) { if (qtest_enabled()) { break; } fprintf(stderr, \"Two flash images must be given with the \" \"'pflash' parameter\\n\"); exit(1); } if (!pflash_cfi01_register(mainstone_flash_base[i], NULL, i ? 
\"mainstone.flash1\" : \"mainstone.flash0\", MAINSTONE_FLASH, blk_bs(blk_by_legacy_dinfo(dinfo)), sector_len, MAINSTONE_FLASH / sector_len, 4, 0, 0, 0, 0, be)) { fprintf(stderr, \"qemu: Error registering flash memory.\\n\"); exit(1); } } mst_irq = sysbus_create_simple(\"mainstone-fpga\", MST_FPGA_PHYS, qdev_get_gpio_in(mpu->gpio, 0)); /* setup keypad */ pxa27x_register_keypad(mpu->kp, map, 0xe0); /* MMC/SD host */ pxa2xx_mmci_handlers(mpu->mmc, NULL, qdev_get_gpio_in(mst_irq, MMC_IRQ)); pxa2xx_pcmcia_set_irq_cb(mpu->pcmcia[0], qdev_get_gpio_in(mst_irq, S0_IRQ), qdev_get_gpio_in(mst_irq, S0_CD_IRQ)); pxa2xx_pcmcia_set_irq_cb(mpu->pcmcia[1], qdev_get_gpio_in(mst_irq, S1_IRQ), qdev_get_gpio_in(mst_irq, S1_CD_IRQ)); smc91c111_init(&nd_table[0], MST_ETH_PHYS, qdev_get_gpio_in(mst_irq, ETHERNET_IRQ)); mainstone_binfo.kernel_filename = machine->kernel_filename; mainstone_binfo.kernel_cmdline = machine->kernel_cmdline; mainstone_binfo.initrd_filename = machine->initrd_filename; mainstone_binfo.board_id = arm_id; arm_load_kernel(mpu->cpu, &mainstone_binfo); }"} {"target": 1, "idx": 13068, "func": "static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, uint8_t *dst1, uint8_t *dst2, long width, long height, long srcStride1, long srcStride2, long dstStride1, long dstStride2) { long y,x,w,h; w=width/2; h=height/2; #ifdef HAVE_MMX asm volatile( PREFETCH\" %0\\n\\t\" PREFETCH\" %1\\n\\t\" ::\"m\"(*(src1+srcStride1)),\"m\"(*(src2+srcStride2)):\"memory\"); #endif for(y=0;y>1); uint8_t* d=dst1+dstStride1*y; x=0; #ifdef HAVE_MMX for(;x>1); uint8_t* d=dst2+dstStride2*y; x=0; #ifdef HAVE_MMX for(;xram_size; const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; CPUUniCore32State *env; UniCore32CPU *cpu; if (initrd_filename) { error_report(\"Please use kernel built-in initramdisk\"); exit(1); } if (!cpu_model) { cpu_model = \"UniCore-II\"; } cpu = UNICORE32_CPU(cpu_generic_init(TYPE_UNICORE32_CPU, cpu_model)); if (!cpu) { error_report(\"Unable to find CPU definition\"); exit(1); } env = &cpu->env; puv3_soc_init(env); puv3_board_init(env, ram_size); puv3_load_kernel(kernel_filename); }"} {"target": 0, "idx": 13081, "func": "static int output_data_internal(MLPDecodeContext *m, unsigned int substr, uint8_t *data, unsigned int *data_size, int is32) { SubStream *s = &m->substream[substr]; unsigned int i, out_ch = 0; int32_t *data_32 = (int32_t*) data; int16_t *data_16 = (int16_t*) data; if (*data_size < (s->max_channel + 1) * s->blockpos * (is32 ? 4 : 2)) return -1; for (i = 0; i < s->blockpos; i++) { for (out_ch = 0; out_ch <= s->max_matrix_channel; out_ch++) { int mat_ch = s->ch_assign[out_ch]; int32_t sample = m->sample_buffer[i][mat_ch] << s->output_shift[mat_ch]; s->lossless_check_data ^= (sample & 0xffffff) << mat_ch; if (is32) *data_32++ = sample << 8; else *data_16++ = sample >> 8; } } *data_size = i * out_ch * (is32 ? 
4 : 2); return 0; }"} {"target": 1, "idx": 13097, "func": "static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, uint64_t end_offset, void **p_feature_table, Error **errp) { BDRVQcowState *s = bs->opaque; QCowExtension ext; uint64_t offset; int ret; #ifdef DEBUG_EXT printf(\"qcow2_read_extensions: start=%ld end=%ld\\n\", start_offset, end_offset); #endif offset = start_offset; while (offset < end_offset) { #ifdef DEBUG_EXT /* Sanity check */ if (offset > s->cluster_size) printf(\"qcow2_read_extension: suspicious offset %lu\\n\", offset); printf(\"attempting to read extended header in offset %lu\\n\", offset); #endif ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext)); if (ret < 0) { error_setg_errno(errp, -ret, \"qcow2_read_extension: ERROR: \" \"pread fail from offset %\" PRIu64, offset); return 1; } be32_to_cpus(&ext.magic); be32_to_cpus(&ext.len); offset += sizeof(ext); #ifdef DEBUG_EXT printf(\"ext.magic = 0x%x\\n\", ext.magic); #endif if (ext.len > end_offset - offset) { error_setg(errp, \"Header extension too large\"); return -EINVAL; } switch (ext.magic) { case QCOW2_EXT_MAGIC_END: return 0; case QCOW2_EXT_MAGIC_BACKING_FORMAT: if (ext.len >= sizeof(bs->backing_format)) { error_setg(errp, \"ERROR: ext_backing_format: len=%\" PRIu32 \" too large (>=%zu)\", ext.len, sizeof(bs->backing_format)); return 2; } ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len); if (ret < 0) { error_setg_errno(errp, -ret, \"ERROR: ext_backing_format: \" \"Could not read format name\"); return 3; } bs->backing_format[ext.len] = '\\0'; #ifdef DEBUG_EXT printf(\"Qcow2: Got format extension %s\\n\", bs->backing_format); #endif break; case QCOW2_EXT_MAGIC_FEATURE_TABLE: if (p_feature_table != NULL) { void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); ret = bdrv_pread(bs->file, offset , feature_table, ext.len); if (ret < 0) { error_setg_errno(errp, -ret, \"ERROR: ext_feature_table: \" \"Could not read table\"); return ret; } *p_feature_table = feature_table; } break; default: /* unknown magic - save it in case we need to rewrite the header */ { Qcow2UnknownHeaderExtension *uext; uext = g_malloc0(sizeof(*uext) + ext.len); uext->magic = ext.magic; uext->len = ext.len; QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); ret = bdrv_pread(bs->file, offset , uext->data, uext->len); if (ret < 0) { error_setg_errno(errp, -ret, \"ERROR: unknown extension: \" \"Could not read data\"); return ret; } } break; } offset += ((ext.len + 7) & ~7); } return 0; }"} {"target": 1, "idx": 13100, "func": "inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, int dstWidth, const uint8_t *src1, const uint8_t *src2, int srcW, int xInc, const int16_t *hChrFilter, const int16_t *hChrFilterPos, int hChrFilterSize, uint8_t *formatConvBuffer, uint32_t *pal) { src1 += c->chrSrcOffset; src2 += c->chrSrcOffset; if (c->chrToYV12) { c->chrToYV12(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal); src1= formatConvBuffer; src2= formatConvBuffer+VOFW; } if (!c->hcscale_fast) { c->hScale(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); c->hScale(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); } else { // fast bilinear upscale / crap downscale c->hcscale_fast(c, dst, dstWidth, src1, src2, srcW, xInc); } if (c->chrConvertRange) c->chrConvertRange(dst, dstWidth); }"} {"target": 1, "idx": 13114, "func": "void pl011_init(uint32_t base, qemu_irq irq, CharDriverState *chr) { int iomemtype; pl011_state *s; s = 
(pl011_state *)qemu_mallocz(sizeof(pl011_state)); iomemtype = cpu_register_io_memory(0, pl011_readfn, pl011_writefn, s); cpu_register_physical_memory(base, 0x00000fff, iomemtype); s->base = base; s->irq = irq; s->chr = chr; s->read_trigger = 1; s->ifl = 0x12; s->cr = 0x300; s->flags = 0x90; if (chr){ qemu_chr_add_handlers(chr, pl011_can_recieve, pl011_recieve, pl011_event, s); } /* ??? Save/restore. */ }"} {"target": 1, "idx": 13118, "func": "static void spr_read_xer (DisasContext *ctx, int gprn, int sprn) { gen_read_xer(cpu_gpr[gprn]); }"} {"target": 1, "idx": 13130, "func": "static int cpu_load_old(QEMUFile *f, void *opaque, int version_id) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; unsigned int i, j; target_ulong sdr1; uint32_t fpscr; target_ulong xer; for (i = 0; i < 32; i++) qemu_get_betls(f, &env->gpr[i]); #if !defined(TARGET_PPC64) for (i = 0; i < 32; i++) qemu_get_betls(f, &env->gprh[i]); #endif qemu_get_betls(f, &env->lr); qemu_get_betls(f, &env->ctr); for (i = 0; i < 8; i++) qemu_get_be32s(f, &env->crf[i]); qemu_get_betls(f, &xer); cpu_write_xer(env, xer); qemu_get_betls(f, &env->reserve_addr); qemu_get_betls(f, &env->msr); for (i = 0; i < 4; i++) qemu_get_betls(f, &env->tgpr[i]); for (i = 0; i < 32; i++) { union { float64 d; uint64_t l; } u; u.l = qemu_get_be64(f); env->fpr[i] = u.d; } qemu_get_be32s(f, &fpscr); env->fpscr = fpscr; qemu_get_sbe32s(f, &env->access_type); #if defined(TARGET_PPC64) qemu_get_betls(f, &env->spr[SPR_ASR]); qemu_get_sbe32s(f, &env->slb_nr); #endif qemu_get_betls(f, &sdr1); for (i = 0; i < 32; i++) qemu_get_betls(f, &env->sr[i]); for (i = 0; i < 2; i++) for (j = 0; j < 8; j++) qemu_get_betls(f, &env->DBAT[i][j]); for (i = 0; i < 2; i++) for (j = 0; j < 8; j++) qemu_get_betls(f, &env->IBAT[i][j]); qemu_get_sbe32s(f, &env->nb_tlb); qemu_get_sbe32s(f, &env->tlb_per_way); qemu_get_sbe32s(f, &env->nb_ways); qemu_get_sbe32s(f, &env->last_way); qemu_get_sbe32s(f, &env->id_tlbs); qemu_get_sbe32s(f, &env->nb_pids); if (env->tlb.tlb6) { // XXX assumes 6xx for (i = 0; i < env->nb_tlb; i++) { qemu_get_betls(f, &env->tlb.tlb6[i].pte0); qemu_get_betls(f, &env->tlb.tlb6[i].pte1); qemu_get_betls(f, &env->tlb.tlb6[i].EPN); } } for (i = 0; i < 4; i++) qemu_get_betls(f, &env->pb[i]); for (i = 0; i < 1024; i++) qemu_get_betls(f, &env->spr[i]); ppc_store_sdr1(env, sdr1); qemu_get_be32s(f, &env->vscr); qemu_get_be64s(f, &env->spe_acc); qemu_get_be32s(f, &env->spe_fscr); qemu_get_betls(f, &env->msr_mask); qemu_get_be32s(f, &env->flags); qemu_get_sbe32s(f, &env->error_code); qemu_get_be32s(f, &env->pending_interrupts); qemu_get_be32s(f, &env->irq_input_state); for (i = 0; i < POWERPC_EXCP_NB; i++) qemu_get_betls(f, &env->excp_vectors[i]); qemu_get_betls(f, &env->excp_prefix); qemu_get_betls(f, &env->ivor_mask); qemu_get_betls(f, &env->ivpr_mask); qemu_get_betls(f, &env->hreset_vector); qemu_get_betls(f, &env->nip); qemu_get_betls(f, &env->hflags); qemu_get_betls(f, &env->hflags_nmsr); qemu_get_sbe32s(f, &env->mmu_idx); qemu_get_sbe32(f); /* Discard unused power_mode */ return 0; }"} {"target": 1, "idx": 13136, "func": "static void test_qemu_strtoul_invalid(void) { const char *str = \" xxxx \\t abc\"; char f = 'X'; const char *endptr = &f; unsigned long res = 999; int err; err = qemu_strtoul(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert(endptr == str); }"} {"target": 0, "idx": 13137, "func": "static void avc_luma_midv_qrt_4w_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height, uint8_t ver_offset) { uint32_t 
loop_cnt; v16i8 src0, src1, src2, src3, src4; v16i8 mask0, mask1, mask2; v8i16 hz_out0, hz_out1, hz_out2, hz_out3; v8i16 hz_out4, hz_out5, hz_out6, hz_out7, hz_out8; v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; LD_SB3(&luma_mask_arr[48], 16, mask0, mask1, mask2); LD_SB5(src, src_stride, src0, src1, src2, src3, src4); src += (5 * src_stride); XORI_B5_128_SB(src0, src1, src2, src3, src4); hz_out0 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src0, src1, mask0, mask1, mask2); hz_out2 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src2, src3, mask0, mask1, mask2); PCKOD_D2_SH(hz_out0, hz_out0, hz_out2, hz_out2, hz_out1, hz_out3); hz_out4 = AVC_HORZ_FILTER_SH(src4, mask0, mask1, mask2); for (loop_cnt = (height >> 2); loop_cnt--;) { LD_SB4(src, src_stride, src0, src1, src2, src3); src += (4 * src_stride); XORI_B4_128_SB(src0, src1, src2, src3); hz_out5 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src0, src1, mask0, mask1, mask2); hz_out7 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src2, src3, mask0, mask1, mask2); PCKOD_D2_SH(hz_out5, hz_out5, hz_out7, hz_out7, hz_out6, hz_out8); dst0 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5); dst2 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6); dst4 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out2, hz_out3, hz_out4, hz_out5, hz_out6, hz_out7); dst6 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out3, hz_out4, hz_out5, hz_out6, hz_out7, hz_out8); if (ver_offset) { dst1 = __msa_srari_h(hz_out3, 5); dst3 = __msa_srari_h(hz_out4, 5); dst5 = __msa_srari_h(hz_out5, 5); dst7 = __msa_srari_h(hz_out6, 5); } else { dst1 = __msa_srari_h(hz_out2, 5); dst3 = __msa_srari_h(hz_out3, 5); dst5 = __msa_srari_h(hz_out4, 5); dst7 = __msa_srari_h(hz_out5, 5); } SAT_SH4_SH(dst1, dst3, dst5, dst7, 7); dst0 = __msa_aver_s_h(dst0, dst1); dst1 = __msa_aver_s_h(dst2, dst3); dst2 = __msa_aver_s_h(dst4, dst5); dst3 = __msa_aver_s_h(dst6, dst7); PCKEV_B2_SB(dst1, dst0, dst3, dst2, src0, src1); XORI_B2_128_SB(src0, src1); ST4x4_UB(src0, src1, 0, 2, 0, 2, dst, dst_stride); dst += (4 * dst_stride); hz_out0 = hz_out4; hz_out1 = hz_out5; hz_out2 = hz_out6; hz_out3 = hz_out7; hz_out4 = hz_out8; } }"} {"target": 1, "idx": 13140, "func": "static void aml_free(gpointer data, gpointer user_data) { Aml *var = data; build_free_array(var->buf); }"} {"target": 0, "idx": 13149, "func": "static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq) { /* use non-QOM casts in the data path */ VirtIOSCSI *s = (VirtIOSCSI *)vdev; VirtIOSCSICommon *vs = &s->parent_obj; VirtIOSCSIReq *req; int n; while ((req = virtio_scsi_pop_req(s, vq))) { SCSIDevice *d; int out_size, in_size; if (req->elem.out_num < 1 || req->elem.in_num < 1) { virtio_scsi_bad_req(); } out_size = req->elem.out_sg[0].iov_len; in_size = req->elem.in_sg[0].iov_len; if (out_size < sizeof(VirtIOSCSICmdReq) + vs->cdb_size || in_size < sizeof(VirtIOSCSICmdResp) + vs->sense_size) { virtio_scsi_bad_req(); } if (req->elem.out_num > 1 && req->elem.in_num > 1) { virtio_scsi_fail_cmd_req(req); continue; } d = virtio_scsi_device_find(s, req->req.cmd->lun); if (!d) { req->resp.cmd->response = VIRTIO_SCSI_S_BAD_TARGET; virtio_scsi_complete_req(req); continue; } req->sreq = scsi_req_new(d, req->req.cmd->tag, virtio_scsi_get_lun(req->req.cmd->lun), req->req.cmd->cdb, req); if (req->sreq->cmd.mode != SCSI_XFER_NONE) { int req_mode = (req->elem.in_num > 1 ? 
SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV); if (req->sreq->cmd.mode != req_mode || req->sreq->cmd.xfer > req->qsgl.size) { req->resp.cmd->response = VIRTIO_SCSI_S_OVERRUN; virtio_scsi_complete_req(req); continue; } } n = scsi_req_enqueue(req->sreq); if (n) { scsi_req_continue(req->sreq); } } }"} {"target": 0, "idx": 13154, "func": "CharDriverState *text_console_init(QemuOpts *opts) { CharDriverState *chr; QemuConsole *s; unsigned width; unsigned height; chr = g_malloc0(sizeof(CharDriverState)); width = qemu_opt_get_number(opts, \"width\", 0); if (width == 0) width = qemu_opt_get_number(opts, \"cols\", 0) * FONT_WIDTH; height = qemu_opt_get_number(opts, \"height\", 0); if (height == 0) height = qemu_opt_get_number(opts, \"rows\", 0) * FONT_HEIGHT; if (width == 0 || height == 0) { s = new_console(NULL, TEXT_CONSOLE); } else { s = new_console(NULL, TEXT_CONSOLE_FIXED_SIZE); } if (!s) { g_free(chr); return NULL; } s->chr = chr; s->g_width = width; s->g_height = height; chr->opaque = s; chr->chr_set_echo = text_console_set_echo; return chr; }"} {"target": 0, "idx": 13155, "func": "static uint64_t eepro100_read(void *opaque, target_phys_addr_t addr, unsigned size) { EEPRO100State *s = opaque; switch (size) { case 1: return eepro100_read1(s, addr); case 2: return eepro100_read2(s, addr); case 4: return eepro100_read4(s, addr); default: abort(); } }"} {"target": 0, "idx": 13156, "func": "bool vring_should_notify(VirtIODevice *vdev, Vring *vring) { uint16_t old, new; bool v; /* Flush out used index updates. This is paired * with the barrier that the Guest executes when enabling * interrupts. */ smp_mb(); if ((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) && unlikely(!vring_more_avail(vdev, vring))) { return true; } if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) { return !(vring_get_avail_flags(vdev, vring) & VRING_AVAIL_F_NO_INTERRUPT); } old = vring->signalled_used; v = vring->signalled_used_valid; new = vring->signalled_used = vring->last_used_idx; vring->signalled_used_valid = true; if (unlikely(!v)) { return true; } return vring_need_event(vring_used_event(&vring->vr), new, old); }"} {"target": 0, "idx": 13163, "func": "static int amr_nb_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { AMRContext *s = avctx->priv_data; int written, ret; int16_t *flush_buf = NULL; const int16_t *samples = frame ? 
(const int16_t *)frame->data[0] : NULL; if (s->enc_bitrate != avctx->bit_rate) { s->enc_mode = get_bitrate_mode(avctx->bit_rate, avctx); s->enc_bitrate = avctx->bit_rate; } if ((ret = ff_alloc_packet(avpkt, 32))) { av_log(avctx, AV_LOG_ERROR, \"Error getting output packet\\n\"); return ret; } if (frame) { if (frame->nb_samples < avctx->frame_size) { flush_buf = av_mallocz(avctx->frame_size * sizeof(*flush_buf)); if (!flush_buf) return AVERROR(ENOMEM); memcpy(flush_buf, samples, frame->nb_samples * sizeof(*flush_buf)); samples = flush_buf; if (frame->nb_samples < avctx->frame_size - avctx->delay) s->enc_last_frame = -1; } if ((ret = ff_af_queue_add(&s->afq, frame)) < 0) { av_freep(&flush_buf); return ret; } } else { if (s->enc_last_frame < 0) return 0; flush_buf = av_mallocz(avctx->frame_size * sizeof(*flush_buf)); if (!flush_buf) return AVERROR(ENOMEM); samples = flush_buf; s->enc_last_frame = -1; } written = Encoder_Interface_Encode(s->enc_state, s->enc_mode, samples, avpkt->data, 0); av_dlog(avctx, \"amr_nb_encode_frame encoded %u bytes, bitrate %u, first byte was %#02x\\n\", written, s->enc_mode, frame[0]); /* Get the next frame pts/duration */ ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts, &avpkt->duration); avpkt->size = written; *got_packet_ptr = 1; av_freep(&flush_buf); return 0; }"} {"target": 0, "idx": 13172, "func": "void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec, int is_asi, int size) { CPUState *saved_env; /* XXX: hack to restore env in all cases, even if not called from generated code */ saved_env = env; env = cpu_single_env; qemu_log(\"Unassigned \" TARGET_FMT_plx \" wr=%d exe=%d\\n\", addr, is_write, is_exec); if (!(env->sregs[SR_MSR] & MSR_EE)) { return; } if (is_exec) { if (!(env->pvr.regs[2] & PVR2_IOPB_BUS_EXC_MASK)) { env->sregs[SR_ESR] = ESR_EC_INSN_BUS; helper_raise_exception(EXCP_HW_EXCP); } } else { if (!(env->pvr.regs[2] & PVR2_DOPB_BUS_EXC_MASK)) { env->sregs[SR_ESR] = ESR_EC_DATA_BUS; helper_raise_exception(EXCP_HW_EXCP); } } }"} {"target": 1, "idx": 13204, "func": "static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]){ /* load a few things into local vars to make the code more readable? 
and faster */ const int srcW= c->srcW; const int dstW= c->dstW; const int dstH= c->dstH; const int chrDstW= c->chrDstW; const int chrSrcW= c->chrSrcW; const int lumXInc= c->lumXInc; const int chrXInc= c->chrXInc; const int dstFormat= c->dstFormat; const int srcFormat= c->srcFormat; const int flags= c->flags; const int canMMX2BeUsed= c->canMMX2BeUsed; int16_t *vLumFilterPos= c->vLumFilterPos; int16_t *vChrFilterPos= c->vChrFilterPos; int16_t *hLumFilterPos= c->hLumFilterPos; int16_t *hChrFilterPos= c->hChrFilterPos; int16_t *vLumFilter= c->vLumFilter; int16_t *vChrFilter= c->vChrFilter; int16_t *hLumFilter= c->hLumFilter; int16_t *hChrFilter= c->hChrFilter; int32_t *lumMmxFilter= c->lumMmxFilter; int32_t *chrMmxFilter= c->chrMmxFilter; const int vLumFilterSize= c->vLumFilterSize; const int vChrFilterSize= c->vChrFilterSize; const int hLumFilterSize= c->hLumFilterSize; const int hChrFilterSize= c->hChrFilterSize; int16_t **lumPixBuf= c->lumPixBuf; int16_t **chrPixBuf= c->chrPixBuf; const int vLumBufSize= c->vLumBufSize; const int vChrBufSize= c->vChrBufSize; uint8_t *funnyYCode= c->funnyYCode; uint8_t *funnyUVCode= c->funnyUVCode; uint8_t *formatConvBuffer= c->formatConvBuffer; const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample; const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample); int lastDstY; uint8_t *pal=NULL; /* vars whch will change and which we need to storw back in the context */ int dstY= c->dstY; int lumBufIndex= c->lumBufIndex; int chrBufIndex= c->chrBufIndex; int lastInLumBuf= c->lastInLumBuf; int lastInChrBuf= c->lastInChrBuf; if(isPacked(c->srcFormat)){ pal= src[1]; src[0]= src[1]= src[2]= src[0]; srcStride[0]= srcStride[1]= srcStride[2]= srcStride[0]; } srcStride[1]<<= c->vChrDrop; srcStride[2]<<= c->vChrDrop; // printf(\"swscale %X %X %X -> %X %X %X\\n\", (int)src[0], (int)src[1], (int)src[2], // (int)dst[0], (int)dst[1], (int)dst[2]); #if 0 //self test FIXME move to a vfilter or something { static volatile int i=0; i++; if(srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH) selfTest(src, srcStride, c->srcW, c->srcH); i--; } #endif //printf(\"sws Strides:%d %d %d -> %d %d %d\\n\", srcStride[0],srcStride[1],srcStride[2], //dstStride[0],dstStride[1],dstStride[2]); if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0) { static int firstTime=1; //FIXME move this into the context perhaps if(flags & SWS_PRINT_INFO && firstTime) { av_log(c, AV_LOG_WARNING, \"SwScaler: Warning: dstStride is not aligned!\\n\" \"SwScaler: ->cannot do aligned memory acesses anymore\\n\"); firstTime=0; } } /* Note the user might start scaling the picture in the middle so this will not get executed this is not really intended but works currently, so ppl might do it */ if(srcSliceY ==0){ lumBufIndex=0; chrBufIndex=0; dstY=0; lastInLumBuf= -1; lastInChrBuf= -1; } lastDstY= dstY; for(;dstY < dstH; dstY++){ unsigned char *dest =dst[0]+dstStride[0]*dstY; const int chrDstY= dstY>>c->chrDstVSubSample; unsigned char *uDest=dst[1]+dstStride[1]*chrDstY; unsigned char *vDest=dst[2]+dstStride[2]*chrDstY; const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input //printf(\"dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d 
c->chrSrcVSubSample: %d\\n\", // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample); //handle holes (FAST_BILINEAR & weird filters) if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1; if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1; //printf(\"%d %d %d\\n\", firstChrSrcY, lastInChrBuf, vChrBufSize); ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1) ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1) // Do we have enough lines in this slice to output the dstY line if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample)) { //Do horizontal scaling while(lastInLumBuf < lastLumSrcY) { uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; lumBufIndex++; // printf(\"%d %d %d %d\\n\", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY); ASSERT(lumBufIndex < 2*vLumBufSize) ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) // printf(\"%d %d\\n\", lumBufIndex, vLumBufSize); RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, funnyYCode, c->srcFormat, formatConvBuffer, c->lumMmx2Filter, c->lumMmx2FilterPos, pal); lastInLumBuf++; } while(lastInChrBuf < lastChrSrcY) { uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1]; uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2]; chrBufIndex++; ASSERT(chrBufIndex < 2*vChrBufSize) ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH)) ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0) //FIXME replace parameters through context struct (some at least) if(!(isGray(srcFormat) || isGray(dstFormat))) RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc, flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize, funnyUVCode, c->srcFormat, formatConvBuffer, c->chrMmx2Filter, c->chrMmx2FilterPos, pal); lastInChrBuf++; } //wrap buf index around to stay inside the ring buffer if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; } else // not enough lines left in this slice -> load the rest in the buffer { /* printf(\"%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\\n\", firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY, lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize, vChrBufSize, vLumBufSize);*/ //Do horizontal scaling while(lastInLumBuf+1 < srcSliceY + srcSliceH) { uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; lumBufIndex++; ASSERT(lumBufIndex < 2*vLumBufSize) ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, funnyYCode, c->srcFormat, formatConvBuffer, c->lumMmx2Filter, c->lumMmx2FilterPos, pal); lastInLumBuf++; } while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH)) { uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1]; uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2]; chrBufIndex++; ASSERT(chrBufIndex < 2*vChrBufSize) ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH) ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0) if(!(isGray(srcFormat) || isGray(dstFormat))) RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc, flags, 
canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize, funnyUVCode, c->srcFormat, formatConvBuffer, c->chrMmx2Filter, c->chrMmx2FilterPos, pal); lastInChrBuf++; } //wrap buf index around to stay inside the ring buffer if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; break; //we can't output a dstY line so let's try with the next slice } #ifdef HAVE_MMX b5Dither= dither8[dstY&1]; g6Dither= dither4[dstY&1]; g5Dither= dither8[dstY&1]; r5Dither= dither8[(dstY+1)&1]; #endif if(dstY < dstH-2) { int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; #ifdef HAVE_MMX int i; if(flags & SWS_ACCURATE_RND){ for(i=0; i1)]; lumMmxFilter[2*i+2]= lumMmxFilter[2*i+3]= vLumFilter[dstY*vLumFilterSize + i ] + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0); } for(i=0; i1)]; chrMmxFilter[2*i+2]= chrMmxFilter[2*i+3]= vChrFilter[chrDstY*vChrFilterSize + i ] + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0); } }else{ for(i=0; i> 32; lumMmxFilter[4*i+2]= lumMmxFilter[4*i+3]= ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001; } for(i=0; ichrDstVSubSample)-1; if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi RENAME(yuv2nv12X)(c, vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, uDest, dstW, chrDstW, dstFormat); } else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like { const int chrSkipMask= (1<chrDstVSubSample)-1; if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12 { int16_t *lumBuf = lumPixBuf[0]; int16_t *chrBuf= chrPixBuf[0]; RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW); } else //General YV12 { RENAME(yuv2yuvX)(c, vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, uDest, vDest, dstW, chrDstW); } } else { ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB { int chrAlpha= vChrFilter[2*dstY+1]; RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1), dest, dstW, chrAlpha, dstFormat, flags, dstY); } else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB { int lumAlpha= vLumFilter[2*dstY+1]; int chrAlpha= vChrFilter[2*dstY+1]; lumMmxFilter[2]= lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001; chrMmxFilter[2]= chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001; RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1), dest, dstW, lumAlpha, chrAlpha, dstY); } else //General RGB { RENAME(yuv2packedX)(c, vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, dstW, dstY); } } } else // hmm looks like we can't use MMX here without overwriting this array's tail { int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; if(dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){ const int chrSkipMask= (1<chrDstVSubSample)-1; if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi 
/ chromi yuv2nv12XinC( vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, uDest, dstW, chrDstW, dstFormat); } else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 { const int chrSkipMask= (1<chrDstVSubSample)-1; if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi yuv2yuvXinC( vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, uDest, vDest, dstW, chrDstW); } else { ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); yuv2packedXinC(c, vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, dstW, dstY); } } } #ifdef HAVE_MMX __asm __volatile(SFENCE:::\"memory\"); __asm __volatile(EMMS:::\"memory\"); #endif /* store changed local vars back in the context */ c->dstY= dstY; c->lumBufIndex= lumBufIndex; c->chrBufIndex= chrBufIndex; c->lastInLumBuf= lastInLumBuf; c->lastInChrBuf= lastInChrBuf; return dstY - lastDstY; }"} {"target": 1, "idx": 13205, "func": "static inline void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src, int width, uint32_t *unused) { #if COMPILE_TEMPLATE_MMX RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24); #else int i; for (i=0; i>RGB2YUV_SHIFT); } #endif /* COMPILE_TEMPLATE_MMX */ }"} {"target": 1, "idx": 13230, "func": "void FUNC(ff_simple_idct)(DCTELEM *block) { int i; for (i = 0; i < 8; i++) FUNC(idctRowCondDC)(block + i*8); for (i = 0; i < 8; i++) FUNC(idctSparseCol)(block + i); }"} {"target": 1, "idx": 13236, "func": "static void ohci_async_cancel_device(OHCIState *ohci, USBDevice *dev) { if (ohci->async_td && ohci->usb_packet.owner != NULL && ohci->usb_packet.owner->dev == dev) { usb_cancel_packet(&ohci->usb_packet); ohci->async_td = 0; } }"} {"target": 1, "idx": 13268, "func": "static int av_encode(AVFormatContext **output_files, int nb_output_files, AVFormatContext **input_files, int nb_input_files, AVStreamMap *stream_maps, int nb_stream_maps) { int ret, i, j, k, n, nb_istreams = 0, nb_ostreams = 0; AVFormatContext *is, *os; AVCodecContext *codec, *icodec; AVOutputStream *ost, **ost_table = NULL; AVInputStream *ist, **ist_table = NULL; AVInputFile *file_table; AVFormatContext *stream_no_data; int key; file_table= (AVInputFile*) av_mallocz(nb_input_files * sizeof(AVInputFile)); if (!file_table) goto fail; /* input stream init */ j = 0; for(i=0;inb_streams; j += is->nb_streams; } nb_istreams = j; ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *)); if (!ist_table) goto fail; for(i=0;inb_streams;k++) { ist = ist_table[j++]; ist->st = is->streams[k]; ist->file_index = i; ist->index = k; ist->discard = 1; /* the stream is discarded by default (changed later) */ if (ist->st->codec.rate_emu) { ist->start = av_gettime(); ist->frame = 0; } } } /* output stream init */ nb_ostreams = 0; for(i=0;inb_streams; } if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) { fprintf(stderr, \"Number of stream maps must match number of output streams\\n\"); exit(1); } /* Sanity check the mapping args -- do the input files & streams exist? 
*/ for(i=0;i nb_input_files - 1 || si < 0 || si > file_table[fi].nb_streams - 1) { fprintf(stderr,\"Could not find input stream #%d.%d\\n\", fi, si); exit(1); } } ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams); if (!ost_table) goto fail; for(i=0;inb_streams;i++) { int found; ost = ost_table[n++]; ost->file_index = k; ost->index = i; ost->st = os->streams[i]; if (nb_stream_maps > 0) { ost->source_index = file_table[stream_maps[n-1].file_index].ist_index + stream_maps[n-1].stream_index; /* Sanity check that the stream types match */ if (ist_table[ost->source_index]->st->codec.codec_type != ost->st->codec.codec_type) { fprintf(stderr, \"Codec type mismatch for mapping #%d.%d -> #%d.%d\\n\", stream_maps[n-1].file_index, stream_maps[n-1].stream_index, ost->file_index, ost->index); exit(1); } } else { /* get corresponding input stream index : we select the first one with the right type */ found = 0; for(j=0;jdiscard && ist->st->codec.codec_type == ost->st->codec.codec_type) { ost->source_index = j; found = 1; } } if (!found) { /* try again and reuse existing stream */ for(j=0;jst->codec.codec_type == ost->st->codec.codec_type) { ost->source_index = j; found = 1; } } if (!found) { fprintf(stderr, \"Could not find input stream matching output stream #%d.%d\\n\", ost->file_index, ost->index); exit(1); } } } ist = ist_table[ost->source_index]; ist->discard = 0; } } /* for each output stream, we compute the right encoding parameters */ for(i=0;isource_index]; codec = &ost->st->codec; icodec = &ist->st->codec; if (ost->st->stream_copy) { /* if stream_copy is selected, no need to decode or encode */ codec->codec_id = icodec->codec_id; codec->codec_type = icodec->codec_type; codec->codec_tag = icodec->codec_tag; codec->bit_rate = icodec->bit_rate; switch(codec->codec_type) { case CODEC_TYPE_AUDIO: codec->sample_rate = icodec->sample_rate; codec->channels = icodec->channels; break; case CODEC_TYPE_VIDEO: codec->frame_rate = icodec->frame_rate; codec->frame_rate_base = icodec->frame_rate_base; codec->width = icodec->width; codec->height = icodec->height; break; default: av_abort(); } } else { switch(codec->codec_type) { case CODEC_TYPE_AUDIO: if (fifo_init(&ost->fifo, 2 * MAX_AUDIO_PACKET_SIZE)) goto fail; if (codec->channels == icodec->channels && codec->sample_rate == icodec->sample_rate) { ost->audio_resample = 0; } else { if (codec->channels != icodec->channels && icodec->codec_id == CODEC_ID_AC3) { /* Special case for 5:1 AC3 input */ /* and mono or stereo output */ /* Request specific number of channels */ icodec->channels = codec->channels; if (codec->sample_rate == icodec->sample_rate) ost->audio_resample = 0; else { ost->audio_resample = 1; ost->resample = audio_resample_init(codec->channels, icodec->channels, codec->sample_rate, icodec->sample_rate); if(!ost->resample) { printf(\"Can't resample. Aborting.\\n\"); av_abort(); } } /* Request specific number of channels */ icodec->channels = codec->channels; } else { ost->audio_resample = 1; ost->resample = audio_resample_init(codec->channels, icodec->channels, codec->sample_rate, icodec->sample_rate); if(!ost->resample) { printf(\"Can't resample. 
Aborting.\\n\"); av_abort(); } } } ist->decoding_needed = 1; ost->encoding_needed = 1; break; case CODEC_TYPE_VIDEO: if (codec->width == icodec->width && codec->height == icodec->height && frame_topBand == 0 && frame_bottomBand == 0 && frame_leftBand == 0 && frame_rightBand == 0) { ost->video_resample = 0; ost->video_crop = 0; } else if ((codec->width == icodec->width - (frame_leftBand + frame_rightBand)) && (codec->height == icodec->height - (frame_topBand + frame_bottomBand))) { ost->video_resample = 0; ost->video_crop = 1; ost->topBand = frame_topBand; ost->leftBand = frame_leftBand; } else { uint8_t *buf; ost->video_resample = 1; ost->video_crop = 0; // cropping is handled as part of resample buf = av_malloc((codec->width * codec->height * 3) / 2); if (!buf) goto fail; ost->pict_tmp.data[0] = buf; ost->pict_tmp.data[1] = ost->pict_tmp.data[0] + (codec->width * codec->height); ost->pict_tmp.data[2] = ost->pict_tmp.data[1] + (codec->width * codec->height) / 4; ost->pict_tmp.linesize[0] = codec->width; ost->pict_tmp.linesize[1] = codec->width / 2; ost->pict_tmp.linesize[2] = codec->width / 2; ost->img_resample_ctx = img_resample_full_init( ost->st->codec.width, ost->st->codec.height, ist->st->codec.width, ist->st->codec.height, frame_topBand, frame_bottomBand, frame_leftBand, frame_rightBand); } ost->encoding_needed = 1; ist->decoding_needed = 1; break; default: av_abort(); } /* two pass mode */ if (ost->encoding_needed && (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) { char logfilename[1024]; FILE *f; int size; char *logbuffer; snprintf(logfilename, sizeof(logfilename), \"%s-%d.log\", pass_logfilename ? pass_logfilename : DEFAULT_PASS_LOGFILENAME, i); if (codec->flags & CODEC_FLAG_PASS1) { f = fopen(logfilename, \"w\"); if (!f) { perror(logfilename); exit(1); } ost->logfile = f; } else { /* read the log file */ f = fopen(logfilename, \"r\"); if (!f) { perror(logfilename); exit(1); } fseek(f, 0, SEEK_END); size = ftell(f); fseek(f, 0, SEEK_SET); logbuffer = av_malloc(size + 1); if (!logbuffer) { fprintf(stderr, \"Could not allocate log buffer\\n\"); exit(1); } fread(logbuffer, 1, size, f); fclose(f); logbuffer[size] = '\\0'; codec->stats_in = logbuffer; } } } } /* dump the file output parameters - cannot be done before in case of stream copy */ for(i=0;ifilename, 1); } /* dump the stream mapping */ fprintf(stderr, \"Stream mapping:\\n\"); for(i=0;i #%d.%d\\n\", ist_table[ost->source_index]->file_index, ist_table[ost->source_index]->index, ost->file_index, ost->index); } /* open each encoder */ for(i=0;iencoding_needed) { AVCodec *codec; codec = avcodec_find_encoder(ost->st->codec.codec_id); if (!codec) { fprintf(stderr, \"Unsupported codec for output stream #%d.%d\\n\", ost->file_index, ost->index); exit(1); } if (avcodec_open(&ost->st->codec, codec) < 0) { fprintf(stderr, \"Error while opening codec for stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height\\n\", ost->file_index, ost->index); exit(1); } } } /* open each decoder */ for(i=0;idecoding_needed) { AVCodec *codec; codec = avcodec_find_decoder(ist->st->codec.codec_id); if (!codec) { fprintf(stderr, \"Unsupported codec (id=%d) for input stream #%d.%d\\n\", ist->st->codec.codec_id, ist->file_index, ist->index); exit(1); } if (avcodec_open(&ist->st->codec, codec) < 0) { fprintf(stderr, \"Error while opening codec for input stream #%d.%d\\n\", ist->file_index, ist->index); exit(1); } //if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO) // ist->st->codec.flags |= CODEC_FLAG_REPEAT_FIELD; 
ist->frame_decoded = 1; } } /* init pts */ for(i=0;ifile_index]; ist->pts = 0; if (ist->decoding_needed) { switch (ist->st->codec.codec_type) { case CODEC_TYPE_AUDIO: av_frac_init(&ist->next_pts, 0, 0, is->pts_num * ist->st->codec.sample_rate); break; case CODEC_TYPE_VIDEO: av_frac_init(&ist->next_pts, 0, 0, is->pts_num * ist->st->codec.frame_rate); break; default: break; } } } /* compute buffer size max (should use a complete heuristic) */ for(i=0;ifile_index]; ist = ist_table[ost->source_index]; pts = (double)ost->st->pts.val * os->pts_num / os->pts_den; if (!file_table[ist->file_index].eof_reached && pts < pts_min) { pts_min = pts; file_index = ist->file_index; } } /* if none, if is finished */ if (file_index < 0) { break; } /* finish if recording time exhausted */ if (recording_time > 0 && pts_min >= (recording_time / 1000000.0)) break; /* read a packet from it and output it in the fifo */ is = input_files[file_index]; if (av_read_packet(is, &pkt) < 0) { file_table[file_index].eof_reached = 1; continue; } if (!pkt.size) { stream_no_data = is; } else { stream_no_data = 0; } if (do_hex_dump) { printf(\"stream #%d, size=%d:\\n\", pkt.stream_index, pkt.size); av_hex_dump(pkt.data, pkt.size); } /* the following test is needed in case new streams appear dynamically in stream : we ignore them */ if (pkt.stream_index >= file_table[file_index].nb_streams) goto discard_packet; ist_index = file_table[file_index].ist_index + pkt.stream_index; ist = ist_table[ist_index]; if (ist->discard) goto discard_packet; // printf(\"read #%d.%d size=%d\\n\", ist->file_index, ist->index, pkt.size); len = pkt.size; ptr = pkt.data; while (len > 0) { /* decode the packet if needed */ data_buf = NULL; /* fail safe */ data_size = 0; if (ist->decoding_needed) { /* NOTE1: we only take into account the PTS if a new frame has begun (MPEG semantics) */ /* NOTE2: even if the fraction is not initialized, av_frac_set can be used to set the integer part */ if (ist->frame_decoded) { /* If pts is unavailable -- we have to use synthetic one */ if( pkt.pts != AV_NOPTS_VALUE ) { ist->pts = ist->next_pts.val = pkt.pts; } else { ist->pts = ist->next_pts.val; } ist->frame_decoded = 0; } switch(ist->st->codec.codec_type) { case CODEC_TYPE_AUDIO: /* XXX: could avoid copy if PCM 16 bits with same endianness as CPU */ ret = avcodec_decode_audio(&ist->st->codec, samples, &data_size, ptr, len); if (ret < 0) goto fail_decode; /* Some bug in mpeg audio decoder gives */ /* data_size < 0, it seems they are overflows */ if (data_size <= 0) { /* no audio frame */ ptr += ret; len -= ret; continue; } data_buf = (uint8_t *)samples; av_frac_add(&ist->next_pts, is->pts_den * data_size / (2 * ist->st->codec.channels)); break; case CODEC_TYPE_VIDEO: { AVFrame big_picture; data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2; ret = avcodec_decode_video(&ist->st->codec, &big_picture, &got_picture, ptr, len); picture= *(AVPicture*)&big_picture; ist->st->quality= big_picture.quality; if (ret < 0) { fail_decode: fprintf(stderr, \"Error while decoding stream #%d.%d\\n\", ist->file_index, ist->index); av_free_packet(&pkt); goto redo; } if (!got_picture) { /* no picture yet */ ptr += ret; len -= ret; continue; } av_frac_add(&ist->next_pts, is->pts_den * ist->st->codec.frame_rate_base); } break; default: goto fail_decode; } } else { data_buf = ptr; data_size = len; ret = len; } ptr += ret; len -= ret; buffer_to_free = 0; if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO) { pre_process_video_frame(ist, &picture, &buffer_to_free); } 
ist->frame_decoded = 1; /* frame rate emulation */ if (ist->st->codec.rate_emu) { int64_t pts = av_rescale((int64_t) ist->frame * ist->st->codec.frame_rate_base, 1000000, ist->st->codec.frame_rate); int64_t now = av_gettime() - ist->start; if (pts > now) usleep(pts - now); ist->frame++; } #if 0 /* mpeg PTS deordering : if it is a P or I frame, the PTS is the one of the next displayed one */ /* XXX: add mpeg4 too ? */ if (ist->st->codec.codec_id == CODEC_ID_MPEG1VIDEO) { if (ist->st->codec.pict_type != B_TYPE) { int64_t tmp; tmp = ist->last_ip_pts; ist->last_ip_pts = ist->frac_pts.val; ist->frac_pts.val = tmp; } } #endif /* transcode raw format, encode packets and output them */ for(i=0;isource_index == ist_index) { os = output_files[ost->file_index]; #if 0 printf(\"%d: got pts=%f %f\\n\", i, pkt.pts / 90000.0, (ist->pts - ost->st->pts.val) / 90000.0); #endif /* set the input output pts pairs */ ost->sync_ipts = (double)ist->pts * is->pts_num / is->pts_den; /* XXX: take into account the various fifos, in particular for audio */ ost->sync_opts = ost->st->pts.val; //printf(\"ipts=%lld sync_ipts=%f sync_opts=%lld pts.val=%lld pkt.pts=%lld\\n\", ist->pts, ost->sync_ipts, ost->sync_opts, ost->st->pts.val, pkt.pts); if (ost->encoding_needed) { switch(ost->st->codec.codec_type) { case CODEC_TYPE_AUDIO: do_audio_out(os, ost, ist, data_buf, data_size); break; case CODEC_TYPE_VIDEO: /* find an audio stream for synchro */ { int i; AVOutputStream *audio_sync, *ost1; audio_sync = NULL; for(i=0;ifile_index == ost->file_index && ost1->st->codec.codec_type == CODEC_TYPE_AUDIO) { audio_sync = ost1; break; } } do_video_out(os, ost, ist, &picture, &frame_size, audio_sync); if (do_vstats && frame_size) do_video_stats(os, ost, frame_size); } break; default: av_abort(); } } else { AVFrame avframe; /* no reencoding needed : output the packet directly */ /* force the input stream PTS */ memset(&avframe, 0, sizeof(AVFrame)); ost->st->codec.coded_frame= &avframe; avframe.key_frame = pkt.flags & PKT_FLAG_KEY; av_write_frame(os, ost->index, data_buf, data_size); ost->st->codec.frame_number++; ost->frame_number++; } } } av_free(buffer_to_free); } discard_packet: av_free_packet(&pkt); /* dump report by using the output first video and audio streams */ print_report(output_files, ost_table, nb_ostreams, 0); } term_exit(); /* dump report by using the first video and audio streams */ print_report(output_files, ost_table, nb_ostreams, 1); /* close each encoder */ for(i=0;iencoding_needed) { av_freep(&ost->st->codec.stats_in); avcodec_close(&ost->st->codec); } } /* close each decoder */ for(i=0;idecoding_needed) { avcodec_close(&ist->st->codec); } } /* write the trailer if needed and close file */ for(i=0;ilogfile) { fclose(ost->logfile); ost->logfile = NULL; } fifo_free(&ost->fifo); /* works even if fifo is not initialized but set to zero */ av_free(ost->pict_tmp.data[0]); if (ost->video_resample) img_resample_close(ost->img_resample_ctx); if (ost->audio_resample) audio_resample_close(ost->resample); av_free(ost); } } av_free(ost_table); } return ret; fail: ret = -ENOMEM; goto fail1; }"} {"target": 1, "idx": 13273, "func": "int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int bit_size, int sync_extension) { GetBitContext gb; int specific_config_bitindex; init_get_bits(&gb, buf, bit_size); c->object_type = get_object_type(&gb); c->sample_rate = get_sample_rate(&gb, &c->sampling_index); c->chan_config = get_bits(&gb, 4); if (c->chan_config < FF_ARRAY_ELEMS(ff_mpeg4audio_channels)) c->channels = 
ff_mpeg4audio_channels[c->chan_config]; c->sbr = -1; c->ps = -1; if (c->object_type == AOT_SBR || (c->object_type == AOT_PS && // check for W6132 Annex YYYY draft MP3onMP4 !(show_bits(&gb, 3) & 0x03 && !(show_bits(&gb, 9) & 0x3F)))) { if (c->object_type == AOT_PS) c->ps = 1; c->ext_object_type = AOT_SBR; c->sbr = 1; c->ext_sample_rate = get_sample_rate(&gb, &c->ext_sampling_index); c->object_type = get_object_type(&gb); if (c->object_type == AOT_ER_BSAC) c->ext_chan_config = get_bits(&gb, 4); } else { c->ext_object_type = AOT_NULL; c->ext_sample_rate = 0; } specific_config_bitindex = get_bits_count(&gb); if (c->object_type == AOT_ALS) { skip_bits(&gb, 5); if (show_bits_long(&gb, 24) != MKBETAG('\\0','A','L','S')) skip_bits_long(&gb, 24); specific_config_bitindex = get_bits_count(&gb); if (parse_config_ALS(&gb, c)) return -1; } if (c->ext_object_type != AOT_SBR && sync_extension) { while (get_bits_left(&gb) > 15) { if (show_bits(&gb, 11) == 0x2b7) { // sync extension get_bits(&gb, 11); c->ext_object_type = get_object_type(&gb); if (c->ext_object_type == AOT_SBR && (c->sbr = get_bits1(&gb)) == 1) c->ext_sample_rate = get_sample_rate(&gb, &c->ext_sampling_index); if (get_bits_left(&gb) > 11 && get_bits(&gb, 11) == 0x548) c->ps = get_bits1(&gb); break; } else get_bits1(&gb); // skip 1 bit } } //PS requires SBR if (!c->sbr) c->ps = 0; //Limit implicit PS to the HE-AACv2 Profile if ((c->ps == -1 && c->object_type != AOT_AAC_LC) || c->channels & ~0x01) c->ps = 0; return specific_config_bitindex; }"} {"target": 1, "idx": 13311, "func": "static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num) { MpegEncContext *s = &v->s; int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride], block_cbp = mb_cbp >> (block_num * 4), right_cbp, mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride], block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra; int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk; uint8_t *dst; if (block_num > 3) { dst = s->dest[block_num - 3] - 8 * linesize; } else { dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8; } if (s->mb_x != s->mb_width || !(block_num & 5)) { int16_t (*mv)[2]; if (block_num > 3) { right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4); right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4); mv = &v->luma_mv[s->mb_x - s->mb_stride - 1]; } else { right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4)) : (mb_cbp >> ((block_num + 1) * 4)); right_is_intra = (block_num & 1) ? 
(v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4)) : (mb_is_intra >> ((block_num + 1) * 4)); mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2]; } if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) { v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq); } else { idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check if (idx == 5) { v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq); } else if (idx) { if (idx == 1) v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq); else v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq); } } } dst -= 4; ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf; if (ttblk == TT_4X4 || ttblk == TT_4X8) { idx = (block_cbp | (block_cbp >> 1)) & 5; if (idx == 5) { v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq); } else if (idx) { if (idx == 1) v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq); else v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq); } } }"} {"target": 1, "idx": 13316, "func": "static int read_packet(AVFormatContext *s, uint8_t *buf, int raw_packet_size, uint8_t **data) { AVIOContext *pb = s->pb; int len; for(;;) { len = ffio_read_indirect(pb, buf, TS_PACKET_SIZE, data); if (len != TS_PACKET_SIZE) return len < 0 ? len : AVERROR_EOF; /* check packet sync byte */ if ((*data)[0] != 0x47) { /* find a new packet start */ avio_seek(pb, -TS_PACKET_SIZE, SEEK_CUR); if (mpegts_resync(s) < 0) return AVERROR(EAGAIN); else continue; } else { break; } } return 0; }"} {"target": 0, "idx": 13339, "func": "int ff_parse_sample_rate(unsigned *ret, const char *arg, void *log_ctx) { char *tail; double srate = av_strtod(arg, &tail); if (*tail || srate < 1 || (int)srate != srate) { av_log(log_ctx, AV_LOG_ERROR, \"Invalid sample rate '%s'\\n\", arg); return AVERROR(EINVAL); } *ret = srate; return 0; }"} {"target": 1, "idx": 13347, "func": "static void decode_abs_store(CPUTriCoreState *env, DisasContext *ctx) { int32_t op2; int32_t r1; uint32_t address; TCGv temp; r1 = MASK_OP_ABS_S1D(ctx->opcode); address = MASK_OP_ABS_OFF18(ctx->opcode); op2 = MASK_OP_ABS_OP2(ctx->opcode); temp = tcg_const_i32(EA_ABS_FORMAT(address)); switch (op2) { case OPC2_32_ABS_ST_A: tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL); break; case OPC2_32_ABS_ST_D: gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); break; case OPC2_32_ABS_ST_DA: gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); break; case OPC2_32_ABS_ST_W: tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL); break; } tcg_temp_free(temp); }"} {"target": 0, "idx": 13357, "func": "static void v9fs_readdir(void *opaque) { int32_t fid; V9fsFidState *fidp; ssize_t retval = 0; size_t offset = 7; uint64_t initial_offset; int32_t count; uint32_t max_count; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; pdu_unmarshal(pdu, offset, \"dqd\", &fid, &initial_offset, &max_count); trace_v9fs_readdir(pdu->tag, pdu->id, fid, initial_offset, max_count); fidp = get_fid(pdu, fid); if (fidp == NULL) { retval = -EINVAL; goto out_nofid; } if (!fidp->fs.dir) { retval = -EINVAL; goto out; } if (initial_offset == 0) { v9fs_co_rewinddir(pdu, fidp); } else { v9fs_co_seekdir(pdu, fidp, initial_offset); } count = v9fs_do_readdir(pdu, fidp, max_count); if (count < 0) { retval = count; goto out; } retval = offset; retval += pdu_marshal(pdu, offset, \"d\", count); retval += count; trace_v9fs_readdir_return(pdu->tag, pdu->id, count, retval); out: put_fid(pdu, fidp); out_nofid: 
complete_pdu(s, pdu, retval); }"} {"target": 0, "idx": 13363, "func": "uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr) { CPUState *cs = CPU(s390_env_get_cpu(env)); uint32_t cc = 0; int old_exc = cs->exception_index; uint64_t asc = env->psw.mask & PSW_MASK_ASC; uint64_t ret; int flags; /* XXX incomplete - has more corner cases */ if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) { program_interrupt(env, PGM_SPECIAL_OP, 2); } cs->exception_index = old_exc; if (mmu_translate(env, addr, 0, asc, &ret, &flags)) { cc = 3; } if (cs->exception_index == EXCP_PGM) { ret = env->int_pgm_code | 0x80000000; } else { ret |= addr & ~TARGET_PAGE_MASK; } cs->exception_index = old_exc; env->cc_op = cc; return ret; }"} {"target": 0, "idx": 13365, "func": "static int bdrv_check_update_perm(BlockDriverState *bs, uint64_t new_used_perm, uint64_t new_shared_perm, BdrvChild *ignore_child, Error **errp) { BdrvChild *c; uint64_t cumulative_perms = new_used_perm; uint64_t cumulative_shared_perms = new_shared_perm; /* There is no reason why anyone couldn't tolerate write_unchanged */ assert(new_shared_perm & BLK_PERM_WRITE_UNCHANGED); QLIST_FOREACH(c, &bs->parents, next_parent) { if (c == ignore_child) { continue; } if ((new_used_perm & c->shared_perm) != new_used_perm) { char *user = bdrv_child_user_desc(c); char *perm_names = bdrv_perm_names(new_used_perm & ~c->shared_perm); error_setg(errp, \"Conflicts with use by %s as '%s', which does not \" \"allow '%s' on %s\", user, c->name, perm_names, bdrv_get_node_name(c->bs)); g_free(user); g_free(perm_names); return -EPERM; } if ((c->perm & new_shared_perm) != c->perm) { char *user = bdrv_child_user_desc(c); char *perm_names = bdrv_perm_names(c->perm & ~new_shared_perm); error_setg(errp, \"Conflicts with use by %s as '%s', which uses \" \"'%s' on %s\", user, c->name, perm_names, bdrv_get_node_name(c->bs)); g_free(user); g_free(perm_names); return -EPERM; } cumulative_perms |= c->perm; cumulative_shared_perms &= c->shared_perm; } return bdrv_check_perm(bs, cumulative_perms, cumulative_shared_perms, errp); }"} {"target": 0, "idx": 13366, "func": "static void monitor_protocol_emitter(Monitor *mon, QObject *data) { QDict *qmp; qmp = qdict_new(); if (!monitor_has_error(mon)) { /* success response */ if (data) { assert(qobject_type(data) == QTYPE_QDICT); qobject_incref(data); qdict_put_obj(qmp, \"return\", data); } else { /* return an empty QDict by default */ qdict_put(qmp, \"return\", qdict_new()); } } else { /* error response */ qdict_put(mon->error->error, \"desc\", qerror_human(mon->error)); qdict_put(qmp, \"error\", mon->error->error); QINCREF(mon->error->error); QDECREF(mon->error); mon->error = NULL; } if (mon->mc->id) { qdict_put_obj(qmp, \"id\", mon->mc->id); mon->mc->id = NULL; } monitor_json_emitter(mon, QOBJECT(qmp)); QDECREF(qmp); }"} {"target": 0, "idx": 13388, "func": "static inline int decode_vui_parameters(H264Context *h, SPS *sps) { int aspect_ratio_info_present_flag; unsigned int aspect_ratio_idc; aspect_ratio_info_present_flag = get_bits1(&h->gb); if (aspect_ratio_info_present_flag) { aspect_ratio_idc = get_bits(&h->gb, 8); if (aspect_ratio_idc == EXTENDED_SAR) { sps->sar.num = get_bits(&h->gb, 16); sps->sar.den = get_bits(&h->gb, 16); } else if (aspect_ratio_idc < FF_ARRAY_ELEMS(pixel_aspect)) { sps->sar = pixel_aspect[aspect_ratio_idc]; } else { av_log(h->avctx, AV_LOG_ERROR, \"illegal aspect ratio\\n\"); return AVERROR_INVALIDDATA; } } else { sps->sar.num = sps->sar.den = 0; } if (get_bits1(&h->gb)) /* overscan_info_present_flag */ 
get_bits1(&h->gb); /* overscan_appropriate_flag */ sps->video_signal_type_present_flag = get_bits1(&h->gb); if (sps->video_signal_type_present_flag) { get_bits(&h->gb, 3); /* video_format */ sps->full_range = get_bits1(&h->gb); /* video_full_range_flag */ sps->colour_description_present_flag = get_bits1(&h->gb); if (sps->colour_description_present_flag) { sps->color_primaries = get_bits(&h->gb, 8); /* colour_primaries */ sps->color_trc = get_bits(&h->gb, 8); /* transfer_characteristics */ sps->colorspace = get_bits(&h->gb, 8); /* matrix_coefficients */ if (sps->color_primaries >= AVCOL_PRI_NB) sps->color_primaries = AVCOL_PRI_UNSPECIFIED; if (sps->color_trc >= AVCOL_TRC_NB) sps->color_trc = AVCOL_TRC_UNSPECIFIED; if (sps->colorspace >= AVCOL_SPC_NB) sps->colorspace = AVCOL_SPC_UNSPECIFIED; } } /* chroma_location_info_present_flag */ if (get_bits1(&h->gb)) { /* chroma_sample_location_type_top_field */ h->avctx->chroma_sample_location = get_ue_golomb(&h->gb) + 1; get_ue_golomb(&h->gb); /* chroma_sample_location_type_bottom_field */ } sps->timing_info_present_flag = get_bits1(&h->gb); if (sps->timing_info_present_flag) { sps->num_units_in_tick = get_bits_long(&h->gb, 32); sps->time_scale = get_bits_long(&h->gb, 32); if (!sps->num_units_in_tick || !sps->time_scale) { av_log(h->avctx, AV_LOG_ERROR, \"time_scale/num_units_in_tick invalid or unsupported (%\"PRIu32\"/%\"PRIu32\")\\n\", sps->time_scale, sps->num_units_in_tick); return AVERROR_INVALIDDATA; } sps->fixed_frame_rate_flag = get_bits1(&h->gb); } sps->nal_hrd_parameters_present_flag = get_bits1(&h->gb); if (sps->nal_hrd_parameters_present_flag) if (decode_hrd_parameters(h, sps) < 0) return AVERROR_INVALIDDATA; sps->vcl_hrd_parameters_present_flag = get_bits1(&h->gb); if (sps->vcl_hrd_parameters_present_flag) if (decode_hrd_parameters(h, sps) < 0) return AVERROR_INVALIDDATA; if (sps->nal_hrd_parameters_present_flag || sps->vcl_hrd_parameters_present_flag) get_bits1(&h->gb); /* low_delay_hrd_flag */ sps->pic_struct_present_flag = get_bits1(&h->gb); sps->bitstream_restriction_flag = get_bits1(&h->gb); if (sps->bitstream_restriction_flag) { get_bits1(&h->gb); /* motion_vectors_over_pic_boundaries_flag */ get_ue_golomb(&h->gb); /* max_bytes_per_pic_denom */ get_ue_golomb(&h->gb); /* max_bits_per_mb_denom */ get_ue_golomb(&h->gb); /* log2_max_mv_length_horizontal */ get_ue_golomb(&h->gb); /* log2_max_mv_length_vertical */ sps->num_reorder_frames = get_ue_golomb(&h->gb); get_ue_golomb(&h->gb); /*max_dec_frame_buffering*/ if (get_bits_left(&h->gb) < 0) { sps->num_reorder_frames = 0; sps->bitstream_restriction_flag = 0; } if (sps->num_reorder_frames > 16U /* max_dec_frame_buffering || max_dec_frame_buffering > 16 */) { av_log(h->avctx, AV_LOG_ERROR, \"Clipping illegal num_reorder_frames %d\\n\", sps->num_reorder_frames); sps->num_reorder_frames = 16; return AVERROR_INVALIDDATA; } } if (get_bits_left(&h->gb) < 0) { av_log(h->avctx, AV_LOG_ERROR, \"Overread VUI by %d bits\\n\", -get_bits_left(&h->gb)); return AVERROR_INVALIDDATA; } return 0; }"} {"target": 0, "idx": 13415, "func": "int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors) { BlockDriver *drv = bs->drv; int ret; if (!drv) { return -ENOMEDIUM; } if (!drv->bdrv_write_compressed) { return -ENOTSUP; } ret = bdrv_check_request(bs, sector_num, nb_sectors); if (ret < 0) { return ret; } assert(QLIST_EMPTY(&bs->dirty_bitmaps)); return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); }"} {"target": 0, "idx": 13426, "func": "static int 
usb_host_handle_control(USBDevice *dev, USBPacket *p, int request, int value, int index, int length, uint8_t *data) { USBHostDevice *s = DO_UPCAST(USBHostDevice, dev, dev); struct usbdevfs_urb *urb; AsyncURB *aurb; int ret; /* * Process certain standard device requests. * These are infrequent and are processed synchronously. */ /* Note request is (bRequestType << 8) | bRequest */ trace_usb_host_req_control(s->bus_num, s->addr, request, value, index); switch (request) { case DeviceOutRequest | USB_REQ_SET_ADDRESS: return usb_host_set_address(s, value); case DeviceOutRequest | USB_REQ_SET_CONFIGURATION: return usb_host_set_config(s, value & 0xff); case InterfaceOutRequest | USB_REQ_SET_INTERFACE: return usb_host_set_interface(s, index, value); } /* The rest are asynchronous */ if (length > sizeof(dev->data_buf)) { fprintf(stderr, \"husb: ctrl buffer too small (%d > %zu)\\n\", length, sizeof(dev->data_buf)); return USB_RET_STALL; } aurb = async_alloc(s); aurb->packet = p; /* * Setup ctrl transfer. * * s->ctrl is laid out such that data buffer immediately follows * 'req' struct which is exactly what usbdevfs expects. */ urb = &aurb->urb; urb->type = USBDEVFS_URB_TYPE_CONTROL; urb->endpoint = p->devep; urb->buffer = &dev->setup_buf; urb->buffer_length = length + 8; urb->usercontext = s; trace_usb_host_urb_submit(s->bus_num, s->addr, aurb, urb->buffer_length, aurb->more); ret = ioctl(s->fd, USBDEVFS_SUBMITURB, urb); DPRINTF(\"husb: submit ctrl. len %u aurb %p\\n\", urb->buffer_length, aurb); if (ret < 0) { DPRINTF(\"husb: submit failed. errno %d\\n\", errno); async_free(aurb); switch(errno) { case ETIMEDOUT: return USB_RET_NAK; case EPIPE: default: return USB_RET_STALL; } } return USB_RET_ASYNC; }"} {"target": 1, "idx": 13429, "func": "static int dshow_read_header(AVFormatContext *avctx) { struct dshow_ctx *ctx = avctx->priv_data; IGraphBuilder *graph = NULL; ICreateDevEnum *devenum = NULL; IMediaControl *control = NULL; IMediaEvent *media_event = NULL; HANDLE media_event_handle; HANDLE proc; int ret = AVERROR(EIO); int r; CoInitialize(0); if (!ctx->list_devices && !parse_device_name(avctx)) { av_log(avctx, AV_LOG_ERROR, \"Malformed dshow input string.\\n\"); goto error; } ctx->video_codec_id = avctx->video_codec_id ? 
avctx->video_codec_id : AV_CODEC_ID_RAWVIDEO; if (ctx->pixel_format != AV_PIX_FMT_NONE) { if (ctx->video_codec_id != AV_CODEC_ID_RAWVIDEO) { av_log(avctx, AV_LOG_ERROR, \"Pixel format may only be set when \" \"video codec is not set or set to rawvideo\\n\"); ret = AVERROR(EINVAL); goto error; } } if (ctx->framerate) { r = av_parse_video_rate(&ctx->requested_framerate, ctx->framerate); if (r < 0) { av_log(avctx, AV_LOG_ERROR, \"Could not parse framerate '%s'.\\n\", ctx->framerate); goto error; } } r = CoCreateInstance(&CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER, &IID_IGraphBuilder, (void **) &graph); if (r != S_OK) { av_log(avctx, AV_LOG_ERROR, \"Could not create capture graph.\\n\"); goto error; } ctx->graph = graph; r = CoCreateInstance(&CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER, &IID_ICreateDevEnum, (void **) &devenum); if (r != S_OK) { av_log(avctx, AV_LOG_ERROR, \"Could not enumerate system devices.\\n\"); goto error; } if (ctx->list_devices) { av_log(avctx, AV_LOG_INFO, \"DirectShow video devices\\n\"); dshow_cycle_devices(avctx, devenum, VideoDevice, NULL); av_log(avctx, AV_LOG_INFO, \"DirectShow audio devices\\n\"); dshow_cycle_devices(avctx, devenum, AudioDevice, NULL); ret = AVERROR_EXIT; goto error; } if (ctx->list_options) { if (ctx->device_name[VideoDevice]) dshow_list_device_options(avctx, devenum, VideoDevice); if (ctx->device_name[AudioDevice]) dshow_list_device_options(avctx, devenum, AudioDevice); ret = AVERROR_EXIT; goto error; } if (ctx->device_name[VideoDevice]) { if ((r = dshow_open_device(avctx, devenum, VideoDevice)) < 0 || (r = dshow_add_device(avctx, VideoDevice)) < 0) { ret = r; goto error; } } if (ctx->device_name[AudioDevice]) { if ((r = dshow_open_device(avctx, devenum, AudioDevice)) < 0 || (r = dshow_add_device(avctx, AudioDevice)) < 0) { ret = r; goto error; } } ctx->mutex = CreateMutex(NULL, 0, NULL); if (!ctx->mutex) { av_log(avctx, AV_LOG_ERROR, \"Could not create Mutex\\n\"); goto error; } ctx->event[1] = CreateEvent(NULL, 1, 0, NULL); if (!ctx->event[1]) { av_log(avctx, AV_LOG_ERROR, \"Could not create Event\\n\"); goto error; } r = IGraphBuilder_QueryInterface(graph, &IID_IMediaControl, (void **) &control); if (r != S_OK) { av_log(avctx, AV_LOG_ERROR, \"Could not get media control.\\n\"); goto error; } ctx->control = control; r = IGraphBuilder_QueryInterface(graph, &IID_IMediaEvent, (void **) &media_event); if (r != S_OK) { av_log(avctx, AV_LOG_ERROR, \"Could not get media event.\\n\"); goto error; } ctx->media_event = media_event; r = IMediaEvent_GetEventHandle(media_event, (void *) &media_event_handle); if (r != S_OK) { av_log(avctx, AV_LOG_ERROR, \"Could not get media event handle.\\n\"); goto error; } proc = GetCurrentProcess(); r = DuplicateHandle(proc, media_event_handle, proc, &ctx->event[0], 0, 0, DUPLICATE_SAME_ACCESS); if (!r) { av_log(avctx, AV_LOG_ERROR, \"Could not duplicate media event handle.\\n\"); goto error; } r = IMediaControl_Run(control); if (r == S_FALSE) { OAFilterState pfs; r = IMediaControl_GetState(control, 0, &pfs); } if (r != S_OK) { av_log(avctx, AV_LOG_ERROR, \"Could not run filter\\n\"); goto error; } ret = 0; error: if (devenum) ICreateDevEnum_Release(devenum); if (ret < 0) dshow_read_close(avctx); return ret; }"} {"target": 1, "idx": 13437, "func": "static inline void s390_machine_initfn(Object *obj) { object_property_add_bool(obj, \"aes-key-wrap\", machine_get_aes_key_wrap, machine_set_aes_key_wrap, NULL); object_property_set_description(obj, \"aes-key-wrap\", \"enable/disable AES key wrapping using the 
CPACF wrapping key\", object_property_set_bool(obj, true, \"aes-key-wrap\", NULL); object_property_add_bool(obj, \"dea-key-wrap\", machine_get_dea_key_wrap, machine_set_dea_key_wrap, NULL); object_property_set_description(obj, \"dea-key-wrap\", \"enable/disable DEA key wrapping using the CPACF wrapping key\", object_property_set_bool(obj, true, \"dea-key-wrap\", NULL); object_property_add_str(obj, \"loadparm\", machine_get_loadparm, machine_set_loadparm, NULL); object_property_set_description(obj, \"loadparm\", \"Up to 8 chars in set of [A-Za-z0-9. ] (lower case chars converted\" \" to upper case) to pass to machine loader, boot manager,\" \" and guest kernel\", }"} {"target": 0, "idx": 13442, "func": "static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform) { int i, mb_x, mb_y; uint16_t (*buffer)[4]; int left[4], top[4], topleft[4]; const int linesize = s->linesize[0]; const int mask = ((1 << s->bits) - 1) << point_transform; int resync_mb_y = 0; int resync_mb_x = 0; if (s->nb_components != 3 && s->nb_components != 4) return AVERROR_INVALIDDATA; if (s->v_max != 1 || s->h_max != 1 || !s->lossless) return AVERROR_INVALIDDATA; s->restart_count = s->restart_interval; av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, (unsigned)s->mb_width * 4 * sizeof(s->ljpeg_buffer[0][0])); buffer = s->ljpeg_buffer; for (i = 0; i < 4; i++) buffer[0][i] = 1 << (s->bits - 1); for (mb_y = 0; mb_y < s->mb_height; mb_y++) { uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y); if (s->interlaced && s->bottom_field) ptr += linesize >> 1; for (i = 0; i < 4; i++) top[i] = left[i] = topleft[i] = buffer[0][i]; for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int modified_predictor = predictor; if (s->restart_interval && !s->restart_count){ s->restart_count = s->restart_interval; resync_mb_x = mb_x; resync_mb_y = mb_y; for(i=0; i<4; i++) top[i] = left[i]= topleft[i]= 1 << (s->bits - 1); } if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x) modified_predictor = 1; for (i=0;idc_index[i]); if(dc == 0xFFFFF) return -1; left[i] = buffer[mb_x][i] = mask & (pred + (dc << point_transform)); } if (s->restart_interval && !--s->restart_count) { align_get_bits(&s->gb); skip_bits(&s->gb, 16); /* skip RSTn */ } } if (s->rct && s->nb_components == 4) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2); ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2]; ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2]; ptr[4*mb_x + 0] = buffer[mb_x][3]; } } else if (s->nb_components == 4) { for(i=0; icomp_index[i]; if (s->bits <= 8) { for(mb_x = 0; mb_x < s->mb_width; mb_x++) { ptr[4*mb_x+3-c] = buffer[mb_x][i]; } } else if(s->bits == 9) { return AVERROR_PATCHWELCOME; } else { for(mb_x = 0; mb_x < s->mb_width; mb_x++) { ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i]; } } } } else if (s->rct) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2); ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1]; ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1]; } } else if (s->pegasus_rct) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2); ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1]; ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1]; } } else { for(i=0; icomp_index[i]; if (s->bits <= 8) { for(mb_x = 0; mb_x < s->mb_width; 
mb_x++) { ptr[3*mb_x+2-c] = buffer[mb_x][i]; } } else if(s->bits == 9) { return AVERROR_PATCHWELCOME; } else { for(mb_x = 0; mb_x < s->mb_width; mb_x++) { ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i]; } } } } } return 0; }"} {"target": 1, "idx": 13448, "func": "static void qemu_spice_display_init_one(QemuConsole *con) { SimpleSpiceDisplay *ssd = g_new0(SimpleSpiceDisplay, 1); qemu_spice_display_init_common(ssd); ssd->qxl.base.sif = &dpy_interface.base; qemu_spice_add_display_interface(&ssd->qxl, con); assert(ssd->worker); qemu_spice_create_host_memslot(ssd); ssd->dcl.ops = &display_listener_ops; ssd->dcl.con = con; register_displaychangelistener(&ssd->dcl); }"} {"target": 0, "idx": 13462, "func": "int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries, int64_t wanted_timestamp, int flags) { int a, b, m; int64_t timestamp; a = -1; b = nb_entries; // Optimize appending index entries at the end. if (b && entries[b - 1].timestamp < wanted_timestamp) a = b - 1; while (b - a > 1) { m = (a + b) >> 1; // Search for the next non-discarded packet. while ((entries[m].flags & AVINDEX_DISCARD_FRAME) && m < b) { m++; if (m == b && entries[m].timestamp >= wanted_timestamp) { m = b - 1; break; } } timestamp = entries[m].timestamp; if (timestamp >= wanted_timestamp) b = m; if (timestamp <= wanted_timestamp) a = m; } m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b; if (!(flags & AVSEEK_FLAG_ANY)) while (m >= 0 && m < nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)) m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1; if (m == nb_entries) return -1; return m; }"} {"target": 1, "idx": 13468, "func": "static int codec_get_buffer(AVCodecContext *s, AVFrame *frame) { InputStream *ist = s->opaque; FrameBuffer *buf; int ret, i; if (!ist->buffer_pool && (ret = alloc_buffer(s, ist, &ist->buffer_pool)) < 0) return ret; buf = ist->buffer_pool; ist->buffer_pool = buf->next; buf->next = NULL; if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) { av_freep(&buf->base[0]); av_free(buf); ist->dr1 = 0; if ((ret = alloc_buffer(s, ist, &buf)) < 0) return ret; } buf->refcount++; frame->opaque = buf; frame->type = FF_BUFFER_TYPE_USER; frame->extended_data = frame->data; frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE; for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) { frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't frame->data[i] = buf->data[i]; frame->linesize[i] = buf->linesize[i]; } return 0; }"} {"target": 0, "idx": 13488, "func": "static int filter_frame(AVFilterLink *inlink, AVFrame *inpic) { AVFilterContext *ctx = inlink->dst; HisteqContext *histeq = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; int strength = histeq->strength * 1000; int intensity = histeq->intensity * 1000; int x, y, i, luthi, lutlo, lut, luma, oluma, m; AVFrame *outpic; unsigned int r, g, b, jran; uint8_t *src, *dst; outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!outpic) { av_frame_free(&inpic); return AVERROR(ENOMEM); } av_frame_copy_props(outpic, inpic); /* Seed random generator for antibanding. */ jran = LCG_SEED; /* Calculate and store the luminance and calculate the global histogram based on the luminance. 
*/ memset(histeq->in_histogram, 0, sizeof(histeq->in_histogram)); src = inpic->data[0]; dst = outpic->data[0]; for (y = 0; y < inlink->h; y++) { for (x = 0; x < inlink->w * histeq->bpp; x += histeq->bpp) { GET_RGB_VALUES(r, g, b, src, histeq->rgba_map); luma = (55 * r + 182 * g + 19 * b) >> 8; dst[x + histeq->rgba_map[A]] = luma; histeq->in_histogram[luma]++; } src += inpic->linesize[0]; dst += outpic->linesize[0]; } #ifdef DEBUG for (x = 0; x < 256; x++) av_dlog(ctx, \"in[%d]: %u\\n\", x, histeq->in_histogram[x]); #endif /* Calculate the lookup table. */ histeq->LUT[0] = histeq->in_histogram[0]; /* Accumulate */ for (x = 1; x < 256; x++) histeq->LUT[x] = histeq->LUT[x-1] + histeq->in_histogram[x]; /* Normalize */ for (x = 0; x < 256; x++) histeq->LUT[x] = (histeq->LUT[x] * intensity) / (inlink->h * inlink->w); /* Adjust the LUT based on the selected strength. This is an alpha mix of the calculated LUT and a linear LUT with gain 1. */ for (x = 0; x < 256; x++) histeq->LUT[x] = (strength * histeq->LUT[x]) / 255 + ((255 - strength) * x) / 255; /* Output the equalized frame. */ memset(histeq->out_histogram, 0, sizeof(histeq->out_histogram)); src = inpic->data[0]; dst = outpic->data[0]; for (y = 0; y < inlink->h; y++) { for (x = 0; x < inlink->w * histeq->bpp; x += histeq->bpp) { luma = dst[x + histeq->rgba_map[A]]; if (luma == 0) { for (i = 0; i < histeq->bpp; ++i) dst[x + i] = 0; histeq->out_histogram[0]++; } else { lut = histeq->LUT[luma]; if (histeq->antibanding != HISTEQ_ANTIBANDING_NONE) { if (luma > 0) { lutlo = histeq->antibanding == HISTEQ_ANTIBANDING_WEAK ? (histeq->LUT[luma] + histeq->LUT[luma - 1]) / 2 : histeq->LUT[luma - 1]; } else lutlo = lut; if (luma < 255) { luthi = (histeq->antibanding == HISTEQ_ANTIBANDING_WEAK) ? (histeq->LUT[luma] + histeq->LUT[luma + 1]) / 2 : histeq->LUT[luma + 1]; } else luthi = lut; if (lutlo != luthi) { jran = LCG(jran); lut = lutlo + ((luthi - lutlo + 1) * jran) / LCG_M; } } GET_RGB_VALUES(r, g, b, src, histeq->rgba_map); if (((m = FFMAX3(r, g, b)) * lut) / luma > 255) { r = (r * 255) / m; g = (g * 255) / m; b = (b * 255) / m; } else { r = (r * lut) / luma; g = (g * lut) / luma; b = (b * lut) / luma; } dst[x + histeq->rgba_map[R]] = r; dst[x + histeq->rgba_map[G]] = g; dst[x + histeq->rgba_map[B]] = b; oluma = (55 * r + 182 * g + 19 * b) >> 8; histeq->out_histogram[oluma]++; } } src += inpic->linesize[0]; dst += outpic->linesize[0]; } #ifdef DEBUG for (x = 0; x < 256; x++) av_dlog(ctx, \"out[%d]: %u\\n\", x, histeq->out_histogram[x]); #endif av_frame_free(&inpic); return ff_filter_frame(outlink, outpic); }"} {"target": 0, "idx": 13494, "func": "static void test_visitor_in_native_list_int32(TestInputVisitorData *data, const void *unused) { test_native_list_integer_helper(data, unused, USER_DEF_NATIVE_LIST_UNION_KIND_S32); }"} {"target": 0, "idx": 13504, "func": "static void nbd_trip(void *opaque) { NBDClient *client = opaque; NBDExport *exp = client->exp; NBDRequest *req; struct nbd_request request; struct nbd_reply reply; ssize_t ret; uint32_t command; TRACE(\"Reading request.\"); if (client->closing) { return; } req = nbd_request_get(client); ret = nbd_co_receive_request(req, &request); if (ret == -EAGAIN) { goto done; } if (ret == -EIO) { goto out; } reply.handle = request.handle; reply.error = 0; if (ret < 0) { reply.error = -ret; goto error_reply; } command = request.type & NBD_CMD_MASK_COMMAND; if (command != NBD_CMD_DISC && (request.from + request.len) > exp->size) { LOG(\"From: %\" PRIu64 \", Len: %u, Size: %\" PRIu64 \", Offset: %\" PRIu64 
\"\\n\", request.from, request.len, (uint64_t)exp->size, (uint64_t)exp->dev_offset); LOG(\"requested operation past EOF--bad client?\"); goto invalid_request; } if (client->closing) { /* * The client may be closed when we are blocked in * nbd_co_receive_request() */ goto done; } switch (command) { case NBD_CMD_READ: TRACE(\"Request type is READ\"); if (request.type & NBD_CMD_FLAG_FUA) { ret = blk_co_flush(exp->blk); if (ret < 0) { LOG(\"flush failed\"); reply.error = -ret; goto error_reply; } } ret = blk_pread(exp->blk, request.from + exp->dev_offset, req->data, request.len); if (ret < 0) { LOG(\"reading from file failed\"); reply.error = -ret; goto error_reply; } TRACE(\"Read %u byte(s)\", request.len); if (nbd_co_send_reply(req, &reply, request.len) < 0) goto out; break; case NBD_CMD_WRITE: TRACE(\"Request type is WRITE\"); if (exp->nbdflags & NBD_FLAG_READ_ONLY) { TRACE(\"Server is read-only, return error\"); reply.error = EROFS; goto error_reply; } TRACE(\"Writing to device\"); ret = blk_pwrite(exp->blk, request.from + exp->dev_offset, req->data, request.len, 0); if (ret < 0) { LOG(\"writing to file failed\"); reply.error = -ret; goto error_reply; } if (request.type & NBD_CMD_FLAG_FUA) { ret = blk_co_flush(exp->blk); if (ret < 0) { LOG(\"flush failed\"); reply.error = -ret; goto error_reply; } } if (nbd_co_send_reply(req, &reply, 0) < 0) { goto out; } break; case NBD_CMD_DISC: TRACE(\"Request type is DISCONNECT\"); errno = 0; goto out; case NBD_CMD_FLUSH: TRACE(\"Request type is FLUSH\"); ret = blk_co_flush(exp->blk); if (ret < 0) { LOG(\"flush failed\"); reply.error = -ret; } if (nbd_co_send_reply(req, &reply, 0) < 0) { goto out; } break; case NBD_CMD_TRIM: TRACE(\"Request type is TRIM\"); ret = blk_co_discard(exp->blk, (request.from + exp->dev_offset) / BDRV_SECTOR_SIZE, request.len / BDRV_SECTOR_SIZE); if (ret < 0) { LOG(\"discard failed\"); reply.error = -ret; } if (nbd_co_send_reply(req, &reply, 0) < 0) { goto out; } break; default: LOG(\"invalid request type (%u) received\", request.type); invalid_request: reply.error = EINVAL; error_reply: if (nbd_co_send_reply(req, &reply, 0) < 0) { goto out; } break; } TRACE(\"Request/Reply complete\"); done: nbd_request_put(req); return; out: nbd_request_put(req); client_close(client); }"} {"target": 0, "idx": 13513, "func": "void qemu_register_reset(QEMUResetHandler *func, void *opaque) { QEMUResetEntry *re = qemu_mallocz(sizeof(QEMUResetEntry)); re->func = func; re->opaque = opaque; TAILQ_INSERT_TAIL(&reset_handlers, re, entry); }"} {"target": 0, "idx": 13517, "func": "void stl_phys_notdirty(hwaddr addr, uint32_t val) { uint8_t *ptr; MemoryRegionSection *section; section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS); if (!memory_region_is_ram(section->mr) || section->readonly) { addr = memory_region_section_addr(section, addr); if (memory_region_is_ram(section->mr)) { section = &phys_sections[phys_section_rom]; } io_mem_write(section->mr, addr, val, 4); } else { unsigned long addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) + memory_region_section_addr(section, addr); ptr = qemu_get_ram_ptr(addr1); stl_p(ptr, val); if (unlikely(in_migration)) { if (!cpu_physical_memory_is_dirty(addr1)) { /* invalidate code */ tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); /* set dirty bit */ cpu_physical_memory_set_dirty_flags( addr1, (0xff & ~CODE_DIRTY_FLAG)); } } } }"} {"target": 0, "idx": 13524, "func": "static void compute_antialias_float(MPADecodeContext *s, GranuleDef *g) { float *ptr; int n, i; /* 
we antialias only \"long\" bands */ if (g->block_type == 2) { if (!g->switch_point) return; /* XXX: check this for 8000Hz case */ n = 1; } else { n = SBLIMIT - 1; } ptr = g->sb_hybrid + 18; for(i = n;i > 0;i--) { float tmp0, tmp1; float *csa = &csa_table_float[0][0]; #define FLOAT_AA(j)\\ tmp0= ptr[-1-j];\\ tmp1= ptr[ j];\\ ptr[-1-j] = tmp0 * csa[0+4*j] - tmp1 * csa[1+4*j];\\ ptr[ j] = tmp0 * csa[1+4*j] + tmp1 * csa[0+4*j]; FLOAT_AA(0) FLOAT_AA(1) FLOAT_AA(2) FLOAT_AA(3) FLOAT_AA(4) FLOAT_AA(5) FLOAT_AA(6) FLOAT_AA(7) ptr += 18; } }"} {"target": 1, "idx": 13548, "func": "PCIBus *i440fx_init(PCII440FXState **pi440fx_state, int *piix3_devfn, qemu_irq *pic, int ram_size) { DeviceState *dev; PCIBus *b; PCIDevice *d; I440FXState *s; PIIX3State *piix3; dev = qdev_create(NULL, \"i440FX-pcihost\"); s = FROM_SYSBUS(I440FXState, sysbus_from_qdev(dev)); b = pci_bus_new(&s->busdev.qdev, NULL, 0); s->bus = b; qdev_init_nofail(dev); d = pci_create_simple(b, 0, \"i440FX\"); *pi440fx_state = DO_UPCAST(PCII440FXState, dev, d); piix3 = DO_UPCAST(PIIX3State, dev, pci_create_simple(b, -1, \"PIIX3\")); piix3->pic = pic; pci_bus_irqs(b, piix3_set_irq, pci_slot_get_pirq, piix3, 4); (*pi440fx_state)->piix3 = piix3; *piix3_devfn = piix3->dev.devfn; ram_size = ram_size / 8 / 1024 / 1024; if (ram_size > 255) ram_size = 255; (*pi440fx_state)->dev.config[0x57]=ram_size; return b; }"} {"target": 1, "idx": 13554, "func": "uint64_t helper_cvttq_c(CPUAlphaState *env, uint64_t a) { return inline_cvttq(env, a, float_round_to_zero, 0); }"} {"target": 0, "idx": 13555, "func": "int ff_dca_core_parse(DCACoreDecoder *s, uint8_t *data, int size) { int ret; s->ext_audio_mask = 0; s->xch_pos = s->xxch_pos = s->x96_pos = 0; if ((ret = init_get_bits8(&s->gb, data, size)) < 0) return ret; s->gb_in = s->gb; if ((ret = parse_frame_header(s)) < 0) return ret; if ((ret = alloc_sample_buffer(s)) < 0) return ret; if ((ret = parse_frame_data(s, HEADER_CORE, 0)) < 0) return ret; if ((ret = parse_optional_info(s)) < 0) return ret; // Workaround for DTS in WAV if (s->frame_size > size && s->frame_size < size + 4) s->frame_size = size; if (ff_dca_seek_bits(&s->gb, s->frame_size * 8)) { av_log(s->avctx, AV_LOG_ERROR, \"Read past end of core frame\\n\"); if (s->avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; } return 0; }"} {"target": 1, "idx": 13556, "func": "static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) { TCGv sr_cy = tcg_temp_new(); tcg_gen_muls2_tl(dest, sr_cy, srca, srcb); tcg_gen_setcondi_tl(TCG_COND_NE, sr_cy, sr_cy, 0); tcg_gen_deposit_tl(cpu_sr, cpu_sr, sr_cy, ctz32(SR_CY), 1); gen_ove_cy(dc, sr_cy); tcg_temp_free(sr_cy); }"} {"target": 0, "idx": 13571, "func": "static int decode_init_mp3on4(AVCodecContext * avctx) { MP3On4DecodeContext *s = avctx->priv_data; int i; if ((avctx->extradata_size < 2) || (avctx->extradata == NULL)) { av_log(avctx, AV_LOG_ERROR, \"Codec extradata missing or too short.\\n\"); return -1; } s->chan_cfg = (((unsigned char *)avctx->extradata)[1] >> 3) & 0x0f; s->frames = mp3Frames[s->chan_cfg]; if(!s->frames) { av_log(avctx, AV_LOG_ERROR, \"Invalid channel config number.\\n\"); return -1; } avctx->channels = mp3Channels[s->chan_cfg]; /* Init the first mp3 decoder in standard way, so that all tables get builded * We replace avctx->priv_data with the context of the first decoder so that * decode_init() does not have to be changed. 
* Other decoders will be inited here copying data from the first context */ // Allocate zeroed memory for the first decoder context s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext)); // Put decoder context in place to make init_decode() happy avctx->priv_data = s->mp3decctx[0]; decode_init(avctx); // Restore mp3on4 context pointer avctx->priv_data = s; s->mp3decctx[0]->adu_mode = 1; // Set adu mode /* Create a separate codec/context for each frame (first is already ok). * Each frame is 1 or 2 channels - up to 5 frames allowed */ for (i = 1; i < s->frames; i++) { s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext)); s->mp3decctx[i]->compute_antialias = s->mp3decctx[0]->compute_antialias; s->mp3decctx[i]->adu_mode = 1; s->mp3decctx[i]->avctx = avctx; } return 0; }"} {"target": 0, "idx": 13589, "func": "_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, const struct timespec *,tsp,int,flags) #endif #endif /* CONFIG_UTIMENSAT */ #ifdef CONFIG_INOTIFY #include <sys/inotify.h> #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) static int sys_inotify_init(void) { return (inotify_init()); }"} {"target": 1, "idx": 13604, "func": "static void curl_readv_bh_cb(void *p) { CURLState *state; int running; CURLAIOCB *acb = p; BDRVCURLState *s = acb->common.bs->opaque; qemu_bh_delete(acb->bh); acb->bh = NULL; size_t start = acb->sector_num * SECTOR_SIZE; size_t end; // In case we have the requested data already (e.g. read-ahead), // we can just call the callback and be done. switch (curl_find_buf(s, start, acb->nb_sectors * SECTOR_SIZE, acb)) { case FIND_RET_OK: qemu_aio_release(acb); // fall through case FIND_RET_WAIT: return; default: break; } // No cache found, so let's start a new request state = curl_init_state(s); if (!state) { acb->common.cb(acb->common.opaque, -EIO); qemu_aio_release(acb); return; } acb->start = 0; acb->end = (acb->nb_sectors * SECTOR_SIZE); state->buf_off = 0; g_free(state->orig_buf); state->buf_start = start; state->buf_len = acb->end + s->readahead_size; end = MIN(start + state->buf_len, s->len) - 1; state->orig_buf = g_malloc(state->buf_len); state->acb[0] = acb; snprintf(state->range, 127, \"%zd-%zd\", start, end); DPRINTF(\"CURL (AIO): Reading %d at %zd (%s)\\n\", (acb->nb_sectors * SECTOR_SIZE), start, state->range); curl_easy_setopt(state->curl, CURLOPT_RANGE, state->range); curl_multi_add_handle(s->multi, state->curl); /* Tell curl it needs to kick things off */ curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running); }"} {"target": 1, "idx": 13635, "func": "static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx) { int lambda, up_step, down_step; int last_lower = INT_MAX, last_higher = 0; int x, y, q; for (q = 1; q < avctx->qmax; q++) { ctx->qscale = q; avctx->execute2(avctx, dnxhd_calc_bits_thread, NULL, NULL, ctx->m.mb_height); } up_step = down_step = 2<<LAMBDA_FRAC_BITS; lambda = ctx->lambda; for (;;) { int bits = 0; int end = 0; if (lambda == last_higher) { lambda++; end = 1; // need to set final qscales/bits } for (y = 0; y < ctx->m.mb_height; y++) { for (x = 0; x < ctx->m.mb_width; x++) { unsigned min = UINT_MAX; int qscale = 1; int mb = y*ctx->m.mb_width+x; for (q = 1; q < avctx->qmax; q++) { unsigned score = ctx->mb_rc[q][mb].bits*lambda+(ctx->mb_rc[q][mb].ssd<<LAMBDA_FRAC_BITS); if (score < min) { min = score; qscale = q; } } bits += ctx->mb_rc[qscale][mb].bits; ctx->mb_qscale[mb] = qscale; ctx->mb_bits[mb] = ctx->mb_rc[qscale][mb].bits; } bits = (bits+31)&~31; // padding if (bits > ctx->frame_bits) break; } //av_dlog(ctx->m.avctx, \"lambda %d, up %u, down %u, bits %d, frame %d\\n\", // lambda, last_higher, last_lower, bits,
ctx->frame_bits); if (end) { if (bits > ctx->frame_bits) return -1; break; } if (bits < ctx->frame_bits) { last_lower = FFMIN(lambda, last_lower); if (last_higher != 0) lambda = (lambda+last_higher)>>1; else lambda -= down_step; down_step *= 5; // XXX tune ? up_step = 1<<LAMBDA_FRAC_BITS; lambda = FFMAX(1, lambda); if (lambda == last_higher) break; } else { last_higher = FFMAX(lambda, last_higher); if (last_lower != INT_MAX) lambda = (lambda+last_lower)>>1; else if ((int64_t)lambda + up_step > INT_MAX) return -1; else lambda += up_step; up_step = FFMIN((int64_t)up_step*5, INT_MAX); down_step = 1<<LAMBDA_FRAC_BITS; } } //av_dlog(ctx->m.avctx, \"out lambda %d\\n\", lambda); ctx->lambda = lambda; return 0; }"} {"target": 1, "idx": 13636, "func": "static int find_and_decode_index(NUTContext *nut) { AVFormatContext *s = nut->avf; AVIOContext *bc = s->pb; uint64_t tmp, end; int i, j, syncpoint_count; int64_t filesize = avio_size(bc); int64_t *syncpoints; int8_t *has_keyframe; int ret = AVERROR_INVALIDDATA; avio_seek(bc, filesize - 12, SEEK_SET); avio_seek(bc, filesize - avio_rb64(bc), SEEK_SET); if (avio_rb64(bc) != INDEX_STARTCODE) { av_log(s, AV_LOG_ERROR, \"no index at the end\\n\"); return ret; } end = get_packetheader(nut, bc, 1, INDEX_STARTCODE); end += avio_tell(bc); ffio_read_varlen(bc); // max_pts GET_V(syncpoint_count, tmp < INT_MAX / 8 && tmp > 0); syncpoints = av_malloc(sizeof(int64_t) * syncpoint_count); has_keyframe = av_malloc(sizeof(int8_t) * (syncpoint_count + 1)); if (!syncpoints || !has_keyframe) return AVERROR(ENOMEM); for (i = 0; i < syncpoint_count; i++) { syncpoints[i] = ffio_read_varlen(bc); if (syncpoints[i] <= 0) goto fail; if (i) syncpoints[i] += syncpoints[i - 1]; } for (i = 0; i < s->nb_streams; i++) { int64_t last_pts = -1; for (j = 0; j < syncpoint_count;) { uint64_t x = ffio_read_varlen(bc); int type = x & 1; int n = j; x >>= 1; if (type) { int flag = x & 1; x >>= 1; if (n + x >= syncpoint_count + 1) { av_log(s, AV_LOG_ERROR, \"index overflow A\\n\"); goto fail; } while (x--) has_keyframe[n++] = flag; has_keyframe[n++] = !flag; } else { while (x != 1) { if (n >= syncpoint_count + 1) { av_log(s, AV_LOG_ERROR, \"index overflow B\\n\"); goto fail; } has_keyframe[n++] = x & 1; x >>= 1; } } if (has_keyframe[0]) { av_log(s, AV_LOG_ERROR, \"keyframe before first syncpoint in index\\n\"); goto fail; } assert(n <= syncpoint_count + 1); for (; j < n && j < syncpoint_count; j++) { if (has_keyframe[j]) { uint64_t B, A = ffio_read_varlen(bc); if (!A) { A = ffio_read_varlen(bc); B = ffio_read_varlen(bc); // eor_pts[j][i] = last_pts + A + B } else B = 0; av_add_index_entry(s->streams[i], 16 * syncpoints[j - 1], last_pts + A, 0, 0, AVINDEX_KEYFRAME); last_pts += A + B; } } } } if (skip_reserved(bc, end) || ffio_get_checksum(bc)) { av_log(s, AV_LOG_ERROR, \"index checksum mismatch\\n\"); goto fail; } ret = 0; fail: av_free(syncpoints); av_free(has_keyframe); return ret; }"} {"target": 1, "idx": 13667, "func": "static inline void menelaus_rtc_stop(struct menelaus_s *s) { qemu_del_timer(s->rtc.hz); s->rtc.next =- qemu_get_clock(rt_clock); if (s->rtc.next < 1) s->rtc.next = 1; }"} {"target": 1, "idx": 13672, "func": "int drive_init(struct drive_opt *arg, int snapshot, void *opaque) { char buf[128]; char file[1024]; char devname[128]; char serial[21]; const char *mediastr = \"\"; BlockInterfaceType type; enum { MEDIA_DISK, MEDIA_CDROM } media; int bus_id, unit_id; int cyls, heads, secs, translation; BlockDriverState *bdrv; BlockDriver *drv = NULL; QEMUMachine *machine = opaque; int max_devs; int index; int cache; int bdrv_flags, onerror; int drives_table_idx; char *str = arg->opt; static const char * const params[] = { \"bus\", \"unit\", \"if\", \"index\", \"cyls\", \"heads\", \"secs\", \"trans\",
\"media\", \"snapshot\", \"file\", \"cache\", \"format\", \"serial\", \"werror\", NULL }; if (check_params(params, str) < 0) { fprintf(stderr, \"qemu: unknown parameter '%s' in '%s'\\n\", buf, str); return -1; } file[0] = 0; cyls = heads = secs = 0; bus_id = 0; unit_id = -1; translation = BIOS_ATA_TRANSLATION_AUTO; index = -1; cache = 3; if (machine->use_scsi) { type = IF_SCSI; max_devs = MAX_SCSI_DEVS; pstrcpy(devname, sizeof(devname), \"scsi\"); } else { type = IF_IDE; max_devs = MAX_IDE_DEVS; pstrcpy(devname, sizeof(devname), \"ide\"); } media = MEDIA_DISK; /* extract parameters */ if (get_param_value(buf, sizeof(buf), \"bus\", str)) { bus_id = strtol(buf, NULL, 0); if (bus_id < 0) { fprintf(stderr, \"qemu: '%s' invalid bus id\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"unit\", str)) { unit_id = strtol(buf, NULL, 0); if (unit_id < 0) { fprintf(stderr, \"qemu: '%s' invalid unit id\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"if\", str)) { pstrcpy(devname, sizeof(devname), buf); if (!strcmp(buf, \"ide\")) { type = IF_IDE; max_devs = MAX_IDE_DEVS; } else if (!strcmp(buf, \"scsi\")) { type = IF_SCSI; max_devs = MAX_SCSI_DEVS; } else if (!strcmp(buf, \"floppy\")) { type = IF_FLOPPY; max_devs = 0; } else if (!strcmp(buf, \"pflash\")) { type = IF_PFLASH; max_devs = 0; } else if (!strcmp(buf, \"mtd\")) { type = IF_MTD; max_devs = 0; } else if (!strcmp(buf, \"sd\")) { type = IF_SD; max_devs = 0; } else if (!strcmp(buf, \"virtio\")) { type = IF_VIRTIO; max_devs = 0; } else if (!strcmp(buf, \"xen\")) { type = IF_XEN; max_devs = 0; } else { fprintf(stderr, \"qemu: '%s' unsupported bus type '%s'\\n\", str, buf); return -1; } } if (get_param_value(buf, sizeof(buf), \"index\", str)) { index = strtol(buf, NULL, 0); if (index < 0) { fprintf(stderr, \"qemu: '%s' invalid index\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"cyls\", str)) { cyls = strtol(buf, NULL, 0); } if (get_param_value(buf, sizeof(buf), \"heads\", str)) { heads = strtol(buf, NULL, 0); } if (get_param_value(buf, sizeof(buf), \"secs\", str)) { secs = strtol(buf, NULL, 0); } if (cyls || heads || secs) { if (cyls < 1 || cyls > 16383) { fprintf(stderr, \"qemu: '%s' invalid physical cyls number\\n\", str); return -1; } if (heads < 1 || heads > 16) { fprintf(stderr, \"qemu: '%s' invalid physical heads number\\n\", str); return -1; } if (secs < 1 || secs > 63) { fprintf(stderr, \"qemu: '%s' invalid physical secs number\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"trans\", str)) { if (!cyls) { fprintf(stderr, \"qemu: '%s' trans must be used with cyls,heads and secs\\n\", str); return -1; } if (!strcmp(buf, \"none\")) translation = BIOS_ATA_TRANSLATION_NONE; else if (!strcmp(buf, \"lba\")) translation = BIOS_ATA_TRANSLATION_LBA; else if (!strcmp(buf, \"auto\")) translation = BIOS_ATA_TRANSLATION_AUTO; else { fprintf(stderr, \"qemu: '%s' invalid translation type\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"media\", str)) { if (!strcmp(buf, \"disk\")) { media = MEDIA_DISK; } else if (!strcmp(buf, \"cdrom\")) { if (cyls || secs || heads) { fprintf(stderr, \"qemu: '%s' invalid physical CHS format\\n\", str); return -1; } media = MEDIA_CDROM; } else { fprintf(stderr, \"qemu: '%s' invalid media\\n\", str); return -1; } } if (get_param_value(buf, sizeof(buf), \"snapshot\", str)) { if (!strcmp(buf, \"on\")) snapshot = 1; else if (!strcmp(buf, \"off\")) snapshot = 0; else { fprintf(stderr, \"qemu: '%s' invalid snapshot option\\n\", str); return 
-1; } } if (get_param_value(buf, sizeof(buf), \"cache\", str)) { if (!strcmp(buf, \"off\") || !strcmp(buf, \"none\")) cache = 0; else if (!strcmp(buf, \"writethrough\")) cache = 1; else if (!strcmp(buf, \"writeback\")) cache = 2; else { fprintf(stderr, \"qemu: invalid cache option\\n\"); return -1; } } if (get_param_value(buf, sizeof(buf), \"format\", str)) { if (strcmp(buf, \"?\") == 0) { fprintf(stderr, \"qemu: Supported formats:\"); bdrv_iterate_format(bdrv_format_print, NULL); fprintf(stderr, \"\\n\"); return -1; } drv = bdrv_find_format(buf); if (!drv) { fprintf(stderr, \"qemu: '%s' invalid format\\n\", buf); return -1; } } if (arg->file == NULL) get_param_value(file, sizeof(file), \"file\", str); else pstrcpy(file, sizeof(file), arg->file); if (!get_param_value(serial, sizeof(serial), \"serial\", str)) memset(serial, 0, sizeof(serial)); onerror = BLOCK_ERR_STOP_ENOSPC; if (get_param_value(buf, sizeof(serial), \"werror\", str)) { if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO) { fprintf(stderr, \"werror is no supported by this format\\n\"); return -1; } if (!strcmp(buf, \"ignore\")) onerror = BLOCK_ERR_IGNORE; else if (!strcmp(buf, \"enospc\")) onerror = BLOCK_ERR_STOP_ENOSPC; else if (!strcmp(buf, \"stop\")) onerror = BLOCK_ERR_STOP_ANY; else if (!strcmp(buf, \"report\")) onerror = BLOCK_ERR_REPORT; else { fprintf(stderr, \"qemu: '%s' invalid write error action\\n\", buf); return -1; } } /* compute bus and unit according index */ if (index != -1) { if (bus_id != 0 || unit_id != -1) { fprintf(stderr, \"qemu: '%s' index cannot be used with bus and unit\\n\", str); return -1; } if (max_devs == 0) { unit_id = index; bus_id = 0; } else { unit_id = index % max_devs; bus_id = index / max_devs; } } /* if user doesn't specify a unit_id, * try to find the first free */ if (unit_id == -1) { unit_id = 0; while (drive_get_index(type, bus_id, unit_id) != -1) { unit_id++; if (max_devs && unit_id >= max_devs) { unit_id -= max_devs; bus_id++; } } } /* check unit id */ if (max_devs && unit_id >= max_devs) { fprintf(stderr, \"qemu: '%s' unit %d too big (max is %d)\\n\", str, unit_id, max_devs - 1); return -1; } /* * ignore multiple definitions */ if (drive_get_index(type, bus_id, unit_id) != -1) return -2; /* init */ if (type == IF_IDE || type == IF_SCSI) mediastr = (media == MEDIA_CDROM) ? \"-cd\" : \"-hd\"; if (max_devs) snprintf(buf, sizeof(buf), \"%s%i%s%i\", devname, bus_id, mediastr, unit_id); else snprintf(buf, sizeof(buf), \"%s%s%i\", devname, mediastr, unit_id); bdrv = bdrv_new(buf); drives_table_idx = drive_get_free_idx(); drives_table[drives_table_idx].bdrv = bdrv; drives_table[drives_table_idx].type = type; drives_table[drives_table_idx].bus = bus_id; drives_table[drives_table_idx].unit = unit_id; drives_table[drives_table_idx].onerror = onerror; drives_table[drives_table_idx].drive_opt_idx = arg - drives_opt; strncpy(drives_table[drives_table_idx].serial, serial, sizeof(serial)); nb_drives++; switch(type) { case IF_IDE: case IF_SCSI: case IF_XEN: switch(media) { case MEDIA_DISK: if (cyls != 0) { bdrv_set_geometry_hint(bdrv, cyls, heads, secs); bdrv_set_translation_hint(bdrv, translation); } break; case MEDIA_CDROM: bdrv_set_type_hint(bdrv, BDRV_TYPE_CDROM); break; } break; case IF_SD: /* FIXME: This isn't really a floppy, but it's a reasonable approximation. 
*/ case IF_FLOPPY: bdrv_set_type_hint(bdrv, BDRV_TYPE_FLOPPY); break; case IF_PFLASH: case IF_MTD: case IF_VIRTIO: break; case IF_COUNT: abort(); } if (!file[0]) return -2; bdrv_flags = 0; if (snapshot) { bdrv_flags |= BDRV_O_SNAPSHOT; cache = 2; /* always use write-back with snapshot */ } if (cache == 0) /* no caching */ bdrv_flags |= BDRV_O_NOCACHE; else if (cache == 2) /* write-back */ bdrv_flags |= BDRV_O_CACHE_WB; else if (cache == 3) /* not specified */ bdrv_flags |= BDRV_O_CACHE_DEF; if (bdrv_open2(bdrv, file, bdrv_flags, drv) < 0) { fprintf(stderr, \"qemu: could not open disk image %s\\n\", file); return -1; } if (bdrv_key_required(bdrv)) autostart = 0; return drives_table_idx; }"} {"target": 1, "idx": 13706, "func": "static void gic_complete_irq(gic_state * s, int cpu, int irq) { int update = 0; int cm = 1 << cpu; DPRINTF(\"EOI %d\\n\", irq); if (s->running_irq[cpu] == 1023) return; /* No active IRQ. */ if (irq != 1023) { /* Mark level triggered interrupts as pending if they are still raised. */ if (!GIC_TEST_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm) && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) { DPRINTF(\"Set %d pending mask %x\\n\", irq, cm); GIC_SET_PENDING(irq, cm); update = 1; } } if (irq != s->running_irq[cpu]) { /* Complete an IRQ that is not currently running. */ int tmp = s->running_irq[cpu]; while (s->last_active[tmp][cpu] != 1023) { if (s->last_active[tmp][cpu] == irq) { s->last_active[tmp][cpu] = s->last_active[irq][cpu]; break; } tmp = s->last_active[tmp][cpu]; } if (update) { gic_update(s); } } else { /* Complete the current running IRQ. */ gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]); } }"} {"target": 0, "idx": 13712, "func": "static void init_dequant8_coeff_table(H264Context *h){ int i,q,x; const int transpose = (h->h264dsp.h264_idct8_add != ff_h264_idct8_add_c); //FIXME ugly h->dequant8_coeff[0] = h->dequant8_buffer[0]; h->dequant8_coeff[1] = h->dequant8_buffer[1]; for(i=0; i<2; i++ ){ if(i && !memcmp(h->pps.scaling_matrix8[0], h->pps.scaling_matrix8[1], 64*sizeof(uint8_t))){ h->dequant8_coeff[1] = h->dequant8_buffer[0]; break; } for(q=0; q<52; q++){ int shift = div6[q]; int idx = rem6[q]; for(x=0; x<64; x++) h->dequant8_coeff[i][q][transpose ? 
(x>>3)|((x&7)<<3) : x] = ((uint32_t)dequant8_coeff_init[idx][ dequant8_coeff_init_scan[((x>>1)&12) | (x&3)] ] * h->pps.scaling_matrix8[i][x]) << shift; } } }"} {"target": 1, "idx": 13726, "func": "char *vnc_display_local_addr(const char *id) { VncDisplay *vs = vnc_display_find(id); return vnc_socket_local_addr(\"%s:%s\", vs->lsock); }"} {"target": 1, "idx": 13728, "func": "PPC_OP(set_T0) { T0 = PARAM(1); RETURN(); }"} {"target": 1, "idx": 13734, "func": "static void test_validate_fail_struct_nested(TestInputVisitorData *data, const void *unused) { UserDefTwo *udp = NULL; Error *err = NULL; Visitor *v; v = validate_test_init(data, \"{ 'string0': 'string0', 'dict1': { 'string1': 'string1', 'dict2': { 'userdef1': { 'integer': 42, 'string': 'string', 'extra': [42, 23, {'foo':'bar'}] }, 'string2': 'string2'}}}\"); visit_type_UserDefTwo(v, NULL, &udp, &err); error_free_or_abort(&err); qapi_free_UserDefTwo(udp); }"} {"target": 1, "idx": 13736, "func": "static int socket_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size) { QEMUFileSocket *s = opaque; ssize_t len; do { len = qemu_recv(s->fd, buf, size, 0); } while (len == -1 && socket_error() == EINTR); if (len == -1) len = -socket_error(); return len; }"} {"target": 1, "idx": 13739, "func": "static GIOStatus ga_channel_write(GAChannel *c, const char *buf, size_t size, size_t *count) { GIOStatus status; OVERLAPPED ov = {0}; BOOL ret; DWORD written; ov.hEvent = CreateEvent(NULL, FALSE, FALSE, NULL); ret = WriteFile(c->handle, buf, size, &written, &ov); if (!ret) { if (GetLastError() == ERROR_IO_PENDING) { /* write is pending */ ret = GetOverlappedResult(c->handle, &ov, &written, TRUE); if (!ret) { if (!GetLastError()) { status = G_IO_STATUS_AGAIN; } else { status = G_IO_STATUS_ERROR; } else { /* write is complete */ status = G_IO_STATUS_NORMAL; *count = written; } else { status = G_IO_STATUS_ERROR; } else { /* write returned immediately */ status = G_IO_STATUS_NORMAL; *count = written; return status;"} {"target": 1, "idx": 13740, "func": "int ff_jni_exception_get_summary(JNIEnv *env, jthrowable exception, char **error, void *log_ctx) { int ret = 0; AVBPrint bp; char *name = NULL; char *message = NULL; jclass class_class = NULL; jmethodID get_name_id = NULL; jclass exception_class = NULL; jmethodID get_message_id = NULL; jstring string; av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC); exception_class = (*env)->GetObjectClass(env, exception); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, \"Could not find Throwable class\\n\"); ret = AVERROR_EXTERNAL; goto done; } class_class = (*env)->GetObjectClass(env, exception_class); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, \"Could not find Throwable class's class\\n\"); ret = AVERROR_EXTERNAL; goto done; } get_name_id = (*env)->GetMethodID(env, class_class, \"getName\", \"()Ljava/lang/String;\"); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, \"Could not find method Class.getName()\\n\"); ret = AVERROR_EXTERNAL; goto done; } string = (*env)->CallObjectMethod(env, exception_class, get_name_id); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, \"Class.getName() threw an exception\\n\"); ret = AVERROR_EXTERNAL; goto done; } if (string) { name = ff_jni_jstring_to_utf_chars(env, string, log_ctx); (*env)->DeleteLocalRef(env, string); string = NULL; } get_message_id = (*env)->GetMethodID(env, exception_class, 
\"getMessage\", \"()Ljava/lang/String;\"); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, \"Could not find method java/lang/Throwable.getMessage()\\n\"); ret = AVERROR_EXTERNAL; goto done; } string = (*env)->CallObjectMethod(env, exception, get_message_id); if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); av_log(log_ctx, AV_LOG_ERROR, \"Throwable.getMessage() threw an exception\\n\"); ret = AVERROR_EXTERNAL; goto done; } if (string) { message = ff_jni_jstring_to_utf_chars(env, string, log_ctx); (*env)->DeleteLocalRef(env, string); string = NULL; } if (name && message) { av_bprintf(&bp, \"%s: %s\", name, message); } else if (name && !message) { av_bprintf(&bp, \"%s occurred\", name); } else if (!name && message) { av_bprintf(&bp, \"Exception: %s\", message); } else { av_log(log_ctx, AV_LOG_WARNING, \"Could not retreive exception name and message\\n\"); av_bprintf(&bp, \"Exception occurred\"); } ret = av_bprint_finalize(&bp, error); done: av_free(name); av_free(message); if (class_class) { (*env)->DeleteLocalRef(env, class_class); } if (exception_class) { (*env)->DeleteLocalRef(env, exception_class); } if (string) { (*env)->DeleteLocalRef(env, string); } return ret; }"} {"target": 1, "idx": 13748, "func": "static void vscsi_command_complete(SCSIBus *bus, int reason, uint32_t tag, uint32_t arg) { VSCSIState *s = DO_UPCAST(VSCSIState, vdev.qdev, bus->qbus.parent); vscsi_req *req = vscsi_find_req(s, tag); SCSIDevice *sdev; uint8_t *buf; int32_t res_in = 0, res_out = 0; int len, rc = 0; dprintf(\"VSCSI: SCSI cmd complete, r=0x%x tag=0x%x arg=0x%x, req=%p\\n\", reason, tag, arg, req); if (req == NULL) { fprintf(stderr, \"VSCSI: Can't find request for tag 0x%x\\n\", tag); return; } sdev = req->sdev; if (req->sensing) { if (reason == SCSI_REASON_DONE) { dprintf(\"VSCSI: Sense done !\\n\"); vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0); vscsi_put_req(s, req); } else { uint8_t *buf = sdev->info->get_buf(sdev, tag); len = MIN(arg, SCSI_SENSE_BUF_SIZE); dprintf(\"VSCSI: Sense data, %d bytes:\\n\", len); dprintf(\" %02x %02x %02x %02x %02x %02x %02x %02x\\n\", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); dprintf(\" %02x %02x %02x %02x %02x %02x %02x %02x\\n\", buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]); memcpy(req->sense, buf, len); req->senselen = len; sdev->info->read_data(sdev, req->qtag); } return; } if (reason == SCSI_REASON_DONE) { dprintf(\"VSCSI: Command complete err=%d\\n\", arg); if (arg == 0) { /* We handle overflows, not underflows for normal commands, * but hopefully nobody cares */ if (req->writing) { res_out = req->data_len; } else { res_in = req->data_len; } vscsi_send_rsp(s, req, 0, res_in, res_out); } else if (arg == CHECK_CONDITION) { dprintf(\"VSCSI: Got CHECK_CONDITION, requesting sense...\\n\"); vscsi_send_request_sense(s, req); return; } else { vscsi_send_rsp(s, req, arg, 0, 0); } vscsi_put_req(s, req); return; } /* \"arg\" is how much we have read for reads and how much we want * to write for writes (ie, how much is to be DMA'd) */ if (arg) { buf = sdev->info->get_buf(sdev, tag); rc = vscsi_srp_transfer_data(s, req, req->writing, buf, arg); } if (rc < 0) { fprintf(stderr, \"VSCSI: RDMA error rc=%d!\\n\", rc); sdev->info->cancel_io(sdev, req->qtag); vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0); vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0); vscsi_put_req(s, req); return; } /* Start next chunk */ req->data_len -= rc; if (req->writing) { sdev->info->write_data(sdev, 
req->qtag); } else { sdev->info->read_data(sdev, req->qtag); } }"} {"target": 0, "idx": 13754, "func": "void helper_store_sdr1(CPUPPCState *env, target_ulong val) { PowerPCCPU *cpu = ppc_env_get_cpu(env); if (!env->external_htab) { if (env->spr[SPR_SDR1] != val) { ppc_store_sdr1(env, val); tlb_flush(CPU(cpu)); } } }"} {"target": 0, "idx": 13768, "func": "static bool fw_cfg_comb_valid(void *opaque, target_phys_addr_t addr, unsigned size, bool is_write) { return (size == 1) || (is_write && size == 2); }"} {"target": 0, "idx": 13772, "func": "sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn, uint64_t bus_offset, uint32_t page_shift, uint32_t nb_table, bool vfio_accel) { sPAPRTCETable *tcet; char tmp[64]; if (spapr_tce_find_by_liobn(liobn)) { fprintf(stderr, \"Attempted to create TCE table with duplicate\" \" LIOBN 0x%x\\n\", liobn); return NULL; } if (!nb_table) { return NULL; } tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE)); tcet->liobn = liobn; tcet->bus_offset = bus_offset; tcet->page_shift = page_shift; tcet->nb_table = nb_table; tcet->vfio_accel = vfio_accel; snprintf(tmp, sizeof(tmp), \"tce-table-%x\", liobn); object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL); object_property_set_bool(OBJECT(tcet), true, \"realized\", NULL); return tcet; }"} {"target": 1, "idx": 13784, "func": "static void debug_print_fis(uint8_t *fis, int cmd_len) { #ifdef DEBUG_AHCI int i; fprintf(stderr, \"fis:\"); for (i = 0; i < cmd_len; i++) { if ((i & 0xf) == 0) { fprintf(stderr, \"\\n%02x:\",i); } fprintf(stderr, \"%02x \",fis[i]); } fprintf(stderr, \"\\n\"); #endif }"} {"target": 0, "idx": 13809, "func": "static av_cold int svc_encode_init(AVCodecContext *avctx) { SVCContext *s = avctx->priv_data; SEncParamExt param = { 0 }; int err = AVERROR_UNKNOWN; int log_level; WelsTraceCallback callback_function; AVCPBProperties *props; // Mingw GCC < 4.7 on x86_32 uses an incorrect/buggy ABI for the WelsGetCodecVersion // function (for functions returning larger structs), thus skip the check in those // configurations. #if !defined(_WIN32) || !defined(__GNUC__) || !ARCH_X86_32 || AV_GCC_VERSION_AT_LEAST(4, 7) OpenH264Version libver = WelsGetCodecVersion(); if (memcmp(&libver, &g_stCodecVersion, sizeof(libver))) { av_log(avctx, AV_LOG_ERROR, \"Incorrect library version loaded\\n\"); return AVERROR(EINVAL); } #endif if (WelsCreateSVCEncoder(&s->encoder)) { av_log(avctx, AV_LOG_ERROR, \"Unable to create encoder\\n\"); return AVERROR_UNKNOWN; } // Pass all libopenh264 messages to our callback, to allow ourselves to filter them. log_level = WELS_LOG_DETAIL; (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_LEVEL, &log_level); // Set the logging callback function to one that uses av_log() (see implementation above). callback_function = (WelsTraceCallback) libopenh264_trace_callback; (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_CALLBACK, (void *)&callback_function); // Set the AVCodecContext as the libopenh264 callback context so that it can be passed to av_log(). 
(*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_CALLBACK_CONTEXT, (void *)&avctx); (*s->encoder)->GetDefaultParams(s->encoder, ¶m); param.fMaxFrameRate = avctx->time_base.den / avctx->time_base.num; param.iPicWidth = avctx->width; param.iPicHeight = avctx->height; param.iTargetBitrate = avctx->bit_rate; param.iMaxBitrate = FFMAX(avctx->rc_max_rate, avctx->bit_rate); param.iRCMode = RC_QUALITY_MODE; param.iTemporalLayerNum = 1; param.iSpatialLayerNum = 1; param.bEnableDenoise = 0; param.bEnableBackgroundDetection = 1; param.bEnableAdaptiveQuant = 1; param.bEnableFrameSkip = s->skip_frames; param.bEnableLongTermReference = 0; param.iLtrMarkPeriod = 30; param.uiIntraPeriod = avctx->gop_size; #if OPENH264_VER_AT_LEAST(1, 4) param.eSpsPpsIdStrategy = CONSTANT_ID; #else param.bEnableSpsPpsIdAddition = 0; #endif param.bPrefixNalAddingCtrl = 0; param.iLoopFilterDisableIdc = !s->loopfilter; param.iEntropyCodingModeFlag = 0; param.iMultipleThreadIdc = avctx->thread_count; if (s->profile && !strcmp(s->profile, \"main\")) param.iEntropyCodingModeFlag = 1; else if (!s->profile && avctx->coder_type == FF_CODER_TYPE_AC) param.iEntropyCodingModeFlag = 1; param.sSpatialLayers[0].iVideoWidth = param.iPicWidth; param.sSpatialLayers[0].iVideoHeight = param.iPicHeight; param.sSpatialLayers[0].fFrameRate = param.fMaxFrameRate; param.sSpatialLayers[0].iSpatialBitrate = param.iTargetBitrate; param.sSpatialLayers[0].iMaxSpatialBitrate = param.iMaxBitrate; if ((avctx->slices > 1) && (s->max_nal_size)){ av_log(avctx,AV_LOG_ERROR,\"Invalid combination -slices %d and -max_nal_size %d.\\n\",avctx->slices,s->max_nal_size); goto fail; } if (avctx->slices > 1) s->slice_mode = SM_FIXEDSLCNUM_SLICE; if (s->max_nal_size) s->slice_mode = SM_DYN_SLICE; param.sSpatialLayers[0].sSliceCfg.uiSliceMode = s->slice_mode; param.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceNum = avctx->slices; if (s->slice_mode == SM_DYN_SLICE) { if (s->max_nal_size){ param.uiMaxNalSize = s->max_nal_size; param.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceSizeConstraint = s->max_nal_size; } else { if (avctx->rtp_payload_size) { av_log(avctx,AV_LOG_DEBUG,\"Using RTP Payload size for uiMaxNalSize\"); param.uiMaxNalSize = avctx->rtp_payload_size; param.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceSizeConstraint = avctx->rtp_payload_size; } else { av_log(avctx,AV_LOG_ERROR,\"Invalid -max_nal_size, specify a valid max_nal_size to use -slice_mode dyn\\n\"); goto fail; } } } if ((*s->encoder)->InitializeExt(s->encoder, ¶m) != cmResultSuccess) { av_log(avctx, AV_LOG_ERROR, \"Initialize failed\\n\"); goto fail; } if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) { SFrameBSInfo fbi = { 0 }; int i, size = 0; (*s->encoder)->EncodeParameterSets(s->encoder, &fbi); for (i = 0; i < fbi.sLayerInfo[0].iNalCount; i++) size += fbi.sLayerInfo[0].pNalLengthInByte[i]; avctx->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE); if (!avctx->extradata) { err = AVERROR(ENOMEM); goto fail; } avctx->extradata_size = size; memcpy(avctx->extradata, fbi.sLayerInfo[0].pBsBuf, size); } props = ff_add_cpb_side_data(avctx); if (!props) { err = AVERROR(ENOMEM); goto fail; } props->max_bitrate = param.iMaxBitrate; props->avg_bitrate = param.iTargetBitrate; return 0; fail: svc_encode_close(avctx); return err; }"} {"target": 0, "idx": 13829, "func": "static void sm501_disp_ctrl_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { SM501State *s = (SM501State *)opaque; SM501_DPRINTF(\"sm501 disp ctrl regs : write addr=%x, val=%x\\n\", (unsigned)addr, 
(unsigned)value); switch (addr) { case SM501_DC_PANEL_CONTROL: s->dc_panel_control = value & 0x0FFF73FF; break; case SM501_DC_PANEL_PANNING_CONTROL: s->dc_panel_panning_control = value & 0xFF3FFF3F; break; case SM501_DC_PANEL_FB_ADDR: s->dc_panel_fb_addr = value & 0x8FFFFFF0; break; case SM501_DC_PANEL_FB_OFFSET: s->dc_panel_fb_offset = value & 0x3FF03FF0; break; case SM501_DC_PANEL_FB_WIDTH: s->dc_panel_fb_width = value & 0x0FFF0FFF; break; case SM501_DC_PANEL_FB_HEIGHT: s->dc_panel_fb_height = value & 0x0FFF0FFF; break; case SM501_DC_PANEL_TL_LOC: s->dc_panel_tl_location = value & 0x07FF07FF; break; case SM501_DC_PANEL_BR_LOC: s->dc_panel_br_location = value & 0x07FF07FF; break; case SM501_DC_PANEL_H_TOT: s->dc_panel_h_total = value & 0x0FFF0FFF; break; case SM501_DC_PANEL_H_SYNC: s->dc_panel_h_sync = value & 0x00FF0FFF; break; case SM501_DC_PANEL_V_TOT: s->dc_panel_v_total = value & 0x0FFF0FFF; break; case SM501_DC_PANEL_V_SYNC: s->dc_panel_v_sync = value & 0x003F0FFF; break; case SM501_DC_PANEL_HWC_ADDR: s->dc_panel_hwc_addr = value & 0x8FFFFFF0; break; case SM501_DC_PANEL_HWC_LOC: s->dc_panel_hwc_location = value & 0x0FFF0FFF; break; case SM501_DC_PANEL_HWC_COLOR_1_2: s->dc_panel_hwc_color_1_2 = value; break; case SM501_DC_PANEL_HWC_COLOR_3: s->dc_panel_hwc_color_3 = value & 0x0000FFFF; break; case SM501_DC_CRT_CONTROL: s->dc_crt_control = value & 0x0003FFFF; break; case SM501_DC_CRT_FB_ADDR: s->dc_crt_fb_addr = value & 0x8FFFFFF0; break; case SM501_DC_CRT_FB_OFFSET: s->dc_crt_fb_offset = value & 0x3FF03FF0; break; case SM501_DC_CRT_H_TOT: s->dc_crt_h_total = value & 0x0FFF0FFF; break; case SM501_DC_CRT_H_SYNC: s->dc_crt_h_sync = value & 0x00FF0FFF; break; case SM501_DC_CRT_V_TOT: s->dc_crt_v_total = value & 0x0FFF0FFF; break; case SM501_DC_CRT_V_SYNC: s->dc_crt_v_sync = value & 0x003F0FFF; break; case SM501_DC_CRT_HWC_ADDR: s->dc_crt_hwc_addr = value & 0x8FFFFFF0; break; case SM501_DC_CRT_HWC_LOC: s->dc_crt_hwc_location = value & 0x0FFF0FFF; break; case SM501_DC_CRT_HWC_COLOR_1_2: s->dc_crt_hwc_color_1_2 = value; break; case SM501_DC_CRT_HWC_COLOR_3: s->dc_crt_hwc_color_3 = value & 0x0000FFFF; break; case SM501_DC_PANEL_PALETTE ... 
SM501_DC_PANEL_PALETTE + 0x400 * 3 - 4: sm501_palette_write(opaque, addr - SM501_DC_PANEL_PALETTE, value); break; default: printf(\"sm501 disp ctrl : not implemented register write.\" \" addr=%x, val=%x\\n\", (int)addr, (unsigned)value); abort(); } }"} {"target": 0, "idx": 13855, "func": "void rgb15to16(const uint8_t *src,uint8_t *dst,uint32_t src_size) { #ifdef HAVE_MMX register const char* s=src+src_size; register char* d=dst+src_size; register int offs=-src_size; __asm __volatile(PREFETCH\" %0\"::\"m\"(*(s+offs)):\"memory\"); __asm __volatile( \"movq %0, %%mm4\\n\\t\" \"movq %1, %%mm5\" ::\"m\"(mask15b), \"m\"(mask15rg):\"memory\"); while(offs<0) { __asm __volatile( PREFETCH\" 32%1\\n\\t\" \"movq %1, %%mm0\\n\\t\" \"movq 8%1, %%mm2\\n\\t\" \"movq %%mm0, %%mm1\\n\\t\" \"movq %%mm2, %%mm3\\n\\t\" \"pand %%mm4, %%mm0\\n\\t\" \"pand %%mm5, %%mm1\\n\\t\" \"pand %%mm4, %%mm2\\n\\t\" \"pand %%mm5, %%mm3\\n\\t\" \"psllq $1, %%mm1\\n\\t\" \"psllq $1, %%mm3\\n\\t\" \"por %%mm1, %%mm0\\n\\t\" \"por %%mm3, %%mm2\\n\\t\" MOVNTQ\" %%mm0, %0\\n\\t\" MOVNTQ\" %%mm2, 8%0\" :\"=m\"(*(d+offs)) :\"m\"(*(s+offs)) :\"memory\"); offs+=16; } __asm __volatile(SFENCE:::\"memory\"); __asm __volatile(EMMS:::\"memory\"); #else const uint16_t *s1=( uint16_t * )src; uint16_t *d1=( uint16_t * )dst; uint16_t *e=((uint8_t *)s1)+src_size; while( s1>= 1; i = 0; do { l = *ps++; if (l & 0x80) { l = (l & 0x7F) * 2; if (pd + l > dest_end) return ps - src; memcpy(pd, ps, l); ps += l; pd += l; } else { if (pd + i > dest_end) return ps - src; for (i = 0; i < l; i++) { *pd++ = ps[0]; *pd++ = ps[1]; } ps += 2; } i += l; } while (i < src_len); return ps - src; }"} {"target": 0, "idx": 13867, "func": "void ff_avg_h264_qpel16_mc33_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_hv_qrt_and_aver_dst_16x16_msa(src + stride - 2, src - (stride * 2) + sizeof(uint8_t), stride, dst, stride); }"} {"target": 1, "idx": 13888, "func": "static int vorbis_parse(AVCodecParserContext *s1, AVCodecContext *avctx, const uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size) { VorbisParseContext *s = s1->priv_data; int duration; if (!s->vp && avctx->extradata && avctx->extradata_size) { s->vp = av_vorbis_parse_init(avctx->extradata, avctx->extradata_size); if (!s->vp) goto end; } if ((duration = av_vorbis_parse_frame(s->vp, buf, buf_size)) >= 0) s1->duration = duration; end: /* always return the full packet. this parser isn't doing any splitting or combining, only packet analysis */ *poutbuf = buf; *poutbuf_size = buf_size; return buf_size; }"} {"target": 1, "idx": 13890, "func": "static void xen_ram_init(PCMachineState *pcms, ram_addr_t ram_size, MemoryRegion **ram_memory_p) { MemoryRegion *sysmem = get_system_memory(); ram_addr_t block_len; uint64_t user_lowmem = object_property_get_int(qdev_get_machine(), PC_MACHINE_MAX_RAM_BELOW_4G, &error_abort); /* Handle the machine opt max-ram-below-4g. It is basically doing * min(xen limit, user limit). */ if (HVM_BELOW_4G_RAM_END <= user_lowmem) { user_lowmem = HVM_BELOW_4G_RAM_END; } if (ram_size >= user_lowmem) { pcms->above_4g_mem_size = ram_size - user_lowmem; pcms->below_4g_mem_size = user_lowmem; } else { pcms->above_4g_mem_size = 0; pcms->below_4g_mem_size = ram_size; } if (!pcms->above_4g_mem_size) { block_len = ram_size; } else { /* * Xen does not allocate the memory continuously, it keeps a * hole of the size computed above or passed in. 
*/ block_len = (1ULL << 32) + pcms->above_4g_mem_size; } memory_region_init_ram(&ram_memory, NULL, \"xen.ram\", block_len, &error_abort); *ram_memory_p = &ram_memory; vmstate_register_ram_global(&ram_memory); memory_region_init_alias(&ram_640k, NULL, \"xen.ram.640k\", &ram_memory, 0, 0xa0000); memory_region_add_subregion(sysmem, 0, &ram_640k); /* Skip of the VGA IO memory space, it will be registered later by the VGA * emulated device. * * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load * the Options ROM, so it is registered here as RAM. */ memory_region_init_alias(&ram_lo, NULL, \"xen.ram.lo\", &ram_memory, 0xc0000, pcms->below_4g_mem_size - 0xc0000); memory_region_add_subregion(sysmem, 0xc0000, &ram_lo); if (pcms->above_4g_mem_size > 0) { memory_region_init_alias(&ram_hi, NULL, \"xen.ram.hi\", &ram_memory, 0x100000000ULL, pcms->above_4g_mem_size); memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi); } }"} {"target": 1, "idx": 13897, "func": "static void pci_spapr_set_irq(void *opaque, int irq_num, int level) { /* * Here we use the number returned by pci_spapr_map_irq to find a * corresponding qemu_irq. */ sPAPRPHBState *phb = opaque; trace_spapr_pci_lsi_set(phb->busname, irq_num, phb->lsi_table[irq_num].irq); qemu_set_irq(spapr_phb_lsi_qirq(phb, irq_num), level); }"} {"target": 1, "idx": 13899, "func": "static int alloc_sequence_buffers(DiracContext *s) { int sbwidth = DIVRNDUP(s->source.width, 4); int sbheight = DIVRNDUP(s->source.height, 4); int i, w, h, top_padding; /* todo: think more about this / use or set Plane here */ for (i = 0; i < 3; i++) { int max_xblen = MAX_BLOCKSIZE >> (i ? s->chroma_x_shift : 0); int max_yblen = MAX_BLOCKSIZE >> (i ? s->chroma_y_shift : 0); w = s->source.width >> (i ? s->chroma_x_shift : 0); h = s->source.height >> (i ? s->chroma_y_shift : 0); /* we allocate the max we support here since num decompositions can * change from frame to frame. 
Stride is aligned to 16 for SIMD, and * 1<<MAX_DWT_LEVELS top padding to avoid if(y>0) in arith decoding * MAX_BLOCKSIZE padding for MC: blocks can spill up to half of that * on each side */ top_padding = FFMAX(1<<MAX_DWT_LEVELS, max_yblen/2); w = FFALIGN(CALC_PADDING(w, MAX_DWT_LEVELS), 8); h = top_padding + CALC_PADDING(h, MAX_DWT_LEVELS) + max_yblen/2; s->plane[i].idwt_buf_base = av_mallocz((w+max_xblen)*h * sizeof(IDWTELEM)); s->plane[i].idwt_tmp = av_malloc((w+16) * sizeof(IDWTELEM)); s->plane[i].idwt_buf = s->plane[i].idwt_buf_base + top_padding*w; if (!s->plane[i].idwt_buf_base || !s->plane[i].idwt_tmp) return AVERROR(ENOMEM); } w = s->source.width; h = s->source.height; /* fixme: allocate using real stride here */ s->sbsplit = av_malloc(sbwidth * sbheight); s->blmotion = av_malloc(sbwidth * sbheight * 16 * sizeof(*s->blmotion)); s->edge_emu_buffer_base = av_malloc((w+64)*MAX_BLOCKSIZE); s->mctmp = av_malloc((w+64+MAX_BLOCKSIZE) * (h*MAX_BLOCKSIZE) * sizeof(*s->mctmp)); s->mcscratch = av_malloc((w+64)*MAX_BLOCKSIZE); if (!s->sbsplit || !s->blmotion) return AVERROR(ENOMEM); return 0; }"} {"target": 1, "idx": 13905, "func": "static int hevc_handle_packet(AVFormatContext *ctx, PayloadContext *rtp_hevc_ctx, AVStream *st, AVPacket *pkt, uint32_t *timestamp, const uint8_t *buf, int len, uint16_t seq, int flags) { const uint8_t *rtp_pl = buf; int tid, lid, nal_type; int first_fragment, last_fragment, fu_type; uint8_t new_nal_header[2]; int res = 0; /* sanity check for size of input packet: 1 byte payload at least */ if (len < RTP_HEVC_PAYLOAD_HEADER_SIZE + 1) { av_log(ctx, AV_LOG_ERROR, \"Too short RTP/HEVC packet, got %d bytes\\n\", len); return AVERROR_INVALIDDATA; } /* * decode the HEVC payload header according to section 4 of draft version 6: * * 0 1 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |F| Type | LayerId | TID | * +-------------+-----------------+ * * Forbidden zero (F): 1 bit * NAL unit type (Type): 6 bits * NUH layer ID (LayerId): 6 bits * NUH temporal ID plus 1 (TID): 3 bits */ nal_type = (buf[0] >> 1) & 0x3f; lid = ((buf[0] << 5) & 0x20) | ((buf[1] >> 3) & 0x1f); tid = buf[1] & 0x07; /* sanity check for correct layer ID */ if (lid) { /* future scalable or 3D video coding extensions */ avpriv_report_missing_feature(ctx, \"Multi-layer HEVC coding\\n\"); return AVERROR_PATCHWELCOME; } /* sanity check for correct temporal ID */ if (!tid) { av_log(ctx, AV_LOG_ERROR, \"Illegal temporal ID in RTP/HEVC packet\\n\"); return AVERROR_INVALIDDATA; } /* sanity check for correct NAL unit type */ if (nal_type > 50) { av_log(ctx, AV_LOG_ERROR, \"Unsupported (HEVC) NAL type (%d)\\n\", nal_type); return AVERROR_INVALIDDATA; } switch (nal_type) { /* video parameter set (VPS) */ case 32: /* sequence parameter set (SPS) */ case 33: /* picture parameter set (PPS) */ case 34: /* supplemental enhancement information (SEI) */ case 39: /* single NAL unit packet */ default: /* sanity check for size of input packet: 1 byte payload at least */ if (len < 1) { av_log(ctx, AV_LOG_ERROR, \"Too short RTP/HEVC packet, got %d bytes of NAL unit type %d\\n\", len, nal_type); return AVERROR_INVALIDDATA; } /* create A/V packet */ if ((res = av_new_packet(pkt, sizeof(start_sequence) + len)) < 0) return res; /* A/V packet: copy start sequence */ memcpy(pkt->data, start_sequence, sizeof(start_sequence)); /* A/V packet: copy NAL unit data */ memcpy(pkt->data + sizeof(start_sequence), buf, len); break; /* aggregated packet (AP) - with two or more NAL units */ case 48: /* pass the HEVC payload header */ buf += RTP_HEVC_PAYLOAD_HEADER_SIZE; len -= RTP_HEVC_PAYLOAD_HEADER_SIZE; /* pass the HEVC DONL field */ if (rtp_hevc_ctx->using_donl_field) { buf += RTP_HEVC_DONL_FIELD_SIZE;
len -= RTP_HEVC_DONL_FIELD_SIZE; } res = ff_h264_handle_aggregated_packet(ctx, pkt, buf, len, rtp_hevc_ctx->using_donl_field ? RTP_HEVC_DOND_FIELD_SIZE : 0, NULL, 0); if (res < 0) return res; break; /* fragmentation unit (FU) */ case 49: /* pass the HEVC payload header */ buf += RTP_HEVC_PAYLOAD_HEADER_SIZE; len -= RTP_HEVC_PAYLOAD_HEADER_SIZE; /* * decode the FU header * * 0 1 2 3 4 5 6 7 * +-+-+-+-+-+-+-+-+ * |S|E| FuType | * +---------------+ * * Start fragment (S): 1 bit * End fragment (E): 1 bit * FuType: 6 bits */ first_fragment = buf[0] & 0x80; last_fragment = buf[0] & 0x40; fu_type = buf[0] & 0x3f; /* pass the HEVC FU header */ buf += RTP_HEVC_FU_HEADER_SIZE; len -= RTP_HEVC_FU_HEADER_SIZE; /* pass the HEVC DONL field */ if (rtp_hevc_ctx->using_donl_field) { buf += RTP_HEVC_DONL_FIELD_SIZE; len -= RTP_HEVC_DONL_FIELD_SIZE; } av_dlog(ctx, \" FU type %d with %d bytes\\n\", fu_type, len); if (len <= 0) { /* sanity check for size of input packet: 1 byte payload at least */ av_log(ctx, AV_LOG_ERROR, \"Too short RTP/HEVC packet, got %d bytes of NAL unit type %d\\n\", len, nal_type); return AVERROR_INVALIDDATA; } if (first_fragment && last_fragment) { av_log(ctx, AV_LOG_ERROR, \"Illegal combination of S and E bit in RTP/HEVC packet\\n\"); return AVERROR_INVALIDDATA; } new_nal_header[0] = (rtp_pl[0] & 0x81) | (fu_type << 1); new_nal_header[1] = rtp_pl[1]; res = ff_h264_handle_frag_packet(pkt, buf, len, first_fragment, new_nal_header, sizeof(new_nal_header)); break; /* PACI packet */ case 50: /* Temporal scalability control information (TSCI) */ avpriv_report_missing_feature(ctx, \"PACI packets for RTP/HEVC\\n\"); res = AVERROR_PATCHWELCOME; break; } pkt->stream_index = st->index; return res; }"} {"target": 1, "idx": 13906, "func": "int avpriv_lock_avformat(void) { if (lockmgr_cb) { if ((*lockmgr_cb)(&avformat_mutex, AV_LOCK_OBTAIN)) return -1; } return 0; }"} {"target": 1, "idx": 13915, "func": "static int libschroedinger_encode_close(AVCodecContext *avctx) { SchroEncoderParams *p_schro_params = avctx->priv_data; /* Close the encoder. */ schro_encoder_free(p_schro_params->encoder); /* Free data in the output frame queue. */ ff_schro_queue_free(&p_schro_params->enc_frame_queue, libschroedinger_free_frame); /* Free the encoder buffer. */ if (p_schro_params->enc_buf_size) av_freep(&p_schro_params->enc_buf); /* Free the video format structure. 
*/ av_freep(&p_schro_params->format); return 0; }"} {"target": 0, "idx": 13943, "func": "static int cinepak_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int ret = 0, buf_size = avpkt->size; CinepakContext *s = avctx->priv_data; s->data = buf; s->size = buf_size; if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) return ret; if (s->palette_video) { const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL); if (pal) { s->frame->palette_has_changed = 1; memcpy(s->pal, pal, AVPALETTE_SIZE); } } if ((ret = cinepak_decode(s)) < 0) { av_log(avctx, AV_LOG_ERROR, \"cinepak_decode failed\\n\"); } if (s->palette_video) memcpy (s->frame->data[1], s->pal, AVPALETTE_SIZE); if ((ret = av_frame_ref(data, s->frame)) < 0) return ret; *got_frame = 1; /* report that the buffer was completely consumed */ return buf_size; }"} {"target": 0, "idx": 13951, "func": "static void taihu_405ep_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; char *filename; qemu_irq *pic; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *bios; MemoryRegion *ram_memories = g_malloc(2 * sizeof(*ram_memories)); MemoryRegion *ram = g_malloc0(sizeof(*ram)); hwaddr ram_bases[2], ram_sizes[2]; long bios_size; target_ulong kernel_base, initrd_base; long kernel_size, initrd_size; int linux_boot; int fl_idx, fl_sectors; DriveInfo *dinfo; /* RAM is soldered to the board so the size cannot be changed */ ram_size = 0x08000000; memory_region_allocate_system_memory(ram, NULL, \"taihu_405ep.ram\", ram_size); ram_bases[0] = 0; ram_sizes[0] = 0x04000000; memory_region_init_alias(&ram_memories[0], NULL, \"taihu_405ep.ram-0\", ram, ram_bases[0], ram_sizes[0]); ram_bases[1] = 0x04000000; ram_sizes[1] = 0x04000000; memory_region_init_alias(&ram_memories[1], NULL, \"taihu_405ep.ram-1\", ram, ram_bases[1], ram_sizes[1]); #ifdef DEBUG_BOARD_INIT printf(\"%s: register cpu\\n\", __func__); #endif ppc405ep_init(sysmem, ram_memories, ram_bases, ram_sizes, 33333333, &pic, kernel_filename == NULL ? 
0 : 1); /* allocate and load BIOS */ #ifdef DEBUG_BOARD_INIT printf(\"%s: register BIOS\\n\", __func__); #endif fl_idx = 0; #if defined(USE_FLASH_BIOS) dinfo = drive_get(IF_PFLASH, 0, fl_idx); if (dinfo) { BlockDriverState *bs = blk_bs(blk_by_legacy_dinfo(dinfo)); bios_size = bdrv_getlength(bs); /* XXX: should check that size is 2MB */ // bios_size = 2 * 1024 * 1024; fl_sectors = (bios_size + 65535) >> 16; #ifdef DEBUG_BOARD_INIT printf(\"Register parallel flash %d size %lx\" \" at addr %lx '%s' %d\\n\", fl_idx, bios_size, -bios_size, bdrv_get_device_name(bs), fl_sectors); #endif pflash_cfi02_register((uint32_t)(-bios_size), NULL, \"taihu_405ep.bios\", bios_size, bs, 65536, fl_sectors, 1, 4, 0x0001, 0x22DA, 0x0000, 0x0000, 0x555, 0x2AA, 1); fl_idx++; } else #endif { #ifdef DEBUG_BOARD_INIT printf(\"Load BIOS from file\\n\"); #endif if (bios_name == NULL) bios_name = BIOS_FILENAME; bios = g_new(MemoryRegion, 1); memory_region_init_ram(bios, NULL, \"taihu_405ep.bios\", BIOS_SIZE, &error_abort); vmstate_register_ram_global(bios); filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (filename) { bios_size = load_image(filename, memory_region_get_ram_ptr(bios)); g_free(filename); if (bios_size < 0 || bios_size > BIOS_SIZE) { error_report(\"Could not load PowerPC BIOS '%s'\", bios_name); exit(1); } bios_size = (bios_size + 0xfff) & ~0xfff; memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios); } else if (!qtest_enabled()) { error_report(\"Could not load PowerPC BIOS '%s'\", bios_name); exit(1); } memory_region_set_readonly(bios, true); } /* Register Linux flash */ dinfo = drive_get(IF_PFLASH, 0, fl_idx); if (dinfo) { BlockDriverState *bs = blk_bs(blk_by_legacy_dinfo(dinfo)); bios_size = bdrv_getlength(bs); /* XXX: should check that size is 32MB */ bios_size = 32 * 1024 * 1024; fl_sectors = (bios_size + 65535) >> 16; #ifdef DEBUG_BOARD_INIT printf(\"Register parallel flash %d size %lx\" \" at addr \" TARGET_FMT_lx \" '%s'\\n\", fl_idx, bios_size, (target_ulong)0xfc000000, bdrv_get_device_name(bs)); #endif pflash_cfi02_register(0xfc000000, NULL, \"taihu_405ep.flash\", bios_size, bs, 65536, fl_sectors, 1, 4, 0x0001, 0x22DA, 0x0000, 0x0000, 0x555, 0x2AA, 1); fl_idx++; } /* Register CLPD & LCD display */ #ifdef DEBUG_BOARD_INIT printf(\"%s: register CPLD\\n\", __func__); #endif taihu_cpld_init(sysmem, 0x50100000); /* Load kernel */ linux_boot = (kernel_filename != NULL); if (linux_boot) { #ifdef DEBUG_BOARD_INIT printf(\"%s: load kernel\\n\", __func__); #endif kernel_base = KERNEL_LOAD_ADDR; /* now we can load the kernel */ kernel_size = load_image_targphys(kernel_filename, kernel_base, ram_size - kernel_base); if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } /* load initrd */ if (initrd_filename) { initrd_base = INITRD_LOAD_ADDR; initrd_size = load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); if (initrd_size < 0) { fprintf(stderr, \"qemu: could not load initial ram disk '%s'\\n\", initrd_filename); exit(1); } } else { initrd_base = 0; initrd_size = 0; } } else { kernel_base = 0; kernel_size = 0; initrd_base = 0; initrd_size = 0; } #ifdef DEBUG_BOARD_INIT printf(\"%s: Done\\n\", __func__); #endif }"} {"target": 1, "idx": 13969, "func": "static void *ff_avio_child_next(void *obj, void *prev) { AVIOContext *s = obj; AVIOInternal *internal = s->opaque; return prev ? 
NULL : internal->h; }"} {"target": 1, "idx": 13988, "func": "static void pm_ioport_read(IORange *ioport, uint64_t addr, unsigned width, uint64_t *data) { PIIX4PMState *s = container_of(ioport, PIIX4PMState, ioport); uint32_t val; switch(addr) { case 0x00: val = acpi_pm1_evt_get_sts(&s->ar, s->ar.tmr.overflow_time); break; case 0x02: val = s->ar.pm1.evt.en; break; case 0x04: val = s->ar.pm1.cnt.cnt; break; case 0x08: val = acpi_pm_tmr_get(&s->ar); break; default: val = 0; break; } PIIX4_DPRINTF(\"PM readw port=0x%04x val=0x%04x\\n\", (unsigned int)addr, val); *data = val; }"} {"target": 1, "idx": 14014, "func": "static int update_dimensions(VP8Context *s, int width, int height) { if (width != s->avctx->width || height != s->avctx->height) { if (av_image_check_size(width, height, 0, s->avctx)) return AVERROR_INVALIDDATA; vp8_decode_flush_impl(s->avctx, 1, 0, 1); avcodec_set_dimensions(s->avctx, width, height); } s->mb_width = (s->avctx->coded_width +15) / 16; s->mb_height = (s->avctx->coded_height+15) / 16; s->macroblocks_base = av_mallocz((s->mb_width+s->mb_height*2+1)*sizeof(*s->macroblocks)); s->filter_strength = av_mallocz(s->mb_width*sizeof(*s->filter_strength)); s->intra4x4_pred_mode_top = av_mallocz(s->mb_width*4); s->top_nnz = av_mallocz(s->mb_width*sizeof(*s->top_nnz)); s->top_border = av_mallocz((s->mb_width+1)*sizeof(*s->top_border)); if (!s->macroblocks_base || !s->filter_strength || !s->intra4x4_pred_mode_top || !s->top_nnz || !s->top_border) return AVERROR(ENOMEM); s->macroblocks = s->macroblocks_base + 1; return 0; }"} {"target": 1, "idx": 14022, "func": "int nbd_receive_reply(QIOChannel *ioc, NBDReply *reply, Error **errp) { int ret; const char *type; ret = nbd_read_eof(ioc, &reply->magic, sizeof(reply->magic), errp); if (ret <= 0) { return ret; } be32_to_cpus(&reply->magic); switch (reply->magic) { case NBD_SIMPLE_REPLY_MAGIC: ret = nbd_receive_simple_reply(ioc, &reply->simple, errp); if (ret < 0) { break; } trace_nbd_receive_simple_reply(reply->simple.error, nbd_err_lookup(reply->simple.error), reply->handle); if (reply->simple.error == NBD_ESHUTDOWN) { /* This works even on mingw which lacks a native ESHUTDOWN */ error_setg(errp, \"server shutting down\"); return -EINVAL; } break; case NBD_STRUCTURED_REPLY_MAGIC: ret = nbd_receive_structured_reply_chunk(ioc, &reply->structured, errp); if (ret < 0) { break; } type = nbd_reply_type_lookup(reply->structured.type); trace_nbd_receive_structured_reply_chunk(reply->structured.flags, reply->structured.type, type, reply->structured.handle, reply->structured.length); break; default: error_setg(errp, \"invalid magic (got 0x%\" PRIx32 \")\", reply->magic); return -EINVAL; } if (ret < 0) { return ret; } return 1; }"} {"target": 1, "idx": 14023, "func": "static void ne2000_receive(void *opaque, const uint8_t *buf, int size) { NE2000State *s = opaque; uint8_t *p; int total_len, next, avail, len, index, mcast_idx; uint8_t buf1[60]; static const uint8_t broadcast_macaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; #if defined(DEBUG_NE2000) printf(\"NE2000: received len=%d\\n\", size); #endif if (!ne2000_can_receive(s)) return; /* XXX: check this */ if (s->rxcr & 0x10) { /* promiscuous: receive all */ } else { if (!memcmp(buf, broadcast_macaddr, 6)) { /* broadcast address */ if (!(s->rxcr & 0x04)) return; } else if (buf[0] & 0x01) { /* multicast */ if (!(s->rxcr & 0x08)) return; mcast_idx = compute_mcast_idx(buf); if (!(s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7)))) return; } else if (s->mem[0] == buf[0] && s->mem[2] == buf[1] && 
s->mem[4] == buf[2] && s->mem[6] == buf[3] && s->mem[8] == buf[4] && s->mem[10] == buf[5]) { /* match */ } else { return; } } /* if too small buffer, then expand it */ if (size < MIN_BUF_SIZE) { memcpy(buf1, buf, size); memset(buf1 + size, 0, MIN_BUF_SIZE - size); buf = buf1; size = MIN_BUF_SIZE; } index = s->curpag << 8; /* 4 bytes for header */ total_len = size + 4; /* address for next packet (4 bytes for CRC) */ next = index + ((total_len + 4 + 255) & ~0xff); if (next >= s->stop) next -= (s->stop - s->start); /* prepare packet header */ p = s->mem + index; s->rsr = ENRSR_RXOK; /* receive status */ /* XXX: check this */ if (buf[0] & 0x01) s->rsr |= ENRSR_PHY; p[0] = s->rsr; p[1] = next >> 8; p[2] = total_len; p[3] = total_len >> 8; index += 4; /* write packet data */ while (size > 0) { avail = s->stop - index; len = size; if (len > avail) len = avail; memcpy(s->mem + index, buf, len); buf += len; index += len; if (index == s->stop) index = s->start; size -= len; } s->curpag = next >> 8; /* now we can signal we have receive something */ s->isr |= ENISR_RX; ne2000_update_irq(s); }"} {"target": 1, "idx": 14024, "func": "static void truespeech_apply_twopoint_filter(TSContext *dec, int quart) { int16_t tmp[146 + 60], *ptr0, *ptr1; const int16_t *filter; int i, t, off; t = dec->offset2[quart]; if(t == 127){ memset(dec->newvec, 0, 60 * sizeof(*dec->newvec)); return; } for(i = 0; i < 146; i++) tmp[i] = dec->filtbuf[i]; off = (t / 25) + dec->offset1[quart >> 1] + 18; ptr0 = tmp + 145 - off; ptr1 = tmp + 146; filter = (const int16_t*)ts_order2_coeffs + (t % 25) * 2; for(i = 0; i < 60; i++){ t = (ptr0[0] * filter[0] + ptr0[1] * filter[1] + 0x2000) >> 14; ptr0++; dec->newvec[i] = t; ptr1[i] = t; } }"} {"target": 1, "idx": 14025, "func": "static void scsi_disk_purge_requests(SCSIDiskState *s) { SCSIDiskReq *r; while (!QTAILQ_EMPTY(&s->qdev.requests)) { r = DO_UPCAST(SCSIDiskReq, req, QTAILQ_FIRST(&s->qdev.requests)); if (r->req.aiocb) { bdrv_aio_cancel(r->req.aiocb); } scsi_remove_request(r); } }"} {"target": 1, "idx": 14031, "func": "static int net_slirp_init(VLANState *vlan) { if (!slirp_inited) { slirp_inited = 1; slirp_init(); } slirp_vc = qemu_new_vlan_client(vlan, slirp_receive, NULL); snprintf(slirp_vc->info_str, sizeof(slirp_vc->info_str), \"user redirector\"); return 0; }"} {"target": 1, "idx": 14056, "func": "static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){ char buf1[32], tuple_type[32]; int h, w, depth, maxval;; pnm_get(s, buf1, sizeof(buf1)); if (!strcmp(buf1, \"P4\")) { avctx->pix_fmt = PIX_FMT_MONOWHITE; } else if (!strcmp(buf1, \"P5\")) { if (avctx->codec_id == CODEC_ID_PGMYUV) avctx->pix_fmt = PIX_FMT_YUV420P; else avctx->pix_fmt = PIX_FMT_GRAY8; } else if (!strcmp(buf1, \"P6\")) { avctx->pix_fmt = PIX_FMT_RGB24; } else if (!strcmp(buf1, \"P7\")) { w = -1; h = -1; maxval = -1; depth = -1; tuple_type[0] = '\\0'; for(;;) { pnm_get(s, buf1, sizeof(buf1)); if (!strcmp(buf1, \"WIDTH\")) { pnm_get(s, buf1, sizeof(buf1)); w = strtol(buf1, NULL, 10); } else if (!strcmp(buf1, \"HEIGHT\")) { pnm_get(s, buf1, sizeof(buf1)); h = strtol(buf1, NULL, 10); } else if (!strcmp(buf1, \"DEPTH\")) { pnm_get(s, buf1, sizeof(buf1)); depth = strtol(buf1, NULL, 10); } else if (!strcmp(buf1, \"MAXVAL\")) { pnm_get(s, buf1, sizeof(buf1)); maxval = strtol(buf1, NULL, 10); } else if (!strcmp(buf1, \"TUPLETYPE\")) { pnm_get(s, tuple_type, sizeof(tuple_type)); } else if (!strcmp(buf1, \"ENDHDR\")) { break; } else { return -1; } } /* check that all tags are present */ if (w <= 0 || h <= 0 
|| maxval <= 0 || depth <= 0 || tuple_type[0] == '\\0') return -1; avctx->width = w; avctx->height = h; if (depth == 1) { if (maxval == 1) avctx->pix_fmt = PIX_FMT_MONOWHITE; else avctx->pix_fmt = PIX_FMT_GRAY8; } else if (depth == 3) { avctx->pix_fmt = PIX_FMT_RGB24; } else if (depth == 4) { avctx->pix_fmt = PIX_FMT_RGBA32; } else { return -1; } return 0; } else { return -1; } pnm_get(s, buf1, sizeof(buf1)); avctx->width = atoi(buf1); if (avctx->width <= 0) return -1; pnm_get(s, buf1, sizeof(buf1)); avctx->height = atoi(buf1); if (avctx->height <= 0) return -1; if (avctx->pix_fmt != PIX_FMT_MONOWHITE) { pnm_get(s, buf1, sizeof(buf1)); } /* more check if YUV420 */ if (avctx->pix_fmt == PIX_FMT_YUV420P) { if ((avctx->width & 1) != 0) return -1; h = (avctx->height * 2); if ((h % 3) != 0) return -1; h /= 3; avctx->height = h; } return 0; }"} {"target": 0, "idx": 14076, "func": "void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, int len, int is_write) { return address_space_rw(&address_space_memory, addr, buf, len, is_write); }"} {"target": 0, "idx": 14090, "func": "static void v9fs_link(void *opaque) { V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; int32_t dfid, oldfid; V9fsFidState *dfidp, *oldfidp; V9fsString name; size_t offset = 7; int err = 0; pdu_unmarshal(pdu, offset, \"dds\", &dfid, &oldfid, &name); trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data); dfidp = get_fid(pdu, dfid); if (dfidp == NULL) { err = -ENOENT; goto out_nofid; } oldfidp = get_fid(pdu, oldfid); if (oldfidp == NULL) { err = -ENOENT; goto out; } err = v9fs_co_link(pdu, oldfidp, dfidp, &name); if (!err) { err = offset; } out: put_fid(pdu, dfidp); out_nofid: v9fs_string_free(&name); complete_pdu(s, pdu, err); }"} {"target": 0, "idx": 14108, "func": "int dxva2_init(AVCodecContext *s) { InputStream *ist = s->opaque; int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? 
AV_LOG_VERBOSE : AV_LOG_ERROR; DXVA2Context *ctx; int ret; if (!ist->hwaccel_ctx) { ret = dxva2_alloc(s); if (ret < 0) return ret; } ctx = ist->hwaccel_ctx; if (s->codec_id == AV_CODEC_ID_H264 && (s->profile & ~FF_PROFILE_H264_CONSTRAINED) > FF_PROFILE_H264_HIGH) { av_log(NULL, loglevel, \"Unsupported H.264 profile for DXVA2 HWAccel: %d\\n\", s->profile); return AVERROR(EINVAL); } if (s->codec_id == AV_CODEC_ID_HEVC && s->profile != FF_PROFILE_HEVC_MAIN && s->profile != FF_PROFILE_HEVC_MAIN_10) { av_log(NULL, loglevel, \"Unsupported HEVC profile for DXVA2 HWAccel: %d\\n\", s->profile); return AVERROR(EINVAL); } av_buffer_unref(&ctx->hw_frames_ctx); ret = dxva2_create_decoder(s); if (ret < 0) { av_log(NULL, loglevel, \"Error creating the DXVA2 decoder\\n\"); return ret; } return 0; }"} {"target": 0, "idx": 14126, "func": "static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s, XenPTRegInfo *reg) { PCIDevice *d = &s->dev; XenPTRegion *region = NULL; PCIIORegion *r; int index = 0; /* check 64bit BAR */ index = xen_pt_bar_offset_to_index(reg->offset); if ((0 < index) && (index < PCI_ROM_SLOT)) { int type = s->real_device.io_regions[index - 1].type; if ((type & XEN_HOST_PCI_REGION_TYPE_MEM) && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) { region = &s->bases[index - 1]; if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) { return XEN_PT_BAR_FLAG_UPPER; } } } /* check unused BAR */ r = &d->io_regions[index]; if (!xen_pt_get_bar_size(r)) { return XEN_PT_BAR_FLAG_UNUSED; } /* for ExpROM BAR */ if (index == PCI_ROM_SLOT) { return XEN_PT_BAR_FLAG_MEM; } /* check BAR I/O indicator */ if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) { return XEN_PT_BAR_FLAG_IO; } else { return XEN_PT_BAR_FLAG_MEM; } }"} {"target": 1, "idx": 14141, "func": "static av_cold int g726_encode_init(AVCodecContext *avctx) { G726Context* c = avctx->priv_data; if (avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL && avctx->sample_rate != 8000) { av_log(avctx, AV_LOG_ERROR, \"Sample rates other than 8kHz are not \" \"allowed when the compliance level is higher than unofficial. \" \"Resample or reduce the compliance level.\\n\"); return AVERROR(EINVAL); } if (avctx->sample_rate <= 0) { av_log(avctx, AV_LOG_ERROR, \"Samplerate is invalid\\n\"); return -1; } if(avctx->channels != 1){ av_log(avctx, AV_LOG_ERROR, \"Only mono is supported\\n\"); return -1; } if (avctx->bit_rate % avctx->sample_rate) { av_log(avctx, AV_LOG_ERROR, \"Bitrate - Samplerate combination is invalid\\n\"); return AVERROR(EINVAL); } c->code_size = (avctx->bit_rate + avctx->sample_rate/2) / avctx->sample_rate; if (c->code_size < 2 || c->code_size > 5) { av_log(avctx, AV_LOG_ERROR, \"Invalid number of bits %d\\n\", c->code_size); return AVERROR(EINVAL); } avctx->bits_per_coded_sample = c->code_size; g726_reset(c, c->code_size - 2); avctx->coded_frame = avcodec_alloc_frame(); if (!avctx->coded_frame) return AVERROR(ENOMEM); avctx->coded_frame->key_frame = 1; /* select a frame size that will end on a byte boundary and have a size of approximately 1024 bytes */ avctx->frame_size = ((int[]){ 4096, 2736, 2048, 1640 })[c->code_size - 2]; return 0; }"} {"target": 1, "idx": 14144, "func": "static int dv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { RawDVContext *r = s->priv_data; DVDemuxContext *c = r->dv_demux; int64_t offset = dv_frame_offset(s, c, timestamp, flags); dv_offset_reset(c, offset / c->sys->frame_size); offset = avio_seek(s->pb, offset, SEEK_SET); return (offset < 0) ? 
offset : 0; }"} {"target": 1, "idx": 14162, "func": "static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler, DeviceState *plugged_dev, Error **errp) { sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler)); PCIDevice *pdev = PCI_DEVICE(plugged_dev); sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev); Error *local_err = NULL; /* if DR is disabled we don't need to do anything in the case of * hotplug or coldplug callbacks */ if (!phb->dr_enabled) { /* if this is a hotplug operation initiated by the user * we need to let them know it's not enabled */ if (plugged_dev->hotplugged) { error_setg(errp, QERR_BUS_NO_HOTPLUG, object_get_typename(OBJECT(phb))); } return; } g_assert(drc); spapr_phb_add_pci_device(drc, phb, pdev, &local_err); if (local_err) { error_propagate(errp, local_err); return; } if (plugged_dev->hotplugged) { spapr_hotplug_req_add_by_index(drc); } }"} {"target": 0, "idx": 14168, "func": "av_cold int ff_dvvideo_init(AVCodecContext *avctx) { DVVideoContext *s = avctx->priv_data; DSPContext dsp; static int done = 0; int i, j; if (!done) { VLC dv_vlc; uint16_t new_dv_vlc_bits[NB_DV_VLC*2]; uint8_t new_dv_vlc_len[NB_DV_VLC*2]; uint8_t new_dv_vlc_run[NB_DV_VLC*2]; int16_t new_dv_vlc_level[NB_DV_VLC*2]; done = 1; /* it's faster to include sign bit in a generic VLC parsing scheme */ for (i = 0, j = 0; i < NB_DV_VLC; i++, j++) { new_dv_vlc_bits[j] = dv_vlc_bits[i]; new_dv_vlc_len[j] = dv_vlc_len[i]; new_dv_vlc_run[j] = dv_vlc_run[i]; new_dv_vlc_level[j] = dv_vlc_level[i]; if (dv_vlc_level[i]) { new_dv_vlc_bits[j] <<= 1; new_dv_vlc_len[j]++; j++; new_dv_vlc_bits[j] = (dv_vlc_bits[i] << 1) | 1; new_dv_vlc_len[j] = dv_vlc_len[i] + 1; new_dv_vlc_run[j] = dv_vlc_run[i]; new_dv_vlc_level[j] = -dv_vlc_level[i]; } } /* NOTE: as a trick, we use the fact the no codes are unused to accelerate the parsing of partial codes */ init_vlc(&dv_vlc, TEX_VLC_BITS, j, new_dv_vlc_len, 1, 1, new_dv_vlc_bits, 2, 2, 0); assert(dv_vlc.table_size == 1184); for (i = 0; i < dv_vlc.table_size; i++){ int code = dv_vlc.table[i][0]; int len = dv_vlc.table[i][1]; int level, run; if (len < 0){ //more bits needed run = 0; level = code; } else { run = new_dv_vlc_run [code] + 1; level = new_dv_vlc_level[code]; } ff_dv_rl_vlc[i].len = len; ff_dv_rl_vlc[i].level = level; ff_dv_rl_vlc[i].run = run; } ff_free_vlc(&dv_vlc); } /* Generic DSP setup */ ff_dsputil_init(&dsp, avctx); ff_set_cmp(&dsp, dsp.ildct_cmp, avctx->ildct_cmp); s->get_pixels = dsp.get_pixels; s->ildct_cmp = dsp.ildct_cmp[5]; /* 88DCT setup */ s->fdct[0] = dsp.fdct; s->idct_put[0] = dsp.idct_put; for (i = 0; i < 64; i++) s->dv_zigzag[0][i] = dsp.idct_permutation[ff_zigzag_direct[i]]; /* 248DCT setup */ s->fdct[1] = dsp.fdct248; s->idct_put[1] = ff_simple_idct248_put; // FIXME: need to add it to DSP memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64); avctx->coded_frame = &s->picture; s->avctx = avctx; avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; return 0; }"} {"target": 1, "idx": 14199, "func": "static int libquvi_read_header(AVFormatContext *s) { int i, ret; quvi_t q; quvi_media_t m; QUVIcode rc; LibQuviContext *qc = s->priv_data; char *media_url, *pagetitle; rc = quvi_init(&q); if (rc != QUVI_OK) goto quvi_fail; quvi_setopt(q, QUVIOPT_FORMAT, qc->format); rc = quvi_parse(q, s->filename, &m); if (rc != QUVI_OK) goto quvi_fail; rc = quvi_getprop(m, QUVIPROP_MEDIAURL, &media_url); if (rc != QUVI_OK) goto quvi_fail; av_assert0(!qc->fmtctx->codec_whitelist && !qc->fmtctx->format_whitelist); qc->fmtctx-> codec_whitelist = 
av_strdup(s->codec_whitelist); qc->fmtctx->format_whitelist = av_strdup(s->format_whitelist); ret = avformat_open_input(&qc->fmtctx, media_url, NULL, NULL); if (ret < 0) goto end; rc = quvi_getprop(m, QUVIPROP_PAGETITLE, &pagetitle); if (rc == QUVI_OK) av_dict_set(&s->metadata, \"title\", pagetitle, 0); for (i = 0; i < qc->fmtctx->nb_streams; i++) { AVStream *st = avformat_new_stream(s, NULL); AVStream *ist = qc->fmtctx->streams[i]; if (!st) { ret = AVERROR(ENOMEM); goto end; } avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den); avcodec_copy_context(st->codec, qc->fmtctx->streams[i]->codec); } return 0; quvi_fail: av_log(s, AV_LOG_ERROR, \"%s\\n\", quvi_strerror(q, rc)); ret = AVERROR_EXTERNAL; end: quvi_parse_close(&m); quvi_close(&q); return ret; }"} {"target": 0, "idx": 14220, "func": "static int add_old_style_options(const char *fmt, QemuOpts *opts, const char *base_filename, const char *base_fmt) { if (base_filename) { if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) { error_report(\"Backing file not supported for file format '%s'\", fmt); return -1; } } if (base_fmt) { if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) { error_report(\"Backing file format not supported for file \" \"format '%s'\", fmt); return -1; } } return 0; }"} {"target": 0, "idx": 14228, "func": "void qemu_flush_queued_packets(VLANClientState *vc) { while (!TAILQ_EMPTY(&vc->vlan->send_queue)) { VLANPacket *packet; int ret; packet = TAILQ_FIRST(&vc->vlan->send_queue); TAILQ_REMOVE(&vc->vlan->send_queue, packet, entry); ret = qemu_deliver_packet(packet->sender, packet->data, packet->size); if (ret == 0 && packet->sent_cb != NULL) { TAILQ_INSERT_HEAD(&vc->vlan->send_queue, packet, entry); break; } if (packet->sent_cb) packet->sent_cb(packet->sender, ret); qemu_free(packet); } }"} {"target": 1, "idx": 14248, "func": "LF_FUNC (h, luma, sse2) LF_IFUNC(h, luma_intra, sse2) LF_FUNC (v, luma, sse2) LF_IFUNC(v, luma_intra, sse2) /***********************************/ /* weighted prediction */ #define H264_WEIGHT(W, H, OPT) \\ void ff_h264_weight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \\ int stride, int log2_denom, int weight, int offset); #define H264_BIWEIGHT(W, H, OPT) \\ void ff_h264_biweight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \\ uint8_t *src, int stride, int log2_denom, int weightd, \\ int weights, int offset); #define H264_BIWEIGHT_MMX(W,H) \\ H264_WEIGHT (W, H, mmx2) \\ H264_BIWEIGHT(W, H, mmx2) #define H264_BIWEIGHT_MMX_SSE(W,H) \\ H264_BIWEIGHT_MMX(W, H) \\ H264_WEIGHT (W, H, sse2) \\ H264_BIWEIGHT (W, H, sse2) \\ H264_BIWEIGHT (W, H, ssse3) H264_BIWEIGHT_MMX_SSE(16, 16) H264_BIWEIGHT_MMX_SSE(16, 8) H264_BIWEIGHT_MMX_SSE( 8, 16) H264_BIWEIGHT_MMX_SSE( 8, 8) H264_BIWEIGHT_MMX_SSE( 8, 4) H264_BIWEIGHT_MMX ( 4, 8) H264_BIWEIGHT_MMX ( 4, 4) H264_BIWEIGHT_MMX ( 4, 2) void ff_h264dsp_init_x86(H264DSPContext *c) { int mm_flags = av_get_cpu_flags(); if (mm_flags & AV_CPU_FLAG_MMX2) { c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2; } #if HAVE_YASM if (mm_flags & AV_CPU_FLAG_MMX) { c->h264_idct_dc_add= c->h264_idct_add= ff_h264_idct_add_mmx; c->h264_idct8_dc_add= c->h264_idct8_add= ff_h264_idct8_add_mmx; c->h264_idct_add16 = ff_h264_idct_add16_mmx; c->h264_idct8_add4 = ff_h264_idct8_add4_mmx; c->h264_idct_add8 = ff_h264_idct_add8_mmx; c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx; if (mm_flags & AV_CPU_FLAG_MMX2) { c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2; c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2; c->h264_idct_add16 = 
ff_h264_idct_add16_mmx2; c->h264_idct8_add4 = ff_h264_idct8_add4_mmx2; c->h264_idct_add8 = ff_h264_idct_add8_mmx2; c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2; c->h264_v_loop_filter_chroma= ff_x264_deblock_v_chroma_mmxext; c->h264_h_loop_filter_chroma= ff_x264_deblock_h_chroma_mmxext; c->h264_v_loop_filter_chroma_intra= ff_x264_deblock_v_chroma_intra_mmxext; c->h264_h_loop_filter_chroma_intra= ff_x264_deblock_h_chroma_intra_mmxext; #if ARCH_X86_32 c->h264_v_loop_filter_luma= ff_x264_deblock_v_luma_mmxext; c->h264_h_loop_filter_luma= ff_x264_deblock_h_luma_mmxext; c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext; c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext; c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2; c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2; c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2; c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2; c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2; c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2; c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2; c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2; c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2; c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2; c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2; c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2; c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2; c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2; c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2; c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2; if (mm_flags&AV_CPU_FLAG_SSE2) { c->h264_idct8_add = ff_h264_idct8_add_sse2; c->h264_idct8_add4= ff_h264_idct8_add4_sse2; c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2; c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_sse2; c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_sse2; c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_sse2; c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_sse2; c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_sse2; c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_sse2; c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_sse2; c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2; c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2; c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2; c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2; c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2; c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2; c->h264_idct_add16 = ff_h264_idct_add16_sse2; c->h264_idct_add8 = ff_h264_idct_add8_sse2; c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2; } if (mm_flags&AV_CPU_FLAG_SSSE3) { c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3; c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_ssse3; c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_ssse3; c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3; c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3; } } } }"} {"target": 0, "idx": 14263, "func": "static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band, AVCodecContext *avctx) { int plane, band_num, indx, transform_id, scan_indx; int i; plane = get_bits(&ctx->gb, 2); band_num = get_bits(&ctx->gb, 4); if (band->plane != plane || band->band_num != band_num) { av_log(avctx, AV_LOG_ERROR, \"Invalid band header sequence!\\n\"); return AVERROR_INVALIDDATA; } band->is_empty = get_bits1(&ctx->gb); if 
(!band->is_empty) { int old_blk_size = band->blk_size; /* skip header size * If header size is not given, header size is 4 bytes. */ if (get_bits1(&ctx->gb)) skip_bits(&ctx->gb, 16); band->is_halfpel = get_bits(&ctx->gb, 2); if (band->is_halfpel >= 2) { av_log(avctx, AV_LOG_ERROR, \"Invalid/unsupported mv resolution: %d!\\n\", band->is_halfpel); return AVERROR_INVALIDDATA; } #if IVI4_STREAM_ANALYSER if (!band->is_halfpel) ctx->uses_fullpel = 1; #endif band->checksum_present = get_bits1(&ctx->gb); if (band->checksum_present) band->checksum = get_bits(&ctx->gb, 16); indx = get_bits(&ctx->gb, 2); if (indx == 3) { av_log(avctx, AV_LOG_ERROR, \"Invalid block size!\\n\"); return AVERROR_INVALIDDATA; } band->mb_size = 16 >> indx; band->blk_size = 8 >> (indx >> 1); band->inherit_mv = get_bits1(&ctx->gb); band->inherit_qdelta = get_bits1(&ctx->gb); band->glob_quant = get_bits(&ctx->gb, 5); if (!get_bits1(&ctx->gb) || ctx->frame_type == IVI4_FRAMETYPE_INTRA) { transform_id = get_bits(&ctx->gb, 5); if (transform_id >= FF_ARRAY_ELEMS(transforms) || !transforms[transform_id].inv_trans) { avpriv_request_sample(avctx, \"Transform %d\", transform_id); return AVERROR_PATCHWELCOME; } if ((transform_id >= 7 && transform_id <= 9) || transform_id == 17) { avpriv_request_sample(avctx, \"DCT transform\"); return AVERROR_PATCHWELCOME; } #if IVI4_STREAM_ANALYSER if ((transform_id >= 0 && transform_id <= 2) || transform_id == 10) ctx->uses_haar = 1; #endif band->inv_transform = transforms[transform_id].inv_trans; band->dc_transform = transforms[transform_id].dc_trans; band->is_2d_trans = transforms[transform_id].is_2d_trans; if (transform_id < 10) band->transform_size = 8; else band->transform_size = 4; if (band->blk_size != band->transform_size) return AVERROR_INVALIDDATA; scan_indx = get_bits(&ctx->gb, 4); if (scan_indx == 15) { av_log(avctx, AV_LOG_ERROR, \"Custom scan pattern encountered!\\n\"); return AVERROR_INVALIDDATA; } if (scan_indx > 4 && scan_indx < 10) { if (band->blk_size != 4) return AVERROR_INVALIDDATA; } else if (band->blk_size != 8) return AVERROR_INVALIDDATA; band->scan = scan_index_to_tab[scan_indx]; band->quant_mat = get_bits(&ctx->gb, 5); if (band->quant_mat >= FF_ARRAY_ELEMS(quant_index_to_tab)) { if (band->quant_mat == 31) av_log(avctx, AV_LOG_ERROR, \"Custom quant matrix encountered!\\n\"); else avpriv_request_sample(avctx, \"Quantization matrix %d\", band->quant_mat); band->quant_mat = -1; return AVERROR_INVALIDDATA; } } else { if (old_blk_size != band->blk_size) { av_log(avctx, AV_LOG_ERROR, \"The band block size does not match the configuration \" \"inherited\\n\"); return AVERROR_INVALIDDATA; } if (band->quant_mat < 0) { av_log(avctx, AV_LOG_ERROR, \"Invalid quant_mat inherited\\n\"); return AVERROR_INVALIDDATA; } } /* decode block huffman codebook */ if (!get_bits1(&ctx->gb)) band->blk_vlc.tab = ctx->blk_vlc.tab; else if (ff_ivi_dec_huff_desc(&ctx->gb, 1, IVI_BLK_HUFF, &band->blk_vlc, avctx)) return AVERROR_INVALIDDATA; /* select appropriate rvmap table for this band */ band->rvmap_sel = get_bits1(&ctx->gb) ? 
get_bits(&ctx->gb, 3) : 8; /* decode rvmap probability corrections if any */ band->num_corr = 0; /* there is no corrections */ if (get_bits1(&ctx->gb)) { band->num_corr = get_bits(&ctx->gb, 8); /* get number of correction pairs */ if (band->num_corr > 61) { av_log(avctx, AV_LOG_ERROR, \"Too many corrections: %d\\n\", band->num_corr); return AVERROR_INVALIDDATA; } /* read correction pairs */ for (i = 0; i < band->num_corr * 2; i++) band->corr[i] = get_bits(&ctx->gb, 8); } } if (band->blk_size == 8) { band->intra_base = &ivi4_quant_8x8_intra[quant_index_to_tab[band->quant_mat]][0]; band->inter_base = &ivi4_quant_8x8_inter[quant_index_to_tab[band->quant_mat]][0]; } else { band->intra_base = &ivi4_quant_4x4_intra[quant_index_to_tab[band->quant_mat]][0]; band->inter_base = &ivi4_quant_4x4_inter[quant_index_to_tab[band->quant_mat]][0]; } /* Indeo 4 doesn't use scale tables */ band->intra_scale = NULL; band->inter_scale = NULL; align_get_bits(&ctx->gb); return 0; }"} {"target": 0, "idx": 14289, "func": "long do_syscall(void *cpu_env, int num, long arg1, long arg2, long arg3, long arg4, long arg5, long arg6) { long ret; struct stat st; struct kernel_statfs *stfs; #ifdef DEBUG gemu_log(\"syscall %d\\n\", num); #endif switch(num) { case TARGET_NR_exit: #ifdef HAVE_GPROF _mcleanup(); #endif _exit(arg1); ret = 0; /* avoid warning */ break; case TARGET_NR_read: ret = get_errno(read(arg1, (void *)arg2, arg3)); break; case TARGET_NR_write: ret = get_errno(write(arg1, (void *)arg2, arg3)); break; case TARGET_NR_open: ret = get_errno(open((const char *)arg1, arg2, arg3)); break; case TARGET_NR_close: ret = get_errno(close(arg1)); break; case TARGET_NR_brk: ret = do_brk((char *)arg1); break; case TARGET_NR_fork: ret = get_errno(fork()); break; case TARGET_NR_waitpid: { int *status = (int *)arg2; ret = get_errno(waitpid(arg1, status, arg3)); if (!is_error(ret) && status) tswapls((long *)&status); } break; case TARGET_NR_creat: ret = get_errno(creat((const char *)arg1, arg2)); break; case TARGET_NR_link: ret = get_errno(link((const char *)arg1, (const char *)arg2)); break; case TARGET_NR_unlink: ret = get_errno(unlink((const char *)arg1)); break; case TARGET_NR_execve: ret = get_errno(execve((const char *)arg1, (void *)arg2, (void *)arg3)); break; case TARGET_NR_chdir: ret = get_errno(chdir((const char *)arg1)); break; case TARGET_NR_time: { int *time_ptr = (int *)arg1; ret = get_errno(time((time_t *)time_ptr)); if (!is_error(ret) && time_ptr) tswap32s(time_ptr); } break; case TARGET_NR_mknod: ret = get_errno(mknod((const char *)arg1, arg2, arg3)); break; case TARGET_NR_chmod: ret = get_errno(chmod((const char *)arg1, arg2)); break; case TARGET_NR_lchown: ret = get_errno(chown((const char *)arg1, arg2, arg3)); break; case TARGET_NR_break: goto unimplemented; case TARGET_NR_oldstat: goto unimplemented; case TARGET_NR_lseek: ret = get_errno(lseek(arg1, arg2, arg3)); break; case TARGET_NR_getpid: ret = get_errno(getpid()); break; case TARGET_NR_mount: /* need to look at the data field */ goto unimplemented; case TARGET_NR_umount: ret = get_errno(umount((const char *)arg1)); break; case TARGET_NR_setuid: ret = get_errno(setuid(arg1)); break; case TARGET_NR_getuid: ret = get_errno(getuid()); break; case TARGET_NR_stime: { int *time_ptr = (int *)arg1; if (time_ptr) tswap32s(time_ptr); ret = get_errno(stime((time_t *)time_ptr)); } break; case TARGET_NR_ptrace: goto unimplemented; case TARGET_NR_alarm: ret = alarm(arg1); break; case TARGET_NR_oldfstat: goto unimplemented; case TARGET_NR_pause: ret = 
get_errno(pause()); break; case TARGET_NR_utime: goto unimplemented; case TARGET_NR_stty: goto unimplemented; case TARGET_NR_gtty: goto unimplemented; case TARGET_NR_access: ret = get_errno(access((const char *)arg1, arg2)); break; case TARGET_NR_nice: ret = get_errno(nice(arg1)); break; case TARGET_NR_ftime: goto unimplemented; case TARGET_NR_sync: ret = get_errno(sync()); break; case TARGET_NR_kill: ret = get_errno(kill(arg1, arg2)); break; case TARGET_NR_rename: ret = get_errno(rename((const char *)arg1, (const char *)arg2)); break; case TARGET_NR_mkdir: ret = get_errno(mkdir((const char *)arg1, arg2)); break; case TARGET_NR_rmdir: ret = get_errno(rmdir((const char *)arg1)); break; case TARGET_NR_dup: ret = get_errno(dup(arg1)); break; case TARGET_NR_pipe: { int *pipe_ptr = (int *)arg1; ret = get_errno(pipe(pipe_ptr)); if (!is_error(ret)) { tswap32s(&pipe_ptr[0]); tswap32s(&pipe_ptr[1]); } } break; case TARGET_NR_times: goto unimplemented; case TARGET_NR_prof: goto unimplemented; case TARGET_NR_setgid: ret = get_errno(setgid(arg1)); break; case TARGET_NR_getgid: ret = get_errno(getgid()); break; case TARGET_NR_signal: goto unimplemented; case TARGET_NR_geteuid: ret = get_errno(geteuid()); break; case TARGET_NR_getegid: ret = get_errno(getegid()); break; case TARGET_NR_acct: goto unimplemented; case TARGET_NR_umount2: ret = get_errno(umount2((const char *)arg1, arg2)); break; case TARGET_NR_lock: goto unimplemented; case TARGET_NR_ioctl: ret = do_ioctl(arg1, arg2, arg3); break; case TARGET_NR_fcntl: switch(arg2) { case F_GETLK: case F_SETLK: case F_SETLKW: goto unimplemented; default: ret = get_errno(fcntl(arg1, arg2, arg3)); break; } break; case TARGET_NR_mpx: goto unimplemented; case TARGET_NR_setpgid: ret = get_errno(setpgid(arg1, arg2)); break; case TARGET_NR_ulimit: goto unimplemented; case TARGET_NR_oldolduname: goto unimplemented; case TARGET_NR_umask: ret = get_errno(umask(arg1)); break; case TARGET_NR_chroot: ret = get_errno(chroot((const char *)arg1)); break; case TARGET_NR_ustat: goto unimplemented; case TARGET_NR_dup2: ret = get_errno(dup2(arg1, arg2)); break; case TARGET_NR_getppid: ret = get_errno(getppid()); break; case TARGET_NR_getpgrp: ret = get_errno(getpgrp()); break; case TARGET_NR_setsid: ret = get_errno(setsid()); break; case TARGET_NR_sigaction: #if 0 { int signum = arg1; struct target_old_sigaction *tact = arg2, *toldact = arg3; ret = get_errno(setsid()); } break; #else goto unimplemented; #endif case TARGET_NR_sgetmask: goto unimplemented; case TARGET_NR_ssetmask: goto unimplemented; case TARGET_NR_setreuid: ret = get_errno(setreuid(arg1, arg2)); break; case TARGET_NR_setregid: ret = get_errno(setregid(arg1, arg2)); break; case TARGET_NR_sigsuspend: goto unimplemented; case TARGET_NR_sigpending: goto unimplemented; case TARGET_NR_sethostname: ret = get_errno(sethostname((const char *)arg1, arg2)); break; case TARGET_NR_setrlimit: goto unimplemented; case TARGET_NR_getrlimit: goto unimplemented; case TARGET_NR_getrusage: goto unimplemented; case TARGET_NR_gettimeofday: { struct target_timeval *target_tv = (void *)arg1; struct timeval tv; ret = get_errno(gettimeofday(&tv, NULL)); if (!is_error(ret)) { target_tv->tv_sec = tswapl(tv.tv_sec); target_tv->tv_usec = tswapl(tv.tv_usec); } } break; case TARGET_NR_settimeofday: { struct target_timeval *target_tv = (void *)arg1; struct timeval tv; tv.tv_sec = tswapl(target_tv->tv_sec); tv.tv_usec = tswapl(target_tv->tv_usec); ret = get_errno(settimeofday(&tv, NULL)); } break; case TARGET_NR_getgroups: goto unimplemented; 
case TARGET_NR_setgroups: goto unimplemented; case TARGET_NR_select: goto unimplemented; case TARGET_NR_symlink: ret = get_errno(symlink((const char *)arg1, (const char *)arg2)); break; case TARGET_NR_oldlstat: goto unimplemented; case TARGET_NR_readlink: ret = get_errno(readlink((const char *)arg1, (char *)arg2, arg3)); break; case TARGET_NR_uselib: goto unimplemented; case TARGET_NR_swapon: ret = get_errno(swapon((const char *)arg1, arg2)); break; case TARGET_NR_reboot: goto unimplemented; case TARGET_NR_readdir: goto unimplemented; #ifdef TARGET_I386 case TARGET_NR_mmap: { uint32_t v1, v2, v3, v4, v5, v6, *vptr; vptr = (uint32_t *)arg1; v1 = tswap32(vptr[0]); v2 = tswap32(vptr[1]); v3 = tswap32(vptr[2]); v4 = tswap32(vptr[3]); v5 = tswap32(vptr[4]); v6 = tswap32(vptr[5]); ret = get_errno((long)mmap((void *)v1, v2, v3, v4, v5, v6)); } break; #endif #ifdef TARGET_I386 case TARGET_NR_mmap2: #else case TARGET_NR_mmap: #endif ret = get_errno((long)mmap((void *)arg1, arg2, arg3, arg4, arg5, arg6)); break; case TARGET_NR_munmap: ret = get_errno(munmap((void *)arg1, arg2)); break; case TARGET_NR_truncate: ret = get_errno(truncate((const char *)arg1, arg2)); break; case TARGET_NR_ftruncate: ret = get_errno(ftruncate(arg1, arg2)); break; case TARGET_NR_fchmod: ret = get_errno(fchmod(arg1, arg2)); break; case TARGET_NR_fchown: ret = get_errno(fchown(arg1, arg2, arg3)); break; case TARGET_NR_getpriority: ret = get_errno(getpriority(arg1, arg2)); break; case TARGET_NR_setpriority: ret = get_errno(setpriority(arg1, arg2, arg3)); break; case TARGET_NR_profil: goto unimplemented; case TARGET_NR_statfs: stfs = (void *)arg2; ret = get_errno(sys_statfs((const char *)arg1, stfs)); convert_statfs: if (!is_error(ret)) { tswap32s(&stfs->f_type); tswap32s(&stfs->f_bsize); tswap32s(&stfs->f_blocks); tswap32s(&stfs->f_bfree); tswap32s(&stfs->f_bavail); tswap32s(&stfs->f_files); tswap32s(&stfs->f_ffree); tswap32s(&stfs->f_fsid.val[0]); tswap32s(&stfs->f_fsid.val[1]); tswap32s(&stfs->f_namelen); } break; case TARGET_NR_fstatfs: stfs = (void *)arg2; ret = get_errno(sys_fstatfs(arg1, stfs)); goto convert_statfs; case TARGET_NR_ioperm: goto unimplemented; case TARGET_NR_socketcall: ret = do_socketcall(arg1, (long *)arg2); break; case TARGET_NR_syslog: goto unimplemented; case TARGET_NR_setitimer: goto unimplemented; case TARGET_NR_getitimer: goto unimplemented; case TARGET_NR_stat: ret = get_errno(stat((const char *)arg1, &st)); goto do_stat; case TARGET_NR_lstat: ret = get_errno(lstat((const char *)arg1, &st)); goto do_stat; case TARGET_NR_fstat: { ret = get_errno(fstat(arg1, &st)); do_stat: if (!is_error(ret)) { struct target_stat *target_st = (void *)arg2; target_st->st_dev = tswap16(st.st_dev); target_st->st_ino = tswapl(st.st_ino); target_st->st_mode = tswap16(st.st_mode); target_st->st_nlink = tswap16(st.st_nlink); target_st->st_uid = tswap16(st.st_uid); target_st->st_gid = tswap16(st.st_gid); target_st->st_rdev = tswap16(st.st_rdev); target_st->st_size = tswapl(st.st_size); target_st->st_blksize = tswapl(st.st_blksize); target_st->st_blocks = tswapl(st.st_blocks); target_st->st_atime = tswapl(st.st_atime); target_st->st_mtime = tswapl(st.st_mtime); target_st->st_ctime = tswapl(st.st_ctime); } } break; case TARGET_NR_olduname: goto unimplemented; case TARGET_NR_iopl: goto unimplemented; case TARGET_NR_vhangup: ret = get_errno(vhangup()); break; case TARGET_NR_idle: goto unimplemented; case TARGET_NR_vm86old: goto unimplemented; case TARGET_NR_wait4: { int status; target_long *status_ptr = (void *)arg2; struct 
rusage rusage, *rusage_ptr; struct target_rusage *target_rusage = (void *)arg4; if (target_rusage) rusage_ptr = &rusage; else rusage_ptr = NULL; ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); if (!is_error(ret)) { if (status_ptr) *status_ptr = tswap32(status); if (target_rusage) { target_rusage->ru_utime.tv_sec = tswapl(rusage.ru_utime.tv_sec); target_rusage->ru_utime.tv_usec = tswapl(rusage.ru_utime.tv_usec); target_rusage->ru_stime.tv_sec = tswapl(rusage.ru_stime.tv_sec); target_rusage->ru_stime.tv_usec = tswapl(rusage.ru_stime.tv_usec); target_rusage->ru_maxrss = tswapl(rusage.ru_maxrss); target_rusage->ru_ixrss = tswapl(rusage.ru_ixrss); target_rusage->ru_idrss = tswapl(rusage.ru_idrss); target_rusage->ru_isrss = tswapl(rusage.ru_isrss); target_rusage->ru_minflt = tswapl(rusage.ru_minflt); target_rusage->ru_majflt = tswapl(rusage.ru_majflt); target_rusage->ru_nswap = tswapl(rusage.ru_nswap); target_rusage->ru_inblock = tswapl(rusage.ru_inblock); target_rusage->ru_oublock = tswapl(rusage.ru_oublock); target_rusage->ru_msgsnd = tswapl(rusage.ru_msgsnd); target_rusage->ru_msgrcv = tswapl(rusage.ru_msgrcv); target_rusage->ru_nsignals = tswapl(rusage.ru_nsignals); target_rusage->ru_nvcsw = tswapl(rusage.ru_nvcsw); target_rusage->ru_nivcsw = tswapl(rusage.ru_nivcsw); } } } break; case TARGET_NR_swapoff: ret = get_errno(swapoff((const char *)arg1)); break; case TARGET_NR_sysinfo: goto unimplemented; case TARGET_NR_ipc: goto unimplemented; case TARGET_NR_fsync: ret = get_errno(fsync(arg1)); break; case TARGET_NR_sigreturn: goto unimplemented; case TARGET_NR_clone: goto unimplemented; case TARGET_NR_setdomainname: ret = get_errno(setdomainname((const char *)arg1, arg2)); break; case TARGET_NR_uname: /* no need to transcode because we use the linux syscall */ ret = get_errno(sys_uname((struct new_utsname *)arg1)); break; #ifdef TARGET_I386 case TARGET_NR_modify_ldt: ret = get_errno(gemu_modify_ldt(cpu_env, arg1, (void *)arg2, arg3)); break; #endif case TARGET_NR_adjtimex: goto unimplemented; case TARGET_NR_mprotect: ret = get_errno(mprotect((void *)arg1, arg2, arg3)); break; case TARGET_NR_sigprocmask: { int how = arg1; sigset_t set, oldset, *set_ptr; target_ulong *pset = (void *)arg2, *poldset = (void *)arg3; switch(how) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; break; case TARGET_SIG_UNBLOCK: how = SIG_UNBLOCK; break; case TARGET_SIG_SETMASK: how = SIG_SETMASK; break; default: ret = -EINVAL; goto fail; } if (pset) { target_to_host_old_sigset(&set, pset); set_ptr = &set; } else { set_ptr = NULL; } ret = get_errno(sigprocmask(arg1, set_ptr, &oldset)); if (!is_error(ret) && poldset) { host_to_target_old_sigset(poldset, &oldset); } } break; case TARGET_NR_create_module: case TARGET_NR_init_module: case TARGET_NR_delete_module: case TARGET_NR_get_kernel_syms: goto unimplemented; case TARGET_NR_quotactl: goto unimplemented; case TARGET_NR_getpgid: ret = get_errno(getpgid(arg1)); break; case TARGET_NR_fchdir: ret = get_errno(fchdir(arg1)); break; case TARGET_NR_bdflush: goto unimplemented; case TARGET_NR_sysfs: goto unimplemented; case TARGET_NR_personality: ret = get_errno(mprotect((void *)arg1, arg2, arg3)); break; case TARGET_NR_afs_syscall: goto unimplemented; case TARGET_NR_setfsuid: goto unimplemented; case TARGET_NR_setfsgid: goto unimplemented; case TARGET_NR__llseek: { int64_t res; ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); *(int64_t *)arg4 = tswap64(res); } break; case TARGET_NR_getdents: #if TARGET_LONG_SIZE != 4 #error not supported #endif { struct dirent *dirp = 
(void *)arg2; long count = arg3; ret = get_errno(sys_getdents(arg1, dirp, count)); if (!is_error(ret)) { struct dirent *de; int len = ret; int reclen; de = dirp; while (len > 0) { reclen = tswap16(de->d_reclen); if (reclen > len) break; de->d_reclen = reclen; tswapls(&de->d_ino); tswapls(&de->d_off); de = (struct dirent *)((char *)de + reclen); len -= reclen; } } } break; case TARGET_NR__newselect: ret = do_select(arg1, (void *)arg2, (void *)arg3, (void *)arg4, (void *)arg5); break; case TARGET_NR_flock: goto unimplemented; case TARGET_NR_msync: ret = get_errno(msync((void *)arg1, arg2, arg3)); break; case TARGET_NR_readv: { int count = arg3; int i; struct iovec *vec; struct target_iovec *target_vec = (void *)arg2; vec = alloca(count * sizeof(struct iovec)); for(i = 0;i < count; i++) { vec[i].iov_base = (void *)tswapl(target_vec[i].iov_base); vec[i].iov_len = tswapl(target_vec[i].iov_len); } ret = get_errno(readv(arg1, vec, count)); } break; case TARGET_NR_writev: { int count = arg3; int i; struct iovec *vec; struct target_iovec *target_vec = (void *)arg2; vec = alloca(count * sizeof(struct iovec)); for(i = 0;i < count; i++) { vec[i].iov_base = (void *)tswapl(target_vec[i].iov_base); vec[i].iov_len = tswapl(target_vec[i].iov_len); } ret = get_errno(writev(arg1, vec, count)); } break; case TARGET_NR_getsid: ret = get_errno(getsid(arg1)); break; case TARGET_NR_fdatasync: goto unimplemented; case TARGET_NR__sysctl: goto unimplemented; case TARGET_NR_mlock: ret = get_errno(mlock((void *)arg1, arg2)); break; case TARGET_NR_munlock: ret = get_errno(munlock((void *)arg1, arg2)); break; case TARGET_NR_mlockall: ret = get_errno(mlockall(arg1)); break; case TARGET_NR_munlockall: ret = get_errno(munlockall()); break; case TARGET_NR_sched_setparam: goto unimplemented; case TARGET_NR_sched_getparam: goto unimplemented; case TARGET_NR_sched_setscheduler: goto unimplemented; case TARGET_NR_sched_getscheduler: goto unimplemented; case TARGET_NR_sched_yield: ret = get_errno(sched_yield()); break; case TARGET_NR_sched_get_priority_max: case TARGET_NR_sched_get_priority_min: case TARGET_NR_sched_rr_get_interval: case TARGET_NR_nanosleep: case TARGET_NR_mremap: case TARGET_NR_setresuid: case TARGET_NR_getresuid: case TARGET_NR_vm86: case TARGET_NR_query_module: case TARGET_NR_poll: case TARGET_NR_nfsservctl: case TARGET_NR_setresgid: case TARGET_NR_getresgid: case TARGET_NR_prctl: case TARGET_NR_rt_sigreturn: case TARGET_NR_rt_sigaction: case TARGET_NR_rt_sigprocmask: case TARGET_NR_rt_sigpending: case TARGET_NR_rt_sigtimedwait: case TARGET_NR_rt_sigqueueinfo: case TARGET_NR_rt_sigsuspend: case TARGET_NR_pread: case TARGET_NR_pwrite: goto unimplemented; case TARGET_NR_chown: ret = get_errno(chown((const char *)arg1, arg2, arg3)); break; case TARGET_NR_getcwd: ret = get_errno(sys_getcwd1((char *)arg1, arg2)); break; case TARGET_NR_capget: case TARGET_NR_capset: case TARGET_NR_sigaltstack: case TARGET_NR_sendfile: case TARGET_NR_getpmsg: case TARGET_NR_putpmsg: case TARGET_NR_vfork: ret = get_errno(vfork()); break; case TARGET_NR_ugetrlimit: case TARGET_NR_truncate64: case TARGET_NR_ftruncate64: case TARGET_NR_stat64: case TARGET_NR_lstat64: case TARGET_NR_fstat64: case TARGET_NR_lchown32: case TARGET_NR_getuid32: case TARGET_NR_getgid32: case TARGET_NR_geteuid32: case TARGET_NR_getegid32: case TARGET_NR_setreuid32: case TARGET_NR_setregid32: case TARGET_NR_getgroups32: case TARGET_NR_setgroups32: case TARGET_NR_fchown32: case TARGET_NR_setresuid32: case TARGET_NR_getresuid32: case TARGET_NR_setresgid32: case 
TARGET_NR_getresgid32: case TARGET_NR_chown32: case TARGET_NR_setuid32: case TARGET_NR_setgid32: case TARGET_NR_setfsuid32: case TARGET_NR_setfsgid32: case TARGET_NR_pivot_root: case TARGET_NR_mincore: case TARGET_NR_madvise: case TARGET_NR_getdents64: case TARGET_NR_fcntl64: case TARGET_NR_security: goto unimplemented; case TARGET_NR_gettid: ret = get_errno(gettid()); break; case TARGET_NR_readahead: case TARGET_NR_setxattr: case TARGET_NR_lsetxattr: case TARGET_NR_fsetxattr: case TARGET_NR_getxattr: case TARGET_NR_lgetxattr: case TARGET_NR_fgetxattr: case TARGET_NR_listxattr: case TARGET_NR_llistxattr: case TARGET_NR_flistxattr: case TARGET_NR_removexattr: case TARGET_NR_lremovexattr: case TARGET_NR_fremovexattr: goto unimplemented; default: unimplemented: gemu_log(\"Unsupported syscall: %d\\n\", num); ret = -ENOSYS; break; } fail: return ret; }"} {"target": 1, "idx": 14300, "func": "static int rebuild_refcount_structure(BlockDriverState *bs, BdrvCheckResult *res, void **refcount_table, int64_t *nb_clusters) { BDRVQcow2State *s = bs->opaque; int64_t first_free_cluster = 0, reftable_offset = -1, cluster = 0; int64_t refblock_offset, refblock_start, refblock_index; uint32_t reftable_size = 0; uint64_t *on_disk_reftable = NULL; void *on_disk_refblock; int ret = 0; struct { uint64_t reftable_offset; uint32_t reftable_clusters; } QEMU_PACKED reftable_offset_and_clusters; qcow2_cache_empty(bs, s->refcount_block_cache); write_refblocks: for (; cluster < *nb_clusters; cluster++) { if (!s->get_refcount(*refcount_table, cluster)) { continue; } refblock_index = cluster >> s->refcount_block_bits; refblock_start = refblock_index << s->refcount_block_bits; /* Don't allocate a cluster in a refblock already written to disk */ if (first_free_cluster < refblock_start) { first_free_cluster = refblock_start; } refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table, nb_clusters, &first_free_cluster); if (refblock_offset < 0) { fprintf(stderr, \"ERROR allocating refblock: %s\\n\", strerror(-refblock_offset)); res->check_errors++; ret = refblock_offset; goto fail; } if (reftable_size <= refblock_index) { uint32_t old_reftable_size = reftable_size; uint64_t *new_on_disk_reftable; reftable_size = ROUND_UP((refblock_index + 1) * sizeof(uint64_t), s->cluster_size) / sizeof(uint64_t); new_on_disk_reftable = g_try_realloc(on_disk_reftable, reftable_size * sizeof(uint64_t)); if (!new_on_disk_reftable) { res->check_errors++; ret = -ENOMEM; goto fail; } on_disk_reftable = new_on_disk_reftable; memset(on_disk_reftable + old_reftable_size, 0, (reftable_size - old_reftable_size) * sizeof(uint64_t)); /* The offset we have for the reftable is now no longer valid; * this will leak that range, but we can easily fix that by running * a leak-fixing check after this rebuild operation */ reftable_offset = -1; } on_disk_reftable[refblock_index] = refblock_offset; /* If this is apparently the last refblock (for now), try to squeeze the * reftable in */ if (refblock_index == (*nb_clusters - 1) >> s->refcount_block_bits && reftable_offset < 0) { uint64_t reftable_clusters = size_to_clusters(s, reftable_size * sizeof(uint64_t)); reftable_offset = alloc_clusters_imrt(bs, reftable_clusters, refcount_table, nb_clusters, &first_free_cluster); if (reftable_offset < 0) { fprintf(stderr, \"ERROR allocating reftable: %s\\n\", strerror(-reftable_offset)); res->check_errors++; ret = reftable_offset; goto fail; } } ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset, s->cluster_size); if (ret < 0) { fprintf(stderr, \"ERROR writing 
refblock: %s\\n\", strerror(-ret)); goto fail; } /* The size of *refcount_table is always cluster-aligned, therefore the * write operation will not overflow */ on_disk_refblock = (void *)((char *) *refcount_table + refblock_index * s->cluster_size); ret = bdrv_write(bs->file, refblock_offset / BDRV_SECTOR_SIZE, on_disk_refblock, s->cluster_sectors); if (ret < 0) { fprintf(stderr, \"ERROR writing refblock: %s\\n\", strerror(-ret)); goto fail; } /* Go to the end of this refblock */ cluster = refblock_start + s->refcount_block_size - 1; } if (reftable_offset < 0) { uint64_t post_refblock_start, reftable_clusters; post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size); reftable_clusters = size_to_clusters(s, reftable_size * sizeof(uint64_t)); /* Not pretty but simple */ if (first_free_cluster < post_refblock_start) { first_free_cluster = post_refblock_start; } reftable_offset = alloc_clusters_imrt(bs, reftable_clusters, refcount_table, nb_clusters, &first_free_cluster); if (reftable_offset < 0) { fprintf(stderr, \"ERROR allocating reftable: %s\\n\", strerror(-reftable_offset)); res->check_errors++; ret = reftable_offset; goto fail; } goto write_refblocks; } assert(on_disk_reftable); for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) { cpu_to_be64s(&on_disk_reftable[refblock_index]); } ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset, reftable_size * sizeof(uint64_t)); if (ret < 0) { fprintf(stderr, \"ERROR writing reftable: %s\\n\", strerror(-ret)); goto fail; } assert(reftable_size < INT_MAX / sizeof(uint64_t)); ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable, reftable_size * sizeof(uint64_t)); if (ret < 0) { fprintf(stderr, \"ERROR writing reftable: %s\\n\", strerror(-ret)); goto fail; } /* Enter new reftable into the image header */ reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset); reftable_offset_and_clusters.reftable_clusters = cpu_to_be32(size_to_clusters(s, reftable_size * sizeof(uint64_t))); ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset), &reftable_offset_and_clusters, sizeof(reftable_offset_and_clusters)); if (ret < 0) { fprintf(stderr, \"ERROR setting reftable: %s\\n\", strerror(-ret)); goto fail; } for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) { be64_to_cpus(&on_disk_reftable[refblock_index]); } s->refcount_table = on_disk_reftable; s->refcount_table_offset = reftable_offset; s->refcount_table_size = reftable_size; update_max_refcount_table_index(s); return 0; fail: g_free(on_disk_reftable); return ret; }"} {"target": 0, "idx": 14306, "func": "static int synchronize_audio(VideoState *is, short *samples, int samples_size1, double pts) { int n, samples_size; double ref_clock; n = 2 * is->audio_st->codec->channels; samples_size = samples_size1; /* if not master, then we try to remove or add samples to correct the clock */ if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) { double diff, avg_diff; int wanted_size, min_size, max_size, nb_samples; ref_clock = get_master_clock(is); diff = get_audio_clock(is) - ref_clock; if (diff < AV_NOSYNC_THRESHOLD) { is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum; if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) { /* not enough measures to have a correct estimate */ is->audio_diff_avg_count++; } else { /* estimate the A-V difference */ avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef); if (fabs(avg_diff) >= 
is->audio_diff_threshold) { wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n); nb_samples = samples_size / n; min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n; max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n; if (wanted_size < min_size) wanted_size = min_size; else if (wanted_size > max_size) wanted_size = max_size; /* add or remove samples to correction the synchro */ if (wanted_size < samples_size) { /* remove samples */ samples_size = wanted_size; } else if (wanted_size > samples_size) { uint8_t *samples_end, *q; int nb; /* add samples */ nb = (samples_size - wanted_size); samples_end = (uint8_t *)samples + samples_size - n; q = samples_end + n; while (nb > 0) { memcpy(q, samples_end, n); q += n; nb -= n; } samples_size = wanted_size; } } av_dlog(NULL, \"diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\\n\", diff, avg_diff, samples_size - samples_size1, is->audio_clock, is->video_clock, is->audio_diff_threshold); } } else { /* too big difference : may be initial PTS errors, so reset A-V filter */ is->audio_diff_avg_count = 0; is->audio_diff_cum = 0; } } return samples_size; }"} {"target": 1, "idx": 14308, "func": "av_cold int ff_ivi_init_planes(IVIPlaneDesc *planes, const IVIPicConfig *cfg, int is_indeo4) { int p, b; uint32_t b_width, b_height, align_fac, width_aligned, height_aligned, buf_size; IVIBandDesc *band; ivi_free_buffers(planes); if (av_image_check_size(cfg->pic_width, cfg->pic_height, 0, NULL) < 0 || cfg->luma_bands < 1 || cfg->chroma_bands < 1) return AVERROR_INVALIDDATA; /* fill in the descriptor of the luminance plane */ planes[0].width = cfg->pic_width; planes[0].height = cfg->pic_height; planes[0].num_bands = cfg->luma_bands; /* fill in the descriptors of the chrominance planes */ planes[1].width = planes[2].width = (cfg->pic_width + 3) >> 2; planes[1].height = planes[2].height = (cfg->pic_height + 3) >> 2; planes[1].num_bands = planes[2].num_bands = cfg->chroma_bands; for (p = 0; p < 3; p++) { planes[p].bands = av_mallocz_array(planes[p].num_bands, sizeof(IVIBandDesc)); if (!planes[p].bands) return AVERROR(ENOMEM); /* select band dimensions: if there is only one band then it * has the full size, if there are several bands each of them * has only half size */ b_width = planes[p].num_bands == 1 ? planes[p].width : (planes[p].width + 1) >> 1; b_height = planes[p].num_bands == 1 ? planes[p].height : (planes[p].height + 1) >> 1; /* luma band buffers will be aligned on 16x16 (max macroblock size) */ /* chroma band buffers will be aligned on 8x8 (max macroblock size) */ align_fac = p ? 
8 : 16; width_aligned = FFALIGN(b_width , align_fac); height_aligned = FFALIGN(b_height, align_fac); buf_size = width_aligned * height_aligned * sizeof(int16_t); for (b = 0; b < planes[p].num_bands; b++) { band = &planes[p].bands[b]; /* select appropriate plane/band */ band->plane = p; band->band_num = b; band->width = b_width; band->height = b_height; band->pitch = width_aligned; band->aheight = height_aligned; band->bufs[0] = av_mallocz(buf_size); band->bufs[1] = av_mallocz(buf_size); band->bufsize = buf_size/2; if (!band->bufs[0] || !band->bufs[1]) return AVERROR(ENOMEM); /* allocate the 3rd band buffer for scalability mode */ if (cfg->luma_bands > 1) { band->bufs[2] = av_mallocz(buf_size); if (!band->bufs[2]) return AVERROR(ENOMEM); } if (is_indeo4) { band->bufs[3] = av_mallocz(buf_size); if (!band->bufs[3]) return AVERROR(ENOMEM); } /* reset custom vlc */ planes[p].bands[0].blk_vlc.cust_desc.num_rows = 0; } } return 0; }"} {"target": 1, "idx": 14309, "func": "static av_cold int seqvideo_decode_init(AVCodecContext *avctx) { SeqVideoContext *seq = avctx->priv_data; seq->avctx = avctx; avctx->pix_fmt = AV_PIX_FMT_PAL8; seq->frame = av_frame_alloc(); if (!seq->frame) return AVERROR(ENOMEM); return 0; }"} {"target": 1, "idx": 14324, "func": "static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque, bool is_write) { Coroutine *co; BlockAIOCBCoroutine *acb; acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque); acb->child = child; acb->need_bh = true; acb->req.error = -EINPROGRESS; acb->req.sector = sector_num; acb->req.nb_sectors = nb_sectors; acb->req.qiov = qiov; acb->req.flags = flags; acb->is_write = is_write; co = qemu_coroutine_create(bdrv_co_do_rw); qemu_coroutine_enter(co, acb); bdrv_co_maybe_schedule_bh(acb); return &acb->common; }"} {"target": 1, "idx": 14350, "func": "static int standard_decode_picture_secondary_header(VC9Context *v) { GetBitContext *gb = &v->s.gb; int status = 0, index; switch (v->s.pict_type) { case P_TYPE: status = decode_p_picture_secondary_header(v); break; case B_TYPE: status = decode_b_picture_secondary_header(v); break; case BI_TYPE: case I_TYPE: break; //Nothing needed as it's done in the epilog } if (status < 0) return FRAME_SKIPED; /* AC Syntax */ v->ac_table_level = decode012(gb); if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE) { v->ac2_table_level = decode012(gb); } /* DC Syntax */ index = decode012(gb); v->luma_dc_vlc = &ff_msmp4_dc_luma_vlc[index]; v->chroma_dc_vlc = &ff_msmp4_dc_chroma_vlc[index]; return 0; }"} {"target": 1, "idx": 14356, "func": "static void ccw_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); NMIClass *nc = NMI_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); S390CcwMachineClass *s390mc = S390_MACHINE_CLASS(mc); s390mc->ri_allowed = true; s390mc->cpu_model_allowed = true; s390mc->css_migration_enabled = true; s390mc->gs_allowed = true; mc->init = ccw_init; mc->reset = s390_machine_reset; mc->hot_add_cpu = s390_hot_add_cpu; mc->block_default_type = IF_VIRTIO; mc->no_cdrom = 1; mc->no_floppy = 1; mc->no_serial = 1; mc->no_parallel = 1; mc->no_sdcard = 1; mc->use_sclp = 1; mc->max_cpus = S390_MAX_CPUS; mc->has_hotpluggable_cpus = true; mc->get_hotplug_handler = s390_get_hotplug_handler; mc->cpu_index_to_instance_props = s390_cpu_index_to_props; mc->possible_cpu_arch_ids = s390_possible_cpu_arch_ids; /* it is overridden with 'host' cpu *in 
kvm_arch_init* */ mc->default_cpu_type = S390_CPU_TYPE_NAME(\"qemu\"); hc->plug = s390_machine_device_plug; hc->unplug_request = s390_machine_device_unplug_request; nc->nmi_monitor_handler = s390_nmi; }"} {"target": 1, "idx": 14359, "func": "static int write_cvid_header(CinepakEncContext *s, unsigned char *buf, int num_strips, int data_size) { buf[0] = 0; AV_WB24(&buf[1], data_size + CVID_HEADER_SIZE); AV_WB16(&buf[4], s->w); AV_WB16(&buf[6], s->h); AV_WB16(&buf[8], num_strips); return CVID_HEADER_SIZE; }"} {"target": 1, "idx": 14362, "func": "static av_cold int mpeg4video_parse_init(AVCodecParserContext *s) { ParseContext1 *pc = s->priv_data; pc->enc = av_mallocz(sizeof(MpegEncContext)); if (!pc->enc) return -1; pc->first_picture = 1; return 0; }"} {"target": 1, "idx": 14366, "func": "const AVOption *av_opt_next(void *obj, const AVOption *last) { AVClass *class = *(AVClass**)obj; if (!last && class->option[0].name) return class->option; if (last && last[1].name) return ++last; return NULL; }"} {"target": 1, "idx": 14372, "func": "static void do_audio_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVFrame *decoded_frame) { uint8_t *buftmp; int size_out, frame_bytes, resample_changed, ret; AVCodecContext *enc = ost->st->codec; AVCodecContext *dec = ist->st->codec; int osize = av_get_bytes_per_sample(enc->sample_fmt); int isize = av_get_bytes_per_sample(dec->sample_fmt); uint8_t *buf = decoded_frame->data[0]; int size = decoded_frame->nb_samples * dec->channels * isize; int out_linesize = 0; int buf_linesize = decoded_frame->linesize[0]; get_default_channel_layouts(ost, ist); if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples, &out_linesize) < 0) { av_log(NULL, AV_LOG_FATAL, \"Error allocating audio buffer\\n\"); exit_program(1); } if (audio_sync_method > 1 || enc->channels != dec->channels || enc->channel_layout != dec->channel_layout || enc->sample_rate != dec->sample_rate || dec->sample_fmt != enc->sample_fmt) ost->audio_resample = 1; resample_changed = ost->resample_sample_fmt != dec->sample_fmt || ost->resample_channels != dec->channels || ost->resample_channel_layout != dec->channel_layout || ost->resample_sample_rate != dec->sample_rate; if ((ost->audio_resample && !ost->avr) || resample_changed) { if (resample_changed) { av_log(NULL, AV_LOG_INFO, \"Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:0x%\"PRIx64\" to rate:%d fmt:%s ch:%d chl:0x%\"PRIx64\"\\n\", ist->file_index, ist->st->index, ost->resample_sample_rate, av_get_sample_fmt_name(ost->resample_sample_fmt), ost->resample_channels, ost->resample_channel_layout, dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt), dec->channels, dec->channel_layout); ost->resample_sample_fmt = dec->sample_fmt; ost->resample_channels = dec->channels; ost->resample_channel_layout = dec->channel_layout; ost->resample_sample_rate = dec->sample_rate; if (ost->avr) avresample_close(ost->avr); } /* if audio_sync_method is >1 the resampler is needed for audio drift compensation */ if (audio_sync_method <= 1 && ost->resample_sample_fmt == enc->sample_fmt && ost->resample_channels == enc->channels && ost->resample_channel_layout == enc->channel_layout && ost->resample_sample_rate == enc->sample_rate) { ost->audio_resample = 0; } else if (ost->audio_resample) { if (!ost->avr) { ost->avr = avresample_alloc_context(); if (!ost->avr) { av_log(NULL, AV_LOG_FATAL, \"Error allocating context for libavresample\\n\"); exit_program(1); } } av_opt_set_int(ost->avr, \"in_channel_layout\", dec->channel_layout, 0); 
av_opt_set_int(ost->avr, \"in_sample_fmt\", dec->sample_fmt, 0); av_opt_set_int(ost->avr, \"in_sample_rate\", dec->sample_rate, 0); av_opt_set_int(ost->avr, \"out_channel_layout\", enc->channel_layout, 0); av_opt_set_int(ost->avr, \"out_sample_fmt\", enc->sample_fmt, 0); av_opt_set_int(ost->avr, \"out_sample_rate\", enc->sample_rate, 0); if (audio_sync_method > 1) av_opt_set_int(ost->avr, \"force_resampling\", 1, 0); /* if both the input and output formats are s16 or u8, use s16 as the internal sample format */ if (av_get_bytes_per_sample(dec->sample_fmt) <= 2 && av_get_bytes_per_sample(enc->sample_fmt) <= 2) { av_opt_set_int(ost->avr, \"internal_sample_fmt\", AV_SAMPLE_FMT_S16P, 0); } ret = avresample_open(ost->avr); if (ret < 0) { av_log(NULL, AV_LOG_FATAL, \"Error opening libavresample\\n\"); exit_program(1); } } } if (audio_sync_method > 0) { double delta = get_sync_ipts(ost, ist->last_dts) * enc->sample_rate - ost->sync_opts - av_fifo_size(ost->fifo) / (enc->channels * osize); int idelta = delta * dec->sample_rate / enc->sample_rate; int byte_delta = idelta * isize * dec->channels; // FIXME resample delay if (fabs(delta) > 50) { if (ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate) { if (byte_delta < 0) { byte_delta = FFMAX(byte_delta, -size); size += byte_delta; buf -= byte_delta; av_log(NULL, AV_LOG_VERBOSE, \"discarding %d audio samples\\n\", -byte_delta / (isize * dec->channels)); if (!size) return; ist->is_start = 0; } else { av_fast_malloc(&async_buf, &allocated_async_buf_size, byte_delta + size); if (!async_buf) { av_log(NULL, AV_LOG_FATAL, \"Out of memory in do_audio_out\\n\"); exit_program(1); } if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples + idelta, &out_linesize) < 0) { av_log(NULL, AV_LOG_FATAL, \"Error allocating audio buffer\\n\"); exit_program(1); } ist->is_start = 0; generate_silence(async_buf, dec->sample_fmt, byte_delta); memcpy(async_buf + byte_delta, buf, size); buf = async_buf; size += byte_delta; buf_linesize = allocated_async_buf_size; av_log(NULL, AV_LOG_VERBOSE, \"adding %d audio samples of silence\\n\", idelta); } } else if (audio_sync_method > 1) { int comp = av_clip(delta, -audio_sync_method, audio_sync_method); av_log(NULL, AV_LOG_VERBOSE, \"compensating audio timestamp drift:%f compensation:%d in:%d\\n\", delta, comp, enc->sample_rate); // fprintf(stderr, \"drift:%f len:%d opts:%\"PRId64\" ipts:%\"PRId64\" fifo:%d\\n\", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2)); avresample_set_compensation(ost->avr, comp, enc->sample_rate); } } } else if (audio_sync_method == 0) ost->sync_opts = lrintf(get_sync_ipts(ost, ist->last_dts) * enc->sample_rate) - av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong if (ost->audio_resample) { buftmp = audio_buf; size_out = avresample_convert(ost->avr, (void **)&buftmp, allocated_audio_buf_size, out_linesize, (void **)&buf, buf_linesize, size / (dec->channels * isize)); size_out = size_out * enc->channels * osize; } else { buftmp = buf; size_out = size; } /* now encode as many frames as possible */ if (!(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) { /* output resampled raw samples */ if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) { av_log(NULL, AV_LOG_FATAL, \"av_fifo_realloc2() failed\\n\"); exit_program(1); } av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL); frame_bytes = enc->frame_size * osize * enc->channels; while 
(av_fifo_size(ost->fifo) >= frame_bytes) { av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL); encode_audio_frame(s, ost, audio_buf, frame_bytes); } } else { encode_audio_frame(s, ost, buftmp, size_out); } }"} {"target": 1, "idx": 14376, "func": "static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags) { switch (shiftop) { case 0: /* LSL */ if (shift != 0) { if (flags) shifter_out_im(var, 32 - shift); tcg_gen_shli_i32(var, var, shift); } break; case 1: /* LSR */ if (shift == 0) { if (flags) { tcg_gen_shri_i32(var, var, 31); gen_set_CF(var); } tcg_gen_movi_i32(var, 0); } else { if (flags) shifter_out_im(var, shift - 1); tcg_gen_shri_i32(var, var, shift); } break; case 2: /* ASR */ if (shift == 0) shift = 32; if (flags) shifter_out_im(var, shift - 1); if (shift == 32) shift = 31; tcg_gen_sari_i32(var, var, shift); break; case 3: /* ROR/RRX */ if (shift != 0) { if (flags) shifter_out_im(var, shift - 1); tcg_gen_rotri_i32(var, var, shift); break; } else { TCGv tmp = load_cpu_field(CF); if (flags) shifter_out_im(var, 0); tcg_gen_shri_i32(var, var, 1); tcg_gen_shli_i32(tmp, tmp, 31); tcg_gen_or_i32(var, var, tmp); dead_tmp(tmp); } } };"} {"target": 1, "idx": 14379, "func": "static void vmxnet3_update_vlan_filters(VMXNET3State *s) { int i; /* Copy configuration from shared memory */ VMXNET3_READ_DRV_SHARED(s->drv_shmem, devRead.rxFilterConf.vfTable, s->vlan_table, sizeof(s->vlan_table)); /* Invert byte order when needed */ for (i = 0; i < ARRAY_SIZE(s->vlan_table); i++) { s->vlan_table[i] = le32_to_cpu(s->vlan_table[i]); } /* Dump configuration for debugging purposes */ VMW_CFPRN(\"Configured VLANs:\"); for (i = 0; i < sizeof(s->vlan_table) * 8; i++) { if (VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, i)) { VMW_CFPRN(\"\\tVLAN %d is present\", i); } } }"} {"target": 0, "idx": 14387, "func": "static void arm_timer_recalibrate(arm_timer_state *s, int reload) { uint32_t limit; if ((s->control & TIMER_CTRL_PERIODIC) == 0) { /* Free running. */ if (s->control & TIMER_CTRL_32BIT) limit = 0xffffffff; else limit = 0xffff; } else { /* Periodic. 
*/ limit = s->limit; } ptimer_set_limit(s->timer, limit, reload); }"} {"target": 0, "idx": 14388, "func": "static void term_down_char(void) { if (term_hist_entry == TERM_MAX_CMDS - 1 || term_hist_entry == -1) return; if (term_history[++term_hist_entry] != NULL) { pstrcpy(term_cmd_buf, sizeof(term_cmd_buf), term_history[term_hist_entry]); } else { term_hist_entry = -1; } term_cmd_buf_index = term_cmd_buf_size = strlen(term_cmd_buf); }"} {"target": 0, "idx": 14393, "func": "int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size) { return kvm_dirty_pages_log_change(phys_addr, size, KVM_MEM_LOG_DIRTY_PAGES, KVM_MEM_LOG_DIRTY_PAGES); }"} {"target": 0, "idx": 14413, "func": "static void do_log(int argc, const char **argv) { int mask; if (argc != 2) goto help; if (!strcmp(argv[1], \"none\")) { mask = 0; } else { mask = cpu_str_to_log_mask(argv[1]); if (!mask) { help: help_cmd(argv[0]); return; } } cpu_set_log(mask); }"} {"target": 0, "idx": 14442, "func": "static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread) { HEVCContext *s = avctxt->priv_data; int ctb_size = 1 << s->sps->log2_ctb_size; int more_data = 1; int x_ctb = 0; int y_ctb = 0; int ctb_addr_ts = s->pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs]; if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) { av_log(s->avctx, AV_LOG_ERROR, \"Impossible initial tile.\\n\"); return AVERROR_INVALIDDATA; } if (s->sh.dependent_slice_segment_flag) { int prev_rs = s->pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1]; if (s->tab_slice_address[prev_rs] == -1) { av_log(s->avctx, AV_LOG_ERROR, \"Previous slice segment missing\\n\"); return AVERROR_INVALIDDATA; } } while (more_data && ctb_addr_ts < s->sps->ctb_size) { int ctb_addr_rs = s->pps->ctb_addr_ts_to_rs[ctb_addr_ts]; x_ctb = (ctb_addr_rs % ((s->sps->width + ctb_size - 1) >> s->sps->log2_ctb_size)) << s->sps->log2_ctb_size; y_ctb = (ctb_addr_rs / ((s->sps->width + ctb_size - 1) >> s->sps->log2_ctb_size)) << s->sps->log2_ctb_size; hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts); ff_hevc_cabac_init(s, ctb_addr_ts); hls_sao_param(s, x_ctb >> s->sps->log2_ctb_size, y_ctb >> s->sps->log2_ctb_size); s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset; s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset; s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag; more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->sps->log2_ctb_size, 0); if (more_data < 0) { s->tab_slice_address[ctb_addr_rs] = -1; return more_data; } ctb_addr_ts++; ff_hevc_save_states(s, ctb_addr_ts); ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size); } if (x_ctb + ctb_size >= s->sps->width && y_ctb + ctb_size >= s->sps->height) ff_hevc_hls_filter(s, x_ctb, y_ctb); return ctb_addr_ts; }"} {"target": 1, "idx": 14459, "func": "static int decode_header(MPADecodeContext *s, UINT32 header) { int sample_rate, frame_size, mpeg25, padding; int sample_rate_index, bitrate_index; if (header & (1<<20)) { s->lsf = (header & (1<<19)) ? 
0 : 1; mpeg25 = 0; } else { s->lsf = 1; mpeg25 = 1; } s->layer = 4 - ((header >> 17) & 3); /* extract frequency */ sample_rate_index = (header >> 10) & 3; sample_rate = mpa_freq_tab[sample_rate_index] >> (s->lsf + mpeg25); if (sample_rate == 0) return 1; sample_rate_index += 3 * (s->lsf + mpeg25); s->sample_rate_index = sample_rate_index; s->error_protection = ((header >> 16) & 1) ^ 1; bitrate_index = (header >> 12) & 0xf; padding = (header >> 9) & 1; //extension = (header >> 8) & 1; s->mode = (header >> 6) & 3; s->mode_ext = (header >> 4) & 3; //copyright = (header >> 3) & 1; //original = (header >> 2) & 1; //emphasis = header & 3; if (s->mode == MPA_MONO) s->nb_channels = 1; else s->nb_channels = 2; if (bitrate_index != 0) { frame_size = mpa_bitrate_tab[s->lsf][s->layer - 1][bitrate_index]; s->bit_rate = frame_size * 1000; switch(s->layer) { case 1: frame_size = (frame_size * 12000) / sample_rate; frame_size = (frame_size + padding) * 4; break; case 2: frame_size = (frame_size * 144000) / sample_rate; frame_size += padding; break; default: case 3: frame_size = (frame_size * 144000) / (sample_rate << s->lsf); frame_size += padding; break; } s->frame_size = frame_size; } else { /* if no frame size computed, signal it */ if (!s->free_format_frame_size) return 1; /* free format: compute bitrate and real frame size from the frame size we extracted by reading the bitstream */ s->frame_size = s->free_format_frame_size; switch(s->layer) { case 1: s->frame_size += padding * 4; s->bit_rate = (s->frame_size * sample_rate) / 48000; break; case 2: s->frame_size += padding; s->bit_rate = (s->frame_size * sample_rate) / 144000; break; default: case 3: s->frame_size += padding; s->bit_rate = (s->frame_size * (sample_rate << s->lsf)) / 144000; break; } } s->sample_rate = sample_rate; #if defined(DEBUG) printf(\"layer%d, %d Hz, %d kbits/s, \", s->layer, s->sample_rate, s->bit_rate); if (s->nb_channels == 2) { if (s->layer == 3) { if (s->mode_ext & MODE_EXT_MS_STEREO) printf(\"ms-\"); if (s->mode_ext & MODE_EXT_I_STEREO) printf(\"i-\"); } printf(\"stereo\"); } else { printf(\"mono\"); } printf(\"\\n\"); #endif return 0; }"} {"target": 1, "idx": 14469, "func": "static av_cold int init(AVFilterContext *ctx, const char *args) { GradFunContext *gf = ctx->priv; float thresh = 1.2; int radius = 16; if (args) sscanf(args, \"%f:%d\", &thresh, &radius); thresh = av_clipf(thresh, 0.51, 255); gf->thresh = (1 << 15) / thresh; gf->radius = av_clip((radius + 1) & ~1, 4, 32); gf->blur_line = ff_gradfun_blur_line_c; gf->filter_line = ff_gradfun_filter_line_c; if (ARCH_X86) ff_gradfun_init_x86(gf); av_log(ctx, AV_LOG_VERBOSE, \"threshold:%.2f radius:%d\\n\", thresh, gf->radius); return 0; }"} {"target": 0, "idx": 14476, "func": "static int kvm_client_migration_log(struct CPUPhysMemoryClient *client, int enable) { return kvm_set_migration_log(enable); }"} {"target": 0, "idx": 14499, "func": "static int64_t rtmp_read_seek(URLContext *s, int stream_index, int64_t timestamp, int flags) { RTMP *r = s->priv_data; if (flags & AVSEEK_FLAG_BYTE) return AVERROR(ENOSYS); /* seeks are in milliseconds */ timestamp = av_rescale(timestamp, AV_TIME_BASE, 1000); if (!RTMP_SendSeek(r, timestamp)) return -1; return timestamp; }"} {"target": 0, "idx": 14508, "func": "static av_cold int mace_decode_init(AVCodecContext * avctx) { MACEContext *ctx = avctx->priv_data; if (avctx->channels > 2) return -1; avctx->sample_fmt = AV_SAMPLE_FMT_S16; avcodec_get_frame_defaults(&ctx->frame); avctx->coded_frame = &ctx->frame; return 0; }"} {"target": 1, 
"idx": 14521, "func": "float16 float32_to_float16(float32 a, flag ieee STATUS_PARAM) { flag aSign; int_fast16_t aExp; uint32_t aSig; uint32_t mask; uint32_t increment; int8 roundingMode; a = float32_squash_input_denormal(a STATUS_VAR); aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); if ( aExp == 0xFF ) { if (aSig) { /* Input is a NaN */ float16 r = commonNaNToFloat16( float32ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); if (!ieee) { return packFloat16(aSign, 0, 0); } return r; } /* Infinity */ if (!ieee) { float_raise(float_flag_invalid STATUS_VAR); return packFloat16(aSign, 0x1f, 0x3ff); } return packFloat16(aSign, 0x1f, 0); } if (aExp == 0 && aSig == 0) { return packFloat16(aSign, 0, 0); } /* Decimal point between bits 22 and 23. */ aSig |= 0x00800000; aExp -= 0x7f; if (aExp < -14) { mask = 0x00ffffff; if (aExp >= -24) { mask >>= 25 + aExp; } } else { mask = 0x00001fff; } if (aSig & mask) { float_raise( float_flag_underflow STATUS_VAR ); roundingMode = STATUS(float_rounding_mode); switch (roundingMode) { case float_round_nearest_even: increment = (mask + 1) >> 1; if ((aSig & mask) == increment) { increment = aSig & (increment << 1); } break; case float_round_up: increment = aSign ? 0 : mask; break; case float_round_down: increment = aSign ? mask : 0; break; default: /* round_to_zero */ increment = 0; break; } aSig += increment; if (aSig >= 0x01000000) { aSig >>= 1; aExp++; } } else if (aExp < -14 && STATUS(float_detect_tininess) == float_tininess_before_rounding) { float_raise( float_flag_underflow STATUS_VAR); } if (ieee) { if (aExp > 15) { float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR); return packFloat16(aSign, 0x1f, 0); } } else { if (aExp > 16) { float_raise(float_flag_invalid | float_flag_inexact STATUS_VAR); return packFloat16(aSign, 0x1f, 0x3ff); } } if (aExp < -24) { return packFloat16(aSign, 0, 0); } if (aExp < -14) { aSig >>= -14 - aExp; aExp = -14; } return packFloat16(aSign, aExp + 14, aSig >> 13); }"} {"target": 0, "idx": 14531, "func": "int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes) { unsigned int idx; unsigned int total_bufs, in_total, out_total; idx = vq->last_avail_idx; total_bufs = in_total = out_total = 0; while (virtqueue_num_heads(vq, idx)) { unsigned int max, num_bufs, indirect = 0; target_phys_addr_t desc_pa; int i; max = vq->vring.num; num_bufs = total_bufs; i = virtqueue_get_head(vq, idx++); desc_pa = vq->vring.desc; if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) { if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) { error_report(\"Invalid size for indirect buffer table\"); exit(1); } /* If we've got too many, that implies a descriptor loop. */ if (num_bufs >= max) { error_report(\"Looped descriptor\"); exit(1); } /* loop over the indirect descriptor table */ indirect = 1; max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc); num_bufs = i = 0; desc_pa = vring_desc_addr(desc_pa, i); } do { /* If we've got too many, that implies a descriptor loop. 
*/ if (++num_bufs > max) { error_report(\"Looped descriptor\"); exit(1); } if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) { if (in_bytes > 0 && (in_total += vring_desc_len(desc_pa, i)) >= in_bytes) return 1; } else { if (out_bytes > 0 && (out_total += vring_desc_len(desc_pa, i)) >= out_bytes) return 1; } } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max); if (!indirect) total_bufs = num_bufs; else total_bufs++; } return 0; }"} {"target": 0, "idx": 14557, "func": "static CharDriverState *qemu_chr_open_fd(int fd_in, int fd_out) { CharDriverState *chr; FDCharDriver *s; chr = g_malloc0(sizeof(CharDriverState)); s = g_malloc0(sizeof(FDCharDriver)); s->fd_in = io_channel_from_fd(fd_in); s->fd_out = io_channel_from_fd(fd_out); fcntl(fd_out, F_SETFL, O_NONBLOCK); s->chr = chr; chr->opaque = s; chr->chr_add_watch = fd_chr_add_watch; chr->chr_write = fd_chr_write; chr->chr_update_read_handler = fd_chr_update_read_handler; chr->chr_close = fd_chr_close; qemu_chr_be_generic_open(chr); return chr; }"} {"target": 0, "idx": 14566, "func": "static int g722_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { G722Context *c = avctx->priv_data; const int16_t *samples = (const int16_t *)frame->data[0]; int nb_samples, out_size, ret; out_size = (frame->nb_samples + 1) / 2; if ((ret = ff_alloc_packet2(avctx, avpkt, out_size))) return ret; nb_samples = frame->nb_samples - (frame->nb_samples & 1); if (avctx->trellis) g722_encode_trellis(c, avctx->trellis, avpkt->data, nb_samples, samples); else g722_encode_no_trellis(c, avpkt->data, nb_samples, samples); /* handle last frame with odd frame_size */ if (nb_samples < frame->nb_samples) { int16_t last_samples[2] = { samples[nb_samples], samples[nb_samples] }; encode_byte(c, &avpkt->data[nb_samples >> 1], last_samples); } if (frame->pts != AV_NOPTS_VALUE) avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->delay); *got_packet_ptr = 1; return 0; }"} {"target": 0, "idx": 14588, "func": "static int gif_image_write_image(AVCodecContext *avctx, uint8_t **bytestream, uint8_t *end, const uint32_t *palette, const uint8_t *buf, const int linesize, AVPacket *pkt) { GIFContext *s = avctx->priv_data; int len = 0, height = avctx->height, width = avctx->width, x, y; int x_start = 0, y_start = 0, trans = s->transparent_index; int honor_transparency = (s->flags & GF_TRANSDIFF) && s->last_frame; const uint8_t *ptr; /* Crop image */ if ((s->flags & GF_OFFSETTING) && s->last_frame && !palette) { const uint8_t *ref = s->last_frame->data[0]; const int ref_linesize = s->last_frame->linesize[0]; int x_end = avctx->width - 1, y_end = avctx->height - 1; /* skip common lines */ while (y_start < y_end) { if (memcmp(ref + y_start*ref_linesize, buf + y_start*linesize, width)) break; y_start++; } while (y_end > y_start) { if (memcmp(ref + y_end*ref_linesize, buf + y_end*linesize, width)) break; y_end--; } height = y_end + 1 - y_start; /* skip common columns */ while (x_start < x_end) { int same_column = 1; for (y = y_start; y <= y_end; y++) { if (ref[y*ref_linesize + x_start] != buf[y*linesize + x_start]) { same_column = 0; break; } } if (!same_column) break; x_start++; } while (x_end > x_start) { int same_column = 1; for (y = y_start; y <= y_end; y++) { if (ref[y*ref_linesize + x_end] != buf[y*linesize + x_end]) { same_column = 0; break; } } if (!same_column) break; x_end--; } width = x_end + 1 - x_start; av_log(avctx, AV_LOG_DEBUG,\"%dx%d image at pos (%d;%d) [area:%dx%d]\\n\", width, height, x_start, y_start, 
avctx->width, avctx->height); } /* image block */ bytestream_put_byte(bytestream, GIF_IMAGE_SEPARATOR); bytestream_put_le16(bytestream, x_start); bytestream_put_le16(bytestream, y_start); bytestream_put_le16(bytestream, width); bytestream_put_le16(bytestream, height); if (!palette) { bytestream_put_byte(bytestream, 0x00); /* flags */ } else { unsigned i; bytestream_put_byte(bytestream, 1<<7 | 0x7); /* flags */ for (i = 0; i < AVPALETTE_COUNT; i++) { const uint32_t v = palette[i]; bytestream_put_be24(bytestream, v); } } if (honor_transparency && trans < 0) { trans = pick_palette_entry(buf + y_start*linesize + x_start, linesize, width, height); if (trans < 0) { // TODO, patch welcome av_log(avctx, AV_LOG_DEBUG, \"No available color, can not use transparency\\n\"); } else { uint8_t *pal_exdata = s->pal_exdata; if (!pal_exdata) pal_exdata = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE); if (!pal_exdata) return AVERROR(ENOMEM); memcpy(pal_exdata, s->palette, AVPALETTE_SIZE); pal_exdata[trans*4 + 3*!HAVE_BIGENDIAN] = 0x00; } } if (trans < 0) honor_transparency = 0; bytestream_put_byte(bytestream, 0x08); ff_lzw_encode_init(s->lzw, s->buf, 2 * width * height, 12, FF_LZW_GIF, put_bits); ptr = buf + y_start*linesize + x_start; if (honor_transparency) { const int ref_linesize = s->last_frame->linesize[0]; const uint8_t *ref = s->last_frame->data[0] + y_start*ref_linesize + x_start; for (y = 0; y < height; y++) { memcpy(s->tmpl, ptr, width); for (x = 0; x < width; x++) if (ref[x] == ptr[x]) s->tmpl[x] = trans; len += ff_lzw_encode(s->lzw, s->tmpl, width); ptr += linesize; ref += ref_linesize; } } else { for (y = 0; y < height; y++) { len += ff_lzw_encode(s->lzw, ptr, width); ptr += linesize; } } len += ff_lzw_encode_flush(s->lzw, flush_put_bits); ptr = s->buf; while (len > 0) { int size = FFMIN(255, len); bytestream_put_byte(bytestream, size); if (end - *bytestream < size) return -1; bytestream_put_buffer(bytestream, ptr, size); ptr += size; len -= size; } bytestream_put_byte(bytestream, 0x00); /* end of image block */ return 0; }"} {"target": 0, "idx": 14589, "func": "static int openfile(char *name, int flags) { if (bs) { fprintf(stderr, \"file open already, try 'help close'\\n\"); return 1; } bs = bdrv_new(\"hda\"); if (!bs) return 1; if (bdrv_open(bs, name, flags) == -1) { fprintf(stderr, \"%s: can't open device %s\\n\", progname, name); bs = NULL; return 1; } return 0; }"} {"target": 0, "idx": 14591, "func": "static void e500_pcihost_bridge_realize(PCIDevice *d, Error **errp) { PPCE500PCIBridgeState *b = PPC_E500_PCI_BRIDGE(d); PPCE500CCSRState *ccsr = CCSR(container_get(qdev_get_machine(), \"/e500-ccsr\")); pci_config_set_class(d->config, PCI_CLASS_BRIDGE_PCI); d->config[PCI_HEADER_TYPE] = (d->config[PCI_HEADER_TYPE] & PCI_HEADER_TYPE_MULTI_FUNCTION) | PCI_HEADER_TYPE_BRIDGE; memory_region_init_alias(&b->bar0, OBJECT(ccsr), \"e500-pci-bar0\", &ccsr->ccsr_space, 0, int128_get64(ccsr->ccsr_space.size)); pci_register_bar(d, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &b->bar0); }"} {"target": 1, "idx": 14599, "func": "void ff_clear_fixed_vector(float *out, const AMRFixed *in, int size) { int i; for (i=0; i < in->n; i++) { int x = in->x[i], repeats = !((in->no_repeat_mask >> i) & 1); do { out[x] = 0.0; x += in->pitch_lag; } while (x < size && repeats); } }"} {"target": 1, "idx": 14612, "func": "static int film_read_header(AVFormatContext *s, AVFormatParameters *ap) { FilmDemuxContext *film = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; unsigned char scratch[256]; int i; unsigned int 
data_offset; unsigned int audio_frame_counter; film->sample_table = NULL; film->stereo_buffer = NULL; film->stereo_buffer_size = 0; /* load the main FILM header */ if (avio_read(pb, scratch, 16) != 16) return AVERROR(EIO); data_offset = AV_RB32(&scratch[4]); film->version = AV_RB32(&scratch[8]); /* load the FDSC chunk */ if (film->version == 0) { /* special case for Lemmings .film files; 20-byte header */ if (avio_read(pb, scratch, 20) != 20) return AVERROR(EIO); /* make some assumptions about the audio parameters */ film->audio_type = CODEC_ID_PCM_S8; film->audio_samplerate = 22050; film->audio_channels = 1; film->audio_bits = 8; } else { /* normal Saturn .cpk files; 32-byte header */ if (avio_read(pb, scratch, 32) != 32) return AVERROR(EIO); film->audio_samplerate = AV_RB16(&scratch[24]); film->audio_channels = scratch[21]; film->audio_bits = scratch[22]; if (scratch[23] == 2) film->audio_type = CODEC_ID_ADPCM_ADX; else if (film->audio_bits == 8) film->audio_type = CODEC_ID_PCM_S8; else if (film->audio_bits == 16) film->audio_type = CODEC_ID_PCM_S16BE; else film->audio_type = CODEC_ID_NONE; } if (AV_RB32(&scratch[0]) != FDSC_TAG) return AVERROR_INVALIDDATA; if (AV_RB32(&scratch[8]) == CVID_TAG) { film->video_type = CODEC_ID_CINEPAK; } else film->video_type = CODEC_ID_NONE; /* initialize the decoder streams */ if (film->video_type) { st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); film->video_stream_index = st->index; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = film->video_type; st->codec->codec_tag = 0; /* no fourcc */ st->codec->width = AV_RB32(&scratch[16]); st->codec->height = AV_RB32(&scratch[12]); } if (film->audio_type) { st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); film->audio_stream_index = st->index; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = film->audio_type; st->codec->codec_tag = 1; st->codec->channels = film->audio_channels; st->codec->sample_rate = film->audio_samplerate; if (film->audio_type == CODEC_ID_ADPCM_ADX) { st->codec->bits_per_coded_sample = 18 * 8 / 32; st->codec->block_align = st->codec->channels * 18; } else { st->codec->bits_per_coded_sample = film->audio_bits; st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample / 8; } st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample; } /* load the sample table */ if (avio_read(pb, scratch, 16) != 16) return AVERROR(EIO); if (AV_RB32(&scratch[0]) != STAB_TAG) return AVERROR_INVALIDDATA; film->base_clock = AV_RB32(&scratch[8]); film->sample_count = AV_RB32(&scratch[12]); if(film->sample_count >= UINT_MAX / sizeof(film_sample)) return -1; film->sample_table = av_malloc(film->sample_count * sizeof(film_sample)); if (!film->sample_table) return AVERROR(ENOMEM); for(i=0; i<s->nb_streams; i++) av_set_pts_info(s->streams[i], 33, 1, film->base_clock); audio_frame_counter = 0; for (i = 0; i < film->sample_count; i++) { /* load the next sample record and transfer it to an internal struct */ if (avio_read(pb, scratch, 16) != 16) { av_free(film->sample_table); return AVERROR(EIO); } film->sample_table[i].sample_offset = data_offset + AV_RB32(&scratch[0]); film->sample_table[i].sample_size = AV_RB32(&scratch[4]); if (AV_RB32(&scratch[8]) == 0xFFFFFFFF) { film->sample_table[i].stream = film->audio_stream_index; film->sample_table[i].pts = audio_frame_counter; film->sample_table[i].pts *= film->base_clock; film->sample_table[i].pts /= film->audio_samplerate; if (film->audio_type == 
CODEC_ID_ADPCM_ADX) audio_frame_counter += (film->sample_table[i].sample_size * 32 / (18 * film->audio_channels)); else audio_frame_counter += (film->sample_table[i].sample_size / (film->audio_channels * film->audio_bits / 8)); } else { film->sample_table[i].stream = film->video_stream_index; film->sample_table[i].pts = AV_RB32(&scratch[8]) & 0x7FFFFFFF; film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1; } } film->current_sample = 0; return 0; }"} {"target": 1, "idx": 14639, "func": "static int vlc_decode_block(MimicContext *ctx, int num_coeffs, int qscale) { int16_t *block = ctx->dct_block; unsigned int pos; ctx->bdsp.clear_block(block); block[0] = get_bits(&ctx->gb, 8) << 3; for (pos = 1; pos < num_coeffs; pos++) { uint32_t vlc, num_bits; int value; int coeff; vlc = get_vlc2(&ctx->gb, ctx->vlc.table, ctx->vlc.bits, 3); if (!vlc) /* end-of-block code */ return 0; if (vlc == -1) return AVERROR_INVALIDDATA; /* pos_add and num_bits are coded in the vlc code */ pos += vlc & 15; // pos_add num_bits = vlc >> 4; // num_bits if (pos >= 64) return AVERROR_INVALIDDATA; value = get_bits(&ctx->gb, num_bits); /* FFmpeg's IDCT behaves somewhat different from the original code, so * a factor of 4 was added to the input */ coeff = vlcdec_lookup[num_bits][value]; if (pos < 3) coeff *= 16; else /* TODO Use >> 10 instead of / 1001 */ coeff = (coeff * qscale) / 1001; block[ctx->scantable.permutated[pos]] = coeff; } return 0; }"} {"target": 0, "idx": 14654, "func": "static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MpegEncContext *const s = avctx->priv_data; H264Context *const h = avctx->priv_data; int m, mb_type; /* special case for last picture */ if (buf_size == 0) { if (s->next_picture_ptr && !s->low_delay) { *(AVFrame *) data = *(AVFrame *) &s->next_picture; s->next_picture_ptr = NULL; *data_size = sizeof(AVFrame); } return 0; } init_get_bits (&s->gb, buf, 8*buf_size); s->mb_x = s->mb_y = h->mb_xy = 0; if (svq3_decode_slice_header(h)) return -1; s->pict_type = h->slice_type; s->picture_number = h->slice_num; if (avctx->debug&FF_DEBUG_PICT_INFO){ av_log(h->s.avctx, AV_LOG_DEBUG, \"%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\\n\", av_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag, s->adaptive_quant, s->qscale, h->slice_num); } /* for hurry_up == 5 */ s->current_picture.pict_type = s->pict_type; s->current_picture.key_frame = (s->pict_type == FF_I_TYPE); /* Skip B-frames if we do not have reference frames. */ if (s->last_picture_ptr == NULL && s->pict_type == FF_B_TYPE) return 0; /* Skip B-frames if we are in a hurry. */ if (avctx->hurry_up && s->pict_type == FF_B_TYPE) return 0; /* Skip everything if we are in a hurry >= 5. 
*/ if (avctx->hurry_up >= 5) return 0; if ( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE) ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE) || avctx->skip_frame >= AVDISCARD_ALL) return 0; if (s->next_p_frame_damaged) { if (s->pict_type == FF_B_TYPE) return 0; else s->next_p_frame_damaged = 0; } if (frame_start(h) < 0) return -1; if (s->pict_type == FF_B_TYPE) { h->frame_num_offset = (h->slice_num - h->prev_frame_num); if (h->frame_num_offset < 0) { h->frame_num_offset += 256; } if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) { av_log(h->s.avctx, AV_LOG_ERROR, \"error in B-frame picture id\\n\"); return -1; } } else { h->prev_frame_num = h->frame_num; h->frame_num = h->slice_num; h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num); if (h->prev_frame_num_offset < 0) { h->prev_frame_num_offset += 256; } } for (m = 0; m < 2; m++){ int i; for (i = 0; i < 4; i++){ int j; for (j = -1; j < 4; j++) h->ref_cache[m][scan8[0] + 8*i + j]= 1; if (i < 3) h->ref_cache[m][scan8[0] + 8*i + j]= PART_NOT_AVAILABLE; } } for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) { for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) { h->mb_xy = s->mb_x + s->mb_y*s->mb_stride; if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits && ((get_bits_count(&s->gb) & 7) == 0 || show_bits(&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) { skip_bits(&s->gb, h->next_slice_index - get_bits_count(&s->gb)); s->gb.size_in_bits = 8*buf_size; if (svq3_decode_slice_header(h)) return -1; /* TODO: support s->mb_skip_run */ } mb_type = svq3_get_ue_golomb(&s->gb); if (s->pict_type == FF_I_TYPE) { mb_type += 8; } else if (s->pict_type == FF_B_TYPE && mb_type >= 4) { mb_type += 4; } if (mb_type > 33 || svq3_decode_mb(h, mb_type)) { av_log(h->s.avctx, AV_LOG_ERROR, \"error while decoding MB %d %d\\n\", s->mb_x, s->mb_y); return -1; } if (mb_type != 0) { hl_decode_mb (h); } if (s->pict_type != FF_B_TYPE && !s->low_delay) { s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] = (s->pict_type == FF_P_TYPE && mb_type < 8) ? (mb_type - 1) : -1; } } ff_draw_horiz_band(s, 16*s->mb_y, 16); } MPV_frame_end(s); if (s->pict_type == FF_B_TYPE || s->low_delay) { *(AVFrame *) data = *(AVFrame *) &s->current_picture; } else { *(AVFrame *) data = *(AVFrame *) &s->last_picture; } avctx->frame_number = s->picture_number - 1; /* Do not output the last pic after seeking. 
*/ if (s->last_picture_ptr || s->low_delay) { *data_size = sizeof(AVFrame); } return buf_size; }"} {"target": 1, "idx": 14658, "func": "static av_cold int rl2_decode_init(AVCodecContext *avctx) { Rl2Context *s = avctx->priv_data; int back_size; int i; s->avctx = avctx; avctx->pix_fmt = AV_PIX_FMT_PAL8; avcodec_get_frame_defaults(&s->frame); /** parse extra data */ if(!avctx->extradata || avctx->extradata_size < EXTRADATA1_SIZE){ av_log(avctx, AV_LOG_ERROR, \"invalid extradata size\\n\"); return -1; } /** get frame_offset */ s->video_base = AV_RL16(&avctx->extradata[0]); s->clr_count = AV_RL32(&avctx->extradata[2]); if(s->video_base >= avctx->width * avctx->height){ av_log(avctx, AV_LOG_ERROR, \"invalid video_base\\n\"); return -1; } /** initialize palette */ for(i=0;i<AVPALETTE_COUNT;i++) s->palette[i] = 0xFF << 24 | AV_RB24(&avctx->extradata[6 + i * 3]); /** decode background frame if present */ back_size = avctx->extradata_size - EXTRADATA1_SIZE; if(back_size > 0){ unsigned char* back_frame = av_mallocz(avctx->width*avctx->height); if(!back_frame) return -1; rl2_rle_decode(s,avctx->extradata + EXTRADATA1_SIZE,back_size, back_frame,avctx->width,0); s->back_frame = back_frame; } return 0; }"} {"target": 1, "idx": 14665, "func": "static void picmemset(PicContext *s, AVFrame *frame, int value, int run, int *x, int *y, int *plane, int bits_per_plane) { uint8_t *d; int shift = *plane * bits_per_plane; int mask = ((1 << bits_per_plane) - 1) << shift; value <<= shift; while (run > 0) { int j; for (j = 8-bits_per_plane; j >= 0; j -= bits_per_plane) { d = frame->data[0] + *y * frame->linesize[0]; d[*x] |= (value >> j) & mask; *x += 1; if (*x == s->width) { *y -= 1; *x = 0; if (*y < 0) { *y = s->height - 1; *plane += 1; if (*plane >= s->nb_planes) return; value <<= bits_per_plane; mask <<= bits_per_plane; } } } run--; } }"} {"target": 0, "idx": 14673, "func": "static inline void silk_stabilize_lsf(int16_t nlsf[16], int order, const uint16_t min_delta[17]) { int pass, i; for (pass = 0; pass < 20; pass++) { int k, min_diff = 0; for (i = 0; i < order+1; i++) { int low = i != 0 ? nlsf[i-1] : 0; int high = i != order ? 
nlsf[i] : 32768; int diff = (high - low) - (min_delta[i]); if (diff < min_diff) { min_diff = diff; k = i; if (pass == 20) break; } } if (min_diff == 0) /* no issues; stabilized */ return; /* wiggle one or two LSFs */ if (k == 0) { /* repel away from lower bound */ nlsf[0] = min_delta[0]; } else if (k == order) { /* repel away from higher bound */ nlsf[order-1] = 32768 - min_delta[order]; } else { /* repel away from current position */ int min_center = 0, max_center = 32768, center_val; /* lower extent */ for (i = 0; i < k; i++) min_center += min_delta[i]; min_center += min_delta[k] >> 1; /* upper extent */ for (i = order; i > k; i--) max_center -= min_delta[k]; max_center -= min_delta[k] >> 1; /* move apart */ center_val = nlsf[k - 1] + nlsf[k]; center_val = (center_val >> 1) + (center_val & 1); // rounded divide by 2 center_val = FFMIN(max_center, FFMAX(min_center, center_val)); nlsf[k - 1] = center_val - (min_delta[k] >> 1); nlsf[k] = nlsf[k - 1] + min_delta[k]; } } /* resort to the fall-back method, the standard method for LSF stabilization */ /* sort; as the LSFs should be nearly sorted, use insertion sort */ for (i = 1; i < order; i++) { int j, value = nlsf[i]; for (j = i - 1; j >= 0 && nlsf[j] > value; j--) nlsf[j + 1] = nlsf[j]; nlsf[j + 1] = value; } /* push forwards to increase distance */ if (nlsf[0] < min_delta[0]) nlsf[0] = min_delta[0]; for (i = 1; i < order; i++) if (nlsf[i] < nlsf[i - 1] + min_delta[i]) nlsf[i] = nlsf[i - 1] + min_delta[i]; /* push backwards to increase distance */ if (nlsf[order-1] > 32768 - min_delta[order]) nlsf[order-1] = 32768 - min_delta[order]; for (i = order-2; i >= 0; i--) if (nlsf[i] > nlsf[i + 1] - min_delta[i+1]) nlsf[i] = nlsf[i + 1] - min_delta[i+1]; return; }"} {"target": 1, "idx": 14679, "func": "static void pci_unin_main_config_writel (void *opaque, target_phys_addr_t addr, uint32_t val) { UNINState *s = opaque; UNIN_DPRINTF(\"config_writel addr \" TARGET_FMT_plx \" val %x\\n\", addr, val); #ifdef TARGET_WORDS_BIGENDIAN val = bswap32(val); #endif s->config_reg = val; }"} {"target": 1, "idx": 14680, "func": "vubr_set_vring_addr_exec(VubrDev *dev, VhostUserMsg *vmsg) { struct vhost_vring_addr *vra = &vmsg->payload.addr; unsigned int index = vra->index; VubrVirtq *vq = &dev->vq[index]; DPRINT(\"vhost_vring_addr:\\n\"); DPRINT(\" index: %d\\n\", vra->index); DPRINT(\" flags: %d\\n\", vra->flags); DPRINT(\" desc_user_addr: 0x%016llx\\n\", vra->desc_user_addr); DPRINT(\" used_user_addr: 0x%016llx\\n\", vra->used_user_addr); DPRINT(\" avail_user_addr: 0x%016llx\\n\", vra->avail_user_addr); DPRINT(\" log_guest_addr: 0x%016llx\\n\", vra->log_guest_addr); vq->desc = (struct vring_desc *)(uintptr_t)qva_to_va(dev, vra->desc_user_addr); vq->used = (struct vring_used *)(uintptr_t)qva_to_va(dev, vra->used_user_addr); vq->avail = (struct vring_avail *)(uintptr_t)qva_to_va(dev, vra->avail_user_addr); vq->log_guest_addr = vra->log_guest_addr; DPRINT(\"Setting virtq addresses:\\n\"); DPRINT(\" vring_desc at %p\\n\", vq->desc); DPRINT(\" vring_used at %p\\n\", vq->used); DPRINT(\" vring_avail at %p\\n\", vq->avail); vq->last_used_index = vq->used->idx; return 0;"} {"target": 1, "idx": 14694, "func": "void ff_get_unscaled_swscale(SwsContext *c) { const enum PixelFormat srcFormat = c->srcFormat; const enum PixelFormat dstFormat = c->dstFormat; const int flags = c->flags; const int dstH = c->dstH; int needsDither; needsDither = isAnyRGB(dstFormat) && c->dstFormatBpp < 24 && (c->dstFormatBpp < c->srcFormatBpp || (!isAnyRGB(srcFormat))); /* yv12_to_nv12 */ if 
((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) && (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21)) { c->swScale = planarToNv12Wrapper; } /* yuv2bgr */ if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUV422P || srcFormat == PIX_FMT_YUVA420P) && isAnyRGB(dstFormat) && !(flags & SWS_ACCURATE_RND) && !(dstH & 1)) { c->swScale = ff_yuv2rgb_get_func_ptr(c); } if (srcFormat == PIX_FMT_YUV410P && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P) && !(flags & SWS_BITEXACT)) { c->swScale = yvu9ToYv12Wrapper; } /* bgr24toYV12 */ if (srcFormat == PIX_FMT_BGR24 && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P) && !(flags & SWS_ACCURATE_RND)) c->swScale = bgr24ToYv12Wrapper; /* RGB/BGR -> RGB/BGR (no dither needed forms) */ if ( isAnyRGB(srcFormat) && isAnyRGB(dstFormat) && srcFormat != PIX_FMT_BGR8 && dstFormat != PIX_FMT_BGR8 && srcFormat != PIX_FMT_RGB8 && dstFormat != PIX_FMT_RGB8 && srcFormat != PIX_FMT_BGR4 && dstFormat != PIX_FMT_BGR4 && srcFormat != PIX_FMT_RGB4 && dstFormat != PIX_FMT_RGB4 && srcFormat != PIX_FMT_BGR4_BYTE && dstFormat != PIX_FMT_BGR4_BYTE && srcFormat != PIX_FMT_RGB4_BYTE && dstFormat != PIX_FMT_RGB4_BYTE && srcFormat != PIX_FMT_MONOBLACK && dstFormat != PIX_FMT_MONOBLACK && srcFormat != PIX_FMT_MONOWHITE && dstFormat != PIX_FMT_MONOWHITE && srcFormat != PIX_FMT_RGB48LE && dstFormat != PIX_FMT_RGB48LE && srcFormat != PIX_FMT_RGB48BE && dstFormat != PIX_FMT_RGB48BE && srcFormat != PIX_FMT_BGR48LE && dstFormat != PIX_FMT_BGR48LE && srcFormat != PIX_FMT_BGR48BE && dstFormat != PIX_FMT_BGR48BE && (!needsDither || (c->flags&(SWS_FAST_BILINEAR|SWS_POINT)))) c->swScale= rgbToRgbWrapper; /* bswap 16 bits per pixel/component packed formats */ if (IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_BGR444) || IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_BGR48) || IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_BGR555) || IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_BGR565) || IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_GRAY16) || IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_RGB444) || IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_RGB48) || IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_RGB555) || IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_RGB565)) c->swScale = packed_16bpc_bswap; if ((usePal(srcFormat) && ( dstFormat == PIX_FMT_RGB32 || dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_RGB24 || dstFormat == PIX_FMT_BGR32 || dstFormat == PIX_FMT_BGR32_1 || dstFormat == PIX_FMT_BGR24))) c->swScale = palToRgbWrapper; if (srcFormat == PIX_FMT_YUV422P) { if (dstFormat == PIX_FMT_YUYV422) c->swScale = yuv422pToYuy2Wrapper; else if (dstFormat == PIX_FMT_UYVY422) c->swScale = yuv422pToUyvyWrapper; } /* LQ converters if -sws 0 or -sws 4*/ if (c->flags&(SWS_FAST_BILINEAR|SWS_POINT)) { /* yv12_to_yuy2 */ if (srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) { if (dstFormat == PIX_FMT_YUYV422) c->swScale = planarToYuy2Wrapper; else if (dstFormat == PIX_FMT_UYVY422) c->swScale = planarToUyvyWrapper; } } if (srcFormat == PIX_FMT_YUYV422 && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P)) c->swScale = yuyvToYuv420Wrapper; if (srcFormat == PIX_FMT_UYVY422 && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P)) c->swScale = uyvyToYuv420Wrapper; if (srcFormat == PIX_FMT_YUYV422 && dstFormat == PIX_FMT_YUV422P) c->swScale = yuyvToYuv422Wrapper; if (srcFormat == PIX_FMT_UYVY422 && dstFormat == PIX_FMT_YUV422P) c->swScale = 
uyvyToYuv422Wrapper; /* simple copy */ if ( srcFormat == dstFormat || (srcFormat == PIX_FMT_YUVA420P && dstFormat == PIX_FMT_YUV420P) || (srcFormat == PIX_FMT_YUV420P && dstFormat == PIX_FMT_YUVA420P) || (isPlanarYUV(srcFormat) && isGray(dstFormat)) || (isPlanarYUV(dstFormat) && isGray(srcFormat)) || (isGray(dstFormat) && isGray(srcFormat)) || (isPlanarYUV(srcFormat) && isPlanarYUV(dstFormat) && c->chrDstHSubSample == c->chrSrcHSubSample && c->chrDstVSubSample == c->chrSrcVSubSample && dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21 && srcFormat != PIX_FMT_NV12 && srcFormat != PIX_FMT_NV21)) { if (isPacked(c->srcFormat)) c->swScale = packedCopyWrapper; else /* Planar YUV or gray */ c->swScale = planarCopyWrapper; } if (ARCH_BFIN) ff_bfin_get_unscaled_swscale(c); if (HAVE_ALTIVEC) ff_swscale_get_unscaled_altivec(c); }"} {"target": 0, "idx": 14716, "func": "static PCIBridgeWindows *pci_bridge_region_init(PCIBridge *br) { PCIDevice *pd = PCI_DEVICE(br); PCIBus *parent = pd->bus; PCIBridgeWindows *w = g_new(PCIBridgeWindows, 1); uint16_t cmd = pci_get_word(pd->config + PCI_COMMAND); pci_bridge_init_alias(br, &w->alias_pref_mem, PCI_BASE_ADDRESS_MEM_PREFETCH, \"pci_bridge_pref_mem\", &br->address_space_mem, parent->address_space_mem, cmd & PCI_COMMAND_MEMORY); pci_bridge_init_alias(br, &w->alias_mem, PCI_BASE_ADDRESS_SPACE_MEMORY, \"pci_bridge_mem\", &br->address_space_mem, parent->address_space_mem, cmd & PCI_COMMAND_MEMORY); pci_bridge_init_alias(br, &w->alias_io, PCI_BASE_ADDRESS_SPACE_IO, \"pci_bridge_io\", &br->address_space_io, parent->address_space_io, cmd & PCI_COMMAND_IO); pci_bridge_init_vga_aliases(br, parent, w->alias_vga); return w; }"} {"target": 0, "idx": 14723, "func": "static void hls_transform_tree(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int trafo_depth, int blk_idx) { HEVCLocalContext *lc = &s->HEVClc; uint8_t split_transform_flag; if (trafo_depth > 0 && log2_trafo_size == 2) { SAMPLE_CBF(lc->tt.cbf_cb[trafo_depth], x0, y0) = SAMPLE_CBF(lc->tt.cbf_cb[trafo_depth - 1], xBase, yBase); SAMPLE_CBF(lc->tt.cbf_cr[trafo_depth], x0, y0) = SAMPLE_CBF(lc->tt.cbf_cr[trafo_depth - 1], xBase, yBase); } else { SAMPLE_CBF(lc->tt.cbf_cb[trafo_depth], x0, y0) = SAMPLE_CBF(lc->tt.cbf_cr[trafo_depth], x0, y0) = 0; } if (lc->cu.intra_split_flag) { if (trafo_depth == 1) lc->tu.cur_intra_pred_mode = lc->pu.intra_pred_mode[blk_idx]; } else { lc->tu.cur_intra_pred_mode = lc->pu.intra_pred_mode[0]; } lc->tt.cbf_luma = 1; lc->tt.inter_split_flag = s->sps->max_transform_hierarchy_depth_inter == 0 && lc->cu.pred_mode == MODE_INTER && lc->cu.part_mode != PART_2Nx2N && trafo_depth == 0; if (log2_trafo_size <= s->sps->log2_max_trafo_size && log2_trafo_size > s->sps->log2_min_tb_size && trafo_depth < lc->cu.max_trafo_depth && !(lc->cu.intra_split_flag && trafo_depth == 0)) { split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size); } else { split_transform_flag = log2_trafo_size > s->sps->log2_max_trafo_size || (lc->cu.intra_split_flag && trafo_depth == 0) || lc->tt.inter_split_flag; } if (log2_trafo_size > 2) { if (trafo_depth == 0 || SAMPLE_CBF(lc->tt.cbf_cb[trafo_depth - 1], xBase, yBase)) { SAMPLE_CBF(lc->tt.cbf_cb[trafo_depth], x0, y0) = ff_hevc_cbf_cb_cr_decode(s, trafo_depth); } if (trafo_depth == 0 || SAMPLE_CBF(lc->tt.cbf_cr[trafo_depth - 1], xBase, yBase)) { SAMPLE_CBF(lc->tt.cbf_cr[trafo_depth], x0, y0) = ff_hevc_cbf_cb_cr_decode(s, trafo_depth); } } if (split_transform_flag) { int x1 = 
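One of the unscaled paths selected in the ff_get_unscaled_swscale record above (packed_16bpc_bswap) only needs to flip the byte order of every 16-bit component, for example RGB48LE versus RGB48BE. A standalone sketch of that idea, with an invented name (swap16_buffer), not the swscale wrapper itself:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Byte-swap every 16-bit component in a packed buffer in place,
 * e.g. to convert RGB48LE <-> RGB48BE. */
static void swap16_buffer(uint8_t *buf, size_t nbytes)
{
    for (size_t i = 0; i + 1 < nbytes; i += 2) {
        uint8_t t  = buf[i];
        buf[i]     = buf[i + 1];
        buf[i + 1] = t;
    }
}

int main(void)
{
    /* one RGB48 pixel: R=0x1234 G=0x5678 B=0x9abc, stored little-endian */
    uint8_t pix[6] = { 0x34, 0x12, 0x78, 0x56, 0xbc, 0x9a };
    swap16_buffer(pix, sizeof(pix));
    for (size_t i = 0; i < sizeof(pix); i++)
        printf("%02x ", pix[i]);
    printf("\n");   /* 12 34 56 78 9a bc: now big-endian */
    return 0;
}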
x0 + ((1 << log2_trafo_size) >> 1); int y1 = y0 + ((1 << log2_trafo_size) >> 1); hls_transform_tree(s, x0, y0, x0, y0, cb_xBase, cb_yBase, log2_cb_size, log2_trafo_size - 1, trafo_depth + 1, 0); hls_transform_tree(s, x1, y0, x0, y0, cb_xBase, cb_yBase, log2_cb_size, log2_trafo_size - 1, trafo_depth + 1, 1); hls_transform_tree(s, x0, y1, x0, y0, cb_xBase, cb_yBase, log2_cb_size, log2_trafo_size - 1, trafo_depth + 1, 2); hls_transform_tree(s, x1, y1, x0, y0, cb_xBase, cb_yBase, log2_cb_size, log2_trafo_size - 1, trafo_depth + 1, 3); } else { int min_tu_size = 1 << s->sps->log2_min_tb_size; int log2_min_tu_size = s->sps->log2_min_tb_size; int min_tu_width = s->sps->min_tb_width; if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 || SAMPLE_CBF(lc->tt.cbf_cb[trafo_depth], x0, y0) || SAMPLE_CBF(lc->tt.cbf_cr[trafo_depth], x0, y0)) { lc->tt.cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth); } hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase, log2_cb_size, log2_trafo_size, trafo_depth, blk_idx); // TODO: store cbf_luma somewhere else if (lc->tt.cbf_luma) { int i, j; for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size) for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) { int x_tu = (x0 + j) >> log2_min_tu_size; int y_tu = (y0 + i) >> log2_min_tu_size; s->cbf_luma[y_tu * min_tu_width + x_tu] = 1; } } if (!s->sh.disable_deblocking_filter_flag) { ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size, lc->slice_or_tiles_up_boundary, lc->slice_or_tiles_left_boundary); if (s->pps->transquant_bypass_enable_flag && lc->cu.cu_transquant_bypass_flag) set_deblocking_bypass(s, x0, y0, log2_trafo_size); } } }"} {"target": 1, "idx": 14741, "func": "static void aw_pulse_set2(WMAVoiceContext *s, GetBitContext *gb, int block_idx, AMRFixed *fcb) { uint16_t use_mask_mem[9]; // only 5 are used, rest is padding uint16_t *use_mask = use_mask_mem + 2; /* in this function, idx is the index in the 80-bit (+ padding) use_mask * bit-array. Since use_mask consists of 16-bit values, the lower 4 bits * of idx are the position of the bit within a particular item in the * array (0 being the most significant bit, and 15 being the least * significant bit), and the remainder (>> 4) is the index in the * use_mask[]-array. This is faster and uses less memory than using a * 80-byte/80-int array. */ int pulse_off = s->aw_first_pulse_off[block_idx], pulse_start, n, idx, range, aidx, start_off = 0; /* set offset of first pulse to within this block */ if (s->aw_n_pulses[block_idx] > 0) while (pulse_off + s->aw_pulse_range < 1) pulse_off += fcb->pitch_lag; /* find range per pulse */ if (s->aw_n_pulses[0] > 0) { if (block_idx == 0) { range = 32; } else /* block_idx = 1 */ { range = 8; if (s->aw_n_pulses[block_idx] > 0) pulse_off = s->aw_next_pulse_off_cache; } } else range = 16; pulse_start = s->aw_n_pulses[block_idx] > 0 ? pulse_off - range / 2 : 0; /* aw_pulse_set1() already applies pulses around pulse_off (to be exactly, * in the range of [pulse_off, pulse_off + s->aw_pulse_range], and thus * we exclude that range from being pulsed again in this function. 
*/ memset(&use_mask[-2], 0, 2 * sizeof(use_mask[0])); memset( use_mask, -1, 5 * sizeof(use_mask[0])); memset(&use_mask[5], 0, 2 * sizeof(use_mask[0])); if (s->aw_n_pulses[block_idx] > 0) for (idx = pulse_off; idx < MAX_FRAMESIZE / 2; idx += fcb->pitch_lag) { int excl_range = s->aw_pulse_range; // always 16 or 24 uint16_t *use_mask_ptr = &use_mask[idx >> 4]; int first_sh = 16 - (idx & 15); *use_mask_ptr++ &= 0xFFFF << first_sh; excl_range -= first_sh; if (excl_range >= 16) { *use_mask_ptr++ = 0; *use_mask_ptr &= 0xFFFF >> (excl_range - 16); } else *use_mask_ptr &= 0xFFFF >> excl_range; } /* find the 'aidx'th offset that is not excluded */ aidx = get_bits(gb, s->aw_n_pulses[0] > 0 ? 5 - 2 * block_idx : 4); for (n = 0; n <= aidx; pulse_start++) { for (idx = pulse_start; idx < 0; idx += fcb->pitch_lag) ; if (idx >= MAX_FRAMESIZE / 2) { // find from zero if (use_mask[0]) idx = 0x0F; else if (use_mask[1]) idx = 0x1F; else if (use_mask[2]) idx = 0x2F; else if (use_mask[3]) idx = 0x3F; else if (use_mask[4]) idx = 0x4F; else return; idx -= av_log2_16bit(use_mask[idx >> 4]); } if (use_mask[idx >> 4] & (0x8000 >> (idx & 15))) { use_mask[idx >> 4] &= ~(0x8000 >> (idx & 15)); n++; start_off = idx; } } fcb->x[fcb->n] = start_off; fcb->y[fcb->n] = get_bits1(gb) ? -1.0 : 1.0; fcb->n++; /* set offset for next block, relative to start of that block */ n = (MAX_FRAMESIZE / 2 - start_off) % fcb->pitch_lag; s->aw_next_pulse_off_cache = n ? fcb->pitch_lag - n : 0; }"} {"target": 1, "idx": 14745, "func": "static void asf_build_simple_index(AVFormatContext *s, int stream_index) { ff_asf_guid g; ASFContext *asf = s->priv_data; int64_t current_pos = avio_tell(s->pb); int i; avio_seek(s->pb, asf->data_object_offset + asf->data_object_size, SEEK_SET); ff_get_guid(s->pb, &g); /* the data object can be followed by other top-level objects, * skip them until the simple index object is reached */ while (ff_guidcmp(&g, &index_guid)) { int64_t gsize = avio_rl64(s->pb); if (gsize < 24 || s->pb->eof_reached) { avio_seek(s->pb, current_pos, SEEK_SET); return; } avio_skip(s->pb, gsize - 24); ff_get_guid(s->pb, &g); } { int64_t itime, last_pos = -1; int pct, ict; int64_t av_unused gsize = avio_rl64(s->pb); ff_get_guid(s->pb, &g); itime = avio_rl64(s->pb); pct = avio_rl32(s->pb); ict = avio_rl32(s->pb); av_log(s, AV_LOG_DEBUG, \"itime:0x%\"PRIx64\", pct:%d, ict:%d\\n\", itime, pct, ict); for (i = 0; i < ict; i++) { int pktnum = avio_rl32(s->pb); int pktct = avio_rl16(s->pb); int64_t pos = s->data_offset + s->packet_size * (int64_t)pktnum; int64_t index_pts = FFMAX(av_rescale(itime, i, 10000) - asf->hdr.preroll, 0); if (pos != last_pos) { av_log(s, AV_LOG_DEBUG, \"pktnum:%d, pktct:%d pts: %\"PRId64\"\\n\", pktnum, pktct, index_pts); av_add_index_entry(s->streams[stream_index], pos, index_pts, s->packet_size, 0, AVINDEX_KEYFRAME); last_pos = pos; } } asf->index_read = ict > 0; } avio_seek(s->pb, current_pos, SEEK_SET); }"} {"target": 1, "idx": 14750, "func": "static void bdrv_qed_drain(BlockDriverState *bs) { BDRVQEDState *s = bs->opaque; /* Cancel timer and start doing I/O that were meant to happen as if it * fired, that way we get bdrv_drain() taking care of the ongoing requests * correctly. 
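The aw_pulse_set2 record above tracks the 80 usable pulse positions as five 16-bit words and clears a run of bits around each already-placed pulse, with bit 0 taken as the most significant bit of word 0. A self-contained sketch of clearing such a run across word boundaries; the helper name clear_bit_run and the example width are mine:

#include <stdint.h>
#include <stdio.h>

#define MASK_WORDS 5   /* 5 * 16 = 80 positions, as in the record above */

/* Clear `count` consecutive bits starting at bit index `start`,
 * where bit 0 is the MSB of word 0 (the convention used above). */
static void clear_bit_run(uint16_t mask[MASK_WORDS], int start, int count)
{
    while (count > 0 && start < MASK_WORDS * 16) {
        int word = start >> 4;
        int bit  = start & 15;
        int n    = 16 - bit;          /* bits left in this word */
        if (n > count)
            n = count;
        /* n-bit wide field at position `bit`, counted from the MSB side */
        uint16_t field = (uint16_t)(((1u << n) - 1) << (16 - bit - n));
        mask[word] &= (uint16_t)~field;
        start += n;
        count -= n;
    }
}

int main(void)
{
    uint16_t mask[MASK_WORDS] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
    clear_bit_run(mask, 12, 10);      /* crosses the word 0 / word 1 boundary */
    for (int i = 0; i < MASK_WORDS; i++)
        printf("%04x ", mask[i]);
    printf("\n");                     /* fff0 03ff ffff ffff ffff */
    return 0;
}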
*/ qed_cancel_need_check_timer(s); qed_plug_allocating_write_reqs(s); bdrv_aio_flush(s->bs, qed_clear_need_check, s); }"} {"target": 1, "idx": 14774, "func": "static void test_qemu_strtol_invalid(void) { const char *str = \" xxxx \\t abc\"; char f = 'X'; const char *endptr = &f; long res = 999; int err; err = qemu_strtol(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 0); g_assert(endptr == str); }"} {"target": 1, "idx": 14775, "func": "static int check_bind(const char *hostname, bool *has_proto) { int fd = -1; struct addrinfo ai, *res = NULL; int rc; int ret = -1; memset(&ai, 0, sizeof(ai)); ai.ai_flags = AI_CANONNAME | AI_V4MAPPED | AI_ADDRCONFIG; ai.ai_family = AF_UNSPEC; ai.ai_socktype = SOCK_STREAM; /* lookup */ rc = getaddrinfo(hostname, NULL, &ai, &res); if (rc != 0) { if (rc == EAI_ADDRFAMILY || rc == EAI_FAMILY) { *has_proto = false; goto done; } goto cleanup; } fd = qemu_socket(res->ai_family, res->ai_socktype, res->ai_protocol); if (fd < 0) { goto cleanup; } if (bind(fd, res->ai_addr, res->ai_addrlen) < 0) { if (errno == EADDRNOTAVAIL) { *has_proto = false; goto done; } goto cleanup; } *has_proto = true; done: ret = 0; cleanup: if (fd != -1) { close(fd); } if (res) { freeaddrinfo(res); } return ret; }"} {"target": 1, "idx": 14777, "func": "static bool run_poll_handlers(AioContext *ctx, int64_t max_ns) { bool progress = false; int64_t end_time; assert(ctx->notify_me); assert(ctx->walking_handlers > 0); assert(ctx->poll_disable_cnt == 0); trace_run_poll_handlers_begin(ctx, max_ns); end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns; do { AioHandler *node; QLIST_FOREACH(node, &ctx->aio_handlers, node) { if (!node->deleted && node->io_poll && node->io_poll(node->opaque)) { progress = true; } /* Caller handles freeing deleted nodes. Don't do it here. 
*/ } } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time); trace_run_poll_handlers_end(ctx, progress); return progress; }"} {"target": 0, "idx": 14813, "func": "static int screenpresso_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { ScreenpressoContext *ctx = avctx->priv_data; AVFrame *frame = data; int keyframe; int ret; /* Size check */ if (avpkt->size < 3) { av_log(avctx, AV_LOG_ERROR, \"Packet too small (%d)\\n\", avpkt->size); return AVERROR_INVALIDDATA; } /* Basic sanity check, but not really harmful */ if ((avpkt->data[0] != 0x73 && avpkt->data[0] != 0x72) || avpkt->data[1] != 8) { // bpp probably av_log(avctx, AV_LOG_WARNING, \"Unknown header 0x%02X%02X\\n\", avpkt->data[0], avpkt->data[1]); } keyframe = (avpkt->data[0] == 0x73); /* Resize deflate buffer and frame on resolution change */ if (ctx->inflated_size != avctx->width * avctx->height * 3) { av_frame_unref(ctx->current); ret = ff_get_buffer(avctx, ctx->current, AV_GET_BUFFER_FLAG_REF); if (ret < 0) return ret; /* If malloc fails, reset len to avoid preserving an invalid value */ ctx->inflated_size = avctx->width * avctx->height * 3; ret = av_reallocp(&ctx->inflated_buf, ctx->inflated_size); if (ret < 0) { ctx->inflated_size = 0; return ret; } } /* Inflate the frame after the 2 byte header */ ret = uncompress(ctx->inflated_buf, &ctx->inflated_size, avpkt->data + 2, avpkt->size - 2); if (ret) { av_log(avctx, AV_LOG_ERROR, \"Deflate error %d.\\n\", ret); return AVERROR_UNKNOWN; } /* When a keyframe is found, copy it (flipped) */ if (keyframe) av_image_copy_plane(ctx->current->data[0] + ctx->current->linesize[0] * (avctx->height - 1), -1 * ctx->current->linesize[0], ctx->inflated_buf, avctx->width * 3, avctx->width * 3, avctx->height); /* Otherwise sum the delta on top of the current frame */ else sum_delta_flipped(ctx->current->data[0], ctx->current->linesize[0], ctx->inflated_buf, avctx->width * 3, avctx->width * 3, avctx->height); /* Frame is ready to be output */ ret = av_frame_ref(frame, ctx->current); if (ret < 0) return ret; /* Usual properties */ if (keyframe) { frame->pict_type = AV_PICTURE_TYPE_I; frame->key_frame = 1; } else { frame->pict_type = AV_PICTURE_TYPE_P; } *got_frame = 1; return 0; }"} {"target": 1, "idx": 14814, "func": "static inline TCGv iwmmxt_load_creg(int reg) { TCGv var = new_tmp(); tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg])); return var; }"} {"target": 1, "idx": 14816, "func": "void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value) { const ARMCPRegInfo *ri = rip; ri->writefn(env, ri, value); }"} {"target": 1, "idx": 14822, "func": "static void vhost_scsi_stop(VHostSCSI *s) { VirtIODevice *vdev = VIRTIO_DEVICE(s); BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); int ret = 0; if (!k->set_guest_notifiers) { ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false); if (ret < 0) { error_report(\"vhost guest notifier cleanup failed: %d\\n\", ret); } } assert(ret >= 0); vhost_scsi_clear_endpoint(s); vhost_dev_stop(&s->dev, vdev); vhost_dev_disable_notifiers(&s->dev, vdev); }"} {"target": 1, "idx": 14823, "func": "static void start_tco(const TestData *d) { uint32_t val; val = qpci_io_readw(d->dev, d->tco_io_base + TCO1_CNT); val &= ~TCO_TMR_HLT; qpci_io_writew(d->dev, d->tco_io_base + TCO1_CNT, val); }"} {"target": 0, "idx": 14840, "func": "void memory_global_dirty_log_stop(void) { global_dirty_log = false; /* Refresh DIRTY_LOG_MIGRATION bit. 
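The run_poll_handlers record above spins over its poll callbacks until one reports progress or a nanosecond deadline passes. A hedged standalone sketch of the same deadline pattern using clock_gettime; poll_until and never_ready are invented, this is not the QEMU API:

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Busy-poll a callback until it reports progress or max_ns elapses. */
static bool poll_until(bool (*cb)(void *), void *opaque, int64_t max_ns)
{
    int64_t end = now_ns() + max_ns;
    bool progress = false;

    do {
        progress = cb(opaque);
    } while (!progress && now_ns() < end);

    return progress;
}

static bool never_ready(void *opaque)
{
    (void)opaque;
    return false;   /* simulate a source that is never ready */
}

int main(void)
{
    bool got = poll_until(never_ready, NULL, 2000000); /* 2 ms budget */
    printf("progress: %d\n", got);
    return 0;
}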
*/ memory_region_transaction_begin(); memory_region_update_pending = true; memory_region_transaction_commit(); MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse); }"} {"target": 0, "idx": 14842, "func": "static inline int open_by_handle(int mountfd, const char *fh, int flags) { errno = ENOSYS; return -1; }"} {"target": 0, "idx": 14846, "func": "static void cmd_get_event_status_notification(IDEState *s, uint8_t *buf) { const uint8_t *packet = buf; struct { uint8_t opcode; uint8_t polled; /* lsb bit is polled; others are reserved */ uint8_t reserved2[2]; uint8_t class; uint8_t reserved3[2]; uint16_t len; uint8_t control; } __attribute__((packed)) *gesn_cdb; struct { uint16_t len; uint8_t notification_class; uint8_t supported_events; } __attribute((packed)) *gesn_event_header; enum notification_class_request_type { NCR_RESERVED1 = 1 << 0, NCR_OPERATIONAL_CHANGE = 1 << 1, NCR_POWER_MANAGEMENT = 1 << 2, NCR_EXTERNAL_REQUEST = 1 << 3, NCR_MEDIA = 1 << 4, NCR_MULTI_HOST = 1 << 5, NCR_DEVICE_BUSY = 1 << 6, NCR_RESERVED2 = 1 << 7, }; enum event_notification_class_field { ENC_NO_EVENTS = 0, ENC_OPERATIONAL_CHANGE, ENC_POWER_MANAGEMENT, ENC_EXTERNAL_REQUEST, ENC_MEDIA, ENC_MULTIPLE_HOSTS, ENC_DEVICE_BUSY, ENC_RESERVED, }; unsigned int max_len, used_len; gesn_cdb = (void *)packet; gesn_event_header = (void *)buf; max_len = be16_to_cpu(gesn_cdb->len); /* It is fine by the MMC spec to not support async mode operations */ if (!(gesn_cdb->polled & 0x01)) { /* asynchronous mode */ /* Only polling is supported, asynchronous mode is not. */ ide_atapi_cmd_error(s, SENSE_ILLEGAL_REQUEST, ASC_INV_FIELD_IN_CMD_PACKET); return; } /* polling mode operation */ /* * These are the supported events. * * We currently only support requests of the 'media' type. */ gesn_event_header->supported_events = NCR_MEDIA; /* * We use |= below to set the class field; other bits in this byte * are reserved now but this is useful to do if we have to use the * reserved fields later. */ gesn_event_header->notification_class = 0; /* * Responses to requests are to be based on request priority. The * notification_class_request_type enum above specifies the * priority: upper elements are higher prio than lower ones. */ if (gesn_cdb->class & NCR_MEDIA) { gesn_event_header->notification_class |= ENC_MEDIA; used_len = event_status_media(s, buf); } else { gesn_event_header->notification_class = 0x80; /* No event available */ used_len = sizeof(*gesn_event_header); } gesn_event_header->len = cpu_to_be16(used_len - sizeof(*gesn_event_header)); ide_atapi_cmd_reply(s, used_len, max_len); }"} {"target": 0, "idx": 14848, "func": "static int decode_plane(UtvideoContext *c, int plane_no, uint8_t *dst, int step, ptrdiff_t stride, int width, int height, const uint8_t *src, int use_pred) { int i, j, slice, pix; int sstart, send; VLC vlc; GetBitContext gb; int prev, fsym; const int cmask = c->interlaced ? 
~(1 + 2 * (!plane_no && c->avctx->pix_fmt == AV_PIX_FMT_YUV420P)) : ~(!plane_no && c->avctx->pix_fmt == AV_PIX_FMT_YUV420P); if (build_huff(src, &vlc, &fsym)) { av_log(c->avctx, AV_LOG_ERROR, \"Cannot build Huffman codes\\n\"); return AVERROR_INVALIDDATA; } if (fsym >= 0) { // build_huff reported a symbol to fill slices with send = 0; for (slice = 0; slice < c->slices; slice++) { uint8_t *dest; sstart = send; send = (height * (slice + 1) / c->slices) & cmask; dest = dst + sstart * stride; prev = 0x80; for (j = sstart; j < send; j++) { for (i = 0; i < width * step; i += step) { pix = fsym; if (use_pred) { prev += pix; pix = prev; } dest[i] = pix; } dest += stride; } } return 0; } src += 256; send = 0; for (slice = 0; slice < c->slices; slice++) { uint8_t *dest; int slice_data_start, slice_data_end, slice_size; sstart = send; send = (height * (slice + 1) / c->slices) & cmask; dest = dst + sstart * stride; // slice offset and size validation was done earlier slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0; slice_data_end = AV_RL32(src + slice * 4); slice_size = slice_data_end - slice_data_start; if (!slice_size) { av_log(c->avctx, AV_LOG_ERROR, \"Plane has more than one symbol \" \"yet a slice has a length of zero.\\n\"); goto fail; } memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE); c->bdsp.bswap_buf((uint32_t *) c->slice_bits, (uint32_t *)(src + slice_data_start + c->slices * 4), (slice_data_end - slice_data_start + 3) >> 2); init_get_bits(&gb, c->slice_bits, slice_size * 8); prev = 0x80; for (j = sstart; j < send; j++) { for (i = 0; i < width * step; i += step) { pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3); if (pix < 0) { av_log(c->avctx, AV_LOG_ERROR, \"Decoding error\\n\"); goto fail; } if (use_pred) { prev += pix; pix = prev; } dest[i] = pix; } if (get_bits_left(&gb) < 0) { av_log(c->avctx, AV_LOG_ERROR, \"Slice decoding ran out of bits\\n\"); goto fail; } dest += stride; } if (get_bits_left(&gb) > 32) av_log(c->avctx, AV_LOG_WARNING, \"%d bits left after decoding slice\\n\", get_bits_left(&gb)); } ff_free_vlc(&vlc); return 0; fail: ff_free_vlc(&vlc); return AVERROR_INVALIDDATA; }"} {"target": 0, "idx": 14865, "func": "static int ogg_buffer_data(AVFormatContext *s, AVStream *st, uint8_t *data, unsigned size, int64_t granule, int header) { OGGStreamContext *oggstream = st->priv_data; OGGContext *ogg = s->priv_data; int total_segments = size / 255 + 1; uint8_t *p = data; int i, segments, len, flush = 0; // Handles VFR by flushing page because this frame needs to have a timestamp // For theora, keyframes also need to have a timestamp to correctly mark // them as such, otherwise seeking will not work correctly at the very // least with old libogg versions. // Do not try to flush header packets though, that will create broken files. 
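The decode_plane record above divides the plane height into slices with send = height * (slice + 1) / slices, masked for interlacing. A tiny sketch of that partitioning, ignoring the interlace mask; the names are mine:

#include <stdio.h>

/* Print the [start, end) row range handled by each slice.
 * Matches the height * (slice + 1) / nslices split used above,
 * without the interlacing mask. */
static void print_slice_rows(int height, int nslices)
{
    int send = 0;
    for (int slice = 0; slice < nslices; slice++) {
        int sstart = send;
        send = height * (slice + 1) / nslices;
        printf("slice %d: rows %d..%d (%d rows)\n",
               slice, sstart, send - 1, send - sstart);
    }
}

int main(void)
{
    print_slice_rows(720, 8);   /* 8 slices of 90 rows each */
    print_slice_rows(487, 4);   /* uneven heights still cover every row */
    return 0;
}

Accumulating send into the next sstart is what guarantees the slices tile the plane exactly even when height is not divisible by the slice count.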
if (st->codec->codec_id == AV_CODEC_ID_THEORA && !header && (ogg_granule_to_timestamp(oggstream, granule) > ogg_granule_to_timestamp(oggstream, oggstream->last_granule) + 1 || ogg_key_granule(oggstream, granule))) { if (oggstream->page.granule != -1) ogg_buffer_page(s, oggstream); flush = 1; } // avoid a continued page if (!header && oggstream->page.size > 0 && MAX_PAGE_SIZE - oggstream->page.size < size) { ogg_buffer_page(s, oggstream); } for (i = 0; i < total_segments; ) { OGGPage *page = &oggstream->page; segments = FFMIN(total_segments - i, 255 - page->segments_count); if (i && !page->segments_count) page->flags |= 1; // continued packet memset(page->segments+page->segments_count, 255, segments - 1); page->segments_count += segments - 1; len = FFMIN(size, segments*255); page->segments[page->segments_count++] = len - (segments-1)*255; memcpy(page->data+page->size, p, len); p += len; size -= len; i += segments; page->size += len; if (i == total_segments) page->granule = granule; if (!header) { AVStream *st = s->streams[page->stream_index]; int64_t start = av_rescale_q(page->start_granule, st->time_base, AV_TIME_BASE_Q); int64_t next = av_rescale_q(page->granule, st->time_base, AV_TIME_BASE_Q); if (page->segments_count == 255 || (ogg->pref_size > 0 && page->size >= ogg->pref_size) || (ogg->pref_duration > 0 && next - start >= ogg->pref_duration)) { ogg_buffer_page(s, oggstream); } } } if (flush && oggstream->page.granule != -1) ogg_buffer_page(s, oggstream); return 0; }"} {"target": 0, "idx": 14872, "func": "void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number) { int time_incr; int time_div, time_mod; if(s->pict_type==AV_PICTURE_TYPE_I){ if(!(s->flags&CODEC_FLAG_GLOBAL_HEADER)){ if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) //HACK, the reference sw is buggy mpeg4_encode_visual_object_header(s); if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number==0) //HACK, the reference sw is buggy mpeg4_encode_vol_header(s, 0, 0); } if(!(s->workaround_bugs & FF_BUG_MS)) mpeg4_encode_gop_header(s); } s->partitioned_frame= s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_B; put_bits(&s->pb, 16, 0); /* vop header */ put_bits(&s->pb, 16, VOP_STARTCODE); /* vop header */ put_bits(&s->pb, 2, s->pict_type - 1); /* pict type: I = 0 , P = 1 */ assert(s->time>=0); time_div= s->time/s->avctx->time_base.den; time_mod= s->time%s->avctx->time_base.den; time_incr= time_div - s->last_time_base; assert(time_incr >= 0); while(time_incr--) put_bits(&s->pb, 1, 1); put_bits(&s->pb, 1, 0); put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, s->time_increment_bits, time_mod); /* time increment */ put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, 1, 1); /* vop coded */ if ( s->pict_type == AV_PICTURE_TYPE_P || (s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE)) { put_bits(&s->pb, 1, s->no_rounding); /* rounding type */ } put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */ if(!s->progressive_sequence){ put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first); put_bits(&s->pb, 1, s->alternate_scan); } //FIXME sprite stuff put_bits(&s->pb, 5, s->qscale); if (s->pict_type != AV_PICTURE_TYPE_I) put_bits(&s->pb, 3, s->f_code); /* fcode_for */ if (s->pict_type == AV_PICTURE_TYPE_B) put_bits(&s->pb, 3, s->b_code); /* fcode_back */ }"} {"target": 0, "idx": 14884, "func": "static void build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes) { uint8_t huff_size[256]; uint16_t huff_code[256]; memset(huff_size, 0, 
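The ogg_buffer_data record above encodes a packet's length into the page segment table as a run of 255 values terminated by the remainder, so a packet whose size is an exact multiple of 255 ends with an explicit 0. A standalone sketch of that lacing rule; build_lacing is an invented helper:

#include <stdio.h>

/* Fill `segments` with the Ogg lacing values for a packet of `size` bytes.
 * Writes size/255 bytes of 255 followed by one byte of size%255
 * (0 when size is a multiple of 255) and returns the count. */
static int build_lacing(unsigned char *segments, int size)
{
    int n = 0;
    while (size >= 255) {
        segments[n++] = 255;
        size -= 255;
    }
    segments[n++] = (unsigned char)size;
    return n;
}

int main(void)
{
    unsigned char seg[32];
    int sizes[] = { 100, 255, 700 };
    for (int i = 0; i < 3; i++) {
        int n = build_lacing(seg, sizes[i]);
        printf("packet %4d -> %d lacing values:", sizes[i], n);
        for (int j = 0; j < n; j++)
            printf(" %d", seg[j]);
        printf("\n");
    }
    return 0;
}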
sizeof(huff_size)); build_huffman_codes(huff_size, huff_code, bits_table, val_table); init_vlc(vlc, 9, nb_codes, huff_size, 1, 1, huff_code, 2, 2); }"} {"target": 0, "idx": 14904, "func": "void qemu_coroutine_adjust_pool_size(int n) { qemu_mutex_lock(&pool_lock); pool_max_size += n; /* Callers should never take away more than they added */ assert(pool_max_size >= POOL_DEFAULT_SIZE); /* Trim oversized pool down to new max */ while (pool_size > pool_max_size) { Coroutine *co = QSLIST_FIRST(&pool); QSLIST_REMOVE_HEAD(&pool, pool_next); pool_size--; qemu_coroutine_delete(co); } qemu_mutex_unlock(&pool_lock); }"} {"target": 0, "idx": 14911, "func": "static void rndis_clear_responsequeue(USBNetState *s) { struct rndis_response *r; while ((r = s->rndis_resp.tqh_first)) { TAILQ_REMOVE(&s->rndis_resp, r, entries); qemu_free(r); } }"} {"target": 0, "idx": 14913, "func": "av_cold void ff_vp56dsp_init_x86(VP56DSPContext* c, enum AVCodecID codec) { #if HAVE_YASM int mm_flags = av_get_cpu_flags(); if (CONFIG_VP6_DECODER && codec == AV_CODEC_ID_VP6) { #if ARCH_X86_32 if (mm_flags & AV_CPU_FLAG_MMX) { c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx; } #endif if (mm_flags & AV_CPU_FLAG_SSE2) { c->vp6_filter_diag4 = ff_vp6_filter_diag4_sse2; } } #endif }"} {"target": 0, "idx": 14936, "func": "int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw, int is_user, int is_softmmu) { uint32_t physical; int prot; int exception = 0, error_code = 0; int access_type; int ret = 0; // printf(\"%s 0\\n\", __func__); access_type = env->access_type; if (env->user_mode_only) { /* user mode only emulation */ ret = -2; goto do_fault; } /* NASTY BUG workaround */ if (access_type == ACCESS_CODE && rw) { printf(\"%s: ERROR WRITE CODE ACCESS\\n\", __func__); access_type = ACCESS_INT; } ret = get_physical_address(env, &physical, &prot, address, rw, access_type); if (ret == 0) { ret = tlb_set_page(env, address & ~0xFFF, physical, prot, is_user, is_softmmu); } else if (ret < 0) { do_fault: #if defined (DEBUG_MMU) if (loglevel > 0) cpu_ppc_dump_state(env, logfile, 0); #endif if (access_type == ACCESS_CODE) { exception = EXCP_ISI; switch (ret) { case -1: /* No matches in page tables */ error_code = EXCP_ISI_TRANSLATE; break; case -2: /* Access rights violation */ error_code = EXCP_ISI_PROT; break; case -3: /* No execute protection violation */ error_code = EXCP_ISI_NOEXEC; break; case -4: /* Direct store exception */ /* No code fetch is allowed in direct-store areas */ error_code = EXCP_ISI_DIRECT; break; } } else { exception = EXCP_DSI; switch (ret) { case -1: /* No matches in page tables */ error_code = EXCP_DSI_TRANSLATE; break; case -2: /* Access rights violation */ error_code = EXCP_DSI_PROT; break; case -4: /* Direct store exception */ switch (access_type) { case ACCESS_FLOAT: /* Floating point load/store */ exception = EXCP_ALIGN; error_code = EXCP_ALIGN_FP; break; case ACCESS_RES: /* lwarx, ldarx or srwcx. 
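The build_vlc record above turns JPEG-style bits/values tables into (length, code) pairs before handing them to init_vlc. The usual way to do that is canonical code assignment: codes of each successive length continue counting from where the previous length stopped, shifted left by one. A hedged sketch of that assignment; assign_canonical_codes is my name and the 16-entry length table layout is assumed from the MJPEG convention:

#include <stdio.h>
#include <stdint.h>

/* Given bits_table[i] = number of codes of length i+1 (JPEG convention),
 * produce the canonical code and length for each symbol in table order.
 * Returns the total number of codes. */
static int assign_canonical_codes(const uint8_t bits_table[16],
                                  uint8_t lengths[256], uint16_t codes[256])
{
    int n = 0;
    uint16_t code = 0;
    for (int len = 1; len <= 16; len++) {
        for (int i = 0; i < bits_table[len - 1]; i++) {
            lengths[n] = (uint8_t)len;
            codes[n]   = code++;
            n++;
        }
        code <<= 1;   /* next length starts at twice the current counter */
    }
    return n;
}

int main(void)
{
    /* two 2-bit codes, one 3-bit code, one 4-bit code */
    uint8_t  bits[16] = { 0, 2, 1, 1 };
    uint8_t  lengths[256];
    uint16_t codes[256];
    int n = assign_canonical_codes(bits, lengths, codes);
    for (int i = 0; i < n; i++) {
        printf("symbol %d: length %d code ", i, (int)lengths[i]);
        for (int b = lengths[i] - 1; b >= 0; b--)
            putchar(((codes[i] >> b) & 1) ? '1' : '0');
        putchar('\n');
    }
    return 0;
}

With the example table this prints 00, 01, 100, 1010, a valid prefix code in which shorter codes always sort before longer ones.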
*/ exception = EXCP_DSI; error_code = EXCP_DSI_NOTSUP | EXCP_DSI_DIRECT; break; case ACCESS_EXT: /* eciwx or ecowx */ exception = EXCP_DSI; error_code = EXCP_DSI_NOTSUP | EXCP_DSI_DIRECT | EXCP_DSI_ECXW; break; default: printf(\"DSI: invalid exception (%d)\\n\", ret); exception = EXCP_PROGRAM; error_code = EXCP_INVAL | EXCP_INVAL_INVAL; break; } } if (rw) error_code |= EXCP_DSI_STORE; /* Store fault address */ env->spr[DAR] = address; } #if 0 printf(\"%s: set exception to %d %02x\\n\", __func__, exception, error_code); #endif env->exception_index = exception; env->error_code = error_code; ret = 1; } return ret; }"} {"target": 1, "idx": 14942, "func": "static void spatial_decompose53i(DWTELEM *buffer, int width, int height, int stride){ int y; DWTELEM *b0= buffer + mirror(-2-1, height-1)*stride; DWTELEM *b1= buffer + mirror(-2 , height-1)*stride; for(y=-2; yopaque; int ret, fd; unsigned int datalen; if (offset < s->inode.vdi_size) { error_report(\"shrinking is not supported\"); return -EINVAL; } else if (offset > SD_MAX_VDI_SIZE) { error_report(\"too big image size\"); return -EINVAL; } fd = connect_to_sdog(s->addr, s->port); if (fd < 0) { return fd; } /* we don't need to update entire object */ datalen = SD_INODE_SIZE - sizeof(s->inode.data_vdi_id); s->inode.vdi_size = offset; ret = write_object(fd, (char *)&s->inode, vid_to_vdi_oid(s->inode.vdi_id), s->inode.nr_copies, datalen, 0, false, s->cache_enabled); close(fd); if (ret < 0) { error_report(\"failed to update an inode.\"); } return ret; }"} {"target": 0, "idx": 14982, "func": "static int rndis_parse(USBNetState *s, uint8_t *data, int length) { uint32_t msg_type; le32 *tmp = (le32 *) data; msg_type = le32_to_cpup(tmp); switch (msg_type) { case RNDIS_INITIALIZE_MSG: s->rndis_state = RNDIS_INITIALIZED; return rndis_init_response(s, (rndis_init_msg_type *) data); case RNDIS_HALT_MSG: s->rndis_state = RNDIS_UNINITIALIZED; return 0; case RNDIS_QUERY_MSG: return rndis_query_response(s, (rndis_query_msg_type *) data, length); case RNDIS_SET_MSG: return rndis_set_response(s, (rndis_set_msg_type *) data, length); case RNDIS_RESET_MSG: rndis_clear_responsequeue(s); s->out_ptr = s->in_ptr = s->in_len = 0; return rndis_reset_response(s, (rndis_reset_msg_type *) data); case RNDIS_KEEPALIVE_MSG: /* For USB: host does this every 5 seconds */ return rndis_keepalive_response(s, (rndis_keepalive_msg_type *) data); } return USB_RET_STALL; }"} {"target": 1, "idx": 15001, "func": "static int sap_write_header(AVFormatContext *s) { struct SAPState *sap = s->priv_data; char host[1024], path[1024], url[1024], announce_addr[50] = \"\"; char *option_list; int port = 9875, base_port = 5004, i, pos = 0, same_port = 0, ttl = 255; AVFormatContext **contexts = NULL; int ret = 0; struct sockaddr_storage localaddr; socklen_t addrlen = sizeof(localaddr); int udp_fd; AVDictionaryEntry* title = av_dict_get(s->metadata, \"title\", NULL, 0); if (!ff_network_init()) return AVERROR(EIO); /* extract hostname and port */ av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &base_port, path, sizeof(path), s->filename); if (base_port < 0) base_port = 5004; /* search for options */ option_list = strrchr(path, '?'); if (option_list) { char buf[50]; if (av_find_info_tag(buf, sizeof(buf), \"announce_port\", option_list)) { port = strtol(buf, NULL, 10); } if (av_find_info_tag(buf, sizeof(buf), \"same_port\", option_list)) { same_port = strtol(buf, NULL, 10); } if (av_find_info_tag(buf, sizeof(buf), \"ttl\", option_list)) { ttl = strtol(buf, NULL, 10); } if (av_find_info_tag(buf, 
sizeof(buf), \"announce_addr\", option_list)) { av_strlcpy(announce_addr, buf, sizeof(announce_addr)); } } if (!announce_addr[0]) { struct addrinfo hints = { 0 }, *ai = NULL; hints.ai_family = AF_UNSPEC; if (getaddrinfo(host, NULL, &hints, &ai)) { av_log(s, AV_LOG_ERROR, \"Unable to resolve %s\\n\", host); ret = AVERROR(EIO); goto fail; } if (ai->ai_family == AF_INET) { /* Also known as sap.mcast.net */ av_strlcpy(announce_addr, \"224.2.127.254\", sizeof(announce_addr)); #if HAVE_STRUCT_SOCKADDR_IN6 } else if (ai->ai_family == AF_INET6) { /* With IPv6, you can use the same destination in many different * multicast subnets, to choose how far you want it routed. * This one is intended to be routed globally. */ av_strlcpy(announce_addr, \"ff0e::2:7ffe\", sizeof(announce_addr)); #endif } else { freeaddrinfo(ai); av_log(s, AV_LOG_ERROR, \"Host %s resolved to unsupported \" \"address family\\n\", host); ret = AVERROR(EIO); goto fail; } freeaddrinfo(ai); } sap->protocols = ffurl_get_protocols(NULL, NULL); if (!sap->protocols) { ret = AVERROR(ENOMEM); goto fail; } contexts = av_mallocz(sizeof(AVFormatContext*) * s->nb_streams); if (!contexts) { ret = AVERROR(ENOMEM); goto fail; } s->start_time_realtime = av_gettime(); for (i = 0; i < s->nb_streams; i++) { URLContext *fd; ff_url_join(url, sizeof(url), \"rtp\", NULL, host, base_port, \"?ttl=%d\", ttl); if (!same_port) base_port += 2; ret = ffurl_open(&fd, url, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL, sap->protocols); if (ret) { ret = AVERROR(EIO); goto fail; } ret = ff_rtp_chain_mux_open(&contexts[i], s, s->streams[i], fd, 0, i); if (ret < 0) goto fail; s->streams[i]->priv_data = contexts[i]; s->streams[i]->time_base = contexts[i]->streams[0]->time_base; av_strlcpy(contexts[i]->filename, url, sizeof(contexts[i]->filename)); } if (s->nb_streams > 0 && title) av_dict_set(&contexts[0]->metadata, \"title\", title->value, 0); ff_url_join(url, sizeof(url), \"udp\", NULL, announce_addr, port, \"?ttl=%d&connect=1\", ttl); ret = ffurl_open(&sap->ann_fd, url, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL, sap->protocols); if (ret) { ret = AVERROR(EIO); goto fail; } udp_fd = ffurl_get_file_handle(sap->ann_fd); if (getsockname(udp_fd, (struct sockaddr*) &localaddr, &addrlen)) { ret = AVERROR(EIO); goto fail; } if (localaddr.ss_family != AF_INET #if HAVE_STRUCT_SOCKADDR_IN6 && localaddr.ss_family != AF_INET6 #endif ) { av_log(s, AV_LOG_ERROR, \"Unsupported protocol family\\n\"); ret = AVERROR(EIO); goto fail; } sap->ann_size = 8192; sap->ann = av_mallocz(sap->ann_size); if (!sap->ann) { ret = AVERROR(EIO); goto fail; } sap->ann[pos] = (1 << 5); #if HAVE_STRUCT_SOCKADDR_IN6 if (localaddr.ss_family == AF_INET6) sap->ann[pos] |= 0x10; #endif pos++; sap->ann[pos++] = 0; /* Authentication length */ AV_WB16(&sap->ann[pos], av_get_random_seed()); pos += 2; if (localaddr.ss_family == AF_INET) { memcpy(&sap->ann[pos], &((struct sockaddr_in*)&localaddr)->sin_addr, sizeof(struct in_addr)); pos += sizeof(struct in_addr); #if HAVE_STRUCT_SOCKADDR_IN6 } else { memcpy(&sap->ann[pos], &((struct sockaddr_in6*)&localaddr)->sin6_addr, sizeof(struct in6_addr)); pos += sizeof(struct in6_addr); #endif } av_strlcpy(&sap->ann[pos], \"application/sdp\", sap->ann_size - pos); pos += strlen(&sap->ann[pos]) + 1; if (av_sdp_create(contexts, s->nb_streams, &sap->ann[pos], sap->ann_size - pos)) { ret = AVERROR_INVALIDDATA; goto fail; } av_freep(&contexts); av_log(s, AV_LOG_VERBOSE, \"SDP:\\n%s\\n\", &sap->ann[pos]); pos += strlen(&sap->ann[pos]); sap->ann_size = pos; if (sap->ann_size > 
sap->ann_fd->max_packet_size) { av_log(s, AV_LOG_ERROR, \"Announcement too large to send in one \" \"packet\\n\"); goto fail; } return 0; fail: av_free(contexts); sap_write_close(s); return ret; }"} {"target": 1, "idx": 15002, "func": "static void hls_prediction_unit(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int partIdx, int idx) { #define POS(c_idx, x, y) \\ &s->frame->data[c_idx][((y) >> s->sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \\ (((x) >> s->sps->hshift[c_idx]) << s->sps->pixel_shift)] HEVCLocalContext *lc = s->HEVClc; int merge_idx = 0; struct MvField current_mv = {{{ 0 }}}; int min_pu_width = s->sps->min_pu_width; MvField *tab_mvf = s->ref->tab_mvf; RefPicList *refPicList = s->ref->refPicList; HEVCFrame *ref0, *ref1; uint8_t *dst0 = POS(0, x0, y0); uint8_t *dst1 = POS(1, x0, y0); uint8_t *dst2 = POS(2, x0, y0); int log2_min_cb_size = s->sps->log2_min_cb_size; int min_cb_width = s->sps->min_cb_width; int x_cb = x0 >> log2_min_cb_size; int y_cb = y0 >> log2_min_cb_size; int x_pu, y_pu; int i, j; int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb); if (!skip_flag) lc->pu.merge_flag = ff_hevc_merge_flag_decode(s); if (skip_flag || lc->pu.merge_flag) { if (s->sh.max_num_merge_cand > 1) merge_idx = ff_hevc_merge_idx_decode(s); else merge_idx = 0; ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size, partIdx, merge_idx, ¤t_mv); } else { hevc_luma_mv_mpv_mode(s, x0, y0, nPbW, nPbH, log2_cb_size, partIdx, merge_idx, ¤t_mv); } x_pu = x0 >> s->sps->log2_min_pu_size; y_pu = y0 >> s->sps->log2_min_pu_size; for (j = 0; j < nPbH >> s->sps->log2_min_pu_size; j++) for (i = 0; i < nPbW >> s->sps->log2_min_pu_size; i++) tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv; if (current_mv.pred_flag & PF_L0) { ref0 = refPicList[0].ref[current_mv.ref_idx[0]]; if (!ref0) return; hevc_await_progress(s, ref0, ¤t_mv.mv[0], y0, nPbH); } if (current_mv.pred_flag & PF_L1) { ref1 = refPicList[1].ref[current_mv.ref_idx[1]]; if (!ref1) return; hevc_await_progress(s, ref1, ¤t_mv.mv[1], y0, nPbH); } if (current_mv.pred_flag == PF_L0) { int x0_c = x0 >> s->sps->hshift[1]; int y0_c = y0 >> s->sps->vshift[1]; int nPbW_c = nPbW >> s->sps->hshift[1]; int nPbH_c = nPbH >> s->sps->vshift[1]; luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame, ¤t_mv.mv[0], x0, y0, nPbW, nPbH, s->sh.luma_weight_l0[current_mv.ref_idx[0]], s->sh.luma_offset_l0[current_mv.ref_idx[0]]); chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1], 0, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv, s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]); chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2], 0, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv, s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]); } else if (current_mv.pred_flag == PF_L1) { int x0_c = x0 >> s->sps->hshift[1]; int y0_c = y0 >> s->sps->vshift[1]; int nPbW_c = nPbW >> s->sps->hshift[1]; int nPbH_c = nPbH >> s->sps->vshift[1]; luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame, ¤t_mv.mv[1], x0, y0, nPbW, nPbH, s->sh.luma_weight_l1[current_mv.ref_idx[1]], s->sh.luma_offset_l1[current_mv.ref_idx[1]]); chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1], 1, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv, s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]); chroma_mc_uni(s, dst2, s->frame->linesize[2], 
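The sap_write_header record above assembles the SAP announcement by hand: a flags byte (version 1, plus an IPv6 bit when needed), an authentication length of zero, a 16-bit message id hash, the origin address, then the "application/sdp" payload type string and the SDP text. A standalone sketch of that layout for the IPv4 case, derived only from the byte writes visible above; the hash value and helper names are invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build a minimal IPv4 SAP announcement header in front of an SDP payload,
 * mirroring the byte layout used in the muxer above. Returns total length. */
static int build_sap_packet(uint8_t *buf, size_t bufsize,
                            const uint8_t origin_ipv4[4],
                            uint16_t msg_id_hash, const char *sdp)
{
    size_t pos = 0, sdp_len = strlen(sdp);
    static const char payload_type[] = "application/sdp";

    if (bufsize < 8 + sizeof(payload_type) + sdp_len)
        return -1;

    buf[pos++] = 1 << 5;                 /* V=1, announcement, IPv4 origin */
    buf[pos++] = 0;                      /* authentication length */
    buf[pos++] = (uint8_t)(msg_id_hash >> 8);
    buf[pos++] = (uint8_t)(msg_id_hash & 0xff);
    memcpy(buf + pos, origin_ipv4, 4);   /* originating source address */
    pos += 4;
    memcpy(buf + pos, payload_type, sizeof(payload_type)); /* incl. NUL */
    pos += sizeof(payload_type);
    memcpy(buf + pos, sdp, sdp_len);     /* SDP text, not NUL terminated */
    pos += sdp_len;
    return (int)pos;
}

int main(void)
{
    uint8_t pkt[512];
    const uint8_t origin[4] = { 192, 168, 1, 10 };
    int len = build_sap_packet(pkt, sizeof(pkt), origin, 0x1234,
                               "v=0\r\no=- 0 0 IN IP4 192.168.1.10\r\ns=demo\r\n");
    printf("SAP packet length: %d bytes\n", len);
    return 0;
}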
ref1->frame->data[2], ref1->frame->linesize[2], 1, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv, s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]); } else if (current_mv.pred_flag == PF_BI) { int x0_c = x0 >> s->sps->hshift[1]; int y0_c = y0 >> s->sps->vshift[1]; int nPbW_c = nPbW >> s->sps->hshift[1]; int nPbH_c = nPbH >> s->sps->vshift[1]; luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame, ¤t_mv.mv[0], x0, y0, nPbW, nPbH, ref1->frame, ¤t_mv.mv[1], ¤t_mv); chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv, 0); chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv, 1); } }"} {"target": 0, "idx": 15016, "func": "static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) { VirtIOBlock *s = VIRTIO_BLK(vdev); VirtIOBlockReq *req; MultiReqBuffer mrb = {}; /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start * dataplane here instead of waiting for .set_status(). */ if (s->dataplane) { virtio_blk_data_plane_start(s->dataplane); return; } blk_io_plug(s->blk); while ((req = virtio_blk_get_request(s))) { virtio_blk_handle_request(req, &mrb); } if (mrb.num_reqs) { virtio_blk_submit_multireq(s->blk, &mrb); } blk_io_unplug(s->blk); }"} {"target": 0, "idx": 15051, "func": "static void av_noinline filter_mb_edgeh( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) { const unsigned int index_a = 52 + qp + h->slice_alpha_c0_offset; const int alpha = alpha_table[index_a]; const int beta = (beta_table+52)[qp + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]; tc[1] = tc0_table[index_a][bS[1]]; tc[2] = tc0_table[index_a][bS[2]]; tc[3] = tc0_table[index_a][bS[3]]; h->s.dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc); } else { h->s.dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta); } }"} {"target": 0, "idx": 15055, "func": "static void realize(DeviceState *d, Error **errp) { sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d); Object *root_container; char link_name[256]; gchar *child_name; Error *err = NULL; trace_spapr_drc_realize(spapr_drc_index(drc)); /* NOTE: we do this as part of realize/unrealize due to the fact * that the guest will communicate with the DRC via RTAS calls * referencing the global DRC index. 
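The hls_prediction_unit record above records the chosen motion data by writing the same MvField into every minimum-PU cell covered by the prediction block. A small sketch of that grid fill with simplified types; the real MvField carries two vectors, reference indices and prediction flags, and the grid dimensions here are made up:

#include <stdio.h>

#define LOG2_MIN_PU 2                 /* 4x4 minimum prediction unit, as in HEVC */
#define PIC_W_PU    8                 /* picture width in min-PU units (32 px) */
#define PIC_H_PU    8

typedef struct { int mv_x, mv_y; } MiniMv;   /* stand-in for the real MvField */

/* Write `mv` into every min-PU cell covered by the block at (x0, y0) of size
 * nPbW x nPbH (all in pixels): the same double loop pattern used above. */
static void fill_mv_grid(MiniMv grid[PIC_H_PU][PIC_W_PU],
                         int x0, int y0, int nPbW, int nPbH, MiniMv mv)
{
    int x_pu = x0 >> LOG2_MIN_PU;
    int y_pu = y0 >> LOG2_MIN_PU;
    for (int j = 0; j < nPbH >> LOG2_MIN_PU; j++)
        for (int i = 0; i < nPbW >> LOG2_MIN_PU; i++)
            grid[y_pu + j][x_pu + i] = mv;
}

int main(void)
{
    MiniMv grid[PIC_H_PU][PIC_W_PU] = { 0 };
    fill_mv_grid(grid, 8, 16, 16, 8, (MiniMv){ 3, -1 });
    for (int y = 0; y < PIC_H_PU; y++) {
        for (int x = 0; x < PIC_W_PU; x++)
            printf("%c", grid[y][x].mv_x ? '*' : '.');
        printf("\n");
    }
    return 0;
}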
By unlinking the DRC * from DRC_CONTAINER_PATH/ we effectively make it * inaccessible by the guest, since lookups rely on this path * existing in the composition tree */ root_container = container_get(object_get_root(), DRC_CONTAINER_PATH); snprintf(link_name, sizeof(link_name), \"%x\", spapr_drc_index(drc)); child_name = object_get_canonical_path_component(OBJECT(drc)); trace_spapr_drc_realize_child(spapr_drc_index(drc), child_name); object_property_add_alias(root_container, link_name, drc->owner, child_name, &err); g_free(child_name); if (err) { error_propagate(errp, err); return; } vmstate_register(DEVICE(drc), spapr_drc_index(drc), &vmstate_spapr_drc, drc); qemu_register_reset(drc_reset, drc); trace_spapr_drc_realize_complete(spapr_drc_index(drc)); }"} {"target": 0, "idx": 15061, "func": "static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn, int size, int sign) { TCGv_i32 r_asi, r_size, r_sign; r_asi = gen_get_asi(dc, insn); r_size = tcg_const_i32(size); r_sign = tcg_const_i32(sign); #ifdef TARGET_SPARC64 gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign); #else { TCGv_i64 t64 = tcg_temp_new_i64(); gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign); tcg_gen_trunc_i64_tl(dst, t64); tcg_temp_free_i64(t64); } #endif tcg_temp_free_i32(r_sign); tcg_temp_free_i32(r_size); tcg_temp_free_i32(r_asi); }"} {"target": 0, "idx": 15066, "func": "static inline float64 ucf64_itod(uint64_t i) { union { uint64_t i; float64 d; } v; v.i = i; return v.d; }"} {"target": 0, "idx": 15067, "func": "int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf) { int i, oi, oi_next, num_insns; #ifdef CONFIG_PROFILER { int n; n = s->gen_last_op_idx + 1; s->op_count += n; if (n > s->op_count_max) { s->op_count_max = n; } n = s->nb_temps; s->temp_count += n; if (n > s->temp_count_max) { s->temp_count_max = n; } } #endif #ifdef DEBUG_DISAS if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) { qemu_log(\"OP:\\n\"); tcg_dump_ops(s); qemu_log(\"\\n\"); } #endif #ifdef CONFIG_PROFILER s->opt_time -= profile_getclock(); #endif #ifdef USE_TCG_OPTIMIZATIONS tcg_optimize(s); #endif #ifdef CONFIG_PROFILER s->opt_time += profile_getclock(); s->la_time -= profile_getclock(); #endif tcg_liveness_analysis(s); #ifdef CONFIG_PROFILER s->la_time += profile_getclock(); #endif #ifdef DEBUG_DISAS if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) { qemu_log(\"OP after optimization and liveness analysis:\\n\"); tcg_dump_ops(s); qemu_log(\"\\n\"); } #endif tcg_reg_alloc_start(s); s->code_buf = gen_code_buf; s->code_ptr = gen_code_buf; tcg_out_tb_init(s); num_insns = -1; for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) { TCGOp * const op = &s->gen_op_buf[oi]; TCGArg * const args = &s->gen_opparam_buf[op->args]; TCGOpcode opc = op->opc; const TCGOpDef *def = &tcg_op_defs[opc]; uint16_t dead_args = s->op_dead_args[oi]; uint8_t sync_args = s->op_sync_args[oi]; oi_next = op->next; #ifdef CONFIG_PROFILER tcg_table_op_count[opc]++; #endif switch (opc) { case INDEX_op_mov_i32: case INDEX_op_mov_i64: tcg_reg_alloc_mov(s, def, args, dead_args, sync_args); break; case INDEX_op_movi_i32: case INDEX_op_movi_i64: tcg_reg_alloc_movi(s, args, dead_args, sync_args); break; case INDEX_op_insn_start: if (num_insns >= 0) { s->gen_insn_end_off[num_insns] = tcg_current_code_size(s); } num_insns++; for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { target_ulong a; #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2]; #else a = args[i]; #endif s->gen_insn_data[num_insns][i] = 
a; } break; case INDEX_op_discard: temp_dead(s, args[0]); break; case INDEX_op_set_label: tcg_reg_alloc_bb_end(s, s->reserved_regs); tcg_out_label(s, arg_label(args[0]), s->code_ptr); break; case INDEX_op_call: tcg_reg_alloc_call(s, op->callo, op->calli, args, dead_args, sync_args); break; default: /* Sanity check that we've not introduced any unhandled opcodes. */ if (def->flags & TCG_OPF_NOT_PRESENT) { tcg_abort(); } /* Note: in order to speed up the code, it would be much faster to have specialized register allocator functions for some common argument patterns */ tcg_reg_alloc_op(s, def, opc, args, dead_args, sync_args); break; } #ifndef NDEBUG check_regs(s); #endif /* Test for (pending) buffer overflow. The assumption is that any one operation beginning below the high water mark cannot overrun the buffer completely. Thus we can test for overflow after generating code without having to check during generation. */ if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) { return -1; } } tcg_debug_assert(num_insns >= 0); s->gen_insn_end_off[num_insns] = tcg_current_code_size(s); /* Generate TB finalization at the end of block */ tcg_out_tb_finalize(s); /* flush instruction cache */ flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr); return tcg_current_code_size(s); }"} {"target": 0, "idx": 15077, "func": "static int ehci_process_itd(EHCIState *ehci, EHCIitd *itd) { USBPort *port; USBDevice *dev; int ret; uint32_t i, j, len, pid, dir, devaddr, endp; uint32_t pg, off, ptr1, ptr2, max, mult; dir =(itd->bufptr[1] & ITD_BUFPTR_DIRECTION); devaddr = get_field(itd->bufptr[0], ITD_BUFPTR_DEVADDR); endp = get_field(itd->bufptr[0], ITD_BUFPTR_EP); max = get_field(itd->bufptr[1], ITD_BUFPTR_MAXPKT); mult = get_field(itd->bufptr[2], ITD_BUFPTR_MULT); for(i = 0; i < 8; i++) { if (itd->transact[i] & ITD_XACT_ACTIVE) { pg = get_field(itd->transact[i], ITD_XACT_PGSEL); off = itd->transact[i] & ITD_XACT_OFFSET_MASK; ptr1 = (itd->bufptr[pg] & ITD_BUFPTR_MASK); ptr2 = (itd->bufptr[pg+1] & ITD_BUFPTR_MASK); len = get_field(itd->transact[i], ITD_XACT_LENGTH); if (len > max * mult) { len = max * mult; } if (len > BUFF_SIZE) { return USB_RET_PROCERR; } qemu_sglist_init(&ehci->isgl, 2); if (off + len > 4096) { /* transfer crosses page border */ uint32_t len2 = off + len - 4096; uint32_t len1 = len - len2; qemu_sglist_add(&ehci->isgl, ptr1 + off, len1); qemu_sglist_add(&ehci->isgl, ptr2, len2); } else { qemu_sglist_add(&ehci->isgl, ptr1 + off, len); } pid = dir ? USB_TOKEN_IN : USB_TOKEN_OUT; usb_packet_setup(&ehci->ipacket, pid, devaddr, endp); usb_packet_map(&ehci->ipacket, &ehci->isgl); ret = USB_RET_NODEV; for (j = 0; j < NB_PORTS; j++) { port = &ehci->ports[j]; dev = port->dev; if (!(ehci->portsc[j] &(PORTSC_CONNECT))) { continue; } ret = usb_handle_packet(dev, &ehci->ipacket); if (ret != USB_RET_NODEV) { break; } } usb_packet_unmap(&ehci->ipacket); qemu_sglist_destroy(&ehci->isgl); #if 0 /* In isoch, there is no facility to indicate a NAK so let's * instead just complete a zero-byte transaction. Setting * DBERR seems too draconian. */ if (ret == USB_RET_NAK) { if (ehci->isoch_pause > 0) { DPRINTF(\"ISOCH: received a NAK but paused so returning\\n\"); ehci->isoch_pause--; return 0; } else if (ehci->isoch_pause == -1) { DPRINTF(\"ISOCH: recv NAK & isoch pause inactive, setting\\n\"); // Pause frindex for up to 50 msec waiting for data from // remote ehci->isoch_pause = 50; return 0; } else { DPRINTF(\"ISOCH: isoch pause timeout! 
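The ehci_process_itd record above splits a transfer that crosses a 4 KiB page boundary into two scatter-gather entries, one from each buffer pointer. A standalone sketch of just that split; the sg_entry struct and helper name are invented:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct sg_entry { uint64_t addr; uint32_t len; };

/* Split a transfer of `len` bytes starting at offset `off` within page `ptr1`,
 * continuing into page `ptr2` if it crosses the 4 KiB boundary.
 * Returns the number of sg entries produced (1 or 2). */
static int split_on_page(struct sg_entry sg[2],
                         uint64_t ptr1, uint64_t ptr2, uint32_t off, uint32_t len)
{
    if (off + len > PAGE_SIZE) {
        uint32_t len2 = off + len - PAGE_SIZE;   /* bytes spilling into page 2 */
        uint32_t len1 = len - len2;
        sg[0] = (struct sg_entry){ ptr1 + off, len1 };
        sg[1] = (struct sg_entry){ ptr2,       len2 };
        return 2;
    }
    sg[0] = (struct sg_entry){ ptr1 + off, len };
    return 1;
}

int main(void)
{
    struct sg_entry sg[2];
    int n = split_on_page(sg, 0x10000, 0x20000, 3900, 400);
    for (int i = 0; i < n; i++)
        printf("sg[%d]: addr=0x%llx len=%u\n",
               i, (unsigned long long)sg[i].addr, sg[i].len);
    return 0;
}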
return 0\\n\"); ret = 0; } } else { DPRINTF(\"ISOCH: received ACK, clearing pause\\n\"); ehci->isoch_pause = -1; } #else if (ret == USB_RET_NAK) { ret = 0; } #endif if (ret >= 0) { if (!dir) { /* OUT */ set_field(&itd->transact[i], len - ret, ITD_XACT_LENGTH); } else { /* IN */ set_field(&itd->transact[i], ret, ITD_XACT_LENGTH); } if (itd->transact[i] & ITD_XACT_IOC) { ehci_record_interrupt(ehci, USBSTS_INT); } } itd->transact[i] &= ~ITD_XACT_ACTIVE; } } return 0; }"} {"target": 0, "idx": 15082, "func": "static void qmp_input_pop(Visitor *v, void **obj) { QmpInputVisitor *qiv = to_qiv(v); StackObject *tos = QSLIST_FIRST(&qiv->stack); assert(tos && tos->qapi == obj); QSLIST_REMOVE_HEAD(&qiv->stack, node); qmp_input_stack_object_free(tos); }"} {"target": 1, "idx": 15094, "func": "static int vmd_read_header(AVFormatContext *s, AVFormatParameters *ap) { VmdDemuxContext *vmd = (VmdDemuxContext *)s->priv_data; ByteIOContext *pb = &s->pb; AVStream *st; unsigned int toc_offset; unsigned char *raw_frame_table; int raw_frame_table_size; offset_t current_offset; int i, j; unsigned int total_frames; int64_t video_pts_inc = 0; int64_t current_video_pts = 0; unsigned char chunk[BYTES_PER_FRAME_RECORD]; int lastframe = 0; /* fetch the main header, including the 2 header length bytes */ url_fseek(pb, 0, SEEK_SET); if (get_buffer(pb, vmd->vmd_header, VMD_HEADER_SIZE) != VMD_HEADER_SIZE) return AVERROR_IO; vmd->audio_sample_counter = 0; vmd->audio_frame_divisor = 1; vmd->audio_block_align = 1; /* start up the decoders */ st = av_new_stream(s, 0); if (!st) return AVERROR_NOMEM; av_set_pts_info(st, 33, 1, 90000); vmd->video_stream_index = st->index; st->codec->codec_type = CODEC_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_VMDVIDEO; st->codec->codec_tag = 0; /* no fourcc */ st->codec->width = LE_16(&vmd->vmd_header[12]); st->codec->height = LE_16(&vmd->vmd_header[14]); st->codec->time_base.num = 1; st->codec->time_base.den = 10; st->codec->extradata_size = VMD_HEADER_SIZE; st->codec->extradata = av_mallocz(VMD_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE); memcpy(st->codec->extradata, vmd->vmd_header, VMD_HEADER_SIZE); /* if sample rate is 0, assume no audio */ vmd->sample_rate = LE_16(&vmd->vmd_header[804]); if (vmd->sample_rate) { st = av_new_stream(s, 0); if (!st) return AVERROR_NOMEM; av_set_pts_info(st, 33, 1, 90000); vmd->audio_stream_index = st->index; st->codec->codec_type = CODEC_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_VMDAUDIO; st->codec->codec_tag = 0; /* no fourcc */ st->codec->channels = vmd->audio_channels = (vmd->vmd_header[811] & 0x80) ? 
2 : 1; st->codec->sample_rate = vmd->sample_rate; st->codec->block_align = vmd->audio_block_align = LE_16(&vmd->vmd_header[806]); if (st->codec->block_align & 0x8000) { st->codec->bits_per_sample = 16; st->codec->block_align = -(st->codec->block_align - 0x10000); vmd->audio_block_align = -(vmd->audio_block_align - 0x10000); } else { st->codec->bits_per_sample = 8; st->codec->bit_rate = st->codec->sample_rate * st->codec->bits_per_sample * st->codec->channels; /* for calculating pts */ vmd->audio_frame_divisor = st->codec->channels; video_pts_inc = 90000; video_pts_inc *= st->codec->block_align; video_pts_inc /= st->codec->sample_rate; video_pts_inc /= st->codec->channels; } else { /* if no audio, assume 10 frames/second */ video_pts_inc = 90000 / 10; toc_offset = LE_32(&vmd->vmd_header[812]); vmd->frame_count = LE_16(&vmd->vmd_header[6]); vmd->frames_per_block = LE_16(&vmd->vmd_header[18]); url_fseek(pb, toc_offset, SEEK_SET); raw_frame_table = NULL; vmd->frame_table = NULL; raw_frame_table_size = vmd->frame_count * 6; raw_frame_table = av_malloc(raw_frame_table_size); vmd->frame_table = av_malloc(vmd->frame_count * vmd->frames_per_block * sizeof(vmd_frame_t)); if (!raw_frame_table || !vmd->frame_table) { av_free(raw_frame_table); av_free(vmd->frame_table); return AVERROR_NOMEM; if (get_buffer(pb, raw_frame_table, raw_frame_table_size) != raw_frame_table_size) { av_free(raw_frame_table); av_free(vmd->frame_table); return AVERROR_IO; total_frames = 0; for (i = 0; i < vmd->frame_count; i++) { current_offset = LE_32(&raw_frame_table[6 * i + 2]); /* handle each entry in index block */ for (j = 0; j < vmd->frames_per_block; j++) { int type; uint32_t size; get_buffer(pb, chunk, BYTES_PER_FRAME_RECORD); type = chunk[0]; size = LE_32(&chunk[2]); if(!size) continue; switch(type) { case 1: /* Audio Chunk */ vmd->frame_table[total_frames].frame_offset = current_offset; vmd->frame_table[total_frames].stream_index = vmd->audio_stream_index; vmd->frame_table[total_frames].frame_size = size; memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD); total_frames++; break; case 2: /* Video Chunk */ vmd->frame_table[total_frames].frame_offset = current_offset; vmd->frame_table[total_frames].frame_size = size; vmd->frame_table[total_frames].stream_index = vmd->video_stream_index; memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD); vmd->frame_table[total_frames].pts = current_video_pts; if (lastframe) { vmd->frame_table[lastframe].pts = current_video_pts - video_pts_inc; lastframe = total_frames; total_frames++; break; current_offset += size; current_video_pts += video_pts_inc; av_free(raw_frame_table); vmd->current_frame = 0; vmd->frame_count = total_frames; return 0;"} {"target": 0, "idx": 15106, "func": "void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx) { int idct_algo= avctx->idct_algo; ff_put_pixels_clamped = c->put_pixels_clamped; ff_add_pixels_clamped = c->add_pixels_clamped; if (avctx->lowres == 0) { if(idct_algo == FF_IDCT_AUTO){ #if HAVE_IPP idct_algo = FF_IDCT_IPP; #elif HAVE_NEON idct_algo = FF_IDCT_SIMPLENEON; #elif HAVE_ARMV6 idct_algo = FF_IDCT_SIMPLEARMV6; #elif HAVE_ARMV5TE idct_algo = FF_IDCT_SIMPLEARMV5TE; #else idct_algo = FF_IDCT_ARM; #endif } if(idct_algo==FF_IDCT_ARM){ c->idct_put= j_rev_dct_ARM_put; c->idct_add= j_rev_dct_ARM_add; c->idct = j_rev_dct_ARM; c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; } else if (idct_algo==FF_IDCT_SIMPLEARM){ c->idct_put= simple_idct_ARM_put; c->idct_add= simple_idct_ARM_add; c->idct 
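The vmd_read_header record above stores block_align in a 16-bit field whose high bit doubles as a "16-bit samples" flag; when set, the field holds the negated block size, recovered as -(value - 0x10000). A small sketch of that decoding; the names are mine:

#include <stdint.h>
#include <stdio.h>

/* Decode the VMD-style block_align field: if bit 15 is set the stream uses
 * 16-bit samples and the field holds the negated block size (two's complement
 * in 16 bits), recovered as -(raw - 0x10000). */
static void decode_block_align(uint16_t raw, int *bits_per_sample, int *block_align)
{
    if (raw & 0x8000) {
        *bits_per_sample = 16;
        *block_align     = -((int)raw - 0x10000);
    } else {
        *bits_per_sample = 8;
        *block_align     = raw;
    }
}

int main(void)
{
    int bps, align;
    decode_block_align(0xFF00, &bps, &align);   /* 0xFF00 is -256 in 16 bits */
    printf("16-bit case: bits=%d block_align=%d\n", bps, align);
    decode_block_align(0x0200, &bps, &align);
    printf("8-bit case:  bits=%d block_align=%d\n", bps, align);
    return 0;
}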
= simple_idct_ARM; c->idct_permutation_type= FF_NO_IDCT_PERM; #if HAVE_ARMV6 } else if (idct_algo==FF_IDCT_SIMPLEARMV6){ c->idct_put= ff_simple_idct_put_armv6; c->idct_add= ff_simple_idct_add_armv6; c->idct = ff_simple_idct_armv6; c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; #endif #if HAVE_ARMV5TE } else if (idct_algo==FF_IDCT_SIMPLEARMV5TE){ c->idct_put= simple_idct_put_armv5te; c->idct_add= simple_idct_add_armv5te; c->idct = simple_idct_armv5te; c->idct_permutation_type = FF_NO_IDCT_PERM; #endif #if HAVE_IPP } else if (idct_algo==FF_IDCT_IPP){ c->idct_put= simple_idct_ipp_put; c->idct_add= simple_idct_ipp_add; c->idct = simple_idct_ipp; c->idct_permutation_type= FF_NO_IDCT_PERM; #endif #if HAVE_NEON } else if (idct_algo==FF_IDCT_SIMPLENEON){ c->idct_put= ff_simple_idct_put_neon; c->idct_add= ff_simple_idct_add_neon; c->idct = ff_simple_idct_neon; c->idct_permutation_type = FF_PARTTRANS_IDCT_PERM; } else if ((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER || CONFIG_THEORA_DECODER) && idct_algo==FF_IDCT_VP3){ c->idct_put= ff_vp3_idct_put_neon; c->idct_add= ff_vp3_idct_add_neon; c->idct = ff_vp3_idct_neon; c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM; #endif } } c->put_pixels_tab[0][0] = put_pixels16_arm; c->put_pixels_tab[0][1] = put_pixels16_x2_arm; c->put_pixels_tab[0][2] = put_pixels16_y2_arm; c->put_pixels_tab[0][3] = put_pixels16_xy2_arm; c->put_no_rnd_pixels_tab[0][0] = put_pixels16_arm; c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_arm; c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_arm; c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_arm; c->put_pixels_tab[1][0] = put_pixels8_arm; c->put_pixels_tab[1][1] = put_pixels8_x2_arm; c->put_pixels_tab[1][2] = put_pixels8_y2_arm; c->put_pixels_tab[1][3] = put_pixels8_xy2_arm; c->put_no_rnd_pixels_tab[1][0] = put_pixels8_arm; c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_arm; c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_arm; c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_arm; #if HAVE_ARMV5TE c->prefetch = ff_prefetch_arm; #endif #if HAVE_IWMMXT dsputil_init_iwmmxt(c, avctx); #endif #if HAVE_ARMVFP ff_float_init_arm_vfp(c, avctx); #endif #if HAVE_NEON ff_dsputil_init_neon(c, avctx); #endif }"} {"target": 0, "idx": 15108, "func": "av_cold void ff_h264_free_context(H264Context *h) { int i; ff_h264_free_tables(h); if (h->DPB) { for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) ff_h264_unref_picture(h, &h->DPB[i]); av_freep(&h->DPB); } h->cur_pic_ptr = NULL; for (i = 0; i < h->nb_slice_ctx; i++) av_freep(&h->slice_ctx[i].rbsp_buffer); av_freep(&h->slice_ctx); h->nb_slice_ctx = 0; for (i = 0; i < MAX_SPS_COUNT; i++) av_freep(h->sps_buffers + i); for (i = 0; i < MAX_PPS_COUNT; i++) av_freep(h->pps_buffers + i); }"} {"target": 1, "idx": 15115, "func": "bool aio_poll(AioContext *ctx, bool blocking) { AioHandler *node; int i, ret; bool progress; int64_t timeout; aio_context_acquire(ctx); progress = false; /* aio_notify can avoid the expensive event_notifier_set if * everything (file descriptors, bottom halves, timers) will * be re-evaluated before the next blocking poll(). This is * already true when aio_poll is called with blocking == false; * if blocking == true, it is only true after poll() returns, * so disable the optimization now. 
*/ if (blocking) { atomic_add(&ctx->notify_me, 2); } ctx->walking_handlers++; assert(npfd == 0); /* fill pollfds */ if (!aio_epoll_enabled(ctx)) { QLIST_FOREACH(node, &ctx->aio_handlers, node) { if (!node->deleted && node->pfd.events && aio_node_check(ctx, node->is_external)) { add_pollfd(node); } } } timeout = blocking ? aio_compute_timeout(ctx) : 0; /* wait until next event */ if (timeout) { aio_context_release(ctx); } if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) { AioHandler epoll_handler; epoll_handler.pfd.fd = ctx->epollfd; epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR; npfd = 0; add_pollfd(&epoll_handler); ret = aio_epoll(ctx, pollfds, npfd, timeout); } else { ret = qemu_poll_ns(pollfds, npfd, timeout); } if (blocking) { atomic_sub(&ctx->notify_me, 2); } if (timeout) { aio_context_acquire(ctx); } aio_notify_accept(ctx); /* if we have any readable fds, dispatch event */ if (ret > 0) { for (i = 0; i < npfd; i++) { nodes[i]->pfd.revents = pollfds[i].revents; } } npfd = 0; ctx->walking_handlers--; /* Run dispatch even if there were no readable fds to run timers */ if (aio_dispatch(ctx, ret > 0)) { progress = true; } aio_context_release(ctx); return progress; }"} {"target": 1, "idx": 15124, "func": "int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic) { av_assert0(0); }"} {"target": 1, "idx": 15125, "func": "static int latm_write_packet(AVFormatContext *s, AVPacket *pkt) { AVIOContext *pb = s->pb; PutBitContext bs; int i, len; uint8_t loas_header[] = \"\\x56\\xe0\\x00\"; uint8_t *buf = NULL; if (s->streams[0]->codec->codec_id == CODEC_ID_AAC_LATM) return ff_raw_write_packet(s, pkt); if (pkt->size > 2 && pkt->data[0] == 0xff && (pkt->data[1] >> 4) == 0xf) { av_log(s, AV_LOG_ERROR, \"ADTS header detected - ADTS will not be incorrectly muxed into LATM\\n\"); return AVERROR_INVALIDDATA; } if (pkt->size > 0x1fff) goto too_large; buf = av_malloc(pkt->size+1024); if (!buf) return AVERROR(ENOMEM); init_put_bits(&bs, buf, pkt->size+1024); latm_write_frame_header(s, &bs); /* PayloadLengthInfo() */ for (i = 0; i <= pkt->size-255; i+=255) put_bits(&bs, 8, 255); put_bits(&bs, 8, pkt->size-i); /* The LATM payload is written unaligned */ /* PayloadMux() */ if (pkt->size && (pkt->data[0] & 0xe1) == 0x81) { // Convert byte-aligned DSE to non-aligned. // Due to the input format encoding we know that // it is naturally byte-aligned in the input stream, // so there are no padding bits to account for. // To avoid having to add padding bits and rearrange // the whole stream we just remove the byte-align flag. // This allows us to remux our FATE AAC samples into latm // files that are still playable with minimal effort. 
put_bits(&bs, 8, pkt->data[0] & 0xfe); avpriv_copy_bits(&bs, pkt->data + 1, 8*pkt->size - 8); } else avpriv_copy_bits(&bs, pkt->data, 8*pkt->size); avpriv_align_put_bits(&bs); flush_put_bits(&bs); len = put_bits_count(&bs) >> 3; if (len > 0x1fff) goto too_large; loas_header[1] |= (len >> 8) & 0x1f; loas_header[2] |= len & 0xff; avio_write(pb, loas_header, 3); avio_write(pb, buf, len); av_free(buf); return 0; too_large: av_log(s, AV_LOG_ERROR, \"LATM packet size larger than maximum size 0x1fff\\n\"); av_free(buf); return AVERROR_INVALIDDATA; }"} {"target": 1, "idx": 15132, "func": "static void aio_read_done(void *opaque, int ret) { struct aio_ctx *ctx = opaque; struct timeval t2; gettimeofday(&t2, NULL); if (ret < 0) { printf(\"readv failed: %s\\n\", strerror(-ret)); goto out; } if (ctx->Pflag) { void *cmp_buf = g_malloc(ctx->qiov.size); memset(cmp_buf, ctx->pattern, ctx->qiov.size); if (memcmp(ctx->buf, cmp_buf, ctx->qiov.size)) { printf(\"Pattern verification failed at offset %\" PRId64 \", %zd bytes\\n\", ctx->offset, ctx->qiov.size); } g_free(cmp_buf); } if (ctx->qflag) { goto out; } if (ctx->vflag) { dump_buffer(ctx->buf, ctx->offset, ctx->qiov.size); } /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, ctx->t1); print_report(\"read\", &t2, ctx->offset, ctx->qiov.size, ctx->qiov.size, 1, ctx->Cflag); out: qemu_io_free(ctx->buf); g_free(ctx); }"} {"target": 1, "idx": 15133, "func": "static const char *read_ts(const char *buf, int *ts_start, int *ts_end, int *x1, int *y1, int *x2, int *y2) { int i, hs, ms, ss, he, me, se; for (i=0; i<2; i++) { /* try to read timestamps in either the first or second line */ int c = sscanf(buf, \"%d:%2d:%2d%*1[,.]%3d --> %d:%2d:%2d%*1[,.]%3d\" \"%*[ ]X1:%u X2:%u Y1:%u Y2:%u\", &hs, &ms, &ss, ts_start, &he, &me, &se, ts_end, x1, x2, y1, y2); buf += strcspn(buf, \"\\n\") + 1; if (c >= 8) { *ts_start = 100*(ss + 60*(ms + 60*hs)) + *ts_start/10; *ts_end = 100*(se + 60*(me + 60*he)) + *ts_end /10; return buf; } } return NULL; }"} {"target": 1, "idx": 15136, "func": "target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) { target_ulong pde_addr, pte_addr; uint64_t pte; target_phys_addr_t paddr; uint32_t page_offset; int page_size; if (env->cr[4] & CR4_PAE_MASK) { target_ulong pdpe_addr; uint64_t pde, pdpe; #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { uint64_t pml4e_addr, pml4e; int32_t sext; /* test virtual address sign extension */ sext = (int64_t)addr >> 47; if (sext != 0 && sext != -1) return -1; pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask; pml4e = ldq_phys(pml4e_addr); if (!(pml4e & PG_PRESENT_MASK)) return -1; pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask; pdpe = ldq_phys(pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) return -1; } else #endif { pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & env->a20_mask; pdpe = ldq_phys(pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) return -1; } pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask; pde = ldq_phys(pde_addr); if (!(pde & PG_PRESENT_MASK)) { return -1; } if (pde & PG_PSE_MASK) { /* 2 MB page */ page_size = 2048 * 1024; pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */ } else { /* 4 KB page */ pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask; page_size = 4096; pte = ldq_phys(pte_addr); } if (!(pte & PG_PRESENT_MASK)) return -1; } else { uint32_t pde; if (!(env->cr[0] & CR0_PG_MASK)) { pte = addr; page_size = 4096; 
} else { /* page directory entry */ pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask; pde = ldl_phys(pde_addr); if (!(pde & PG_PRESENT_MASK)) return -1; if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { pte = pde & ~0x003ff000; /* align to 4MB */ page_size = 4096 * 1024; } else { /* page directory entry */ pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask; pte = ldl_phys(pte_addr); if (!(pte & PG_PRESENT_MASK)) return -1; page_size = 4096; } } pte = pte & env->a20_mask; } page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); paddr = (pte & TARGET_PAGE_MASK) + page_offset; return paddr; }"} {"target": 1, "idx": 15147, "func": "void spapr_pci_rtas_init(void) { spapr_rtas_register(\"read-pci-config\", rtas_read_pci_config); spapr_rtas_register(\"write-pci-config\", rtas_write_pci_config); spapr_rtas_register(\"ibm,read-pci-config\", rtas_ibm_read_pci_config); spapr_rtas_register(\"ibm,write-pci-config\", rtas_ibm_write_pci_config); if (msi_supported) { spapr_rtas_register(\"ibm,query-interrupt-source-number\", rtas_ibm_query_interrupt_source_number); spapr_rtas_register(\"ibm,change-msi\", rtas_ibm_change_msi); } }"} {"target": 0, "idx": 15173, "func": "static void compare_sec_rs_finalize(SocketReadState *sec_rs) { CompareState *s = container_of(sec_rs, CompareState, sec_rs); if (packet_enqueue(s, SECONDARY_IN)) { trace_colo_compare_main(\"secondary: unsupported packet in\"); } else { /* compare connection */ g_queue_foreach(&s->conn_list, colo_compare_connection, s); } }"} {"target": 1, "idx": 15201, "func": "FDCtrl *fdctrl_init_isa(DriveInfo **fds) { ISADevice *dev; dev = isa_create(\"isa-fdc\"); if (fds[0]) { qdev_prop_set_drive_nofail(&dev->qdev, \"driveA\", fds[0]->bdrv); } if (fds[1]) { qdev_prop_set_drive_nofail(&dev->qdev, \"driveB\", fds[1]->bdrv); } if (qdev_init(&dev->qdev) < 0) return NULL; return &(DO_UPCAST(FDCtrlISABus, busdev, dev)->state); }"} {"target": 1, "idx": 15206, "func": "static int asf_read_picture(AVFormatContext *s, int len) { ASFContext *asf = s->priv_data; AVPacket pkt = { 0 }; const CodecMime *mime = ff_id3v2_mime_tags; enum AVCodecID id = AV_CODEC_ID_NONE; char mimetype[64]; uint8_t *desc = NULL; AVStream *st = NULL; int ret, type, picsize, desc_len; ASFStream *asf_st; /* type + picsize + mime + desc */ if (len < 1 + 4 + 2 + 2) { av_log(s, AV_LOG_ERROR, \"Invalid attached picture size: %d.\\n\", len); return AVERROR_INVALIDDATA; } /* picture type */ type = avio_r8(s->pb); len--; if (type >= FF_ARRAY_ELEMS(ff_id3v2_picture_types) || type < 0) { av_log(s, AV_LOG_WARNING, \"Unknown attached picture type: %d.\\n\", type); type = 0; } /* picture data size */ picsize = avio_rl32(s->pb); len -= 4; /* picture MIME type */ len -= avio_get_str16le(s->pb, len, mimetype, sizeof(mimetype)); while (mime->id != AV_CODEC_ID_NONE) { if (!strncmp(mime->str, mimetype, sizeof(mimetype))) { id = mime->id; break; } mime++; } if (id == AV_CODEC_ID_NONE) { av_log(s, AV_LOG_ERROR, \"Unknown attached picture mimetype: %s.\\n\", mimetype); return 0; } if (picsize >= len) { av_log(s, AV_LOG_ERROR, \"Invalid attached picture data size: %d >= %d.\\n\", picsize, len); return AVERROR_INVALIDDATA; } /* picture description */ desc_len = (len - picsize) * 2 + 1; desc = av_malloc(desc_len); if (!desc) return AVERROR(ENOMEM); len -= avio_get_str16le(s->pb, len - picsize, desc, desc_len); ret = av_get_packet(s->pb, &pkt, picsize); if (ret < 0) goto fail; st = avformat_new_stream(s, NULL); if (!st) { ret = AVERROR(ENOMEM); goto fail; } 
asf->asf_st[asf->nb_streams] = av_mallocz(sizeof(*asf_st)); asf_st = asf->asf_st[asf->nb_streams]; if (!asf_st) return AVERROR(ENOMEM); st->disposition |= AV_DISPOSITION_ATTACHED_PIC; st->codec->codec_type = asf_st->type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = id; st->attached_pic = pkt; st->attached_pic.stream_index = asf_st->index = st->index; st->attached_pic.flags |= AV_PKT_FLAG_KEY; asf->nb_streams++; if (*desc) { if (av_dict_set(&st->metadata, \"title\", desc, AV_DICT_DONT_STRDUP_VAL) < 0) av_log(s, AV_LOG_WARNING, \"av_dict_set failed.\\n\"); } else av_freep(&desc); if (av_dict_set(&st->metadata, \"comment\", ff_id3v2_picture_types[type], 0) < 0) av_log(s, AV_LOG_WARNING, \"av_dict_set failed.\\n\"); return 0; fail: av_freep(&desc); av_free_packet(&pkt); return ret; }"} {"target": 1, "idx": 15209, "func": "void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome) { const ARMCPRegInfo *ri = rip; switch (ri->accessfn(env, ri)) { case CP_ACCESS_OK: case CP_ACCESS_TRAP: break; case CP_ACCESS_TRAP_UNCATEGORIZED: env->exception.syndrome = syn_uncategorized(); break; default: g_assert_not_reached();"} {"target": 1, "idx": 15239, "func": "static char *scsibus_get_dev_path(DeviceState *dev) { SCSIDevice *d = DO_UPCAST(SCSIDevice, qdev, dev); DeviceState *hba = dev->parent_bus->parent; char *id = NULL; if (hba && hba->parent_bus && hba->parent_bus->info->get_dev_path) { id = hba->parent_bus->info->get_dev_path(hba); } if (id) { return g_strdup_printf(\"%s/%d:%d:%d\", id, d->channel, d->id, d->lun); } else { return g_strdup_printf(\"%d:%d:%d\", d->channel, d->id, d->lun); } }"} {"target": 0, "idx": 15249, "func": "static int eval_lpc_coeffs(const float *in, float *tgt, int n) { int x, y; double f0, f1, f2; if (in[n] == 0) return 0; if ((f0 = *in) <= 0) return 0; in--; // To avoid a -1 subtraction in the inner loop for (x=1; x <= n; x++) { f1 = in[x+1]; for (y=0; y < x - 1; y++) f1 += in[x-y]*tgt[y]; tgt[x-1] = f2 = -f1/f0; for (y=0; y < x >> 1; y++) { float temp = tgt[y] + tgt[x-y-2]*f2; tgt[x-y-2] += tgt[y]*f2; tgt[y] = temp; } if ((f0 += f1*f2) < 0) return 0; } return 1; }"} {"target": 0, "idx": 15252, "func": "int swr_init(struct SwrContext *s){ s->in_buffer_index= 0; s->in_buffer_count= 0; s->resample_in_constraint= 0; free_temp(&s->postin); free_temp(&s->midbuf); free_temp(&s->preout); free_temp(&s->in_buffer); free_temp(&s->dither); swri_audio_convert_free(&s-> in_convert); swri_audio_convert_free(&s->out_convert); swri_audio_convert_free(&s->full_convert); swri_rematrix_free(s); s->flushed = 0; if(s-> in_sample_fmt >= AV_SAMPLE_FMT_NB){ av_log(s, AV_LOG_ERROR, \"Requested input sample format %d is invalid\\n\", s->in_sample_fmt); return AVERROR(EINVAL); } if(s->out_sample_fmt >= AV_SAMPLE_FMT_NB){ av_log(s, AV_LOG_ERROR, \"Requested output sample format %d is invalid\\n\", s->out_sample_fmt); return AVERROR(EINVAL); } //FIXME should we allow/support using FLT on material that doesnt need it ? 
if(av_get_planar_sample_fmt(s->in_sample_fmt) <= AV_SAMPLE_FMT_S16P || s->int_sample_fmt==AV_SAMPLE_FMT_S16P){ s->int_sample_fmt= AV_SAMPLE_FMT_S16P; }else s->int_sample_fmt= AV_SAMPLE_FMT_FLTP; if( s->int_sample_fmt != AV_SAMPLE_FMT_S16P &&s->int_sample_fmt != AV_SAMPLE_FMT_S32P &&s->int_sample_fmt != AV_SAMPLE_FMT_FLTP){ av_log(s, AV_LOG_ERROR, \"Requested sample format %s is not supported internally, S16/S32/FLT is supported\\n\", av_get_sample_fmt_name(s->int_sample_fmt)); return AVERROR(EINVAL); } set_audiodata_fmt(&s-> in, s-> in_sample_fmt); set_audiodata_fmt(&s->out, s->out_sample_fmt); if (s->out_sample_rate!=s->in_sample_rate || (s->flags & SWR_FLAG_RESAMPLE)){ s->resample = swri_resample_init(s->resample, s->out_sample_rate, s->in_sample_rate, s->filter_size, s->phase_shift, s->linear_interp, s->cutoff, s->int_sample_fmt); }else swri_resample_free(&s->resample); if( s->int_sample_fmt != AV_SAMPLE_FMT_S16P && s->int_sample_fmt != AV_SAMPLE_FMT_S32P && s->int_sample_fmt != AV_SAMPLE_FMT_FLTP && s->resample){ av_log(s, AV_LOG_ERROR, \"Resampling only supported with internal s16/s32/flt\\n\"); return -1; } if(!s->used_ch_count) s->used_ch_count= s->in.ch_count; if(s->used_ch_count && s-> in_ch_layout && s->used_ch_count != av_get_channel_layout_nb_channels(s-> in_ch_layout)){ av_log(s, AV_LOG_WARNING, \"Input channel layout has a different number of channels than the number of used channels, ignoring layout\\n\"); s-> in_ch_layout= 0; } if(!s-> in_ch_layout) s-> in_ch_layout= av_get_default_channel_layout(s->used_ch_count); if(!s->out_ch_layout) s->out_ch_layout= av_get_default_channel_layout(s->out.ch_count); s->rematrix= s->out_ch_layout !=s->in_ch_layout || s->rematrix_volume!=1.0 || s->rematrix_custom; #define RSC 1 //FIXME finetune if(!s-> in.ch_count) s-> in.ch_count= av_get_channel_layout_nb_channels(s-> in_ch_layout); if(!s->used_ch_count) s->used_ch_count= s->in.ch_count; if(!s->out.ch_count) s->out.ch_count= av_get_channel_layout_nb_channels(s->out_ch_layout); if(!s-> in.ch_count){ av_assert0(!s->in_ch_layout); av_log(s, AV_LOG_ERROR, \"Input channel count and layout are unset\\n\"); return -1; } if ((!s->out_ch_layout || !s->in_ch_layout) && s->used_ch_count != s->out.ch_count && !s->rematrix_custom) { av_log(s, AV_LOG_ERROR, \"Rematrix is needed but there is not enough information to do it\\n\"); return -1; } av_assert0(s->used_ch_count); av_assert0(s->out.ch_count); s->resample_first= RSC*s->out.ch_count/s->in.ch_count - RSC < s->out_sample_rate/(float)s-> in_sample_rate - 1.0; s->in_buffer= s->in; if(!s->resample && !s->rematrix && !s->channel_map && !s->dither_method){ s->full_convert = swri_audio_convert_alloc(s->out_sample_fmt, s-> in_sample_fmt, s-> in.ch_count, NULL, 0); return 0; } s->in_convert = swri_audio_convert_alloc(s->int_sample_fmt, s-> in_sample_fmt, s->used_ch_count, s->channel_map, 0); s->out_convert= swri_audio_convert_alloc(s->out_sample_fmt, s->int_sample_fmt, s->out.ch_count, NULL, 0); s->postin= s->in; s->preout= s->out; s->midbuf= s->in; if(s->channel_map){ s->postin.ch_count= s->midbuf.ch_count= s->used_ch_count; if(s->resample) s->in_buffer.ch_count= s->used_ch_count; } if(!s->resample_first){ s->midbuf.ch_count= s->out.ch_count; if(s->resample) s->in_buffer.ch_count = s->out.ch_count; } set_audiodata_fmt(&s->postin, s->int_sample_fmt); set_audiodata_fmt(&s->midbuf, s->int_sample_fmt); set_audiodata_fmt(&s->preout, s->int_sample_fmt); if(s->resample){ set_audiodata_fmt(&s->in_buffer, s->int_sample_fmt); } s->dither = s->preout; if(s->rematrix 
|| s->dither_method) return swri_rematrix_init(s); return 0; }"} {"target": 1, "idx": 15260, "func": "void pc_hot_add_cpu(const int64_t id, Error **errp) { DeviceState *icc_bridge; int64_t apic_id = x86_cpu_apic_id_from_index(id); if (cpu_exists(apic_id)) { error_setg(errp, \"Unable to add CPU: %\" PRIi64 \", it already exists\", id); if (id >= max_cpus) { error_setg(errp, \"Unable to add CPU: %\" PRIi64 \", max allowed: %d\", id, max_cpus - 1); icc_bridge = DEVICE(object_resolve_path_type(\"icc-bridge\", TYPE_ICC_BRIDGE, NULL)); pc_new_cpu(current_cpu_model, apic_id, icc_bridge, errp);"} {"target": 1, "idx": 15288, "func": "static void xlnx_ep108_init(MachineState *machine) { XlnxEP108 *s = g_new0(XlnxEP108, 1); Error *err = NULL; object_initialize(&s->soc, sizeof(s->soc), TYPE_XLNX_ZYNQMP); object_property_add_child(OBJECT(machine), \"soc\", OBJECT(&s->soc), &error_abort); object_property_set_bool(OBJECT(&s->soc), true, \"realized\", &err); if (err) { error_report(\"%s\", error_get_pretty(err)); exit(1); } if (machine->ram_size > EP108_MAX_RAM_SIZE) { error_report(\"WARNING: RAM size \" RAM_ADDR_FMT \" above max supported, \" \"reduced to %llx\", machine->ram_size, EP108_MAX_RAM_SIZE); machine->ram_size = EP108_MAX_RAM_SIZE; } if (machine->ram_size <= 0x08000000) { qemu_log(\"WARNING: RAM size \" RAM_ADDR_FMT \" is small for EP108\", machine->ram_size); } memory_region_allocate_system_memory(&s->ddr_ram, NULL, \"ddr-ram\", machine->ram_size); memory_region_add_subregion(get_system_memory(), 0, &s->ddr_ram); xlnx_ep108_binfo.ram_size = machine->ram_size; xlnx_ep108_binfo.kernel_filename = machine->kernel_filename; xlnx_ep108_binfo.kernel_cmdline = machine->kernel_cmdline; xlnx_ep108_binfo.initrd_filename = machine->initrd_filename; xlnx_ep108_binfo.loader_start = 0; arm_load_kernel(s->soc.boot_cpu_ptr, &xlnx_ep108_binfo); }"} {"target": 1, "idx": 15290, "func": "static int parse_inputs(const char **buf, AVFilterInOut **curr_inputs, AVFilterInOut **open_outputs, AVClass *log_ctx) { int pad = 0; while (**buf == '[') { char *name = parse_link_name(buf, log_ctx); AVFilterInOut *match; if (!name) return AVERROR(EINVAL); /* First check if the label is not in the open_outputs list */ match = extract_inout(name, open_outputs); if (match) { av_free(name); } else { /* Not in the list, so add it as an input */ match = av_mallocz(sizeof(AVFilterInOut)); match->name = name; match->pad_idx = pad; } insert_inout(curr_inputs, match); *buf += strspn(*buf, WHITESPACES); pad++; } return pad; }"} {"target": 1, "idx": 15301, "func": "void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, int linesize, vp8_mc_func mc_func[3][3]) { uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2]; if (AV_RN32A(mv)) { int mx = mv->x&7, mx_idx = subpel_idx[0][mx]; int my = mv->y&7, my_idx = subpel_idx[0][my]; x_off += mv->x >> 3; y_off += mv->y >> 3; // edge emulation src1 += y_off * linesize + x_off; src2 += y_off * linesize + x_off; ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0); if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] || y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) { s->vdsp.emulated_edge_mc(td->edge_emu_buffer, src1 - my_idx * linesize - mx_idx, linesize, block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my], x_off - mx_idx, y_off - my_idx, width, height); src1 = td->edge_emu_buffer + mx_idx + linesize 
* my_idx; mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my); s->vdsp.emulated_edge_mc(td->edge_emu_buffer, src2 - my_idx * linesize - mx_idx, linesize, block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my], x_off - mx_idx, y_off - my_idx, width, height); src2 = td->edge_emu_buffer + mx_idx + linesize * my_idx; mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my); } else { mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my); mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my); } } else { ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0); mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0); mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0); } }"} {"target": 0, "idx": 15321, "func": "static void rgb24_to_yuv444p(AVPicture *dst, AVPicture *src, int width, int height) { int src_wrap, x, y; int r, g, b; uint8_t *lum, *cb, *cr; const uint8_t *p; lum = dst->data[0]; cb = dst->data[1]; cr = dst->data[2]; src_wrap = src->linesize[0] - width * BPP; p = src->data[0]; for(y=0;ylinesize[0] - width; cb += dst->linesize[1] - width; cr += dst->linesize[2] - width; } }"} {"target": 0, "idx": 15341, "func": "static char *pcibus_get_fw_dev_path(DeviceState *dev) { PCIDevice *d = (PCIDevice *)dev; char path[50], name[33]; int off; off = snprintf(path, sizeof(path), \"%s@%x\", pci_dev_fw_name(dev, name, sizeof name), PCI_SLOT(d->devfn)); if (PCI_FUNC(d->devfn)) snprintf(path + off, sizeof(path) + off, \",%x\", PCI_FUNC(d->devfn)); return strdup(path); }"} {"target": 1, "idx": 15368, "func": "static int qio_channel_websock_handshake_send_response(QIOChannelWebsock *ioc, const char *key, Error **errp) { char combined_key[QIO_CHANNEL_WEBSOCK_CLIENT_KEY_LEN + QIO_CHANNEL_WEBSOCK_GUID_LEN + 1]; char *accept = NULL, *response = NULL; size_t responselen; g_strlcpy(combined_key, key, QIO_CHANNEL_WEBSOCK_CLIENT_KEY_LEN + 1); g_strlcat(combined_key, QIO_CHANNEL_WEBSOCK_GUID, QIO_CHANNEL_WEBSOCK_CLIENT_KEY_LEN + QIO_CHANNEL_WEBSOCK_GUID_LEN + 1); /* hash and encode it */ if (qcrypto_hash_base64(QCRYPTO_HASH_ALG_SHA1, combined_key, QIO_CHANNEL_WEBSOCK_CLIENT_KEY_LEN + QIO_CHANNEL_WEBSOCK_GUID_LEN, &accept, errp) < 0) { return -1; } response = g_strdup_printf(QIO_CHANNEL_WEBSOCK_HANDSHAKE_RESPONSE, accept); responselen = strlen(response); buffer_reserve(&ioc->encoutput, responselen); buffer_append(&ioc->encoutput, response, responselen); g_free(accept); g_free(response); return 0; }"} {"target": 1, "idx": 15373, "func": "void dpy_gfx_copy(QemuConsole *con, int src_x, int src_y, int dst_x, int dst_y, int w, int h) { DisplayState *s = con->ds; DisplayChangeListener *dcl; if (!qemu_console_is_visible(con)) { return; } QLIST_FOREACH(dcl, &s->listeners, next) { if (con != (dcl->con ? dcl->con : active_console)) { continue; } if (dcl->ops->dpy_gfx_copy) { dcl->ops->dpy_gfx_copy(dcl, src_x, src_y, dst_x, dst_y, w, h); } else { /* TODO */ dcl->ops->dpy_gfx_update(dcl, dst_x, dst_y, w, h); } } }"} {"target": 1, "idx": 15388, "func": "static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, TCGArg *args, TCGOpDef *tcg_op_defs) { int i, nb_ops, op_index, nb_temps, nb_globals, nb_call_args; TCGOpcode op; const TCGOpDef *def; TCGArg *gen_args; TCGArg tmp; TCGCond cond; /* Array VALS has an element for each temp. If this temp holds a constant then its value is kept in VALS' element. 
If this temp is a copy of other ones then the other copies are available through the doubly linked circular list. */ nb_temps = s->nb_temps; nb_globals = s->nb_globals; memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info)); nb_ops = tcg_opc_ptr - gen_opc_buf; gen_args = args; for (op_index = 0; op_index < nb_ops; op_index++) { op = gen_opc_buf[op_index]; def = &tcg_op_defs[op]; /* Do copy propagation */ if (op == INDEX_op_call) { int nb_oargs = args[0] >> 16; int nb_iargs = args[0] & 0xffff; for (i = nb_oargs + 1; i < nb_oargs + nb_iargs + 1; i++) { if (temps[args[i]].state == TCG_TEMP_COPY) { args[i] = find_better_copy(s, args[i]); } else { for (i = def->nb_oargs; i < def->nb_oargs + def->nb_iargs; i++) { if (temps[args[i]].state == TCG_TEMP_COPY) { args[i] = find_better_copy(s, args[i]); /* For commutative operations make constant second argument */ CASE_OP_32_64(add): CASE_OP_32_64(mul): CASE_OP_32_64(and): CASE_OP_32_64(or): CASE_OP_32_64(eqv): CASE_OP_32_64(nand): CASE_OP_32_64(nor): if (temps[args[1]].state == TCG_TEMP_CONST) { tmp = args[1]; args[1] = args[2]; args[2] = tmp; CASE_OP_32_64(brcond): if (temps[args[0]].state == TCG_TEMP_CONST && temps[args[1]].state != TCG_TEMP_CONST) { tmp = args[0]; args[0] = args[1]; args[1] = tmp; args[2] = tcg_swap_cond(args[2]); CASE_OP_32_64(setcond): if (temps[args[1]].state == TCG_TEMP_CONST && temps[args[2]].state != TCG_TEMP_CONST) { tmp = args[1]; args[1] = args[2]; args[2] = tmp; args[3] = tcg_swap_cond(args[3]); CASE_OP_32_64(movcond): cond = args[5]; if (temps[args[1]].state == TCG_TEMP_CONST && temps[args[2]].state != TCG_TEMP_CONST) { tmp = args[1]; args[1] = args[2]; args[2] = tmp; cond = tcg_swap_cond(cond); /* For movcond, we canonicalize the \"false\" input reg to match the destination reg so that the tcg backend can implement a \"move if true\" operation. */ if (args[0] == args[3]) { tmp = args[3]; args[3] = args[4]; args[4] = tmp; cond = tcg_invert_cond(cond); args[5] = cond; /* Simplify expressions for \"shift/rot r, 0, a => movi r, 0\" */ CASE_OP_32_64(shl): CASE_OP_32_64(shr): CASE_OP_32_64(sar): CASE_OP_32_64(rotl): CASE_OP_32_64(rotr): if (temps[args[1]].state == TCG_TEMP_CONST && temps[args[1]].val == 0) { /* Simplify expression for \"op r, a, 0 => mov r, a\" cases */ CASE_OP_32_64(add): CASE_OP_32_64(shl): CASE_OP_32_64(shr): CASE_OP_32_64(sar): CASE_OP_32_64(rotl): CASE_OP_32_64(rotr): CASE_OP_32_64(or): if (temps[args[1]].state == TCG_TEMP_CONST) { /* Proceed with possible constant folding. */ if (temps[args[2]].state == TCG_TEMP_CONST && temps[args[2]].val == 0) { if (temps_are_copies(args[0], args[1])) { gen_opc_buf[op_index] = INDEX_op_nop; } else { gen_opc_buf[op_index] = op_to_mov(op); tcg_opt_gen_mov(s, gen_args, args[0], args[1]); /* Simplify expression for \"op r, a, 0 => movi r, 0\" cases */ CASE_OP_32_64(and): CASE_OP_32_64(mul): if ((temps[args[2]].state == TCG_TEMP_CONST && temps[args[2]].val == 0)) { /* Simplify expression for \"op r, a, a => mov r, a\" cases */ CASE_OP_32_64(or): CASE_OP_32_64(and): if (temps_are_copies(args[0], args[1])) { gen_opc_buf[op_index] = INDEX_op_nop; } else { gen_opc_buf[op_index] = op_to_mov(op); tcg_opt_gen_mov(s, gen_args, args[0], args[1]); /* Propagate constants through copy operations and do constant folding. Constants will be substituted to arguments by register allocator where needed and possible. Also detect copies. 
*/ CASE_OP_32_64(mov): if (temps_are_copies(args[0], args[1])) { args += 2; gen_opc_buf[op_index] = INDEX_op_nop; if (temps[args[1]].state != TCG_TEMP_CONST) { tcg_opt_gen_mov(s, gen_args, args[0], args[1]); args += 2; /* Source argument is constant. Rewrite the operation and let movi case handle it. */ op = op_to_movi(op); gen_opc_buf[op_index] = op; args[1] = temps[args[1]].val; /* fallthrough */ CASE_OP_32_64(movi): tcg_opt_gen_movi(gen_args, args[0], args[1]); args += 2; CASE_OP_32_64(not): CASE_OP_32_64(neg): CASE_OP_32_64(ext8s): CASE_OP_32_64(ext8u): CASE_OP_32_64(ext16s): CASE_OP_32_64(ext16u): case INDEX_op_ext32s_i64: case INDEX_op_ext32u_i64: if (temps[args[1]].state == TCG_TEMP_CONST) { tmp = do_constant_folding(op, temps[args[1]].val, 0); tcg_opt_gen_movi(gen_args, args[0], tmp); } else { reset_temp(args[0]); gen_args[0] = args[0]; gen_args[1] = args[1]; args += 2; CASE_OP_32_64(add): CASE_OP_32_64(mul): CASE_OP_32_64(or): CASE_OP_32_64(and): CASE_OP_32_64(shl): CASE_OP_32_64(shr): CASE_OP_32_64(sar): CASE_OP_32_64(rotl): CASE_OP_32_64(rotr): CASE_OP_32_64(andc): CASE_OP_32_64(orc): CASE_OP_32_64(eqv): CASE_OP_32_64(nand): CASE_OP_32_64(nor): if (temps[args[1]].state == TCG_TEMP_CONST && temps[args[2]].state == TCG_TEMP_CONST) { tmp = do_constant_folding(op, temps[args[1]].val, temps[args[2]].val); tcg_opt_gen_movi(gen_args, args[0], tmp); } else { reset_temp(args[0]); gen_args[0] = args[0]; gen_args[1] = args[1]; gen_args[2] = args[2]; gen_args += 3; CASE_OP_32_64(setcond): if (temps[args[1]].state == TCG_TEMP_CONST && temps[args[2]].state == TCG_TEMP_CONST) { tmp = do_constant_folding_cond(op, temps[args[1]].val, temps[args[2]].val, args[3]); tcg_opt_gen_movi(gen_args, args[0], tmp); } else { reset_temp(args[0]); gen_args[0] = args[0]; gen_args[1] = args[1]; gen_args[2] = args[2]; gen_args[3] = args[3]; gen_args += 4; args += 4; CASE_OP_32_64(brcond): if (temps[args[0]].state == TCG_TEMP_CONST && temps[args[1]].state == TCG_TEMP_CONST) { if (do_constant_folding_cond(op, temps[args[0]].val, temps[args[1]].val, args[2])) { memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info)); gen_opc_buf[op_index] = INDEX_op_br; gen_args[0] = args[3]; gen_args += 1; } else { gen_opc_buf[op_index] = INDEX_op_nop; } else { memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info)); reset_temp(args[0]); gen_args[0] = args[0]; gen_args[1] = args[1]; gen_args[2] = args[2]; gen_args[3] = args[3]; gen_args += 4; args += 4; CASE_OP_32_64(movcond): if (temps[args[1]].state == TCG_TEMP_CONST && temps[args[2]].state == TCG_TEMP_CONST) { tmp = do_constant_folding_cond(op, temps[args[1]].val, temps[args[2]].val, args[5]); if (temps_are_copies(args[0], args[4-tmp])) { gen_opc_buf[op_index] = INDEX_op_nop; } else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) { tcg_opt_gen_movi(gen_args, args[0], temps[args[4-tmp]].val); } else { gen_opc_buf[op_index] = op_to_mov(op); tcg_opt_gen_mov(s, gen_args, args[0], args[4-tmp]); } else { reset_temp(args[0]); gen_args[0] = args[0]; gen_args[1] = args[1]; gen_args[2] = args[2]; gen_args[3] = args[3]; gen_args[4] = args[4]; gen_args[5] = args[5]; gen_args += 6; args += 6; case INDEX_op_call: nb_call_args = (args[0] >> 16) + (args[0] & 0xffff); if (!(args[nb_call_args + 1] & (TCG_CALL_CONST | TCG_CALL_PURE))) { for (i = 0; i < nb_globals; i++) { reset_temp(i); for (i = 0; i < (args[0] >> 16); i++) { reset_temp(args[i + 1]); i = nb_call_args + 3; while (i) { *gen_args = *args; args++; gen_args++; i--; /* Default case: we do know nothing about operation so no 
propagation is done. We trash everything if the operation is the end of a basic block, otherwise we only trash the output args. */ if (def->flags & TCG_OPF_BB_END) { memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info)); } else { for (i = 0; i < def->nb_oargs; i++) { reset_temp(args[i]); for (i = 0; i < def->nb_args; i++) { gen_args[i] = args[i]; args += def->nb_args; gen_args += def->nb_args; return gen_args;"} {"target": 1, "idx": 15389, "func": "static void vnc_connect(VncDisplay *vd, int csock, bool skipauth, bool websocket) { VncState *vs = g_malloc0(sizeof(VncState)); int i; vs->csock = csock; vs->vd = vd; if (skipauth) { vs->auth = VNC_AUTH_NONE; vs->subauth = VNC_AUTH_INVALID; } else { if (websocket) { vs->auth = vd->ws_auth; vs->subauth = VNC_AUTH_INVALID; } else { vs->auth = vd->auth; vs->subauth = vd->subauth; } } VNC_DEBUG(\"Client sock=%d ws=%d auth=%d subauth=%d\\n\", csock, websocket, vs->auth, vs->subauth); vs->lossy_rect = g_malloc0(VNC_STAT_ROWS * sizeof (*vs->lossy_rect)); for (i = 0; i < VNC_STAT_ROWS; ++i) { vs->lossy_rect[i] = g_malloc0(VNC_STAT_COLS * sizeof (uint8_t)); } VNC_DEBUG(\"New client on socket %d\\n\", csock); update_displaychangelistener(&vd->dcl, VNC_REFRESH_INTERVAL_BASE); qemu_set_nonblock(vs->csock); if (websocket) { vs->websocket = 1; if (vd->ws_tls) { qemu_set_fd_handler(vs->csock, vncws_tls_handshake_io, NULL, vs); } else { qemu_set_fd_handler(vs->csock, vncws_handshake_read, NULL, vs); } } else { qemu_set_fd_handler(vs->csock, vnc_client_read, NULL, vs); } vnc_client_cache_addr(vs); vnc_qmp_event(vs, QAPI_EVENT_VNC_CONNECTED); vnc_set_share_mode(vs, VNC_SHARE_MODE_CONNECTING); if (!vs->websocket) { vnc_init_state(vs); } if (vd->num_connecting > vd->connections_limit) { QTAILQ_FOREACH(vs, &vd->clients, next) { if (vs->share_mode == VNC_SHARE_MODE_CONNECTING) { vnc_disconnect_start(vs); return; } } } }"} {"target": 0, "idx": 15401, "func": "static int ftp_current_dir(FTPContext *s) { char *res = NULL, *start = NULL, *end = NULL; int i; const char *command = \"PWD\\r\\n\"; const int pwd_codes[] = {257, 0}; if (!ftp_send_command(s, command, pwd_codes, &res)) goto fail; for (i = 0; res[i]; ++i) { if (res[i] == '\"') { if (!start) { start = res + i + 1; continue; } end = res + i; break; } } if (!end) goto fail; if (end > res && end[-1] == '/') { end[-1] = '\\0'; } else *end = '\\0'; av_strlcpy(s->path, start, sizeof(s->path)); av_free(res); return 0; fail: av_free(res); return AVERROR(EIO); }"} {"target": 0, "idx": 15411, "func": "int qemu_opts_print(QemuOpts *opts, void *dummy) { QemuOpt *opt; fprintf(stderr, \"%s: %s:\", opts->list->name, opts->id ? 
opts->id : \"\"); TAILQ_FOREACH(opt, &opts->head, next) { fprintf(stderr, \" %s=\\\"%s\\\"\", opt->name, opt->str); } fprintf(stderr, \"\\n\"); return 0; }"} {"target": 0, "idx": 15420, "func": "static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile, AVFrame *picture) { int compno, reslevelno, bandno; int x, y; uint8_t *line; Jpeg2000T1Context t1; /* Loop on tile components */ for (compno = 0; compno < s->ncomponents; compno++) { Jpeg2000Component *comp = tile->comp + compno; Jpeg2000CodingStyle *codsty = tile->codsty + compno; /* Loop on resolution levels */ for (reslevelno = 0; reslevelno < codsty->nreslevels2decode; reslevelno++) { Jpeg2000ResLevel *rlevel = comp->reslevel + reslevelno; /* Loop on bands */ for (bandno = 0; bandno < rlevel->nbands; bandno++) { int nb_precincts, precno; Jpeg2000Band *band = rlevel->band + bandno; int cblkno = 0, bandpos; bandpos = bandno + (reslevelno > 0); if (band->coord[0][0] == band->coord[0][1] || band->coord[1][0] == band->coord[1][1]) continue; nb_precincts = rlevel->num_precincts_x * rlevel->num_precincts_y; /* Loop on precincts */ for (precno = 0; precno < nb_precincts; precno++) { Jpeg2000Prec *prec = band->prec + precno; /* Loop on codeblocks */ for (cblkno = 0; cblkno < prec->nb_codeblocks_width * prec->nb_codeblocks_height; cblkno++) { int x, y; Jpeg2000Cblk *cblk = prec->cblk + cblkno; decode_cblk(s, codsty, &t1, cblk, cblk->coord[0][1] - cblk->coord[0][0], cblk->coord[1][1] - cblk->coord[1][0], bandpos); x = cblk->coord[0][0]; y = cblk->coord[1][0]; if (codsty->transform == FF_DWT97) dequantization_float(x, y, cblk, comp, &t1, band); else dequantization_int(x, y, cblk, comp, &t1, band); } /* end cblk */ } /*end prec */ } /* end band */ } /* end reslevel */ /* inverse DWT */ ff_dwt_decode(&comp->dwt, codsty->transform == FF_DWT97 ? (void*)comp->f_data : (void*)comp->i_data); } /*end comp */ /* inverse MCT transformation */ if (tile->codsty[0].mct) mct_decode(s, tile); if (s->cdef[0] < 0) { for (x = 0; x < s->ncomponents; x++) s->cdef[x] = x + 1; if ((s->ncomponents & 1) == 0) s->cdef[s->ncomponents-1] = 0; } if (s->precision <= 8) { for (compno = 0; compno < s->ncomponents; compno++) { Jpeg2000Component *comp = tile->comp + compno; Jpeg2000CodingStyle *codsty = tile->codsty + compno; float *datap = comp->f_data; int32_t *i_datap = comp->i_data; int cbps = s->cbps[compno]; int w = tile->comp[compno].coord[0][1] - s->image_offset_x; int planar = !!picture->data[2]; int pixelsize = planar ? 1 : s->ncomponents; int plane = 0; if (planar) plane = s->cdef[compno] ? 
s->cdef[compno]-1 : (s->ncomponents-1); y = tile->comp[compno].coord[1][0] - s->image_offset_y; line = picture->data[plane] + y * picture->linesize[plane]; for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) { uint8_t *dst; x = tile->comp[compno].coord[0][0] - s->image_offset_x; dst = line + x * pixelsize + compno*!planar; if (codsty->transform == FF_DWT97) { for (; x < w; x += s->cdx[compno]) { int val = lrintf(*datap) + (1 << (cbps - 1)); /* DC level shift and clip see ISO 15444-1:2002 G.1.2 */ val = av_clip(val, 0, (1 << cbps) - 1); *dst = val << (8 - cbps); datap++; dst += pixelsize; } } else { for (; x < w; x += s->cdx[compno]) { int val = *i_datap + (1 << (cbps - 1)); /* DC level shift and clip see ISO 15444-1:2002 G.1.2 */ val = av_clip(val, 0, (1 << cbps) - 1); *dst = val << (8 - cbps); i_datap++; dst += pixelsize; } } line += picture->linesize[plane]; } } } else { for (compno = 0; compno < s->ncomponents; compno++) { Jpeg2000Component *comp = tile->comp + compno; Jpeg2000CodingStyle *codsty = tile->codsty + compno; float *datap = comp->f_data; int32_t *i_datap = comp->i_data; uint16_t *linel; int cbps = s->cbps[compno]; int w = tile->comp[compno].coord[0][1] - s->image_offset_x; int planar = !!picture->data[2]; int pixelsize = planar ? 1 : s->ncomponents; int plane = 0; if (planar) plane = s->cdef[compno] ? s->cdef[compno]-1 : (s->ncomponents-1); y = tile->comp[compno].coord[1][0] - s->image_offset_y; linel = (uint16_t *)picture->data[plane] + y * (picture->linesize[plane] >> 1); for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) { uint16_t *dst; x = tile->comp[compno].coord[0][0] - s->image_offset_x; dst = linel + (x * pixelsize + compno*!planar); if (codsty->transform == FF_DWT97) { for (; x < w; x += s-> cdx[compno]) { int val = lrintf(*datap) + (1 << (cbps - 1)); /* DC level shift and clip see ISO 15444-1:2002 G.1.2 */ val = av_clip(val, 0, (1 << cbps) - 1); /* align 12 bit values in little-endian mode */ *dst = val << (16 - cbps); datap++; dst += pixelsize; } } else { for (; x < w; x += s-> cdx[compno]) { int val = *i_datap + (1 << (cbps - 1)); /* DC level shift and clip see ISO 15444-1:2002 G.1.2 */ val = av_clip(val, 0, (1 << cbps) - 1); /* align 12 bit values in little-endian mode */ *dst = val << (16 - cbps); i_datap++; dst += pixelsize; } } linel += picture->linesize[plane] >> 1; } } } return 0; }"} {"target": 0, "idx": 15434, "func": "void qemu_run_all_timers(void) { alarm_timer->pending = 0; /* rearm timer, if not periodic */ if (alarm_timer->expired) { alarm_timer->expired = 0; qemu_rearm_alarm_timer(alarm_timer); } /* vm time timers */ qemu_run_timers(vm_clock); qemu_run_timers(rt_clock); qemu_run_timers(host_clock); }"} {"target": 0, "idx": 15449, "func": "static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev, struct XenDevOps *ops) { struct XenDevice *xendev; xendev = xen_be_find_xendev(type, dom, dev); if (xendev) { return xendev; } /* init new xendev */ xendev = g_malloc0(ops->size); xendev->type = type; xendev->dom = dom; xendev->dev = dev; xendev->ops = ops; snprintf(xendev->be, sizeof(xendev->be), \"backend/%s/%d/%d\", xendev->type, xendev->dom, xendev->dev); snprintf(xendev->name, sizeof(xendev->name), \"%s-%d\", xendev->type, xendev->dev); xendev->debug = debug; xendev->local_port = -1; xendev->evtchndev = xen_xc_evtchn_open(NULL, 0); if (xendev->evtchndev == XC_HANDLER_INITIAL_VALUE) { xen_be_printf(NULL, 0, \"can't open evtchn device\\n\"); g_free(xendev); return NULL; 
} fcntl(xc_evtchn_fd(xendev->evtchndev), F_SETFD, FD_CLOEXEC); if (ops->flags & DEVOPS_FLAG_NEED_GNTDEV) { xendev->gnttabdev = xen_xc_gnttab_open(NULL, 0); if (xendev->gnttabdev == XC_HANDLER_INITIAL_VALUE) { xen_be_printf(NULL, 0, \"can't open gnttab device\\n\"); xc_evtchn_close(xendev->evtchndev); g_free(xendev); return NULL; } } else { xendev->gnttabdev = XC_HANDLER_INITIAL_VALUE; } QTAILQ_INSERT_TAIL(&xendevs, xendev, next); if (xendev->ops->alloc) { xendev->ops->alloc(xendev); } return xendev; }"} {"target": 0, "idx": 15457, "func": "static void json_message_process_token(JSONLexer *lexer, QString *token, JSONTokenType type, int x, int y) { JSONMessageParser *parser = container_of(lexer, JSONMessageParser, lexer); QDict *dict; if (type == JSON_OPERATOR) { switch (qstring_get_str(token)[0]) { case '{': parser->brace_count++; break; case '}': parser->brace_count--; break; case '[': parser->bracket_count++; break; case ']': parser->bracket_count--; break; default: break; } } dict = qdict_new(); qdict_put(dict, \"type\", qint_from_int(type)); QINCREF(token); qdict_put(dict, \"token\", token); qdict_put(dict, \"x\", qint_from_int(x)); qdict_put(dict, \"y\", qint_from_int(y)); parser->token_size += token->length; qlist_append(parser->tokens, dict); if (type == JSON_ERROR) { goto out_emit_bad; } else if (parser->brace_count < 0 || parser->bracket_count < 0 || (parser->brace_count == 0 && parser->bracket_count == 0)) { goto out_emit; } else if (parser->token_size > MAX_TOKEN_SIZE || parser->bracket_count + parser->brace_count > MAX_NESTING) { /* Security consideration, we limit total memory allocated per object * and the maximum recursion depth that a message can force. */ goto out_emit_bad; } return; out_emit_bad: /* * Clear out token list and tell the parser to emit an error * indication by passing it a NULL list */ QDECREF(parser->tokens); parser->tokens = NULL; out_emit: /* send current list of tokens to parser and reset tokenizer */ parser->brace_count = 0; parser->bracket_count = 0; parser->emit(parser, parser->tokens); if (parser->tokens) { QDECREF(parser->tokens); } parser->tokens = qlist_new(); parser->token_size = 0; }"} {"target": 0, "idx": 15482, "func": "static void set_memory_options(uint64_t *ram_slots, ram_addr_t *maxram_size) { uint64_t sz; const char *mem_str; const char *maxmem_str, *slots_str; const ram_addr_t default_ram_size = (ram_addr_t)DEFAULT_RAM_SIZE * 1024 * 1024; QemuOpts *opts = qemu_find_opts_singleton(\"memory\"); sz = 0; mem_str = qemu_opt_get(opts, \"size\"); if (mem_str) { if (!*mem_str) { error_report(\"missing 'size' option value\"); exit(EXIT_FAILURE); } sz = qemu_opt_get_size(opts, \"size\", ram_size); /* Fix up legacy suffix-less format */ if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) { uint64_t overflow_check = sz; sz <<= 20; if ((sz >> 20) != overflow_check) { error_report(\"too large 'size' option value\"); exit(EXIT_FAILURE); } } } /* backward compatibility behaviour for case \"-m 0\" */ if (sz == 0) { sz = default_ram_size; } sz = QEMU_ALIGN_UP(sz, 8192); ram_size = sz; if (ram_size != sz) { error_report(\"ram size too large\"); exit(EXIT_FAILURE); } /* store value for the future use */ qemu_opt_set_number(opts, \"size\", ram_size, &error_abort); *maxram_size = ram_size; maxmem_str = qemu_opt_get(opts, \"maxmem\"); slots_str = qemu_opt_get(opts, \"slots\"); if (maxmem_str && slots_str) { uint64_t slots; sz = qemu_opt_get_size(opts, \"maxmem\", 0); slots = qemu_opt_get_number(opts, \"slots\", 0); if (sz < ram_size) { error_report(\"invalid 
value of -m option maxmem: \" \"maximum memory size (0x%\" PRIx64 \") must be at least \" \"the initial memory size (0x\" RAM_ADDR_FMT \")\", sz, ram_size); exit(EXIT_FAILURE); } else if (sz > ram_size) { if (!slots) { error_report(\"invalid value of -m option: maxmem was \" \"specified, but no hotplug slots were specified\"); exit(EXIT_FAILURE); } } else if (slots) { error_report(\"invalid value of -m option maxmem: \" \"memory slots were specified but maximum memory size \" \"(0x%\" PRIx64 \") is equal to the initial memory size \" \"(0x\" RAM_ADDR_FMT \")\", sz, ram_size); exit(EXIT_FAILURE); } *maxram_size = sz; *ram_slots = slots; } else if ((!maxmem_str && slots_str) || (maxmem_str && !slots_str)) { error_report(\"invalid -m option value: missing \" \"'%s' option\", slots_str ? \"maxmem\" : \"slots\"); exit(EXIT_FAILURE); } }"} {"target": 0, "idx": 15486, "func": "static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp) { /* We don't actually refresh here, but just return data queried in * iscsi_open(): iscsi targets don't change their limits. */ IscsiLun *iscsilun = bs->opaque; uint64_t max_xfer_len = iscsilun->use_16_for_rw ? 0xffffffff : 0xffff; bs->request_alignment = iscsilun->block_size; if (iscsilun->bl.max_xfer_len) { max_xfer_len = MIN(max_xfer_len, iscsilun->bl.max_xfer_len); } if (max_xfer_len * iscsilun->block_size < INT_MAX) { bs->bl.max_transfer = max_xfer_len * iscsilun->block_size; } if (iscsilun->lbp.lbpu) { if (iscsilun->bl.max_unmap < 0xffffffff) { bs->bl.max_discard = sector_limits_lun2qemu(iscsilun->bl.max_unmap, iscsilun); } bs->bl.discard_alignment = sector_limits_lun2qemu(iscsilun->bl.opt_unmap_gran, iscsilun); } else { bs->bl.discard_alignment = iscsilun->block_size >> BDRV_SECTOR_BITS; } if (iscsilun->bl.max_ws_len < 0xffffffff / iscsilun->block_size) { bs->bl.max_pwrite_zeroes = iscsilun->bl.max_ws_len * iscsilun->block_size; } if (iscsilun->lbp.lbpws) { bs->bl.pwrite_zeroes_alignment = iscsilun->bl.opt_unmap_gran * iscsilun->block_size; } else { bs->bl.pwrite_zeroes_alignment = iscsilun->block_size; } if (iscsilun->bl.opt_xfer_len && iscsilun->bl.opt_xfer_len < INT_MAX / iscsilun->block_size) { bs->bl.opt_transfer = pow2floor(iscsilun->bl.opt_xfer_len * iscsilun->block_size); } }"} {"target": 0, "idx": 15492, "func": "static int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m) { return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, m); }"} {"target": 0, "idx": 15495, "func": "void ide_atapi_cmd(IDEState *s) { uint8_t *buf; buf = s->io_buffer; #ifdef DEBUG_IDE_ATAPI { int i; printf(\"ATAPI limit=0x%x packet:\", s->lcyl | (s->hcyl << 8)); for(i = 0; i < ATAPI_PACKET_SIZE; i++) { printf(\" %02x\", buf[i]); } printf(\"\\n\"); } #endif /* * If there's a UNIT_ATTENTION condition pending, only command flagged with * ALLOW_UA are allowed to complete. with other commands getting a CHECK * condition response unless a higher priority status, defined by the drive * here, is pending. */ if (s->sense_key == UNIT_ATTENTION && !(atapi_cmd_table[s->io_buffer[0]].flags & ALLOW_UA)) { ide_atapi_cmd_check_status(s); return; } /* * When a CD gets changed, we have to report an ejected state and * then a loaded state to guests so that they detect tray * open/close and media change events. Guests that do not use * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close * states rely on this behavior. 
*/ if (!(atapi_cmd_table[s->io_buffer[0]].flags & ALLOW_UA) && !s->tray_open && bdrv_is_inserted(s->bs) && s->cdrom_changed) { if (s->cdrom_changed == 1) { ide_atapi_cmd_error(s, NOT_READY, ASC_MEDIUM_NOT_PRESENT); s->cdrom_changed = 2; } else { ide_atapi_cmd_error(s, UNIT_ATTENTION, ASC_MEDIUM_MAY_HAVE_CHANGED); s->cdrom_changed = 0; } return; } /* Report a Not Ready condition if appropriate for the command */ if ((atapi_cmd_table[s->io_buffer[0]].flags & CHECK_READY) && (!media_present(s) || !bdrv_is_inserted(s->bs))) { ide_atapi_cmd_error(s, NOT_READY, ASC_MEDIUM_NOT_PRESENT); return; } /* Execute the command */ if (atapi_cmd_table[s->io_buffer[0]].handler) { atapi_cmd_table[s->io_buffer[0]].handler(s, buf); return; } ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_ILLEGAL_OPCODE); }"} {"target": 1, "idx": 15515, "func": "void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv) { int singleMCLFlag = 0; int nCS = 1 << log2_cb_size; LOCAL_ALIGNED(4, MvField, mergecand_list, [MRG_MAX_NUM_CANDS]); int nPbW2 = nPbW; int nPbH2 = nPbH; HEVCLocalContext *lc = &s->HEVClc; memset(mergecand_list, 0, MRG_MAX_NUM_CANDS * sizeof(*mergecand_list)); if (s->pps->log2_parallel_merge_level > 2 && nCS == 8) { singleMCLFlag = 1; x0 = lc->cu.x; y0 = lc->cu.y; nPbW = nCS; nPbH = nCS; part_idx = 0; } ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH); derive_spatial_merge_candidates(s, x0, y0, nPbW, nPbH, log2_cb_size, singleMCLFlag, part_idx, merge_idx, mergecand_list); if (mergecand_list[merge_idx].pred_flag[0] == 1 && mergecand_list[merge_idx].pred_flag[1] == 1 && (nPbW2 + nPbH2) == 12) { mergecand_list[merge_idx].ref_idx[1] = -1; mergecand_list[merge_idx].pred_flag[1] = 0; } *mv = mergecand_list[merge_idx]; }"} {"target": 1, "idx": 15517, "func": "int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, int cpuid, void *opaque) { S390CPU *cpu = S390_CPU(cs); return s390x_write_all_elf64_notes(\"CORE\", f, cpu, cpuid, opaque); }"} {"target": 0, "idx": 15520, "func": "static void encode_clnpass(Jpeg2000T1Context *t1, int width, int height, int bandno, int *nmsedec, int bpno) { int y0, x, y, mask = 1 << (bpno + NMSEDEC_FRACBITS); for (y0 = 0; y0 < height; y0 += 4) for (x = 0; x < width; x++){ if (y0 + 3 < height && !( (t1->flags[y0+1][x+1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)) || (t1->flags[y0+2][x+1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)) || (t1->flags[y0+3][x+1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)) || (t1->flags[y0+4][x+1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)))) { // aggregation mode int rlen; for (rlen = 0; rlen < 4; rlen++) if (t1->data[y0+rlen][x] & mask) break; ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + MQC_CX_RL, rlen != 4); if (rlen == 4) continue; ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + MQC_CX_UNI, rlen >> 1); ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + MQC_CX_UNI, rlen & 1); for (y = y0 + rlen; y < y0 + 4; y++){ if (!(t1->flags[y+1][x+1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS))){ int ctxno = ff_jpeg2000_getsigctxno(t1->flags[y+1][x+1], bandno); if (y > y0 + rlen) ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, t1->data[y][x] & mask ? 
1:0); if (t1->data[y][x] & mask){ // newly significant int xorbit; int ctxno = ff_jpeg2000_getsgnctxno(t1->flags[y+1][x+1], &xorbit); *nmsedec += getnmsedec_sig(t1->data[y][x], bpno + NMSEDEC_FRACBITS); ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, (t1->flags[y+1][x+1] >> 15) ^ xorbit); ff_jpeg2000_set_significance(t1, x, y, t1->flags[y+1][x+1] >> 15); } } t1->flags[y+1][x+1] &= ~JPEG2000_T1_VIS; } } else{ for (y = y0; y < y0 + 4 && y < height; y++){ if (!(t1->flags[y+1][x+1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS))){ int ctxno = ff_jpeg2000_getsigctxno(t1->flags[y+1][x+1], bandno); ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, t1->data[y][x] & mask ? 1:0); if (t1->data[y][x] & mask){ // newly significant int xorbit; int ctxno = ff_jpeg2000_getsgnctxno(t1->flags[y+1][x+1], &xorbit); *nmsedec += getnmsedec_sig(t1->data[y][x], bpno + NMSEDEC_FRACBITS); ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, (t1->flags[y+1][x+1] >> 15) ^ xorbit); ff_jpeg2000_set_significance(t1, x, y, t1->flags[y+1][x+1] >> 15); } } t1->flags[y+1][x+1] &= ~JPEG2000_T1_VIS; } } } }"} {"target": 0, "idx": 15521, "func": "static int ppc_hash64_pte_update_flags(struct mmu_ctx_hash64 *ctx, target_ulong *pte1p, int ret, int rw) { int store = 0; /* Update page flags */ if (!(*pte1p & HPTE64_R_R)) { /* Update accessed flag */ *pte1p |= HPTE64_R_R; store = 1; } if (!(*pte1p & HPTE64_R_C)) { if (rw == 1 && ret == 0) { /* Update changed flag */ *pte1p |= HPTE64_R_C; store = 1; } else { /* Force page fault for first write access */ ctx->prot &= ~PAGE_WRITE; } } return store; }"} {"target": 0, "idx": 15524, "func": "BlockAIOCB *dma_bdrv_write(BlockDriverState *bs, QEMUSGList *sg, uint64_t sector, void (*cb)(void *opaque, int ret), void *opaque) { return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, DMA_DIRECTION_TO_DEVICE); }"} {"target": 0, "idx": 15530, "func": "static int discard_single_l2(BlockDriverState *bs, uint64_t offset, unsigned int nb_clusters, enum qcow2_discard_type type, bool full_discard) { BDRVQcow2State *s = bs->opaque; uint64_t *l2_table; int l2_index; int ret; int i; ret = get_cluster_table(bs, offset, &l2_table, &l2_index); if (ret < 0) { return ret; } /* Limit nb_clusters to one L2 table */ nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); for (i = 0; i < nb_clusters; i++) { uint64_t old_l2_entry; old_l2_entry = be64_to_cpu(l2_table[l2_index + i]); /* * If full_discard is false, make sure that a discarded area reads back * as zeroes for v3 images (we cannot do it for v2 without actually * writing a zero-filled buffer). We can skip the operation if the * cluster is already marked as zero, or if it's unallocated and we * don't have a backing file. * * TODO We might want to use bdrv_get_block_status(bs) here, but we're * holding s->lock, so that doesn't work today. * * If full_discard is true, the sector should not read back as zeroes, * but rather fall through to the backing file. 
*/ switch (qcow2_get_cluster_type(old_l2_entry)) { case QCOW2_CLUSTER_UNALLOCATED: if (full_discard || !bs->backing_hd) { continue; } break; case QCOW2_CLUSTER_ZERO: if (!full_discard) { continue; } break; case QCOW2_CLUSTER_NORMAL: case QCOW2_CLUSTER_COMPRESSED: break; default: abort(); } /* First remove L2 entries */ qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table); if (!full_discard && s->qcow_version >= 3) { l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); } else { l2_table[l2_index + i] = cpu_to_be64(0); } /* Then decrease the refcount */ qcow2_free_any_clusters(bs, old_l2_entry, 1, type); } qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table); return nb_clusters; }"} {"target": 0, "idx": 15534, "func": "static int64_t qemu_archipelago_getlength(BlockDriverState *bs) { int64_t ret; BDRVArchipelagoState *s = bs->opaque; ret = archipelago_volume_info(s); return ret; }"} {"target": 0, "idx": 15535, "func": "static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu, sPAPRMachineState *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { sPAPRPHBState *sphb; sPAPRPHBClass *spc; PCIDevice *pdev; uint32_t addr, option; uint64_t buid; int ret; if ((nargs != 4) || (nret != 1)) { goto param_error_exit; } buid = rtas_ldq(args, 1); addr = rtas_ld(args, 0); option = rtas_ld(args, 3); sphb = spapr_pci_find_phb(spapr, buid); if (!sphb) { goto param_error_exit; } pdev = pci_find_device(PCI_HOST_BRIDGE(sphb)->bus, (addr >> 16) & 0xFF, (addr >> 8) & 0xFF); if (!pdev || !object_dynamic_cast(OBJECT(pdev), \"vfio-pci\")) { goto param_error_exit; } spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb); if (!spc->eeh_set_option) { goto param_error_exit; } ret = spc->eeh_set_option(sphb, addr, option); rtas_st(rets, 0, ret); return; param_error_exit: rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); }"} {"target": 0, "idx": 15537, "func": "int qemu_acl_remove(qemu_acl *acl, const char *match) { qemu_acl_entry *entry; int i = 0; TAILQ_FOREACH(entry, &acl->entries, next) { i++; if (strcmp(entry->match, match) == 0) { TAILQ_REMOVE(&acl->entries, entry, next); return i; } } return -1; }"} {"target": 0, "idx": 15549, "func": "static void ac97_initfn (PCIDevice *dev) { PCIAC97LinkState *d = DO_UPCAST (PCIAC97LinkState, dev, dev); AC97LinkState *s = &d->ac97; uint8_t *c = d->dev.config; s->pci_dev = &d->dev; pci_config_set_vendor_id (c, PCI_VENDOR_ID_INTEL); /* ro */ pci_config_set_device_id (c, PCI_DEVICE_ID_INTEL_82801AA_5); /* ro */ c[0x04] = 0x00; /* pcicmd pci command rw, ro */ c[0x05] = 0x00; c[0x06] = 0x80; /* pcists pci status rwc, ro */ c[0x07] = 0x02; c[0x08] = 0x01; /* rid revision ro */ c[0x09] = 0x00; /* pi programming interface ro */ pci_config_set_class (c, PCI_CLASS_MULTIMEDIA_AUDIO); /* ro */ c[PCI_HEADER_TYPE] = PCI_HEADER_TYPE_NORMAL; /* headtyp header type ro */ c[0x10] = 0x01; /* nabmar native audio mixer base address rw */ c[0x11] = 0x00; c[0x12] = 0x00; c[0x13] = 0x00; c[0x14] = 0x01; /* nabmbar native audio bus mastering base address rw */ c[0x15] = 0x00; c[0x16] = 0x00; c[0x17] = 0x00; c[0x2c] = 0x86; /* svid subsystem vendor id rwo */ c[0x2d] = 0x80; c[0x2e] = 0x00; /* sid subsystem id rwo */ c[0x2f] = 0x00; c[0x3c] = 0x00; /* intr_ln interrupt line rw */ c[0x3d] = 0x01; /* intr_pn interrupt pin ro */ pci_register_bar (&d->dev, 0, 256 * 4, PCI_ADDRESS_SPACE_IO, ac97_map); pci_register_bar (&d->dev, 1, 64 * 4, PCI_ADDRESS_SPACE_IO, ac97_map); register_savevm (\"ac97\", 0, 2, ac97_save, ac97_load, s); qemu_register_reset (ac97_on_reset, s); 
AUD_register_card (\"ac97\", &s->card); ac97_on_reset (s); }"} {"target": 0, "idx": 15552, "func": "static int cook_parse(AVCodecParserContext *s1, AVCodecContext *avctx, const uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size) { CookParseContext *s = s1->priv_data; if (s->duration) s1->duration = s->duration; else if (avctx->extradata && avctx->extradata_size >= 8 && avctx->channels) s->duration = AV_RB16(avctx->extradata + 4) / avctx->channels; /* always return the full packet. this parser isn't doing any splitting or combining, only setting packet duration */ *poutbuf = buf; *poutbuf_size = buf_size; return buf_size; }"} {"target": 0, "idx": 15555, "func": "static void add_codec(FFServerStream *stream, AVCodecContext *av, FFServerConfig *config) { AVStream *st; AVDictionary **opts, *recommended = NULL; char *enc_config; if(stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams)) return; opts = av->codec_type == AVMEDIA_TYPE_AUDIO ? &config->audio_opts : &config->video_opts; av_dict_copy(&recommended, *opts, 0); av_opt_set_dict2(av->priv_data, opts, AV_OPT_SEARCH_CHILDREN); av_opt_set_dict2(av, opts, AV_OPT_SEARCH_CHILDREN); if (av_dict_count(*opts)) av_log(NULL, AV_LOG_WARNING, \"Something is wrong, %d options are not set!\\n\", av_dict_count(*opts)); if (config->stream_use_defaults) { //TODO: reident /* compute default parameters */ switch(av->codec_type) { case AVMEDIA_TYPE_AUDIO: if (av->bit_rate == 0) { av->bit_rate = 64000; av_dict_set_int(&recommended, \"ab\", av->bit_rate, 0); } if (av->sample_rate == 0) { av->sample_rate = 22050; av_dict_set_int(&recommended, \"ar\", av->sample_rate, 0); } if (av->channels == 0) { av->channels = 1; av_dict_set_int(&recommended, \"ac\", av->channels, 0); } break; case AVMEDIA_TYPE_VIDEO: if (av->bit_rate == 0) { av->bit_rate = 64000; av_dict_set_int(&recommended, \"b\", av->bit_rate, 0); } if (av->time_base.num == 0){ av->time_base.den = 5; av->time_base.num = 1; av_dict_set(&recommended, \"time_base\", \"1/5\", 0); } if (av->width == 0 || av->height == 0) { av->width = 160; av->height = 128; av_dict_set(&recommended, \"video_size\", \"160x128\", 0); } /* Bitrate tolerance is less for streaming */ if (av->bit_rate_tolerance == 0) { av->bit_rate_tolerance = FFMAX(av->bit_rate / 4, (int64_t)av->bit_rate*av->time_base.num/av->time_base.den); av_dict_set_int(&recommended, \"bt\", av->bit_rate_tolerance, 0); } if (!av->rc_eq) { av->rc_eq = av_strdup(\"tex^qComp\"); av_dict_set(&recommended, \"rc_eq\", \"tex^qComp\", 0); } if (!av->rc_max_rate) { av->rc_max_rate = av->bit_rate * 2; av_dict_set_int(&recommended, \"maxrate\", av->rc_max_rate, 0); } if (av->rc_max_rate && !av->rc_buffer_size) { av->rc_buffer_size = av->rc_max_rate; av_dict_set_int(&recommended, \"bufsize\", av->rc_buffer_size, 0); } break; default: abort(); } } else { switch(av->codec_type) { case AVMEDIA_TYPE_AUDIO: if (av->bit_rate == 0) report_config_error(config->filename, config->line_num, AV_LOG_ERROR, &config->errors, \"audio bit rate is not set\\n\"); if (av->sample_rate == 0) report_config_error(config->filename, config->line_num, AV_LOG_ERROR, &config->errors, \"audio sample rate is not set\\n\"); break; case AVMEDIA_TYPE_VIDEO: if (av->width == 0 || av->height == 0) report_config_error(config->filename, config->line_num, AV_LOG_ERROR, &config->errors, \"video size is not set\\n\"); break; default: av_assert0(0); } } st = av_mallocz(sizeof(AVStream)); if (!st) return; av_dict_get_string(recommended, &enc_config, '=', ','); av_dict_free(&recommended); 
av_stream_set_recommended_encoder_configuration(st, enc_config); st->codec = av; stream->streams[stream->nb_streams++] = st; }"} {"target": 1, "idx": 15582, "func": "static void qmp_output_end_struct(Visitor *v, Error **errp) { QmpOutputVisitor *qov = to_qov(v); QObject *value = qmp_output_pop(qov); assert(qobject_type(value) == QTYPE_QDICT); }"} {"target": 1, "idx": 15589, "func": "void compute_images_mse_16bit(PSNRContext *s, const uint8_t *main_data[4], const int main_linesizes[4], const uint8_t *ref_data[4], const int ref_linesizes[4], int w, int h, double mse[4]) { int i, c, j; for (c = 0; c < s->nb_components; c++) { const int outw = s->planewidth[c]; const int outh = s->planeheight[c]; const uint16_t *main_line = (uint16_t *)main_data[c]; const uint16_t *ref_line = (uint16_t *)ref_data[c]; const int ref_linesize = ref_linesizes[c] / 2; const int main_linesize = main_linesizes[c] / 2; uint64_t m = 0; for (i = 0; i < outh; i++) { for (j = 0; j < outw; j++) m += pow2(main_line[j] - ref_line[j]); ref_line += ref_linesize; main_line += main_linesize; } mse[c] = m / (double)(outw * outh); } }"} {"target": 1, "idx": 15594, "func": "putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse) { uint32_t sum; if (cse && cse < n) n = cse + 1; if (sloc < n-1) { sum = net_checksum_add(n-css, data+css); stw_be_p(data + sloc, net_checksum_finish(sum)); } }"} {"target": 0, "idx": 15615, "func": "uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) { uintptr_t ra = GETPC(); uint64_t destlen = get_length(env, r1 + 1); uint64_t dest = get_address(env, r1); uint64_t srclen = get_length(env, r3 + 1); uint64_t src = get_address(env, r3); uint8_t pad = a2 & 0xff; uint32_t cc = 0; if (!(destlen || srclen)) { return cc; } if (srclen > destlen) { srclen = destlen; } for (; destlen || srclen; src++, dest++, destlen--, srclen--) { uint8_t v1 = srclen ? cpu_ldub_data_ra(env, src, ra) : pad; uint8_t v2 = destlen ? cpu_ldub_data_ra(env, dest, ra) : pad; if (v1 != v2) { cc = (v1 < v2) ? 1 : 2; break; } } set_length(env, r1 + 1, destlen); /* can't use srclen here, we trunc'ed it */ set_length(env, r3 + 1, env->regs[r3 + 1] - src - env->regs[r3]); set_address(env, r1, dest); set_address(env, r3, src); return cc; }"} {"target": 0, "idx": 15634, "func": "void *bios_linker_loader_cleanup(GArray *linker) { return g_array_free(linker, false); }"} {"target": 0, "idx": 15644, "func": "const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu, hwaddr ptex, int n) { ppc_hash_pte64_t *hptes = NULL; hwaddr pte_offset = ptex * HASH_PTE_SIZE_64; if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) { /* * HTAB is controlled by KVM. Fetch into temporary buffer */ hptes = g_malloc(HASH_PTEG_SIZE_64); kvmppc_read_hptes(hptes, ptex, n); } else if (cpu->env.external_htab) { /* * HTAB is controlled by QEMU. Just point to the internally * accessible PTEG. 
*/ hptes = (ppc_hash_pte64_t *)(cpu->env.external_htab + pte_offset); } else if (cpu->env.htab_base) { hwaddr plen = n * HASH_PTE_SIZE_64; hptes = address_space_map(CPU(cpu)->as, cpu->env.htab_base + pte_offset, &plen, false); if (plen < (n * HASH_PTE_SIZE_64)) { hw_error(\"%s: Unable to map all requested HPTEs\\n\", __func__); } } return hptes; }"} {"target": 0, "idx": 15653, "func": "static inline bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx) { if (arm_feature(env, ARM_FEATURE_M)) { switch (env->v7m.mpu_ctrl & (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { case R_V7M_MPU_CTRL_ENABLE_MASK: /* Enabled, but not for HardFault and NMI */ return mmu_idx == ARMMMUIdx_MNegPri || mmu_idx == ARMMMUIdx_MSNegPri; case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: /* Enabled for all cases */ return false; case 0: default: /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but * we warned about that in armv7m_nvic.c when the guest set it. */ return true; } } if (mmu_idx == ARMMMUIdx_S2NS) { return (env->cp15.hcr_el2 & HCR_VM) == 0; } return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; }"} {"target": 0, "idx": 15654, "func": "static void build_processor_devices(Aml *sb_scope, unsigned acpi_cpus, AcpiCpuInfo *cpu, AcpiPmInfo *pm) { int i; Aml *dev; Aml *crs; Aml *pkg; Aml *field; Aml *ifctx; Aml *method; /* The current AML generator can cover the APIC ID range [0..255], * inclusive, for VCPU hotplug. */ QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256); g_assert(acpi_cpus <= ACPI_CPU_HOTPLUG_ID_LIMIT); /* create PCI0.PRES device and its _CRS to reserve CPU hotplug MMIO */ dev = aml_device(\"PCI0.\" stringify(CPU_HOTPLUG_RESOURCE_DEVICE)); aml_append(dev, aml_name_decl(\"_HID\", aml_eisaid(\"PNP0A06\"))); aml_append(dev, aml_name_decl(\"_UID\", aml_string(\"CPU Hotplug resources\")) ); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl(\"_STA\", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->cpu_hp_io_base, pm->cpu_hp_io_base, 1, pm->cpu_hp_io_len) ); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(sb_scope, dev); /* declare CPU hotplug MMIO region and PRS field to access it */ aml_append(sb_scope, aml_operation_region( \"PRST\", AML_SYSTEM_IO, aml_int(pm->cpu_hp_io_base), pm->cpu_hp_io_len)); field = aml_field(\"PRST\", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); aml_append(field, aml_named_field(\"PRS\", 256)); aml_append(sb_scope, field); /* build Processor object for each processor */ for (i = 0; i < acpi_cpus; i++) { dev = aml_processor(i, 0, 0, \"CP%.02X\", i); method = aml_method(\"_MAT\", 0, AML_NOTSERIALIZED); aml_append(method, aml_return(aml_call1(CPU_MAT_METHOD, aml_int(i)))); aml_append(dev, method); method = aml_method(\"_STA\", 0, AML_NOTSERIALIZED); aml_append(method, aml_return(aml_call1(CPU_STATUS_METHOD, aml_int(i)))); aml_append(dev, method); method = aml_method(\"_EJ0\", 1, AML_NOTSERIALIZED); aml_append(method, aml_return(aml_call2(CPU_EJECT_METHOD, aml_int(i), aml_arg(0))) ); aml_append(dev, method); aml_append(sb_scope, dev); } /* build this code: * Method(NTFY, 2) {If (LEqual(Arg0, 0x00)) {Notify(CP00, Arg1)} ...} */ /* Arg0 = Processor ID = APIC ID */ method = aml_method(AML_NOTIFY_METHOD, 2, AML_NOTSERIALIZED); for (i = 0; i < acpi_cpus; i++) { ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i))); aml_append(ifctx, aml_notify(aml_name(\"CP%.02X\", i), aml_arg(1)) ); aml_append(method, ifctx); } aml_append(sb_scope, method); /* build 
\"Name(CPON, Package() { One, One, ..., Zero, Zero, ... })\" * * Note: The ability to create variable-sized packages was first * introduced in ACPI 2.0. ACPI 1.0 only allowed fixed-size packages * ith up to 255 elements. Windows guests up to win2k8 fail when * VarPackageOp is used. */ pkg = acpi_cpus <= 255 ? aml_package(acpi_cpus) : aml_varpackage(acpi_cpus); for (i = 0; i < acpi_cpus; i++) { uint8_t b = test_bit(i, cpu->found_cpus) ? 0x01 : 0x00; aml_append(pkg, aml_int(b)); } aml_append(sb_scope, aml_name_decl(CPU_ON_BITMAP, pkg)); }"} {"target": 0, "idx": 15655, "func": "e1000_mmio_read(void *opaque, target_phys_addr_t addr, unsigned size) { E1000State *s = opaque; unsigned int index = (addr & 0x1ffff) >> 2; if (index < NREADOPS && macreg_readops[index]) { return macreg_readops[index](s, index); } DBGOUT(UNKNOWN, \"MMIO unknown read addr=0x%08x\\n\", index<<2); return 0; }"} {"target": 1, "idx": 15664, "func": "static void ccid_on_apdu_from_guest(USBCCIDState *s, CCID_XferBlock *recv) { uint32_t len; if (ccid_card_status(s) != ICC_STATUS_PRESENT_ACTIVE) { DPRINTF(s, 1, \"usb-ccid: not sending apdu to client, no card connected\\n\"); ccid_write_data_block_error(s, recv->hdr.bSlot, recv->hdr.bSeq); return; } len = le32_to_cpu(recv->hdr.dwLength); DPRINTF(s, 1, \"%s: seq %d, len %d\\n\", __func__, recv->hdr.bSeq, len); ccid_add_pending_answer(s, (CCID_Header *)recv); if (s->card) { ccid_card_apdu_from_guest(s->card, recv->abData, len); } else { DPRINTF(s, D_WARN, \"warning: discarded apdu\\n\"); } }"} {"target": 0, "idx": 15679, "func": "void ff_h264_pred_init_x86(H264PredContext *h, int codec_id) { mm_flags = mm_support(); #if HAVE_YASM if (mm_flags & FF_MM_MMX) { h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_mmx; h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmx; h->pred8x8 [VERT_PRED8x8] = ff_pred8x8_vertical_mmx; h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmx; if (codec_id == CODEC_ID_VP8) { h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_mmx; h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_mmx; h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_mmx; } } if (mm_flags & FF_MM_MMX2) { h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmxext; h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_mmxext; h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmxext; h->pred4x4 [DC_PRED ] = ff_pred4x4_dc_mmxext; if (codec_id == CODEC_ID_VP8) { h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_mmxext; h->pred8x8 [DC_PRED8x8 ] = ff_pred8x8_dc_rv40_mmxext; h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_mmxext; h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_mmxext; h->pred4x4 [VERT_PRED ] = ff_pred4x4_vertical_vp8_mmxext; } } if (mm_flags & FF_MM_SSE) { h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_sse; h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_sse; } if (mm_flags & FF_MM_SSE2) { h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_sse2; if (codec_id == CODEC_ID_VP8) { h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_sse2; h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_sse2; } } if (mm_flags & FF_MM_SSSE3) { h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_ssse3; h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_ssse3; h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_ssse3; if (codec_id == CODEC_ID_VP8) { h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_ssse3; h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_ssse3; } } #endif }"} {"target": 1, "idx": 15690, "func": "static inline abi_long target_to_host_timespec(struct timespec *host_ts, abi_ulong target_addr) { struct target_timespec *target_ts; 
if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) return -TARGET_EFAULT; host_ts->tv_sec = tswapal(target_ts->tv_sec); host_ts->tv_nsec = tswapal(target_ts->tv_nsec); unlock_user_struct(target_ts, target_addr, 0); return 0; }"} {"target": 0, "idx": 15696, "func": "void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit) { }"} {"target": 0, "idx": 15723, "func": "static void decode_delta_l(uint8_t *dst, const uint8_t *buf, const uint8_t *buf_end, int w, int flag, int bpp, int dst_size) { GetByteContext off0, off1, dgb, ogb; PutByteContext pb; unsigned poff0, poff1; int i, k, dstpitch; int planepitch_byte = (w + 7) / 8; int planepitch = ((w + 15) / 16) * 2; int pitch = planepitch * bpp; if (buf_end - buf <= 64) return; bytestream2_init(&off0, buf, buf_end - buf); bytestream2_init(&off1, buf + 32, buf_end - (buf + 32)); bytestream2_init_writer(&pb, dst, dst_size); dstpitch = flag ? (((w + 7) / 8) * bpp): 2; for (k = 0; k < bpp; k++) { poff0 = bytestream2_get_be32(&off0); poff1 = bytestream2_get_be32(&off1); if (!poff0) continue; if (2LL * poff0 >= buf_end - buf) return; if (2LL * poff1 >= buf_end - buf) return; bytestream2_init(&dgb, buf + 2 * poff0, buf_end - (buf + 2 * poff0)); bytestream2_init(&ogb, buf + 2 * poff1, buf_end - (buf + 2 * poff1)); while ((bytestream2_peek_be16(&ogb)) != 0xFFFF) { uint32_t offset = bytestream2_get_be16(&ogb); int16_t cnt = bytestream2_get_be16(&ogb); uint16_t data; offset = ((2 * offset) / planepitch_byte) * pitch + ((2 * offset) % planepitch_byte) + k * planepitch; if (cnt < 0) { bytestream2_seek_p(&pb, offset, SEEK_SET); cnt = -cnt; data = bytestream2_get_be16(&dgb); for (i = 0; i < cnt; i++) { bytestream2_put_be16(&pb, data); bytestream2_skip_p(&pb, dstpitch - 2); } } else { bytestream2_seek_p(&pb, offset, SEEK_SET); for (i = 0; i < cnt; i++) { data = bytestream2_get_be16(&dgb); bytestream2_put_be16(&pb, data); bytestream2_skip_p(&pb, dstpitch - 2); } } } } }"} {"target": 0, "idx": 15728, "func": "static void intel_hda_mmio_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { IntelHDAState *d = opaque; const IntelHDAReg *reg = intel_hda_reg_find(d, addr); intel_hda_reg_write(d, reg, val, 0xff); }"} {"target": 0, "idx": 15775, "func": "void serial_realize_core(SerialState *s, Error **errp) { if (!qemu_chr_fe_backend_connected(&s->chr)) { error_setg(errp, \"Can't create serial device, empty char device\"); return; } s->modem_status_poll = timer_new_ns(QEMU_CLOCK_VIRTUAL, (QEMUTimerCB *) serial_update_msl, s); s->fifo_timeout_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, (QEMUTimerCB *) fifo_timeout_int, s); qemu_register_reset(serial_reset, s); qemu_chr_fe_set_handlers(&s->chr, serial_can_receive1, serial_receive1, serial_event, NULL, s, NULL, true); fifo8_create(&s->recv_fifo, UART_FIFO_LENGTH); fifo8_create(&s->xmit_fifo, UART_FIFO_LENGTH); serial_reset(s); }"} {"target": 0, "idx": 15779, "func": "static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw, int access_type) { int ret; #if 0 qemu_log(\"%s\\n\", __func__); #endif if ((access_type == ACCESS_CODE && msr_ir == 0) || (access_type != ACCESS_CODE && msr_dr == 0)) { if (env->mmu_model == POWERPC_MMU_BOOKE) { /* The BookE MMU always performs address translation. The IS and DS bits only affect the address space. */ ret = mmubooke_get_physical_address(env, ctx, eaddr, rw, access_type); } else if (env->mmu_model == POWERPC_MMU_BOOKE206) { ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, access_type); } else { /* No address translation. 
*/ ret = check_physical(env, ctx, eaddr, rw); } } else { ret = -1; switch (env->mmu_model) { case POWERPC_MMU_32B: case POWERPC_MMU_601: /* Try to find a BAT */ if (env->nb_BATs != 0) { ret = get_bat(env, ctx, eaddr, rw, access_type); } if (ret < 0) { /* We didn't match any BAT entry or don't have BATs */ ret = get_segment32(env, ctx, eaddr, rw, access_type); } break; case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: /* Try to find a BAT */ if (env->nb_BATs != 0) { ret = get_bat(env, ctx, eaddr, rw, access_type); } if (ret < 0) { /* We didn't match any BAT entry or don't have BATs */ ret = get_segment_6xx_tlb(env, ctx, eaddr, rw, access_type); } break; #if defined(TARGET_PPC64) case POWERPC_MMU_64B: case POWERPC_MMU_2_06: case POWERPC_MMU_2_06d: ret = get_segment64(env, ctx, eaddr, rw, access_type); break; #endif case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: ret = mmu40x_get_physical_address(env, ctx, eaddr, rw, access_type); break; case POWERPC_MMU_BOOKE: ret = mmubooke_get_physical_address(env, ctx, eaddr, rw, access_type); break; case POWERPC_MMU_BOOKE206: ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, access_type); break; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ cpu_abort(env, \"MPC8xx MMU model is not implemented\\n\"); break; case POWERPC_MMU_REAL: cpu_abort(env, \"PowerPC in real mode do not do any translation\\n\"); return -1; default: cpu_abort(env, \"Unknown or invalid MMU model\\n\"); return -1; } } #if 0 qemu_log(\"%s address \" TARGET_FMT_lx \" => %d \" TARGET_FMT_plx \"\\n\", __func__, eaddr, ret, ctx->raddr); #endif return ret; }"} {"target": 0, "idx": 15788, "func": "static uint8_t eeprom24c0x_read(void) { logout(\"%u: scl = %u, sda = %u, data = 0x%02x\\n\", eeprom.tick, eeprom.scl, eeprom.sda, eeprom.data); return eeprom.sda; }"} {"target": 0, "idx": 15791, "func": "static void blockdev_do_action(int kind, void *data, Error **errp) { TransactionAction action; TransactionActionList list; action.kind = kind; action.data = data; list.value = &action; list.next = NULL; qmp_transaction(&list, errp); }"} {"target": 0, "idx": 15804, "func": "static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale) { if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) { const int strength = ff_h263_loop_filter_strength[qscale]; DECLARE_ALIGNED(8, uint64_t, temp)[4]; uint8_t *btemp = (uint8_t*)temp; src -= 2; transpose4x4(btemp, src, 8, stride); transpose4x4(btemp + 4, src + 4 * stride, 8, stride); __asm__ volatile ( H263_LOOP_FILTER // 5 3 4 6 : \"+m\"(temp[0]), \"+m\"(temp[1]), \"+m\"(temp[2]), \"+m\"(temp[3]) : \"g\"(2 * strength), \"m\"(ff_pb_FC) ); __asm__ volatile ( \"movq %%mm5, %%mm1 \\n\\t\" \"movq %%mm4, %%mm0 \\n\\t\" \"punpcklbw %%mm3, %%mm5 \\n\\t\" \"punpcklbw %%mm6, %%mm4 \\n\\t\" \"punpckhbw %%mm3, %%mm1 \\n\\t\" \"punpckhbw %%mm6, %%mm0 \\n\\t\" \"movq %%mm5, %%mm3 \\n\\t\" \"movq %%mm1, %%mm6 \\n\\t\" \"punpcklwd %%mm4, %%mm5 \\n\\t\" \"punpcklwd %%mm0, %%mm1 \\n\\t\" \"punpckhwd %%mm4, %%mm3 \\n\\t\" \"punpckhwd %%mm0, %%mm6 \\n\\t\" \"movd %%mm5, (%0) \\n\\t\" \"punpckhdq %%mm5, %%mm5 \\n\\t\" \"movd %%mm5, (%0, %2) \\n\\t\" \"movd %%mm3, (%0, %2, 2) \\n\\t\" \"punpckhdq %%mm3, %%mm3 \\n\\t\" \"movd %%mm3, (%0, %3) \\n\\t\" \"movd %%mm1, (%1) \\n\\t\" \"punpckhdq %%mm1, %%mm1 \\n\\t\" \"movd %%mm1, (%1, %2) \\n\\t\" \"movd %%mm6, (%1, %2, 2) \\n\\t\" \"punpckhdq %%mm6, %%mm6 \\n\\t\" \"movd %%mm6, (%1, %3) \\n\\t\" :: \"r\"(src), \"r\"(src + 4 * stride), \"r\"((x86_reg)stride), \"r\"((x86_reg)(3 * stride)) ); } }"} {"target": 0, "idx": 
15811, "func": "static void bootp_reply(struct bootp_t *bp) { BOOTPClient *bc; struct mbuf *m; struct bootp_t *rbp; struct sockaddr_in saddr, daddr; struct in_addr dns_addr; int dhcp_msg_type, val; uint8_t *q; /* extract exact DHCP msg type */ dhcp_decode(bp->bp_vend, DHCP_OPT_LEN, &dhcp_msg_type); dprintf(\"bootp packet op=%d msgtype=%d\\n\", bp->bp_op, dhcp_msg_type); if (dhcp_msg_type == 0) dhcp_msg_type = DHCPREQUEST; /* Force reply for old BOOTP clients */ if (dhcp_msg_type != DHCPDISCOVER && dhcp_msg_type != DHCPREQUEST) return; /* XXX: this is a hack to get the client mac address */ memcpy(client_ethaddr, bp->bp_hwaddr, 6); if ((m = m_get()) == NULL) return; m->m_data += IF_MAXLINKHDR; rbp = (struct bootp_t *)m->m_data; m->m_data += sizeof(struct udpiphdr); memset(rbp, 0, sizeof(struct bootp_t)); if (dhcp_msg_type == DHCPDISCOVER) { new_addr: bc = get_new_addr(&daddr.sin_addr); if (!bc) { dprintf(\"no address left\\n\"); return; } memcpy(bc->macaddr, client_ethaddr, 6); } else { bc = find_addr(&daddr.sin_addr, bp->bp_hwaddr); if (!bc) { /* if never assigned, behaves as if it was already assigned (windows fix because it remembers its address) */ goto new_addr; } } if (bootp_filename) snprintf((char *)rbp->bp_file, sizeof(rbp->bp_file), \"%s\", bootp_filename); dprintf(\"offered addr=%08x\\n\", ntohl(daddr.sin_addr.s_addr)); saddr.sin_addr.s_addr = htonl(ntohl(special_addr.s_addr) | CTL_ALIAS); saddr.sin_port = htons(BOOTP_SERVER); daddr.sin_port = htons(BOOTP_CLIENT); rbp->bp_op = BOOTP_REPLY; rbp->bp_xid = bp->bp_xid; rbp->bp_htype = 1; rbp->bp_hlen = 6; memcpy(rbp->bp_hwaddr, bp->bp_hwaddr, 6); rbp->bp_yiaddr = daddr.sin_addr; /* Client IP address */ rbp->bp_siaddr = saddr.sin_addr; /* Server IP address */ daddr.sin_addr.s_addr = 0xffffffffu; q = rbp->bp_vend; memcpy(q, rfc1533_cookie, 4); q += 4; if (dhcp_msg_type == DHCPDISCOVER) { *q++ = RFC2132_MSG_TYPE; *q++ = 1; *q++ = DHCPOFFER; } else if (dhcp_msg_type == DHCPREQUEST) { *q++ = RFC2132_MSG_TYPE; *q++ = 1; *q++ = DHCPACK; } if (dhcp_msg_type == DHCPDISCOVER || dhcp_msg_type == DHCPREQUEST) { *q++ = RFC2132_SRV_ID; *q++ = 4; memcpy(q, &saddr.sin_addr, 4); q += 4; *q++ = RFC1533_NETMASK; *q++ = 4; *q++ = 0xff; *q++ = 0xff; *q++ = 0xff; *q++ = 0x00; if (!slirp_restrict) { *q++ = RFC1533_GATEWAY; *q++ = 4; memcpy(q, &saddr.sin_addr, 4); q += 4; *q++ = RFC1533_DNS; *q++ = 4; dns_addr.s_addr = htonl(ntohl(special_addr.s_addr) | CTL_DNS); memcpy(q, &dns_addr, 4); q += 4; } *q++ = RFC2132_LEASE_TIME; *q++ = 4; val = htonl(LEASE_TIME); memcpy(q, &val, 4); q += 4; if (*slirp_hostname) { val = strlen(slirp_hostname); *q++ = RFC1533_HOSTNAME; *q++ = val; memcpy(q, slirp_hostname, val); q += val; } } *q++ = RFC1533_END; m->m_len = sizeof(struct bootp_t) - sizeof(struct ip) - sizeof(struct udphdr); udp_output2(NULL, m, &saddr, &daddr, IPTOS_LOWDELAY); }"} {"target": 0, "idx": 15818, "func": "static uint32_t drc_isolate_physical(sPAPRDRConnector *drc) { switch (drc->state) { case SPAPR_DRC_STATE_PHYSICAL_POWERON: return RTAS_OUT_SUCCESS; /* Nothing to do */ case SPAPR_DRC_STATE_PHYSICAL_CONFIGURED: break; /* see below */ case SPAPR_DRC_STATE_PHYSICAL_UNISOLATE: return RTAS_OUT_PARAM_ERROR; /* not allowed */ default: g_assert_not_reached(); } /* if the guest is configuring a device attached to this DRC, we * should reset the configuration state at this point since it may * no longer be reliable (guest released device and needs to start * over, or unplug occurred so the FDT is no longer valid) */ g_free(drc->ccs); drc->ccs = NULL; drc->state 
= SPAPR_DRC_STATE_PHYSICAL_POWERON; if (drc->unplug_requested) { uint32_t drc_index = spapr_drc_index(drc); trace_spapr_drc_set_isolation_state_finalizing(drc_index); spapr_drc_detach(drc); } return RTAS_OUT_SUCCESS; }"} {"target": 0, "idx": 15823, "func": "static void input_linux_event_mouse(void *opaque) { InputLinux *il = opaque; struct input_event event; int rc; for (;;) { rc = read(il->fd, &event, sizeof(event)); if (rc != sizeof(event)) { if (rc < 0 && errno != EAGAIN) { fprintf(stderr, \"%s: read: %s\\n\", __func__, strerror(errno)); qemu_set_fd_handler(il->fd, NULL, NULL, NULL); close(il->fd); } break; } input_linux_handle_mouse(il, &event); } }"} {"target": 0, "idx": 15833, "func": "int float64_eq( float64 a, float64 b STATUS_PARAM ) { if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { float_raise( float_flag_invalid STATUS_VAR); } return 0; } return ( a == b ) || ( (bits64) ( ( a | b )<<1 ) == 0 ); }"} {"target": 0, "idx": 15840, "func": "static always_inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit, uint8_t lit) { int l1, l2; TCGv tmp; if (unlikely(rc == 31)) return; l1 = gen_new_label(); l2 = gen_new_label(); if (ra != 31) { tmp = tcg_temp_new(TCG_TYPE_I64); tcg_gen_mov_i64(tmp, cpu_ir[ra]); } else tmp = tcg_const_i64(0); if (islit) tcg_gen_brcondi_i64(cond, tmp, lit, l1); else tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1); tcg_gen_movi_i64(cpu_ir[rc], 0); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_movi_i64(cpu_ir[rc], 1); gen_set_label(l2); }"} {"target": 1, "idx": 15846, "func": "static int virtio_scsi_device_exit(DeviceState *qdev) { VirtIOSCSI *s = VIRTIO_SCSI(qdev); VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(qdev); unregister_savevm(qdev, \"virtio-scsi\", s); return virtio_scsi_common_exit(vs); }"} {"target": 0, "idx": 15879, "func": "static int pci_vpb_init(SysBusDevice *dev) { PCIVPBState *s = FROM_SYSBUS(PCIVPBState, dev); PCIBus *bus; int i; for (i = 0; i < 4; i++) { sysbus_init_irq(dev, &s->irq[i]); } bus = pci_register_bus(&dev->qdev, \"pci\", pci_vpb_set_irq, pci_vpb_map_irq, s->irq, get_system_memory(), get_system_io(), PCI_DEVFN(11, 0), 4); /* ??? Register memory space. 
*/ memory_region_init_io(&s->mem_config, &pci_vpb_config_ops, bus, \"pci-vpb-selfconfig\", 0x1000000); memory_region_init_io(&s->mem_config2, &pci_vpb_config_ops, bus, \"pci-vpb-config\", 0x1000000); if (s->realview) { isa_mmio_setup(&s->isa, 0x0100000); } sysbus_init_mmio_cb2(dev, pci_vpb_map, pci_vpb_unmap); pci_create_simple(bus, -1, \"versatile_pci_host\"); return 0; }"} {"target": 0, "idx": 15883, "func": "static void rng_random_set_filename(Object *obj, const char *filename, Error **errp) { RngBackend *b = RNG_BACKEND(obj); RndRandom *s = RNG_RANDOM(obj); if (b->opened) { error_set(errp, QERR_PERMISSION_DENIED); return; } if (s->filename) { g_free(s->filename); } s->filename = g_strdup(filename); }"} {"target": 0, "idx": 15885, "func": "static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu, sPAPRMachineState *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { sPAPRPHBState *sphb; PCIDevice *pdev; uint32_t addr, option; uint64_t buid; if ((nargs != 4) || (nret != 2)) { goto param_error_exit; } buid = rtas_ldq(args, 1); sphb = spapr_pci_find_phb(spapr, buid); if (!sphb) { goto param_error_exit; } if (!spapr_phb_eeh_available(sphb)) { goto param_error_exit; } /* * We always have PE address of form \"00BB0001\". \"BB\" * represents the bus number of PE's primary bus. */ option = rtas_ld(args, 3); switch (option) { case RTAS_GET_PE_ADDR: addr = rtas_ld(args, 0); pdev = spapr_pci_find_dev(spapr, buid, addr); if (!pdev) { goto param_error_exit; } rtas_st(rets, 1, (pci_bus_num(pdev->bus) << 16) + 1); break; case RTAS_GET_PE_MODE: rtas_st(rets, 1, RTAS_PE_MODE_SHARED); break; default: goto param_error_exit; } rtas_st(rets, 0, RTAS_OUT_SUCCESS); return; param_error_exit: rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); }"} {"target": 0, "idx": 15890, "func": "static void migrate_fd_completed(MigrationState *s) { DPRINTF(\"setting completed state\\n\"); migrate_fd_cleanup(s); if (s->state == MIG_STATE_ACTIVE) { s->state = MIG_STATE_COMPLETED; runstate_set(RUN_STATE_POSTMIGRATE); } notifier_list_notify(&migration_state_notifiers, s); }"} {"target": 0, "idx": 15897, "func": "static unsigned int dec_adds_r(DisasContext *dc) { TCGv t0; int size = memsize_z(dc); DIS(fprintf (logfile, \"adds.%c $r%u, $r%u\\n\", memsize_char(size), dc->op1, dc->op2)); cris_cc_mask(dc, CC_MASK_NZVC); t0 = tcg_temp_new(TCG_TYPE_TL); /* Size can only be qi or hi. 
*/ t_gen_sext(t0, cpu_R[dc->op1], size); cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); tcg_temp_free(t0); return 2; }"} {"target": 0, "idx": 15906, "func": "static void create_map(vorbis_context *vc, unsigned floor_number) { vorbis_floor *floors = vc->floors; vorbis_floor0 *vf; int idx; int blockflag, n; int32_t *map; for (blockflag = 0; blockflag < 2; ++blockflag) { n = vc->blocksize[blockflag] / 2; floors[floor_number].data.t0.map[blockflag] = av_malloc((n + 1) * sizeof(int32_t)); // n + sentinel map = floors[floor_number].data.t0.map[blockflag]; vf = &floors[floor_number].data.t0; for (idx = 0; idx < n; ++idx) { map[idx] = floor(BARK((vf->rate * idx) / (2.0f * n)) * (vf->bark_map_size / BARK(vf->rate / 2.0f))); if (vf->bark_map_size-1 < map[idx]) map[idx] = vf->bark_map_size - 1; } map[n] = -1; vf->map_size[blockflag] = n; } for (idx = 0; idx <= n; ++idx) { av_dlog(NULL, \"floor0 map: map at pos %d is %d\\n\", idx, map[idx]); } }"} {"target": 0, "idx": 15911, "func": "static void virtio_scsi_parse_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req) { assert(req->elem.out_num && req->elem.in_num); req->vq = vq; req->dev = s; req->sreq = NULL; req->req.buf = req->elem.out_sg[0].iov_base; req->resp.buf = req->elem.in_sg[0].iov_base; if (req->elem.out_num > 1) { qemu_sgl_init_external(&req->qsgl, &req->elem.out_sg[1], &req->elem.out_addr[1], req->elem.out_num - 1); } else { qemu_sgl_init_external(&req->qsgl, &req->elem.in_sg[1], &req->elem.in_addr[1], req->elem.in_num - 1); } }"} {"target": 0, "idx": 15924, "func": "static void usb_uas_task(UASDevice *uas, uas_ui *ui) { uint16_t tag = be16_to_cpu(ui->hdr.tag); uint64_t lun64 = be64_to_cpu(ui->task.lun); SCSIDevice *dev = usb_uas_get_dev(uas, lun64); int lun = usb_uas_get_lun(lun64); UASRequest *req; uint16_t task_tag; req = usb_uas_find_request(uas, be16_to_cpu(ui->hdr.tag)); if (req) { goto overlapped_tag; } switch (ui->task.function) { case UAS_TMF_ABORT_TASK: task_tag = be16_to_cpu(ui->task.task_tag); trace_usb_uas_tmf_abort_task(uas->dev.addr, tag, task_tag); if (dev == NULL) { goto bad_target; } if (dev->lun != lun) { goto incorrect_lun; } req = usb_uas_find_request(uas, task_tag); if (req && req->dev == dev) { scsi_req_cancel(req->req); } usb_uas_queue_response(uas, tag, UAS_RC_TMF_COMPLETE, 0); break; case UAS_TMF_LOGICAL_UNIT_RESET: trace_usb_uas_tmf_logical_unit_reset(uas->dev.addr, tag, lun); if (dev == NULL) { goto bad_target; } if (dev->lun != lun) { goto incorrect_lun; } qdev_reset_all(&dev->qdev); usb_uas_queue_response(uas, tag, UAS_RC_TMF_COMPLETE, 0); break; default: trace_usb_uas_tmf_unsupported(uas->dev.addr, tag, ui->task.function); usb_uas_queue_response(uas, tag, UAS_RC_TMF_NOT_SUPPORTED, 0); break; } return; overlapped_tag: usb_uas_queue_response(uas, req->tag, UAS_RC_OVERLAPPED_TAG, 0); return; bad_target: /* FIXME: correct? [see long comment in usb_uas_command()] */ usb_uas_queue_response(uas, tag, UAS_RC_INVALID_INFO_UNIT, 0); return; incorrect_lun: usb_uas_queue_response(uas, tag, UAS_RC_INCORRECT_LUN, 0); }"} {"target": 0, "idx": 15929, "func": "static int ide_qdev_init(DeviceState *qdev, DeviceInfo *base) { IDEDevice *dev = DO_UPCAST(IDEDevice, qdev, qdev); IDEDeviceInfo *info = DO_UPCAST(IDEDeviceInfo, qdev, base); IDEBus *bus = DO_UPCAST(IDEBus, qbus, qdev->parent_bus); if (!dev->conf.dinfo) { fprintf(stderr, \"%s: no drive specified\\n\", qdev->info->name); goto err; } if (dev->unit == -1) { dev->unit = bus->master ? 
1 : 0; } switch (dev->unit) { case 0: if (bus->master) { fprintf(stderr, \"ide: tried to assign master twice\\n\"); goto err; } bus->master = dev; break; case 1: if (bus->slave) { fprintf(stderr, \"ide: tried to assign slave twice\\n\"); goto err; } bus->slave = dev; break; default: goto err; } return info->init(dev); err: return -1; }"} {"target": 0, "idx": 15943, "func": "static int tcg_match_ori(TCGType type, tcg_target_long val) { if (facilities & FACILITY_EXT_IMM) { if (type == TCG_TYPE_I32) { /* All 32-bit ORs can be performed with 1 48-bit insn. */ return 1; } } /* Look for negative values. These are best to load with LGHI. */ if (val < 0) { if (val == (int16_t)val) { return 0; } if (facilities & FACILITY_EXT_IMM) { if (val == (int32_t)val) { return 0; } } } return 1; }"} {"target": 0, "idx": 15962, "func": "void OPPROTO op_addq_EDI_T0(void) { EDI = (EDI + T0); }"} {"target": 0, "idx": 15965, "func": "static void hypercall_register_types(void) { /* hcall-pft */ spapr_register_hypercall(H_ENTER, h_enter); spapr_register_hypercall(H_REMOVE, h_remove); spapr_register_hypercall(H_PROTECT, h_protect); spapr_register_hypercall(H_READ, h_read); /* hcall-bulk */ spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove); /* hcall-dabr */ spapr_register_hypercall(H_SET_DABR, h_set_dabr); /* hcall-splpar */ spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa); spapr_register_hypercall(H_CEDE, h_cede); /* processor register resource access h-calls */ spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0); spapr_register_hypercall(H_SET_MODE, h_set_mode); /* \"debugger\" hcalls (also used by SLOF). Note: We do -not- differenciate * here between the \"CI\" and the \"CACHE\" variants, they will use whatever * mapping attributes qemu is using. When using KVM, the kernel will * enforce the attributes more strongly */ spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load); spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store); spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load); spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store); spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi); spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf); spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop); /* qemu/KVM-PPC specific hcalls */ spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas); /* ibm,client-architecture-support support */ spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support); }"} {"target": 0, "idx": 15977, "func": "static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) { VirtIOBlock *s = VIRTIO_BLK(vdev); BlockConf *conf = &s->conf.conf; struct virtio_blk_config blkcfg; uint64_t capacity; int blk_size = conf->logical_block_size; bdrv_get_geometry(s->bs, &capacity); memset(&blkcfg, 0, sizeof(blkcfg)); virtio_stq_p(vdev, &blkcfg.capacity, capacity); virtio_stl_p(vdev, &blkcfg.seg_max, 128 - 2); virtio_stw_p(vdev, &blkcfg.cylinders, conf->cyls); virtio_stl_p(vdev, &blkcfg.blk_size, blk_size); virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size); virtio_stw_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size); blkcfg.heads = conf->heads; /* * We must ensure that the block device capacity is a multiple of * the logical block size. If that is not the case, let's use * sector_mask to adopt the geometry to have a correct picture. 
* For those devices where the capacity is ok for the given geometry * we don't touch the sector value of the geometry, since some devices * (like s390 dasd) need a specific value. Here the capacity is already * cyls*heads*secs*blk_size and the sector value is not block size * divided by 512 - instead it is the amount of blk_size blocks * per track (cylinder). */ if (bdrv_getlength(s->bs) / conf->heads / conf->secs % blk_size) { blkcfg.sectors = conf->secs & ~s->sector_mask; } else { blkcfg.sectors = conf->secs; } blkcfg.size_max = 0; blkcfg.physical_block_exp = get_physical_block_exp(conf); blkcfg.alignment_offset = 0; blkcfg.wce = bdrv_enable_write_cache(s->bs); memcpy(config, &blkcfg, sizeof(struct virtio_blk_config)); }"} {"target": 0, "idx": 15980, "func": "static void decode_opc (CPUMIPSState *env, DisasContext *ctx) { int32_t offset; int rs, rt, rd, sa; uint32_t op, op1; int16_t imm; /* make sure instructions are on a word boundary */ if (ctx->pc & 0x3) { env->CP0_BadVAddr = ctx->pc; generate_exception(ctx, EXCP_AdEL); return; } /* Handle blikely not taken case */ if ((ctx->hflags & MIPS_HFLAG_BMASK_BASE) == MIPS_HFLAG_BL) { int l1 = gen_new_label(); MIPS_DEBUG(\"blikely condition (\" TARGET_FMT_lx \")\", ctx->pc + 4); tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1); tcg_gen_movi_i32(hflags, ctx->hflags & ~MIPS_HFLAG_BMASK); gen_goto_tb(ctx, 1, ctx->pc + 4); gen_set_label(l1); } if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { tcg_gen_debug_insn_start(ctx->pc); } op = MASK_OP_MAJOR(ctx->opcode); rs = (ctx->opcode >> 21) & 0x1f; rt = (ctx->opcode >> 16) & 0x1f; rd = (ctx->opcode >> 11) & 0x1f; sa = (ctx->opcode >> 6) & 0x1f; imm = (int16_t)ctx->opcode; switch (op) { case OPC_SPECIAL: decode_opc_special(env, ctx); break; case OPC_SPECIAL2: decode_opc_special2_legacy(env, ctx); break; case OPC_SPECIAL3: decode_opc_special3(env, ctx); break; case OPC_REGIMM: op1 = MASK_REGIMM(ctx->opcode); switch (op1) { case OPC_BLTZL: /* REGIMM branches */ case OPC_BGEZL: case OPC_BLTZALL: case OPC_BGEZALL: check_insn_opc_removed(ctx, ISA_MIPS32R6); case OPC_BLTZ: case OPC_BGEZ: gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4); break; case OPC_BLTZAL: case OPC_BGEZAL: if (ctx->insn_flags & ISA_MIPS32R6) { if (rs == 0) { /* OPC_NAL, OPC_BAL */ gen_compute_branch(ctx, op1, 4, 0, -1, imm << 2, 4); } else { generate_exception(ctx, EXCP_RI); } } else { gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4); } break; case OPC_TGEI ... 
OPC_TEQI: /* REGIMM traps */ case OPC_TNEI: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_trap(ctx, op1, rs, -1, imm); break; case OPC_SYNCI: check_insn(ctx, ISA_MIPS32R2); /* Break the TB to be able to sync copied instructions immediately */ ctx->bstate = BS_STOP; break; case OPC_BPOSGE32: /* MIPS DSP branch */ #if defined(TARGET_MIPS64) case OPC_BPOSGE64: #endif check_dsp(ctx); gen_compute_branch(ctx, op1, 4, -1, -2, (int32_t)imm << 2, 4); break; #if defined(TARGET_MIPS64) case OPC_DAHI: check_insn(ctx, ISA_MIPS32R6); check_mips_64(ctx); if (rs != 0) { tcg_gen_addi_tl(cpu_gpr[rs], cpu_gpr[rs], (int64_t)imm << 32); } MIPS_DEBUG(\"dahi %s, %04x\", regnames[rs], imm); break; case OPC_DATI: check_insn(ctx, ISA_MIPS32R6); check_mips_64(ctx); if (rs != 0) { tcg_gen_addi_tl(cpu_gpr[rs], cpu_gpr[rs], (int64_t)imm << 48); } MIPS_DEBUG(\"dati %s, %04x\", regnames[rs], imm); break; #endif default: /* Invalid */ MIPS_INVAL(\"regimm\"); generate_exception(ctx, EXCP_RI); break; } break; case OPC_CP0: check_cp0_enabled(ctx); op1 = MASK_CP0(ctx->opcode); switch (op1) { case OPC_MFC0: case OPC_MTC0: case OPC_MFTR: case OPC_MTTR: #if defined(TARGET_MIPS64) case OPC_DMFC0: case OPC_DMTC0: #endif #ifndef CONFIG_USER_ONLY gen_cp0(env, ctx, op1, rt, rd); #endif /* !CONFIG_USER_ONLY */ break; case OPC_C0_FIRST ... OPC_C0_LAST: #ifndef CONFIG_USER_ONLY gen_cp0(env, ctx, MASK_C0(ctx->opcode), rt, rd); #endif /* !CONFIG_USER_ONLY */ break; case OPC_MFMC0: #ifndef CONFIG_USER_ONLY { uint32_t op2; TCGv t0 = tcg_temp_new(); op2 = MASK_MFMC0(ctx->opcode); switch (op2) { case OPC_DMT: check_insn(ctx, ASE_MT); gen_helper_dmt(t0); gen_store_gpr(t0, rt); break; case OPC_EMT: check_insn(ctx, ASE_MT); gen_helper_emt(t0); gen_store_gpr(t0, rt); break; case OPC_DVPE: check_insn(ctx, ASE_MT); gen_helper_dvpe(t0, cpu_env); gen_store_gpr(t0, rt); break; case OPC_EVPE: check_insn(ctx, ASE_MT); gen_helper_evpe(t0, cpu_env); gen_store_gpr(t0, rt); break; case OPC_DI: check_insn(ctx, ISA_MIPS32R2); save_cpu_state(ctx, 1); gen_helper_di(t0, cpu_env); gen_store_gpr(t0, rt); /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; case OPC_EI: check_insn(ctx, ISA_MIPS32R2); save_cpu_state(ctx, 1); gen_helper_ei(t0, cpu_env); gen_store_gpr(t0, rt); /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; default: /* Invalid */ MIPS_INVAL(\"mfmc0\"); generate_exception(ctx, EXCP_RI); break; } tcg_temp_free(t0); } #endif /* !CONFIG_USER_ONLY */ break; case OPC_RDPGPR: check_insn(ctx, ISA_MIPS32R2); gen_load_srsgpr(rt, rd); break; case OPC_WRPGPR: check_insn(ctx, ISA_MIPS32R2); gen_store_srsgpr(rt, rd); break; default: MIPS_INVAL(\"cp0\"); generate_exception(ctx, EXCP_RI); break; } break; case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC, OPC_ADDI */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_BOVC, OPC_BEQZALC, OPC_BEQC */ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } else { /* OPC_ADDI */ /* Arithmetic with immediate opcode */ gen_arith_imm(ctx, op, rt, rs, imm); } break; case OPC_ADDIU: gen_arith_imm(ctx, op, rt, rs, imm); break; case OPC_SLTI: /* Set on less than with immediate opcode */ case OPC_SLTIU: gen_slt_imm(ctx, op, rt, rs, imm); break; case OPC_ANDI: /* Arithmetic with immediate opcode */ case OPC_LUI: /* OPC_AUI */ case OPC_ORI: case OPC_XORI: gen_logic_imm(ctx, op, rt, rs, imm); break; case OPC_J ... 
OPC_JAL: /* Jump */ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; gen_compute_branch(ctx, op, 4, rs, rt, offset, 4); break; /* Branch */ case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC, OPC_BLEZL */ if (ctx->insn_flags & ISA_MIPS32R6) { if (rt == 0) { generate_exception(ctx, EXCP_RI); break; } /* OPC_BLEZC, OPC_BGEZC, OPC_BGEC */ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } else { /* OPC_BLEZL */ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); } break; case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC, OPC_BGTZL */ if (ctx->insn_flags & ISA_MIPS32R6) { if (rt == 0) { generate_exception(ctx, EXCP_RI); break; } /* OPC_BGTZC, OPC_BLTZC, OPC_BLTC */ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } else { /* OPC_BGTZL */ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); } break; case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC, OPC_BLEZ */ if (rt == 0) { /* OPC_BLEZ */ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); } else { check_insn(ctx, ISA_MIPS32R6); /* OPC_BLEZALC, OPC_BGEZALC, OPC_BGEUC */ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } break; case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC, OPC_BGTZ */ if (rt == 0) { /* OPC_BGTZ */ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); } else { check_insn(ctx, ISA_MIPS32R6); /* OPC_BGTZALC, OPC_BLTZALC, OPC_BLTUC */ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } break; case OPC_BEQL: case OPC_BNEL: check_insn_opc_removed(ctx, ISA_MIPS32R6); case OPC_BEQ: case OPC_BNE: gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); break; case OPC_LWL: /* Load and stores */ case OPC_LWR: case OPC_LL: check_insn_opc_removed(ctx, ISA_MIPS32R6); case OPC_LB ... OPC_LH: case OPC_LW ... OPC_LHU: gen_ld(ctx, op, rt, rs, imm); break; case OPC_SWL: case OPC_SWR: check_insn_opc_removed(ctx, ISA_MIPS32R6); case OPC_SB ... OPC_SH: case OPC_SW: gen_st(ctx, op, rt, rs, imm); break; case OPC_SC: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_st_cond(ctx, op, rt, rs, imm); break; case OPC_CACHE: check_insn_opc_removed(ctx, ISA_MIPS32R6); check_cp0_enabled(ctx); check_insn(ctx, ISA_MIPS3 | ISA_MIPS32); /* Treat as NOP. */ break; case OPC_PREF: check_insn_opc_removed(ctx, ISA_MIPS32R6); check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); /* Treat as NOP. */ break; /* Floating point (COP1). 
*/ case OPC_LWC1: case OPC_LDC1: case OPC_SWC1: case OPC_SDC1: gen_cop1_ldst(ctx, op, rt, rs, imm); break; case OPC_CP1: if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { check_cp1_enabled(ctx); op1 = MASK_CP1(ctx->opcode); switch (op1) { case OPC_MFHC1: case OPC_MTHC1: check_insn(ctx, ISA_MIPS32R2); case OPC_MFC1: case OPC_CFC1: case OPC_MTC1: case OPC_CTC1: gen_cp1(ctx, op1, rt, rd); break; #if defined(TARGET_MIPS64) case OPC_DMFC1: case OPC_DMTC1: check_insn(ctx, ISA_MIPS3); gen_cp1(ctx, op1, rt, rd); break; #endif case OPC_BC1EQZ: /* OPC_BC1ANY2 */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_BC1EQZ */ gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode), rt, imm << 2); } else { /* OPC_BC1ANY2 */ check_cop1x(ctx); check_insn(ctx, ASE_MIPS3D); gen_compute_branch1(ctx, MASK_BC1(ctx->opcode), (rt >> 2) & 0x7, imm << 2); } break; case OPC_BC1NEZ: check_insn(ctx, ISA_MIPS32R6); gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode), rt, imm << 2); break; case OPC_BC1ANY4: check_insn_opc_removed(ctx, ISA_MIPS32R6); check_cop1x(ctx); check_insn(ctx, ASE_MIPS3D); /* fall through */ case OPC_BC1: check_insn_opc_removed(ctx, ISA_MIPS32R6); gen_compute_branch1(ctx, MASK_BC1(ctx->opcode), (rt >> 2) & 0x7, imm << 2); break; case OPC_PS_FMT: check_insn_opc_removed(ctx, ISA_MIPS32R6); case OPC_S_FMT: case OPC_D_FMT: gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, (imm >> 8) & 0x7); break; case OPC_W_FMT: case OPC_L_FMT: { int r6_op = ctx->opcode & FOP(0x3f, 0x1f); if (ctx->insn_flags & ISA_MIPS32R6) { switch (r6_op) { case R6_OPC_CMP_AF_S: case R6_OPC_CMP_UN_S: case R6_OPC_CMP_EQ_S: case R6_OPC_CMP_UEQ_S: case R6_OPC_CMP_LT_S: case R6_OPC_CMP_ULT_S: case R6_OPC_CMP_LE_S: case R6_OPC_CMP_ULE_S: case R6_OPC_CMP_SAF_S: case R6_OPC_CMP_SUN_S: case R6_OPC_CMP_SEQ_S: case R6_OPC_CMP_SEUQ_S: case R6_OPC_CMP_SLT_S: case R6_OPC_CMP_SULT_S: case R6_OPC_CMP_SLE_S: case R6_OPC_CMP_SULE_S: case R6_OPC_CMP_OR_S: case R6_OPC_CMP_UNE_S: case R6_OPC_CMP_NE_S: case R6_OPC_CMP_SOR_S: case R6_OPC_CMP_SUNE_S: case R6_OPC_CMP_SNE_S: gen_r6_cmp_s(ctx, ctx->opcode & 0x1f, rt, rd, sa); break; case R6_OPC_CMP_AF_D: case R6_OPC_CMP_UN_D: case R6_OPC_CMP_EQ_D: case R6_OPC_CMP_UEQ_D: case R6_OPC_CMP_LT_D: case R6_OPC_CMP_ULT_D: case R6_OPC_CMP_LE_D: case R6_OPC_CMP_ULE_D: case R6_OPC_CMP_SAF_D: case R6_OPC_CMP_SUN_D: case R6_OPC_CMP_SEQ_D: case R6_OPC_CMP_SEUQ_D: case R6_OPC_CMP_SLT_D: case R6_OPC_CMP_SULT_D: case R6_OPC_CMP_SLE_D: case R6_OPC_CMP_SULE_D: case R6_OPC_CMP_OR_D: case R6_OPC_CMP_UNE_D: case R6_OPC_CMP_NE_D: case R6_OPC_CMP_SOR_D: case R6_OPC_CMP_SUNE_D: case R6_OPC_CMP_SNE_D: gen_r6_cmp_d(ctx, ctx->opcode & 0x1f, rt, rd, sa); break; default: gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, (imm >> 8) & 0x7); break; } } else { gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, (imm >> 8) & 0x7); } break; } default: MIPS_INVAL(\"cp1\"); generate_exception (ctx, EXCP_RI); break; } } else { generate_exception_err(ctx, EXCP_CpU, 1); } break; /* Compact branches [R6] and COP2 [non-R6] */ case OPC_BC: /* OPC_LWC2 */ case OPC_BALC: /* OPC_SWC2 */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_BC, OPC_BALC */ gen_compute_compact_branch(ctx, op, 0, 0, sextract32(ctx->opcode << 2, 0, 28)); } else { /* OPC_LWC2, OPC_SWC2 */ /* COP2: Not implemented. 
*/ generate_exception_err(ctx, EXCP_CpU, 2); } break; case OPC_BEQZC: /* OPC_JIC, OPC_LDC2 */ case OPC_BNEZC: /* OPC_JIALC, OPC_SDC2 */ if (ctx->insn_flags & ISA_MIPS32R6) { if (rs != 0) { /* OPC_BEQZC, OPC_BNEZC */ gen_compute_compact_branch(ctx, op, rs, 0, sextract32(ctx->opcode << 2, 0, 23)); } else { /* OPC_JIC, OPC_JIALC */ gen_compute_compact_branch(ctx, op, 0, rt, imm); } } else { /* OPC_LWC2, OPC_SWC2 */ /* COP2: Not implemented. */ generate_exception_err(ctx, EXCP_CpU, 2); } break; case OPC_CP2: check_insn(ctx, INSN_LOONGSON2F); /* Note that these instructions use different fields. */ gen_loongson_multimedia(ctx, sa, rd, rt); break; case OPC_CP3: check_insn_opc_removed(ctx, ISA_MIPS32R6); if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { check_cp1_enabled(ctx); op1 = MASK_CP3(ctx->opcode); switch (op1) { case OPC_LWXC1: case OPC_LDXC1: case OPC_LUXC1: case OPC_SWXC1: case OPC_SDXC1: case OPC_SUXC1: gen_flt3_ldst(ctx, op1, sa, rd, rs, rt); break; case OPC_PREFX: /* Treat as NOP. */ break; case OPC_ALNV_PS: case OPC_MADD_S: case OPC_MADD_D: case OPC_MADD_PS: case OPC_MSUB_S: case OPC_MSUB_D: case OPC_MSUB_PS: case OPC_NMADD_S: case OPC_NMADD_D: case OPC_NMADD_PS: case OPC_NMSUB_S: case OPC_NMSUB_D: case OPC_NMSUB_PS: gen_flt3_arith(ctx, op1, sa, rs, rd, rt); break; default: MIPS_INVAL(\"cp3\"); generate_exception (ctx, EXCP_RI); break; } } else { generate_exception_err(ctx, EXCP_CpU, 1); } break; #if defined(TARGET_MIPS64) /* MIPS64 opcodes */ case OPC_LDL ... OPC_LDR: case OPC_LLD: check_insn_opc_removed(ctx, ISA_MIPS32R6); case OPC_LWU: case OPC_LD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_ld(ctx, op, rt, rs, imm); break; case OPC_SDL ... OPC_SDR: check_insn_opc_removed(ctx, ISA_MIPS32R6); case OPC_SD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_st(ctx, op, rt, rs, imm); break; case OPC_SCD: check_insn_opc_removed(ctx, ISA_MIPS32R6); check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_st_cond(ctx, op, rt, rs, imm); break; case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */ if (ctx->insn_flags & ISA_MIPS32R6) { /* OPC_BNVC, OPC_BNEZALC, OPC_BNEC */ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } else { /* OPC_DADDI */ check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_arith_imm(ctx, op, rt, rs, imm); } break; case OPC_DADDIU: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); gen_arith_imm(ctx, op, rt, rs, imm); break; #else case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ if (ctx->insn_flags & ISA_MIPS32R6) { gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); } else { MIPS_INVAL(\"major opcode\"); generate_exception(ctx, EXCP_RI); } break; #endif case OPC_DAUI: /* OPC_JALX */ if (ctx->insn_flags & ISA_MIPS32R6) { #if defined(TARGET_MIPS64) /* OPC_DAUI */ check_mips_64(ctx); if (rt != 0) { TCGv t0 = tcg_temp_new(); gen_load_gpr(t0, rs); tcg_gen_addi_tl(cpu_gpr[rt], t0, imm << 16); tcg_temp_free(t0); } MIPS_DEBUG(\"daui %s, %s, %04x\", regnames[rt], regnames[rs], imm); #else generate_exception(ctx, EXCP_RI); MIPS_INVAL(\"major opcode\"); #endif } else { /* OPC_JALX */ check_insn(ctx, ASE_MIPS16 | ASE_MICROMIPS); offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; gen_compute_branch(ctx, op, 4, rs, rt, offset, 4); } break; case OPC_MDMX: check_insn(ctx, ASE_MDMX); /* MDMX: Not implemented. 
*/ break; case OPC_PCREL: check_insn(ctx, ISA_MIPS32R6); gen_pcrel(ctx, rs, imm); break; default: /* Invalid */ MIPS_INVAL(\"major opcode\"); generate_exception(ctx, EXCP_RI); break; } }"} {"target": 0, "idx": 15985, "func": "bool blk_dev_is_tray_open(BlockBackend *blk) { if (blk->dev_ops && blk->dev_ops->is_tray_open) { return blk->dev_ops->is_tray_open(blk->dev_opaque); } return false; }"} {"target": 1, "idx": 15987, "func": "static av_cold int tdsc_init(AVCodecContext *avctx) { TDSCContext *ctx = avctx->priv_data; const AVCodec *codec; int ret; avctx->pix_fmt = AV_PIX_FMT_BGR24; /* These needs to be set to estimate buffer and frame size */ if (!(avctx->width && avctx->height)) { av_log(avctx, AV_LOG_ERROR, \"Video size not set.\\n\"); return AVERROR_INVALIDDATA; } /* This value should be large enough for a RAW-only frame plus headers */ ctx->deflatelen = avctx->width * avctx->height * (3 + 1); ret = av_reallocp(&ctx->deflatebuffer, ctx->deflatelen); if (ret < 0) return ret; /* Allocate reference and JPEG frame */ ctx->refframe = av_frame_alloc(); ctx->jpgframe = av_frame_alloc(); if (!ctx->refframe || !ctx->jpgframe) return AVERROR(ENOMEM); /* Prepare everything needed for JPEG decoding */ codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG); if (!codec) return AVERROR_BUG; ctx->jpeg_avctx = avcodec_alloc_context3(codec); if (!ctx->jpeg_avctx) return AVERROR(ENOMEM); ctx->jpeg_avctx->flags = avctx->flags; ctx->jpeg_avctx->flags2 = avctx->flags2; ctx->jpeg_avctx->dct_algo = avctx->dct_algo; ctx->jpeg_avctx->idct_algo = avctx->idct_algo;; ret = avcodec_open2(ctx->jpeg_avctx, codec, NULL); if (ret < 0) return ret; /* Set the output pixel format on the reference frame */ ctx->refframe->format = avctx->pix_fmt; return 0; }"} {"target": 1, "idx": 15999, "func": "int hvf_vcpu_exec(CPUState *cpu) { X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; int ret = 0; uint64_t rip = 0; cpu->halted = 0; if (hvf_process_events(cpu)) { return EXCP_HLT; } do { if (cpu->vcpu_dirty) { hvf_put_registers(cpu); cpu->vcpu_dirty = false; } if (hvf_inject_interrupts(cpu)) { return EXCP_INTERRUPT; } vmx_update_tpr(cpu); qemu_mutex_unlock_iothread(); if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) { qemu_mutex_lock_iothread(); return EXCP_HLT; } hv_return_t r = hv_vcpu_run(cpu->hvf_fd); assert_hvf_ok(r); /* handle VMEXIT */ uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON); uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION); uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH); uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO); hvf_store_events(cpu, ins_len, idtvec_info); rip = rreg(cpu->hvf_fd, HV_X86_RIP); RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS); env->eflags = RFLAGS(env); qemu_mutex_lock_iothread(); update_apic_tpr(cpu); current_cpu = cpu; ret = 0; switch (exit_reason) { case EXIT_REASON_HLT: { macvm_set_rip(cpu, rip + ins_len); if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && (EFLAGS(env) & IF_MASK)) && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) && !(idtvec_info & VMCS_IDT_VEC_VALID)) { cpu->halted = 1; ret = EXCP_HLT; } ret = EXCP_INTERRUPT; break; } case EXIT_REASON_MWAIT: { ret = EXCP_INTERRUPT; break; } /* Need to check if MMIO or unmmaped fault */ case EXIT_REASON_EPT_FAULT: { hvf_slot *slot; addr_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS); if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) && ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) { vmx_set_nmi_blocking(cpu); } slot = hvf_find_overlap_slot(gpa, gpa); /* mmio 
*/ if (ept_emulation_fault(slot, gpa, exit_qual)) { struct x86_decode decode; load_regs(cpu); env->hvf_emul->fetch_rip = rip; decode_instruction(env, &decode); exec_instruction(env, &decode); store_regs(cpu); break; } break; } case EXIT_REASON_INOUT: { uint32_t in = (exit_qual & 8) != 0; uint32_t size = (exit_qual & 7) + 1; uint32_t string = (exit_qual & 16) != 0; uint32_t port = exit_qual >> 16; /*uint32_t rep = (exit_qual & 0x20) != 0;*/ #if 1 if (!string && in) { uint64_t val = 0; load_regs(cpu); hvf_handle_io(env, port, &val, 0, size, 1); if (size == 1) { AL(env) = val; } else if (size == 2) { AX(env) = val; } else if (size == 4) { RAX(env) = (uint32_t)val; } else { VM_PANIC(\"size\"); } RIP(env) += ins_len; store_regs(cpu); break; } else if (!string && !in) { RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX); hvf_handle_io(env, port, &RAX(env), 1, size, 1); macvm_set_rip(cpu, rip + ins_len); break; } #endif struct x86_decode decode; load_regs(cpu); env->hvf_emul->fetch_rip = rip; decode_instruction(env, &decode); VM_PANIC_ON(ins_len != decode.len); exec_instruction(env, &decode); store_regs(cpu); break; } case EXIT_REASON_CPUID: { uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX); uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX); uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX); uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX); cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx); wreg(cpu->hvf_fd, HV_X86_RAX, rax); wreg(cpu->hvf_fd, HV_X86_RBX, rbx); wreg(cpu->hvf_fd, HV_X86_RCX, rcx); wreg(cpu->hvf_fd, HV_X86_RDX, rdx); macvm_set_rip(cpu, rip + ins_len); break; } case EXIT_REASON_XSETBV: { X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX); uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX); uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX); if (ecx) { macvm_set_rip(cpu, rip + ins_len); break; } env->xcr0 = ((uint64_t)edx << 32) | eax; wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1); macvm_set_rip(cpu, rip + ins_len); break; } case EXIT_REASON_INTR_WINDOW: vmx_clear_int_window_exiting(cpu); ret = EXCP_INTERRUPT; break; case EXIT_REASON_NMI_WINDOW: vmx_clear_nmi_window_exiting(cpu); ret = EXCP_INTERRUPT; break; case EXIT_REASON_EXT_INTR: /* force exit and allow io handling */ ret = EXCP_INTERRUPT; break; case EXIT_REASON_RDMSR: case EXIT_REASON_WRMSR: { load_regs(cpu); if (exit_reason == EXIT_REASON_RDMSR) { simulate_rdmsr(cpu); } else { simulate_wrmsr(cpu); } RIP(env) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH); store_regs(cpu); break; } case EXIT_REASON_CR_ACCESS: { int cr; int reg; load_regs(cpu); cr = exit_qual & 15; reg = (exit_qual >> 8) & 15; switch (cr) { case 0x0: { macvm_set_cr0(cpu->hvf_fd, RRX(env, reg)); break; } case 4: { macvm_set_cr4(cpu->hvf_fd, RRX(env, reg)); break; } case 8: { X86CPU *x86_cpu = X86_CPU(cpu); if (exit_qual & 0x10) { RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state); } else { int tpr = RRX(env, reg); cpu_set_apic_tpr(x86_cpu->apic_state, tpr); ret = EXCP_INTERRUPT; } break; } default: error_report(\"Unrecognized CR %d\\n\", cr); abort(); } RIP(env) += ins_len; store_regs(cpu); break; } case EXIT_REASON_APIC_ACCESS: { /* TODO */ struct x86_decode decode; load_regs(cpu); env->hvf_emul->fetch_rip = rip; decode_instruction(env, &decode); exec_instruction(env, &decode); store_regs(cpu); break; } case EXIT_REASON_TPR: { ret = 1; break; } case EXIT_REASON_TASK_SWITCH: { uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO); x68_segment_selector sel 
= {.sel = exit_qual & 0xffff}; vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3, vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo & VMCS_INTR_T_MASK); break; } case EXIT_REASON_TRIPLE_FAULT: { qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); ret = EXCP_INTERRUPT; break; } case EXIT_REASON_RDPMC: wreg(cpu->hvf_fd, HV_X86_RAX, 0); wreg(cpu->hvf_fd, HV_X86_RDX, 0); macvm_set_rip(cpu, rip + ins_len); break; case VMX_REASON_VMCALL: /* TODO: inject #GP fault */ break; default: error_report(\"%llx: unhandled exit %llx\\n\", rip, exit_reason); } } while (ret == 0); return ret; }"} {"target": 0, "idx": 16020, "func": "int intel_h263_decode_picture_header(MpegEncContext *s) { int format; /* picture header */ if (get_bits_long(&s->gb, 22) != 0x20) { av_log(s->avctx, AV_LOG_ERROR, \"Bad picture start code\\n\"); return -1; } s->picture_number = get_bits(&s->gb, 8); /* picture timestamp */ if (get_bits1(&s->gb) != 1) { av_log(s->avctx, AV_LOG_ERROR, \"Bad marker\\n\"); return -1; /* marker */ } if (get_bits1(&s->gb) != 0) { av_log(s->avctx, AV_LOG_ERROR, \"Bad H263 id\\n\"); return -1; /* h263 id */ } skip_bits1(&s->gb); /* split screen off */ skip_bits1(&s->gb); /* camera off */ skip_bits1(&s->gb); /* freeze picture release off */ format = get_bits(&s->gb, 3); if (format != 7) { av_log(s->avctx, AV_LOG_ERROR, \"Intel H263 free format not supported\\n\"); return -1; } s->h263_plus = 0; s->pict_type = I_TYPE + get_bits1(&s->gb); s->unrestricted_mv = get_bits1(&s->gb); s->h263_long_vectors = s->unrestricted_mv; if (get_bits1(&s->gb) != 0) { av_log(s->avctx, AV_LOG_ERROR, \"SAC not supported\\n\"); return -1; /* SAC: off */ } if (get_bits1(&s->gb) != 0) { s->obmc= 1; av_log(s->avctx, AV_LOG_ERROR, \"Advanced Prediction Mode not supported\\n\"); // return -1; /* advanced prediction mode: off */ } if (get_bits1(&s->gb) != 0) { av_log(s->avctx, AV_LOG_ERROR, \"PB frame mode no supported\\n\"); return -1; /* PB frame mode */ } /* skip unknown header garbage */ skip_bits(&s->gb, 41); s->qscale = get_bits(&s->gb, 5); skip_bits1(&s->gb); /* Continuous Presence Multipoint mode: off */ /* PEI */ while (get_bits1(&s->gb) != 0) { skip_bits(&s->gb, 8); } s->f_code = 1; s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; return 0; }"} {"target": 1, "idx": 16047, "func": "int bdrv_pwrite(BlockDriverState *bs, int64_t offset, const void *buf1, int count1) { BlockDriver *drv = bs->drv; if (!drv) return -ENOMEDIUM; if (!drv->bdrv_pwrite) return bdrv_pwrite_em(bs, offset, buf1, count1); if (bdrv_wr_badreq_bytes(bs, offset, count1)) return -EDOM; return drv->bdrv_pwrite(bs, offset, buf1, count1); }"} {"target": 1, "idx": 16057, "func": "static int local_truncate(FsContext *ctx, V9fsPath *fs_path, off_t size) { char *buffer; int ret; char *path = fs_path->data; buffer = rpath(ctx, path); ret = truncate(buffer, size); g_free(buffer); return ret; }"} {"target": 0, "idx": 16068, "func": "void uuid_unparse(const uuid_t uu, char *out) { snprintf(out, 37, UUID_FMT, uu[0], uu[1], uu[2], uu[3], uu[4], uu[5], uu[6], uu[7], uu[8], uu[9], uu[10], uu[11], uu[12], uu[13], uu[14], uu[15]); }"} {"target": 0, "idx": 16083, "func": "static int stellaris_enet_can_receive(void *opaque) { stellaris_enet_state *s = (stellaris_enet_state *)opaque; if ((s->rctl & SE_RCTL_RXEN) == 0) return 1; return (s->np < 31); }"} {"target": 0, "idx": 16089, "func": "void acpi_memory_unplug_cb(MemHotplugState *mem_st, DeviceState *dev, Error **errp) { MemStatus *mdev; mdev = acpi_memory_slot_status(mem_st, dev, 
errp); if (!mdev) { return; } /* nvdimm device hot unplug is not supported yet. */ assert(!object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)); mdev->is_enabled = false; mdev->dimm = NULL; }"} {"target": 0, "idx": 16090, "func": "alloc_f(int argc, char **argv) { int64_t offset; int nb_sectors; char s1[64]; int num; int ret; const char *retstr; offset = cvtnum(argv[1]); if (offset & 0x1ff) { printf(\"offset %lld is not sector aligned\\n\", (long long)offset); return 0; } if (argc == 3) nb_sectors = cvtnum(argv[2]); else nb_sectors = 1; ret = bdrv_is_allocated(bs, offset >> 9, nb_sectors, &num); cvtstr(offset, s1, sizeof(s1)); retstr = ret ? \"allocated\" : \"not allocated\"; if (nb_sectors == 1) printf(\"sector %s at offset %s\\n\", retstr, s1); else printf(\"%d/%d sectors %s at offset %s\\n\", num, nb_sectors, retstr, s1); return 0; }"} {"target": 1, "idx": 16093, "func": "static void hmp_cont_cb(void *opaque, int err) { if (!err) { qmp_cont(NULL); } }"} {"target": 1, "idx": 16112, "func": "static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd) { PCIDevice *pci_dev = PCI_DEVICE(s); PCIDeviceClass *pci_class = PCI_DEVICE_GET_CLASS(pci_dev); MegasasBaseClass *base_class = MEGASAS_DEVICE_GET_CLASS(s); struct mfi_ctrl_info info; size_t dcmd_size = sizeof(info); BusChild *kid; int num_pd_disks = 0; memset(&info, 0x0, cmd->iov_size); if (cmd->iov_size < dcmd_size) { trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, dcmd_size); return MFI_STAT_INVALID_PARAMETER; } info.pci.vendor = cpu_to_le16(pci_class->vendor_id); info.pci.device = cpu_to_le16(pci_class->device_id); info.pci.subvendor = cpu_to_le16(pci_class->subsystem_vendor_id); info.pci.subdevice = cpu_to_le16(pci_class->subsystem_id); /* * For some reason the firmware supports * only up to 8 device ports. * Despite supporting a far larger number * of devices for the physical devices. * So just display the first 8 devices * in the device port list, independent * of how many logical devices are actually * present. 
*/ info.host.type = MFI_INFO_HOST_PCIE; info.device.type = MFI_INFO_DEV_SAS3G; info.device.port_count = 8; QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { SCSIDevice *sdev = DO_UPCAST(SCSIDevice, qdev, kid->child); uint16_t pd_id; if (num_pd_disks < 8) { pd_id = ((sdev->id & 0xFF) << 8) | (sdev->lun & 0xFF); info.device.port_addr[num_pd_disks] = cpu_to_le64(megasas_get_sata_addr(pd_id)); } num_pd_disks++; } memcpy(info.product_name, base_class->product_name, 24); snprintf(info.serial_number, 32, \"%s\", s->hba_serial); snprintf(info.package_version, 0x60, \"%s-QEMU\", qemu_hw_version()); memcpy(info.image_component[0].name, \"APP\", 3); snprintf(info.image_component[0].version, 10, \"%s-QEMU\", base_class->product_version); memcpy(info.image_component[0].build_date, \"Apr 1 2014\", 11); memcpy(info.image_component[0].build_time, \"12:34:56\", 8); info.image_component_count = 1; if (pci_dev->has_rom) { uint8_t biosver[32]; uint8_t *ptr; ptr = memory_region_get_ram_ptr(&pci_dev->rom); memcpy(biosver, ptr + 0x41, 31); memcpy(info.image_component[1].name, \"BIOS\", 4); memcpy(info.image_component[1].version, biosver, strlen((const char *)biosver)); info.image_component_count++; } info.current_fw_time = cpu_to_le32(megasas_fw_time()); info.max_arms = 32; info.max_spans = 8; info.max_arrays = MEGASAS_MAX_ARRAYS; info.max_lds = MFI_MAX_LD; info.max_cmds = cpu_to_le16(s->fw_cmds); info.max_sg_elements = cpu_to_le16(s->fw_sge); info.max_request_size = cpu_to_le32(MEGASAS_MAX_SECTORS); if (!megasas_is_jbod(s)) info.lds_present = cpu_to_le16(num_pd_disks); info.pd_present = cpu_to_le16(num_pd_disks); info.pd_disks_present = cpu_to_le16(num_pd_disks); info.hw_present = cpu_to_le32(MFI_INFO_HW_NVRAM | MFI_INFO_HW_MEM | MFI_INFO_HW_FLASH); info.memory_size = cpu_to_le16(512); info.nvram_size = cpu_to_le16(32); info.flash_size = cpu_to_le16(16); info.raid_levels = cpu_to_le32(MFI_INFO_RAID_0); info.adapter_ops = cpu_to_le32(MFI_INFO_AOPS_RBLD_RATE | MFI_INFO_AOPS_SELF_DIAGNOSTIC | MFI_INFO_AOPS_MIXED_ARRAY); info.ld_ops = cpu_to_le32(MFI_INFO_LDOPS_DISK_CACHE_POLICY | MFI_INFO_LDOPS_ACCESS_POLICY | MFI_INFO_LDOPS_IO_POLICY | MFI_INFO_LDOPS_WRITE_POLICY | MFI_INFO_LDOPS_READ_POLICY); info.max_strips_per_io = cpu_to_le16(s->fw_sge); info.stripe_sz_ops.min = 3; info.stripe_sz_ops.max = ctz32(MEGASAS_MAX_SECTORS + 1); info.properties.pred_fail_poll_interval = cpu_to_le16(300); info.properties.intr_throttle_cnt = cpu_to_le16(16); info.properties.intr_throttle_timeout = cpu_to_le16(50); info.properties.rebuild_rate = 30; info.properties.patrol_read_rate = 30; info.properties.bgi_rate = 30; info.properties.cc_rate = 30; info.properties.recon_rate = 30; info.properties.cache_flush_interval = 4; info.properties.spinup_drv_cnt = 2; info.properties.spinup_delay = 6; info.properties.ecc_bucket_size = 15; info.properties.ecc_bucket_leak_rate = cpu_to_le16(1440); info.properties.expose_encl_devices = 1; info.properties.OnOffProperties = cpu_to_le32(MFI_CTRL_PROP_EnableJBOD); info.pd_ops = cpu_to_le32(MFI_INFO_PDOPS_FORCE_ONLINE | MFI_INFO_PDOPS_FORCE_OFFLINE); info.pd_mix_support = cpu_to_le32(MFI_INFO_PDMIX_SAS | MFI_INFO_PDMIX_SATA | MFI_INFO_PDMIX_LD); cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); return MFI_STAT_OK; }"} {"target": 1, "idx": 16135, "func": "int ff_thread_video_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet_ptr){ ThreadContext *c = avctx->internal->frame_thread_encoder; Task task; int ret; av_assert1(!*got_packet_ptr); 
if(frame){ if(!(avctx->flags & CODEC_FLAG_INPUT_PRESERVED)){ AVFrame *new = avcodec_alloc_frame(); if(!new) return AVERROR(ENOMEM); pthread_mutex_lock(&c->buffer_mutex); ret = c->parent_avctx->get_buffer(c->parent_avctx, new); pthread_mutex_unlock(&c->buffer_mutex); if(ret<0) return ret; new->pts = frame->pts; new->quality = frame->quality; new->pict_type = frame->pict_type; av_image_copy(new->data, new->linesize, (const uint8_t **)frame->data, frame->linesize, avctx->pix_fmt, avctx->width, avctx->height); frame = new; } task.index = c->task_index; task.indata = (void*)frame; pthread_mutex_lock(&c->task_fifo_mutex); av_fifo_generic_write(c->task_fifo, &task, sizeof(task), NULL); pthread_cond_signal(&c->task_fifo_cond); pthread_mutex_unlock(&c->task_fifo_mutex); c->task_index = (c->task_index+1) % BUFFER_SIZE; if(!c->finished_tasks[c->finished_task_index].outdata && (c->task_index - c->finished_task_index) % BUFFER_SIZE <= avctx->thread_count) return 0; } if(c->task_index == c->finished_task_index) return 0; pthread_mutex_lock(&c->finished_task_mutex); while (!c->finished_tasks[c->finished_task_index].outdata) { pthread_cond_wait(&c->finished_task_cond, &c->finished_task_mutex); } task = c->finished_tasks[c->finished_task_index]; *pkt = *(AVPacket*)(task.outdata); av_freep(&c->finished_tasks[c->finished_task_index].outdata); c->finished_task_index = (c->finished_task_index+1) % BUFFER_SIZE; pthread_mutex_unlock(&c->finished_task_mutex); *got_packet_ptr = 1; return task.return_code; }"} {"target": 0, "idx": 16147, "func": "int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, void *opaque, int version_id) { VMStateField *field = vmsd->fields; if (version_id > vmsd->version_id) { return -EINVAL; } if (version_id < vmsd->minimum_version_id_old) { return -EINVAL; } if (version_id < vmsd->minimum_version_id) { return vmsd->load_state_old(f, opaque, version_id); } while(field->name) { if (field->version_id <= version_id) { void *base_addr = opaque + field->offset; int ret, i, n_elems = 1; if (field->flags & VMS_ARRAY) { n_elems = field->num; } else if (field->flags & VMS_VARRAY) { n_elems = *(size_t *)(opaque+field->num_offset); } if (field->flags & VMS_POINTER) { base_addr = *(void **)base_addr; } for (i = 0; i < n_elems; i++) { void *addr = base_addr + field->size * i; if (field->flags & VMS_STRUCT) { ret = vmstate_load_state(f, field->vmsd, addr, version_id); } else { ret = field->info->get(f, addr, field->size); } if (ret < 0) { return ret; } } } field++; } if (vmsd->run_after_load) return vmsd->run_after_load(opaque); return 0; }"} {"target": 0, "idx": 16155, "func": "static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh) { TCGReg tmp0 = TCG_TMP0; TCGReg tmp1 = ret; assert(ret != TCG_TMP0); if (ret == ah || ret == bh) { assert(ret != TCG_TMP1); tmp1 = TCG_TMP1; } switch (cond) { case TCG_COND_EQ: case TCG_COND_NE: tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh); tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO); break; default: tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh); tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl); tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0); tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh); tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0); break; } }"} {"target": 0, "idx": 16163, "func": "static void display_mouse_define(DisplayChangeListener *dcl, QEMUCursor *c) { SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); qemu_mutex_lock(&ssd->lock); if (c) { 
cursor_get(c); } cursor_put(ssd->cursor); ssd->cursor = c; ssd->hot_x = c->hot_x; ssd->hot_y = c->hot_y; g_free(ssd->ptr_move); ssd->ptr_move = NULL; g_free(ssd->ptr_define); ssd->ptr_define = qemu_spice_create_cursor_update(ssd, c, 0); qemu_mutex_unlock(&ssd->lock); }"} {"target": 0, "idx": 16164, "func": "static inline void stw_phys_internal(hwaddr addr, uint32_t val, enum device_endian endian) { uint8_t *ptr; MemoryRegionSection *section; section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS); if (!memory_region_is_ram(section->mr) || section->readonly) { addr = memory_region_section_addr(section, addr); if (memory_region_is_ram(section->mr)) { section = &phys_sections[phys_section_rom]; } #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap16(val); } #else if (endian == DEVICE_BIG_ENDIAN) { val = bswap16(val); } #endif io_mem_write(section->mr, addr, val, 2); } else { unsigned long addr1; addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) + memory_region_section_addr(section, addr); /* RAM case */ ptr = qemu_get_ram_ptr(addr1); switch (endian) { case DEVICE_LITTLE_ENDIAN: stw_le_p(ptr, val); break; case DEVICE_BIG_ENDIAN: stw_be_p(ptr, val); break; default: stw_p(ptr, val); break; } invalidate_and_set_dirty(addr1, 2); } }"} {"target": 1, "idx": 16177, "func": "static void compute_scale_factors(unsigned char scale_code[SBLIMIT], unsigned char scale_factors[SBLIMIT][3], int sb_samples[3][12][SBLIMIT], int sblimit) { int *p, vmax, v, n, i, j, k, code; int index, d1, d2; unsigned char *sf = &scale_factors[0][0]; for(j=0;j vmax) vmax = v; } /* compute the scale factor index using log 2 computations */ if (vmax > 0) { n = av_log2(vmax); /* n is the position of the MSB of vmax. 
now use at most 2 compares to find the index */ index = (21 - n) * 3 - 3; if (index >= 0) { while (vmax <= scale_factor_table[index+1]) index++; } else { index = 0; /* very unlikely case of overflow */ } } else { index = 63; } #if 0 printf(\"%2d:%d in=%x %x %d\\n\", j, i, vmax, scale_factor_table[index], index); #endif /* store the scale factor */ assert(index >=0 && index <= 63); sf[i] = index; } /* compute the transmission factor : look if the scale factors are close enough to each other */ d1 = scale_diff_table[sf[0] - sf[1] + 64]; d2 = scale_diff_table[sf[1] - sf[2] + 64]; /* handle the 25 cases */ switch(d1 * 5 + d2) { case 0*5+0: case 0*5+4: case 3*5+4: case 4*5+0: case 4*5+4: code = 0; break; case 0*5+1: case 0*5+2: case 4*5+1: case 4*5+2: code = 3; sf[2] = sf[1]; break; case 0*5+3: case 4*5+3: code = 3; sf[1] = sf[2]; break; case 1*5+0: case 1*5+4: case 2*5+4: code = 1; sf[1] = sf[0]; break; case 1*5+1: case 1*5+2: case 2*5+0: case 2*5+1: case 2*5+2: code = 2; sf[1] = sf[2] = sf[0]; break; case 2*5+3: case 3*5+3: code = 2; sf[0] = sf[1] = sf[2]; break; case 3*5+0: case 3*5+1: case 3*5+2: code = 2; sf[0] = sf[2] = sf[1]; break; case 1*5+3: code = 2; if (sf[0] > sf[2]) sf[0] = sf[2]; sf[1] = sf[2] = sf[0]; break; default: abort(); } #if 0 printf(\"%d: %2d %2d %2d %d %d -> %d\\n\", j, sf[0], sf[1], sf[2], d1, d2, code); #endif scale_code[j] = code; sf += 3; } }"} {"target": 0, "idx": 16183, "func": "static void opt_new_stream(const char *opt, const char *arg) { AVFormatContext *oc; if (nb_output_files <= 0) { fprintf(stderr, \"At least one output file must be specified\\n\"); ffmpeg_exit(1); } oc = output_files[nb_output_files - 1]; if (!strcmp(opt, \"newvideo\" )) new_video_stream (oc); else if (!strcmp(opt, \"newaudio\" )) new_audio_stream (oc); else if (!strcmp(opt, \"newsubtitle\")) new_subtitle_stream(oc); else assert(0); }"} {"target": 1, "idx": 16190, "func": "static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, unsigned int width, unsigned int height, int lumStride, int chromStride, int srcStride) { unsigned y; const unsigned chromWidth= width>>1; for(y=0; y= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE || src_size < 0) return AVERROR_INVALIDDATA; bytestream2_skip(gb, 4); freq = av_calloc(HUF_ENCSIZE, sizeof(*freq)); hdec = av_calloc(HUF_DECSIZE, sizeof(*hdec)); if (!freq || !hdec) { ret = AVERROR(ENOMEM); goto fail; } if ((ret = huf_unpack_enc_table(gb, im, iM, freq)) < 0) goto fail; if (nBits > 8 * bytestream2_get_bytes_left(gb)) { ret = AVERROR_INVALIDDATA; goto fail; } if ((ret = huf_build_dec_table(freq, im, iM, hdec)) < 0) goto fail; ret = huf_decode(freq, hdec, gb, nBits, iM, dst_size, dst); fail: for (i = 0; i < HUF_DECSIZE; i++) { if (hdec[i].p) av_freep(&hdec[i].p); } av_free(freq); av_free(hdec); return ret; }"} {"target": 1, "idx": 16203, "func": "static int hevc_frame_start(HEVCContext *s) { HEVCLocalContext *lc = &s->HEVClc; int ret; memset(s->horizontal_bs, 0, 2 * s->bs_width * (s->bs_height + 1)); memset(s->vertical_bs, 0, 2 * s->bs_width * (s->bs_height + 1)); memset(s->cbf_luma, 0, s->sps->min_tb_width * s->sps->min_tb_height); memset(s->is_pcm, 0, s->sps->min_pu_width * s->sps->min_pu_height); lc->start_of_tiles_x = 0; s->is_decoded = 0; if (s->pps->tiles_enabled_flag) lc->end_of_tiles_x = s->pps->column_width[0] << s->sps->log2_ctb_size; ret = ff_hevc_set_new_ref(s, s->sps->sao_enabled ? 
&s->sao_frame : &s->frame, s->poc); if (ret < 0) goto fail; ret = ff_hevc_frame_rps(s); if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, \"Error constructing the frame RPS.\\n\"); goto fail; } ret = set_side_data(s); if (ret < 0) goto fail; av_frame_unref(s->output_frame); ret = ff_hevc_output_frame(s, s->output_frame, 0); if (ret < 0) goto fail; ff_thread_finish_setup(s->avctx); return 0; fail: if (s->ref) ff_thread_report_progress(&s->ref->tf, INT_MAX, 0); s->ref = NULL; return ret; }"} {"target": 1, "idx": 16204, "func": "static int local_open2(FsContext *fs_ctx, V9fsPath *dir_path, const char *name, int flags, FsCred *credp, V9fsFidOpenState *fs) { char *path; int fd = -1; int err = -1; int serrno = 0; V9fsString fullname; char *buffer; /* * Mark all the open to not follow symlinks */ flags |= O_NOFOLLOW; v9fs_string_init(&fullname); v9fs_string_sprintf(&fullname, \"%s/%s\", dir_path->data, name); path = fullname.data; /* Determine the security model */ if (fs_ctx->export_flags & V9FS_SM_MAPPED) { buffer = rpath(fs_ctx, path); fd = open(buffer, flags, SM_LOCAL_MODE_BITS); if (fd == -1) { g_free(buffer); err = fd; goto out; } credp->fc_mode = credp->fc_mode|S_IFREG; /* Set cleint credentials in xattr */ err = local_set_xattr(buffer, credp); if (err == -1) { serrno = errno; goto err_end; } } else if (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) { buffer = rpath(fs_ctx, path); fd = open(buffer, flags, SM_LOCAL_MODE_BITS); if (fd == -1) { g_free(buffer); err = fd; goto out; } credp->fc_mode = credp->fc_mode|S_IFREG; /* Set client credentials in .virtfs_metadata directory files */ err = local_set_mapped_file_attr(fs_ctx, path, credp); if (err == -1) { serrno = errno; goto err_end; } } else if ((fs_ctx->export_flags & V9FS_SM_PASSTHROUGH) || (fs_ctx->export_flags & V9FS_SM_NONE)) { buffer = rpath(fs_ctx, path); fd = open(buffer, flags, credp->fc_mode); if (fd == -1) { g_free(buffer); err = fd; goto out; } err = local_post_create_passthrough(fs_ctx, path, credp); if (err == -1) { serrno = errno; goto err_end; } } err = fd; fs->fd = fd; goto out; err_end: close(fd); remove(buffer); errno = serrno; g_free(buffer); out: v9fs_string_free(&fullname); return err; }"} {"target": 1, "idx": 16205, "func": "static void fix_coding_method_array (int sb, int channels, sb_int8_array coding_method) { int j,k; int ch; int run, case_val; int switchtable[23] = {0,5,1,5,5,5,5,5,2,5,5,5,5,5,5,5,3,5,5,5,5,5,4}; for (ch = 0; ch < channels; ch++) { for (j = 0; j < 64; ) { if((coding_method[ch][sb][j] - 8) > 22) { run = 1; case_val = 8; } else { switch (switchtable[coding_method[ch][sb][j]]) { case 0: run = 10; case_val = 10; break; case 1: run = 1; case_val = 16; break; case 2: run = 5; case_val = 24; break; case 3: run = 3; case_val = 30; break; case 4: run = 1; case_val = 30; break; case 5: run = 1; case_val = 8; break; default: run = 1; case_val = 8; break; } } for (k = 0; k < run; k++) if (j + k < 128) if (coding_method[ch][sb + (j + k) / 64][(j + k) % 64] > coding_method[ch][sb][j]) if (k > 0) { SAMPLES_NEEDED //not debugged, almost never used memset(&coding_method[ch][sb][j + k], case_val, k * sizeof(int8_t)); memset(&coding_method[ch][sb][j + k], case_val, 3 * sizeof(int8_t)); } j += run; } } }"} {"target": 1, "idx": 16207, "func": "static void gen_mtdcrx(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); #else if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } /* NIP cannot be restored if the memory exception comes from an helper 
*/ gen_update_nip(ctx, ctx->nip - 4); gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); /* Note: Rc update flag set leads to undefined state of Rc0 */ #endif }"} {"target": 1, "idx": 16211, "func": "int usb_packet_map(USBPacket *p, QEMUSGList *sgl) { int is_write = (p->pid == USB_TOKEN_IN); target_phys_addr_t len; void *mem; int i; for (i = 0; i < sgl->nsg; i++) { len = sgl->sg[i].len; mem = cpu_physical_memory_map(sgl->sg[i].base, &len, is_write); if (!mem) { goto err; } qemu_iovec_add(&p->iov, mem, len); if (len != sgl->sg[i].len) { goto err; } } return 0; err: usb_packet_unmap(p); return -1; }"} {"target": 1, "idx": 16216, "func": "static void get_sensor_evt_status(IPMIBmcSim *ibs, uint8_t *cmd, unsigned int cmd_len, uint8_t *rsp, unsigned int *rsp_len, unsigned int max_rsp_len) { IPMISensor *sens; IPMI_CHECK_CMD_LEN(3); if ((cmd[2] > MAX_SENSORS) || !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) { rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT; return; } sens = ibs->sensors + cmd[2]; IPMI_ADD_RSP_DATA(sens->reading); IPMI_ADD_RSP_DATA(IPMI_SENSOR_GET_RET_STATUS(sens)); IPMI_ADD_RSP_DATA(sens->assert_states & 0xff); IPMI_ADD_RSP_DATA((sens->assert_states >> 8) & 0xff); IPMI_ADD_RSP_DATA(sens->deassert_states & 0xff); IPMI_ADD_RSP_DATA((sens->deassert_states >> 8) & 0xff); }"} {"target": 0, "idx": 16227, "func": "void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64]) { int i; if (s->chroma_format == CHROMA_444) { encode_block(s, block[0], 0); encode_block(s, block[2], 2); encode_block(s, block[4], 4); encode_block(s, block[8], 8); encode_block(s, block[5], 5); encode_block(s, block[9], 9); if (16*s->mb_x+8 < s->width) { encode_block(s, block[1], 1); encode_block(s, block[3], 3); encode_block(s, block[6], 6); encode_block(s, block[10], 10); encode_block(s, block[7], 7); encode_block(s, block[11], 11); } } else { for(i=0;i<5;i++) { encode_block(s, block[i], i); } if (s->chroma_format == CHROMA_420) { encode_block(s, block[5], 5); } else { encode_block(s, block[6], 6); encode_block(s, block[5], 5); encode_block(s, block[7], 7); } } }"} {"target": 0, "idx": 16237, "func": "int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, int ret, int rw) { int store = 0; /* Update page flags */ if (!(*pte1p & 0x00000100)) { /* Update accessed flag */ *pte1p |= 0x00000100; store = 1; } if (!(*pte1p & 0x00000080)) { if (rw == 1 && ret == 0) { /* Update changed flag */ *pte1p |= 0x00000080; store = 1; } else { /* Force page fault for first write access */ ctx->prot &= ~PAGE_WRITE; } } return store; }"} {"target": 0, "idx": 16246, "func": "static void enable_device(AcpiPciHpState *s, unsigned bsel, int slot) { s->acpi_pcihp_pci_status[bsel].device_present |= (1U << slot); }"} {"target": 0, "idx": 16255, "func": "void ff_avg_h264_qpel16_mc12_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_midh_qrt_and_aver_dst_16w_msa(src - (2 * stride) - 2, stride, dst, stride, 16, 0); }"} {"target": 0, "idx": 16256, "func": "int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx) { char *tail, color_string2[128]; const ColorEntry *entry; int len, hex_offset = 0; if (color_string[0] == '#') { hex_offset = 1; } else if (!strncmp(color_string, \"0x\", 2)) hex_offset = 2; if (slen < 0) slen = strlen(color_string); av_strlcpy(color_string2, color_string + hex_offset, FFMIN(slen-hex_offset+1, sizeof(color_string2))); if ((tail = strchr(color_string2, ALPHA_SEP))) *tail++ = 0; len = strlen(color_string2); rgba_color[3] = 255; if 
(!av_strcasecmp(color_string2, \"random\") || !av_strcasecmp(color_string2, \"bikeshed\")) { int rgba = av_get_random_seed(); rgba_color[0] = rgba >> 24; rgba_color[1] = rgba >> 16; rgba_color[2] = rgba >> 8; rgba_color[3] = rgba; } else if (hex_offset || strspn(color_string2, \"0123456789ABCDEFabcdef\") == len) { char *tail; unsigned int rgba = strtoul(color_string2, &tail, 16); if (*tail || (len != 6 && len != 8)) { av_log(log_ctx, AV_LOG_ERROR, \"Invalid 0xRRGGBB[AA] color string: '%s'\\n\", color_string2); return AVERROR(EINVAL); } if (len == 8) { rgba_color[3] = rgba; rgba >>= 8; } rgba_color[0] = rgba >> 16; rgba_color[1] = rgba >> 8; rgba_color[2] = rgba; } else { entry = bsearch(color_string2, color_table, FF_ARRAY_ELEMS(color_table), sizeof(ColorEntry), color_table_compare); if (!entry) { av_log(log_ctx, AV_LOG_ERROR, \"Cannot find color '%s'\\n\", color_string2); return AVERROR(EINVAL); } memcpy(rgba_color, entry->rgb_color, 3); } if (tail) { unsigned long int alpha; const char *alpha_string = tail; if (!strncmp(alpha_string, \"0x\", 2)) { alpha = strtoul(alpha_string, &tail, 16); } else { alpha = 255 * strtod(alpha_string, &tail); } if (tail == alpha_string || *tail || alpha > 255) { av_log(log_ctx, AV_LOG_ERROR, \"Invalid alpha value specifier '%s' in '%s'\\n\", alpha_string, color_string); return AVERROR(EINVAL); } rgba_color[3] = alpha; } return 0; }"} {"target": 0, "idx": 16274, "func": "static int raw_inactivate(BlockDriverState *bs) { int ret; uint64_t perm = 0; uint64_t shared = BLK_PERM_ALL; ret = raw_handle_perm_lock(bs, RAW_PL_PREPARE, perm, shared, NULL); if (ret) { return ret; } raw_handle_perm_lock(bs, RAW_PL_COMMIT, perm, shared, NULL); return 0; }"} {"target": 0, "idx": 16276, "func": "static void qmp_output_type_any(Visitor *v, const char *name, QObject **obj, Error **errp) { QmpOutputVisitor *qov = to_qov(v); qobject_incref(*obj); qmp_output_add_obj(qov, name, *obj); }"} {"target": 0, "idx": 16281, "func": "static int setup_sigcontext(struct target_sigcontext *sc, CPUSH4State *regs, unsigned long mask) { int err = 0; int i; #define COPY(x) __put_user(regs->x, &sc->sc_##x) COPY(gregs[0]); COPY(gregs[1]); COPY(gregs[2]); COPY(gregs[3]); COPY(gregs[4]); COPY(gregs[5]); COPY(gregs[6]); COPY(gregs[7]); COPY(gregs[8]); COPY(gregs[9]); COPY(gregs[10]); COPY(gregs[11]); COPY(gregs[12]); COPY(gregs[13]); COPY(gregs[14]); COPY(gregs[15]); COPY(gbr); COPY(mach); COPY(macl); COPY(pr); COPY(sr); COPY(pc); #undef COPY for (i=0; i<16; i++) { __put_user(regs->fregs[i], &sc->sc_fpregs[i]); } __put_user(regs->fpscr, &sc->sc_fpscr); __put_user(regs->fpul, &sc->sc_fpul); /* non-iBCS2 extensions.. */ __put_user(mask, &sc->oldmask); return err; }"} {"target": 1, "idx": 16293, "func": "static void vfio_unmap_bar(VFIODevice *vdev, int nr) { VFIOBAR *bar = &vdev->bars[nr]; if (!bar->size) { return; } vfio_bar_quirk_teardown(vdev, nr); memory_region_del_subregion(&bar->mem, &bar->mmap_mem); munmap(bar->mmap, memory_region_size(&bar->mmap_mem)); if (vdev->msix && vdev->msix->table_bar == nr) { memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem); munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem)); } memory_region_destroy(&bar->mem); }"} {"target": 0, "idx": 16318, "func": "static bool ept_emulation_fault(uint64_t ept_qual) { int read, write; /* EPT fault on an instruction fetch doesn't make sense here */ if (ept_qual & EPT_VIOLATION_INST_FETCH) { return false; } /* EPT fault must be a read fault or a write fault */ read = ept_qual & EPT_VIOLATION_DATA_READ ? 
1 : 0; write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0; if ((read | write) == 0) { return false; } /* * The EPT violation must have been caused by accessing a * guest-physical address that is a translation of a guest-linear * address. */ if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 || (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) { return false; } return true; }"} {"target": 0, "idx": 16335, "func": "yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes) { int i; int A = 0xffff<<14; for (i = 0; i < dstW; i++) { int j; int Y = -0x40000000; int U = -128 << 23; // 19 int V = -128 << 23; int R, G, B; for (j = 0; j < lumFilterSize; j++) { Y += lumSrc[j][i] * (unsigned)lumFilter[j]; } for (j = 0; j < chrFilterSize; j++) {; U += chrUSrc[j][i] * (unsigned)chrFilter[j]; V += chrVSrc[j][i] * (unsigned)chrFilter[j]; } if (hasAlpha) { A = -0x40000000; for (j = 0; j < lumFilterSize; j++) { A += alpSrc[j][i] * (unsigned)lumFilter[j]; } A >>= 1; A += 0x20002000; } // 8bit: 12+15=27; 16-bit: 12+19=31 Y >>= 14; // 10 Y += 0x10000; U >>= 14; V >>= 14; // 8bit: 27 -> 17bit, 16bit: 31 - 14 = 17bit Y -= c->yuv2rgb_y_offset; Y *= c->yuv2rgb_y_coeff; Y += 1 << 13; // 21 // 8bit: 17 + 13bit = 30bit, 16bit: 17 + 13bit = 30bit R = V * c->yuv2rgb_v2r_coeff; G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; // 8bit: 30 - 22 = 8bit, 16bit: 30bit - 14 = 16bit output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14); output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14); output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14); if (eightbytes) { output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14); dest += 4; } else { dest += 3; } } }"} {"target": 1, "idx": 16352, "func": "static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc) { int addr_regl, addr_reg1, addr_meml; int data_regl, data_regh, data_reg1, data_reg2; int mem_index, s_bits; #if defined(CONFIG_SOFTMMU) void *label1_ptr, *label2_ptr; int sp_args; #endif #if TARGET_LONG_BITS == 64 # if defined(CONFIG_SOFTMMU) uint8_t *label3_ptr; # endif int addr_regh, addr_reg2, addr_memh; #endif data_regl = *args++; if (opc == 3) data_regh = *args++; else data_regh = 0; addr_regl = *args++; #if TARGET_LONG_BITS == 64 addr_regh = *args++; #endif mem_index = *args; s_bits = opc & 3; if (opc == 3) { #if defined(TCG_TARGET_WORDS_BIGENDIAN) data_reg1 = data_regh; data_reg2 = data_regl; #else data_reg1 = data_regl; data_reg2 = data_regh; #endif } else { data_reg1 = data_regl; data_reg2 = 0; } #if TARGET_LONG_BITS == 64 # if defined(TCG_TARGET_WORDS_BIGENDIAN) addr_reg1 = addr_regh; addr_reg2 = addr_regl; addr_memh = 0; addr_meml = 4; # else addr_reg1 = addr_regl; addr_reg2 = addr_regh; addr_memh = 4; addr_meml = 0; # endif #else addr_reg1 = addr_regl; addr_meml = 0; #endif #if defined(CONFIG_SOFTMMU) tcg_out_opc_sa(s, OPC_SRL, TCG_REG_A0, addr_regl, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0); tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0, offsetof(CPUState, tlb_table[mem_index][0].addr_read) + addr_meml); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T0, TARGET_PAGE_MASK | ((1 << s_bits) - 1)); 
tcg_out_opc_reg(s, OPC_AND, TCG_REG_T0, TCG_REG_T0, addr_regl); # if TARGET_LONG_BITS == 64 label3_ptr = s->code_ptr; tcg_out_opc_imm(s, OPC_BNE, TCG_REG_T0, TCG_REG_AT, 0); tcg_out_nop(s); tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0, offsetof(CPUState, tlb_table[mem_index][0].addr_read) + addr_memh); label1_ptr = s->code_ptr; tcg_out_opc_imm(s, OPC_BEQ, addr_regh, TCG_REG_AT, 0); tcg_out_nop(s); reloc_pc16(label3_ptr, (tcg_target_long) s->code_ptr); # else label1_ptr = s->code_ptr; tcg_out_opc_imm(s, OPC_BEQ, TCG_REG_T0, TCG_REG_AT, 0); tcg_out_nop(s); # endif /* slow path */ sp_args = TCG_REG_A0; tcg_out_mov(s, sp_args++, addr_reg1); # if TARGET_LONG_BITS == 64 tcg_out_mov(s, sp_args++, addr_reg2); # endif tcg_out_movi(s, TCG_TYPE_I32, sp_args++, mem_index); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T9, (tcg_target_long)qemu_ld_helpers[s_bits]); tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0); tcg_out_nop(s); switch(opc) { case 0: tcg_out_opc_imm(s, OPC_ANDI, data_reg1, TCG_REG_V0, 0xff); break; case 0 | 4: tcg_out_opc_sa(s, OPC_SLL, TCG_REG_V0, TCG_REG_V0, 24); tcg_out_opc_sa(s, OPC_SRA, data_reg1, TCG_REG_V0, 24); break; case 1: tcg_out_opc_imm(s, OPC_ANDI, data_reg1, TCG_REG_V0, 0xffff); break; case 1 | 4: tcg_out_opc_sa(s, OPC_SLL, TCG_REG_V0, TCG_REG_V0, 16); tcg_out_opc_sa(s, OPC_SRA, data_reg1, TCG_REG_V0, 16); break; case 2: tcg_out_mov(s, data_reg1, TCG_REG_V0); break; case 3: tcg_out_mov(s, data_reg2, TCG_REG_V1); tcg_out_mov(s, data_reg1, TCG_REG_V0); break; default: tcg_abort(); } label2_ptr = s->code_ptr; tcg_out_opc_imm(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO, 0); tcg_out_nop(s); /* label1: fast path */ reloc_pc16(label1_ptr, (tcg_target_long) s->code_ptr); tcg_out_opc_imm(s, OPC_LW, TCG_REG_V0, TCG_REG_A0, offsetof(CPUState, tlb_table[mem_index][0].addend) + addr_meml); tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_V0, TCG_REG_V0, addr_regl); addr_reg1 = TCG_REG_V0; #endif switch(opc) { case 0: tcg_out_opc_imm(s, OPC_LBU, data_reg1, addr_reg1, 0); break; case 0 | 4: tcg_out_opc_imm(s, OPC_LB, data_reg1, addr_reg1, 0); break; case 1: if (TCG_NEED_BSWAP) { tcg_out_opc_imm(s, OPC_LHU, TCG_REG_T0, addr_reg1, 0); tcg_out_bswap16(s, data_reg1, TCG_REG_T0); } else { tcg_out_opc_imm(s, OPC_LHU, data_reg1, addr_reg1, 0); } break; case 1 | 4: if (TCG_NEED_BSWAP) { tcg_out_opc_imm(s, OPC_LHU, TCG_REG_T0, addr_reg1, 0); tcg_out_bswap16s(s, data_reg1, TCG_REG_T0); } else { tcg_out_opc_imm(s, OPC_LH, data_reg1, addr_reg1, 0); } break; case 2: if (TCG_NEED_BSWAP) { tcg_out_opc_imm(s, OPC_LW, TCG_REG_T0, addr_reg1, 0); tcg_out_bswap32(s, data_reg1, TCG_REG_T0); } else { tcg_out_opc_imm(s, OPC_LW, data_reg1, addr_reg1, 0); } break; case 3: #if !defined(CONFIG_SOFTMMU) tcg_out_mov(s, TCG_REG_V0, addr_reg1); addr_reg1 = TCG_REG_V0; #endif if (TCG_NEED_BSWAP) { tcg_out_opc_imm(s, OPC_LW, TCG_REG_T0, addr_reg1, 4); tcg_out_bswap32(s, data_reg1, TCG_REG_T0); tcg_out_opc_imm(s, OPC_LW, TCG_REG_T0, addr_reg1, 0); tcg_out_bswap32(s, data_reg2, TCG_REG_T0); } else { tcg_out_opc_imm(s, OPC_LW, data_reg1, addr_reg1, 0); tcg_out_opc_imm(s, OPC_LW, data_reg2, addr_reg1, 4); } break; default: tcg_abort(); } #if defined(CONFIG_SOFTMMU) reloc_pc16(label2_ptr, (tcg_target_long) s->code_ptr); #endif }"} {"target": 1, "idx": 16360, "func": "static void dec_calc(DisasContext *dc, uint32_t insn) { uint32_t op0, op1, op2; uint32_t ra, rb, rd; op0 = extract32(insn, 0, 4); op1 = extract32(insn, 8, 2); op2 = extract32(insn, 6, 2); ra = extract32(insn, 16, 5); rb = extract32(insn, 11, 5); rd = extract32(insn, 21, 
5); switch (op0) { case 0x0000: switch (op1) { case 0x00: /* l.add */ LOG_DIS(\"l.add r%d, r%d, r%d\\n\", rd, ra, rb); { TCGLabel *lab = gen_new_label(); TCGv_i64 ta = tcg_temp_new_i64(); TCGv_i64 tb = tcg_temp_new_i64(); TCGv_i64 td = tcg_temp_local_new_i64(); TCGv_i32 res = tcg_temp_local_new_i32(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); tcg_gen_extu_i32_i64(ta, cpu_R[ra]); tcg_gen_extu_i32_i64(tb, cpu_R[rb]); tcg_gen_add_i64(td, ta, tb); tcg_gen_extrl_i64_i32(res, td); tcg_gen_shri_i64(td, td, 31); tcg_gen_andi_i64(td, td, 0x3); /* Jump to lab when no overflow. */ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab); tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab); tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab); gen_exception(dc, EXCP_RANGE); gen_set_label(lab); tcg_gen_mov_i32(cpu_R[rd], res); tcg_temp_free_i64(ta); tcg_temp_free_i64(tb); tcg_temp_free_i64(td); tcg_temp_free_i32(res); tcg_temp_free_i32(sr_ove); } break; default: gen_illegal_exception(dc); break; } break; case 0x0001: /* l.addc */ switch (op1) { case 0x00: LOG_DIS(\"l.addc r%d, r%d, r%d\\n\", rd, ra, rb); { TCGLabel *lab = gen_new_label(); TCGv_i64 ta = tcg_temp_new_i64(); TCGv_i64 tb = tcg_temp_new_i64(); TCGv_i64 tcy = tcg_temp_local_new_i64(); TCGv_i64 td = tcg_temp_local_new_i64(); TCGv_i32 res = tcg_temp_local_new_i32(); TCGv_i32 sr_cy = tcg_temp_local_new_i32(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); tcg_gen_extu_i32_i64(ta, cpu_R[ra]); tcg_gen_extu_i32_i64(tb, cpu_R[rb]); tcg_gen_andi_i32(sr_cy, cpu_sr, SR_CY); tcg_gen_extu_i32_i64(tcy, sr_cy); tcg_gen_shri_i64(tcy, tcy, 10); tcg_gen_add_i64(td, ta, tb); tcg_gen_add_i64(td, td, tcy); tcg_gen_extrl_i64_i32(res, td); tcg_gen_shri_i64(td, td, 32); tcg_gen_andi_i64(td, td, 0x3); /* Jump to lab when no overflow. */ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab); tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab); tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab); gen_exception(dc, EXCP_RANGE); gen_set_label(lab); tcg_gen_mov_i32(cpu_R[rd], res); tcg_temp_free_i64(ta); tcg_temp_free_i64(tb); tcg_temp_free_i64(tcy); tcg_temp_free_i64(td); tcg_temp_free_i32(res); tcg_temp_free_i32(sr_cy); tcg_temp_free_i32(sr_ove); } break; default: gen_illegal_exception(dc); break; } break; case 0x0002: /* l.sub */ switch (op1) { case 0x00: LOG_DIS(\"l.sub r%d, r%d, r%d\\n\", rd, ra, rb); { TCGLabel *lab = gen_new_label(); TCGv_i64 ta = tcg_temp_new_i64(); TCGv_i64 tb = tcg_temp_new_i64(); TCGv_i64 td = tcg_temp_local_new_i64(); TCGv_i32 res = tcg_temp_local_new_i32(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); tcg_gen_extu_i32_i64(ta, cpu_R[ra]); tcg_gen_extu_i32_i64(tb, cpu_R[rb]); tcg_gen_sub_i64(td, ta, tb); tcg_gen_extrl_i64_i32(res, td); tcg_gen_shri_i64(td, td, 31); tcg_gen_andi_i64(td, td, 0x3); /* Jump to lab when no overflow. 
*/ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab); tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab); tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab); gen_exception(dc, EXCP_RANGE); gen_set_label(lab); tcg_gen_mov_i32(cpu_R[rd], res); tcg_temp_free_i64(ta); tcg_temp_free_i64(tb); tcg_temp_free_i64(td); tcg_temp_free_i32(res); tcg_temp_free_i32(sr_ove); } break; default: gen_illegal_exception(dc); break; } break; case 0x0003: /* l.and */ switch (op1) { case 0x00: LOG_DIS(\"l.and r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_and_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; default: gen_illegal_exception(dc); break; } break; case 0x0004: /* l.or */ switch (op1) { case 0x00: LOG_DIS(\"l.or r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_or_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; default: gen_illegal_exception(dc); break; } break; case 0x0005: switch (op1) { case 0x00: /* l.xor */ LOG_DIS(\"l.xor r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_xor_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; default: gen_illegal_exception(dc); break; } break; case 0x0006: switch (op1) { case 0x03: /* l.mul */ LOG_DIS(\"l.mul r%d, r%d, r%d\\n\", rd, ra, rb); if (ra != 0 && rb != 0) { gen_helper_mul32(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); } else { tcg_gen_movi_tl(cpu_R[rd], 0x0); } break; default: gen_illegal_exception(dc); break; } break; case 0x0009: switch (op1) { case 0x03: /* l.div */ LOG_DIS(\"l.div r%d, r%d, r%d\\n\", rd, ra, rb); { TCGLabel *lab0 = gen_new_label(); TCGLabel *lab1 = gen_new_label(); TCGLabel *lab2 = gen_new_label(); TCGLabel *lab3 = gen_new_label(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); if (rb == 0) { tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab0); gen_exception(dc, EXCP_RANGE); gen_set_label(lab0); } else { tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[rb], 0x00000000, lab1); tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[ra], 0x80000000, lab2); tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[rb], 0xffffffff, lab2); gen_set_label(lab1); tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab3); gen_exception(dc, EXCP_RANGE); gen_set_label(lab2); tcg_gen_div_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); gen_set_label(lab3); } tcg_temp_free_i32(sr_ove); } break; default: gen_illegal_exception(dc); break; } break; case 0x000a: switch (op1) { case 0x03: /* l.divu */ LOG_DIS(\"l.divu r%d, r%d, r%d\\n\", rd, ra, rb); { TCGLabel *lab0 = gen_new_label(); TCGLabel *lab1 = gen_new_label(); TCGLabel *lab2 = gen_new_label(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); if (rb == 0) { tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab0); gen_exception(dc, EXCP_RANGE); gen_set_label(lab0); } else { tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[rb], 0x00000000, lab1); tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab2); gen_exception(dc, EXCP_RANGE); gen_set_label(lab1); tcg_gen_divu_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); gen_set_label(lab2); } tcg_temp_free_i32(sr_ove); } break; default: gen_illegal_exception(dc); break; } break; case 0x000b: switch (op1) { case 0x03: /* l.mulu */ LOG_DIS(\"l.mulu r%d, r%d, r%d\\n\", rd, ra, rb); if (rb != 0 && ra != 0) { TCGv_i64 result = tcg_temp_local_new_i64(); 
TCGv_i64 tra = tcg_temp_local_new_i64(); TCGv_i64 trb = tcg_temp_local_new_i64(); TCGv_i64 high = tcg_temp_new_i64(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); TCGLabel *lab = gen_new_label(); /* Calculate each result. */ tcg_gen_extu_i32_i64(tra, cpu_R[ra]); tcg_gen_extu_i32_i64(trb, cpu_R[rb]); tcg_gen_mul_i64(result, tra, trb); tcg_temp_free_i64(tra); tcg_temp_free_i64(trb); tcg_gen_shri_i64(high, result, TARGET_LONG_BITS); /* Overflow or not. */ tcg_gen_brcondi_i64(TCG_COND_EQ, high, 0x00000000, lab); tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab); gen_exception(dc, EXCP_RANGE); gen_set_label(lab); tcg_temp_free_i64(high); tcg_gen_trunc_i64_tl(cpu_R[rd], result); tcg_temp_free_i64(result); tcg_temp_free_i32(sr_ove); } else { tcg_gen_movi_tl(cpu_R[rd], 0); } break; default: gen_illegal_exception(dc); break; } break; case 0x000e: switch (op1) { case 0x00: /* l.cmov */ LOG_DIS(\"l.cmov r%d, r%d, r%d\\n\", rd, ra, rb); { TCGLabel *lab = gen_new_label(); TCGv res = tcg_temp_local_new(); TCGv sr_f = tcg_temp_new(); tcg_gen_andi_tl(sr_f, cpu_sr, SR_F); tcg_gen_mov_tl(res, cpu_R[rb]); tcg_gen_brcondi_tl(TCG_COND_NE, sr_f, SR_F, lab); tcg_gen_mov_tl(res, cpu_R[ra]); gen_set_label(lab); tcg_gen_mov_tl(cpu_R[rd], res); tcg_temp_free(sr_f); tcg_temp_free(res); } break; default: gen_illegal_exception(dc); break; } break; case 0x000f: switch (op1) { case 0x00: /* l.ff1 */ LOG_DIS(\"l.ff1 r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_ctzi_tl(cpu_R[rd], cpu_R[ra], -1); tcg_gen_addi_tl(cpu_R[rd], cpu_R[rd], 1); break; case 0x01: /* l.fl1 */ LOG_DIS(\"l.fl1 r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_clzi_tl(cpu_R[rd], cpu_R[ra], TARGET_LONG_BITS); tcg_gen_subfi_tl(cpu_R[rd], TARGET_LONG_BITS, cpu_R[rd]); break; default: gen_illegal_exception(dc); break; } break; case 0x0008: switch (op1) { case 0x00: switch (op2) { case 0x00: /* l.sll */ LOG_DIS(\"l.sll r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_shl_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; case 0x01: /* l.srl */ LOG_DIS(\"l.srl r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_shr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; case 0x02: /* l.sra */ LOG_DIS(\"l.sra r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_sar_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; case 0x03: /* l.ror */ LOG_DIS(\"l.ror r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_rotr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; default: gen_illegal_exception(dc); break; } break; default: gen_illegal_exception(dc); break; } break; case 0x000c: switch (op1) { case 0x00: switch (op2) { case 0x00: /* l.exths */ LOG_DIS(\"l.exths r%d, r%d\\n\", rd, ra); tcg_gen_ext16s_tl(cpu_R[rd], cpu_R[ra]); break; case 0x01: /* l.extbs */ LOG_DIS(\"l.extbs r%d, r%d\\n\", rd, ra); tcg_gen_ext8s_tl(cpu_R[rd], cpu_R[ra]); break; case 0x02: /* l.exthz */ LOG_DIS(\"l.exthz r%d, r%d\\n\", rd, ra); tcg_gen_ext16u_tl(cpu_R[rd], cpu_R[ra]); break; case 0x03: /* l.extbz */ LOG_DIS(\"l.extbz r%d, r%d\\n\", rd, ra); tcg_gen_ext8u_tl(cpu_R[rd], cpu_R[ra]); break; default: gen_illegal_exception(dc); break; } break; default: gen_illegal_exception(dc); break; } break; case 0x000d: switch (op1) { case 0x00: switch (op2) { case 0x00: /* l.extws */ LOG_DIS(\"l.extws r%d, r%d\\n\", rd, ra); tcg_gen_ext32s_tl(cpu_R[rd], cpu_R[ra]); break; case 0x01: /* l.extwz */ LOG_DIS(\"l.extwz r%d, r%d\\n\", rd, ra); tcg_gen_ext32u_tl(cpu_R[rd], cpu_R[ra]); break; default: gen_illegal_exception(dc); break; } break; default: gen_illegal_exception(dc); break; } break; default: 
gen_illegal_exception(dc); break; } }"} {"target": 1, "idx": 16365, "func": "int qemu_calculate_timeout(void) { #ifndef CONFIG_IOTHREAD int timeout; if (!vm_running) timeout = 5000; else { /* XXX: use timeout computed from timers */ int64_t add; int64_t delta; /* Advance virtual time to the next event. */ delta = qemu_icount_delta(); if (delta > 0) { /* If virtual time is ahead of real time then just wait for IO. */ timeout = (delta + 999999) / 1000000; } else { /* Wait for either IO to occur or the next timer event. */ add = qemu_next_deadline(); /* We advance the timer before checking for IO. Limit the amount we advance so that early IO activity won't get the guest too far ahead. */ if (add > 10000000) add = 10000000; delta += add; qemu_icount += qemu_icount_round (add); timeout = delta / 1000000; if (timeout < 0) timeout = 0; } } return timeout; #else /* CONFIG_IOTHREAD */ return 1000; #endif }"} {"target": 1, "idx": 16370, "func": "static int ftp_abort(URLContext *h) { static const char *command = \"ABOR\\r\\n\"; int err; static const int abor_codes[] = {225, 226, 0}; FTPContext *s = h->priv_data; /* According to RCF 959: \"ABOR command tells the server to abort the previous FTP service command and any associated transfer of data.\" There are FTP server implementations that don't response to any commands during data transfer in passive mode (including ABOR). This implementation closes data connection by force. */ if (ftp_send_command(s, command, NULL, NULL) < 0) { ftp_close_both_connections(s); if ((err = ftp_connect_control_connection(h)) < 0) { av_log(h, AV_LOG_ERROR, \"Reconnect failed.\\n\"); return err; } } else { ftp_close_data_connection(s); } if (ftp_status(s, NULL, abor_codes) < 225) { /* wu-ftpd also closes control connection after data connection closing */ ffurl_closep(&s->conn_control); if ((err = ftp_connect_control_connection(h)) < 0) { av_log(h, AV_LOG_ERROR, \"Reconnect failed.\\n\"); return err; } } return 0; }"} {"target": 1, "idx": 16385, "func": "static int rtmp_packet_read_one_chunk(URLContext *h, RTMPPacket *p, int chunk_size, RTMPPacket **prev_pkt_ptr, int *nb_prev_pkt, uint8_t hdr) { uint8_t buf[16]; int channel_id, timestamp, size; uint32_t ts_field; // non-extended timestamp or delta field uint32_t extra = 0; enum RTMPPacketType type; int written = 0; int ret, toread; RTMPPacket *prev_pkt; written++; channel_id = hdr & 0x3F; if (channel_id < 2) { //special case for channel number >= 64 buf[1] = 0; if (ffurl_read_complete(h, buf, channel_id + 1) != channel_id + 1) return AVERROR(EIO); written += channel_id + 1; channel_id = AV_RL16(buf) + 64; if ((ret = ff_rtmp_check_alloc_array(prev_pkt_ptr, nb_prev_pkt, channel_id)) < 0) return ret; prev_pkt = *prev_pkt_ptr; size = prev_pkt[channel_id].size; type = prev_pkt[channel_id].type; extra = prev_pkt[channel_id].extra; hdr >>= 6; // header size indicator if (hdr == RTMP_PS_ONEBYTE) { ts_field = prev_pkt[channel_id].ts_field; } else { if (ffurl_read_complete(h, buf, 3) != 3) return AVERROR(EIO); written += 3; ts_field = AV_RB24(buf); if (hdr != RTMP_PS_FOURBYTES) { if (ffurl_read_complete(h, buf, 3) != 3) return AVERROR(EIO); written += 3; size = AV_RB24(buf); if (ffurl_read_complete(h, buf, 1) != 1) return AVERROR(EIO); written++; type = buf[0]; if (hdr == RTMP_PS_TWELVEBYTES) { if (ffurl_read_complete(h, buf, 4) != 4) return AVERROR(EIO); written += 4; extra = AV_RL32(buf); if (ts_field == 0xFFFFFF) { if (ffurl_read_complete(h, buf, 4) != 4) return AVERROR(EIO); timestamp = AV_RB32(buf); } else { timestamp = 
ts_field; if (hdr != RTMP_PS_TWELVEBYTES) timestamp += prev_pkt[channel_id].timestamp; if (!prev_pkt[channel_id].read) { if ((ret = ff_rtmp_packet_create(p, channel_id, type, timestamp, size)) < 0) return ret; p->read = written; p->offset = 0; prev_pkt[channel_id].ts_field = ts_field; prev_pkt[channel_id].timestamp = timestamp; } else { // previous packet in this channel hasn't completed reading RTMPPacket *prev = &prev_pkt[channel_id]; p->data = prev->data; p->size = prev->size; p->channel_id = prev->channel_id; p->type = prev->type; p->ts_field = prev->ts_field; p->extra = prev->extra; p->offset = prev->offset; p->read = prev->read + written; p->timestamp = prev->timestamp; prev->data = NULL; p->extra = extra; // save history prev_pkt[channel_id].channel_id = channel_id; prev_pkt[channel_id].type = type; prev_pkt[channel_id].size = size; prev_pkt[channel_id].extra = extra; size = size - p->offset; toread = FFMIN(size, chunk_size); if (ffurl_read_complete(h, p->data + p->offset, toread) != toread) { ff_rtmp_packet_destroy(p); return AVERROR(EIO); size -= toread; p->read += toread; p->offset += toread; if (size > 0) { RTMPPacket *prev = &prev_pkt[channel_id]; prev->data = p->data; prev->read = p->read; prev->offset = p->offset; return AVERROR(EAGAIN); prev_pkt[channel_id].read = 0; // read complete; reset if needed return p->read;"} {"target": 0, "idx": 16406, "func": "void eeprom93xx_write(eeprom_t *eeprom, int eecs, int eesk, int eedi) { uint8_t tick = eeprom->tick; uint8_t eedo = eeprom->eedo; uint16_t address = eeprom->address; uint8_t command = eeprom->command; logout(\"CS=%u SK=%u DI=%u DO=%u, tick = %u\\n\", eecs, eesk, eedi, eedo, tick); if (! eeprom->eecs && eecs) { /* Start chip select cycle. */ logout(\"Cycle start, waiting for 1st start bit (0)\\n\"); tick = 0; command = 0x0; address = 0x0; } else if (eeprom->eecs && ! eecs) { /* End chip select cycle. This triggers write / erase. */ if (eeprom->writable) { uint8_t subcommand = address >> (eeprom->addrbits - 2); if (command == 0 && subcommand == 2) { /* Erase all. */ for (address = 0; address < eeprom->size; address++) { eeprom->contents[address] = 0xffff; } } else if (command == 3) { /* Erase word. */ eeprom->contents[address] = 0xffff; } else if (tick >= 2 + 2 + eeprom->addrbits + 16) { if (command == 1) { /* Write word. */ eeprom->contents[address] &= eeprom->data; } else if (command == 0 && subcommand == 1) { /* Write all. */ for (address = 0; address < eeprom->size; address++) { eeprom->contents[address] &= eeprom->data; } } } } /* Output DO is tristate, read results in 1. */ eedo = 1; } else if (eecs && ! eeprom->eesk && eesk) { /* Raising edge of clock shifts data in. */ if (tick == 0) { /* Wait for 1st start bit. */ if (eedi == 0) { logout(\"Got correct 1st start bit, waiting for 2nd start bit (1)\\n\"); tick++; } else { logout(\"wrong 1st start bit (is 1, should be 0)\\n\"); tick = 2; //~ assert(!\"wrong start bit\"); } } else if (tick == 1) { /* Wait for 2nd start bit. */ if (eedi != 0) { logout(\"Got correct 2nd start bit, getting command + address\\n\"); tick++; } else { logout(\"1st start bit is longer than needed\\n\"); } } else if (tick < 2 + 2) { /* Got 2 start bits, transfer 2 opcode bits. */ tick++; command <<= 1; if (eedi) { command += 1; } } else if (tick < 2 + 2 + eeprom->addrbits) { /* Got 2 start bits and 2 opcode bits, transfer all address bits. 
*/ tick++; address = ((address << 1) | eedi); if (tick == 2 + 2 + eeprom->addrbits) { logout(\"%s command, address = 0x%02x (value 0x%04x)\\n\", opstring[command], address, eeprom->contents[address]); if (command == 2) { eedo = 0; } address = address % eeprom->size; if (command == 0) { /* Command code in upper 2 bits of address. */ switch (address >> (eeprom->addrbits - 2)) { case 0: logout(\"write disable command\\n\"); eeprom->writable = 0; break; case 1: logout(\"write all command\\n\"); break; case 2: logout(\"erase all command\\n\"); break; case 3: logout(\"write enable command\\n\"); eeprom->writable = 1; break; } } else { /* Read, write or erase word. */ eeprom->data = eeprom->contents[address]; } } } else if (tick < 2 + 2 + eeprom->addrbits + 16) { /* Transfer 16 data bits. */ tick++; if (command == 2) { /* Read word. */ eedo = ((eeprom->data & 0x8000) != 0); } eeprom->data <<= 1; eeprom->data += eedi; } else { logout(\"additional unneeded tick, not processed\\n\"); } } /* Save status of EEPROM. */ eeprom->tick = tick; eeprom->eecs = eecs; eeprom->eesk = eesk; eeprom->eedo = eedo; eeprom->address = address; eeprom->command = command; }"} {"target": 0, "idx": 16410, "func": "static void *iothread_run(void *opaque) { IOThread *iothread = opaque; qemu_mutex_lock(&iothread->init_done_lock); iothread->thread_id = qemu_get_thread_id(); qemu_cond_signal(&iothread->init_done_cond); qemu_mutex_unlock(&iothread->init_done_lock); while (!iothread->stopping) { aio_context_acquire(iothread->ctx); while (!iothread->stopping && aio_poll(iothread->ctx, true)) { /* Progress was made, keep going */ } aio_context_release(iothread->ctx); } return NULL; }"} {"target": 0, "idx": 16412, "func": "static av_cold int encode_close(AVCodecContext *avctx) { av_frame_free(&avctx->coded_frame); return 0; }"} {"target": 0, "idx": 16415, "func": "static void pc_dimm_get_size(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { int64_t value; MemoryRegion *mr; PCDIMMDevice *dimm = PC_DIMM(obj); mr = host_memory_backend_get_memory(dimm->hostmem, errp); value = memory_region_size(mr); visit_type_int(v, name, &value, errp); }"} {"target": 0, "idx": 16419, "func": "pflash_t *pflash_cfi02_register(target_phys_addr_t base, ram_addr_t off, BlockDriverState *bs, uint32_t sector_len, int nb_blocs, int nb_mappings, int width, uint16_t id0, uint16_t id1, uint16_t id2, uint16_t id3, uint16_t unlock_addr0, uint16_t unlock_addr1) { pflash_t *pfl; int32_t chip_len; chip_len = sector_len * nb_blocs; /* XXX: to be fixed */ #if 0 if (total_len != (8 * 1024 * 1024) && total_len != (16 * 1024 * 1024) && total_len != (32 * 1024 * 1024) && total_len != (64 * 1024 * 1024)) return NULL; #endif pfl = qemu_mallocz(sizeof(pflash_t)); /* FIXME: Allocate ram ourselves. */ pfl->storage = qemu_get_ram_ptr(off); pfl->fl_mem = cpu_register_io_memory(pflash_read_ops, pflash_write_ops, pfl); pfl->off = off; pfl->base = base; pfl->chip_len = chip_len; pfl->mappings = nb_mappings; pflash_register_memory(pfl, 1); pfl->bs = bs; if (pfl->bs) { /* read the initial flash content */ bdrv_read(pfl->bs, 0, pfl->storage, chip_len >> 9); } #if 0 /* XXX: there should be a bit to set up read-only, * the same way the hardware does (with WP pin). 
*/ pfl->ro = 1; #else pfl->ro = 0; #endif pfl->timer = qemu_new_timer(vm_clock, pflash_timer, pfl); pfl->sector_len = sector_len; pfl->width = width; pfl->wcycle = 0; pfl->cmd = 0; pfl->status = 0; pfl->ident[0] = id0; pfl->ident[1] = id1; pfl->ident[2] = id2; pfl->ident[3] = id3; pfl->unlock_addr[0] = unlock_addr0; pfl->unlock_addr[1] = unlock_addr1; /* Hardcoded CFI table (mostly from SG29 Spansion flash) */ pfl->cfi_len = 0x52; /* Standard \"QRY\" string */ pfl->cfi_table[0x10] = 'Q'; pfl->cfi_table[0x11] = 'R'; pfl->cfi_table[0x12] = 'Y'; /* Command set (AMD/Fujitsu) */ pfl->cfi_table[0x13] = 0x02; pfl->cfi_table[0x14] = 0x00; /* Primary extended table address */ pfl->cfi_table[0x15] = 0x31; pfl->cfi_table[0x16] = 0x00; /* Alternate command set (none) */ pfl->cfi_table[0x17] = 0x00; pfl->cfi_table[0x18] = 0x00; /* Alternate extended table (none) */ pfl->cfi_table[0x19] = 0x00; pfl->cfi_table[0x1A] = 0x00; /* Vcc min */ pfl->cfi_table[0x1B] = 0x27; /* Vcc max */ pfl->cfi_table[0x1C] = 0x36; /* Vpp min (no Vpp pin) */ pfl->cfi_table[0x1D] = 0x00; /* Vpp max (no Vpp pin) */ pfl->cfi_table[0x1E] = 0x00; /* Reserved */ pfl->cfi_table[0x1F] = 0x07; /* Timeout for min size buffer write (NA) */ pfl->cfi_table[0x20] = 0x00; /* Typical timeout for block erase (512 ms) */ pfl->cfi_table[0x21] = 0x09; /* Typical timeout for full chip erase (4096 ms) */ pfl->cfi_table[0x22] = 0x0C; /* Reserved */ pfl->cfi_table[0x23] = 0x01; /* Max timeout for buffer write (NA) */ pfl->cfi_table[0x24] = 0x00; /* Max timeout for block erase */ pfl->cfi_table[0x25] = 0x0A; /* Max timeout for chip erase */ pfl->cfi_table[0x26] = 0x0D; /* Device size */ pfl->cfi_table[0x27] = ctz32(chip_len); /* Flash device interface (8 & 16 bits) */ pfl->cfi_table[0x28] = 0x02; pfl->cfi_table[0x29] = 0x00; /* Max number of bytes in multi-bytes write */ /* XXX: disable buffered write as it's not supported */ // pfl->cfi_table[0x2A] = 0x05; pfl->cfi_table[0x2A] = 0x00; pfl->cfi_table[0x2B] = 0x00; /* Number of erase block regions (uniform) */ pfl->cfi_table[0x2C] = 0x01; /* Erase block region 1 */ pfl->cfi_table[0x2D] = nb_blocs - 1; pfl->cfi_table[0x2E] = (nb_blocs - 1) >> 8; pfl->cfi_table[0x2F] = sector_len >> 8; pfl->cfi_table[0x30] = sector_len >> 16; /* Extended */ pfl->cfi_table[0x31] = 'P'; pfl->cfi_table[0x32] = 'R'; pfl->cfi_table[0x33] = 'I'; pfl->cfi_table[0x34] = '1'; pfl->cfi_table[0x35] = '0'; pfl->cfi_table[0x36] = 0x00; pfl->cfi_table[0x37] = 0x00; pfl->cfi_table[0x38] = 0x00; pfl->cfi_table[0x39] = 0x00; pfl->cfi_table[0x3a] = 0x00; pfl->cfi_table[0x3b] = 0x00; pfl->cfi_table[0x3c] = 0x00; return pfl; }"} {"target": 0, "idx": 16422, "func": "static int v9fs_do_chmod(V9fsState *s, V9fsString *path, mode_t mode) { return s->ops->chmod(&s->ctx, path->data, mode); }"} {"target": 1, "idx": 16431, "func": "static int check_physical (CPUState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw) { int in_plb, ret; ctx->raddr = eaddr; ctx->prot = PAGE_READ; ret = 0; switch (env->mmu_model) { case POWERPC_MMU_32B: case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: case POWERPC_MMU_601: case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_REAL_4xx: case POWERPC_MMU_BOOKE: ctx->prot |= PAGE_WRITE; break; #if defined(TARGET_PPC64) case POWERPC_MMU_64B: case POWERPC_MMU_64BRIDGE: /* Real address are 60 bits long */ ctx->raddr &= 0x0FFFFFFFFFFFFFFFULL; ctx->prot |= PAGE_WRITE; break; #endif case POWERPC_MMU_SOFT_4xx_Z: if (unlikely(msr_pe != 0)) { /* 403 family add some particular protections, * using PBL/PBU registers for accesses with no 
translation. */ in_plb = /* Check PLB validity */ (env->pb[0] < env->pb[1] && /* and address in plb area */ eaddr >= env->pb[0] && eaddr < env->pb[1]) || (env->pb[2] < env->pb[3] && eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0; if (in_plb ^ msr_px) { /* Access in protected area */ if (rw == 1) { /* Access is not allowed */ ret = -2; } } else { /* Read-write access is allowed */ ctx->prot |= PAGE_WRITE; } } break; case POWERPC_MMU_BOOKE_FSL: /* XXX: TODO */ cpu_abort(env, \"BookE FSL MMU model not implemented\\n\"); break; default: cpu_abort(env, \"Unknown or invalid MMU model\\n\"); return -1; } return ret; }"} {"target": 1, "idx": 16437, "func": "void ff_xvmc_field_end(MpegEncContext *s) { struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2]; assert(render); if (render->filled_mv_blocks_num > 0) ff_mpeg_draw_horiz_band(s, 0, 0); }"} {"target": 1, "idx": 16449, "func": "void set_system_memory_map(MemoryRegion *mr) { memory_region_transaction_begin(); address_space_memory.root = mr; memory_region_transaction_commit(); }"} {"target": 0, "idx": 16452, "func": "pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality) { char temp[GET_MODE_BUFFER_SIZE]; char *p= temp; static const char filterDelimiters[] = \",/\"; static const char optionDelimiters[] = \":\"; struct PPMode *ppMode; char *filterToken; ppMode= av_malloc(sizeof(PPMode)); ppMode->lumMode= 0; ppMode->chromMode= 0; ppMode->maxTmpNoise[0]= 700; ppMode->maxTmpNoise[1]= 1500; ppMode->maxTmpNoise[2]= 3000; ppMode->maxAllowedY= 234; ppMode->minAllowedY= 16; ppMode->baseDcDiff= 256/8; ppMode->flatnessThreshold= 56-16-1; ppMode->maxClippedThreshold= 0.01; ppMode->error=0; memset(temp, 0, GET_MODE_BUFFER_SIZE); av_strlcpy(temp, name, GET_MODE_BUFFER_SIZE - 1); av_log(NULL, AV_LOG_DEBUG, \"pp: %s\\n\", name); for(;;){ char *filterName; int q= 1000000; //PP_QUALITY_MAX; int chrom=-1; int luma=-1; char *option; char *options[OPTIONS_ARRAY_SIZE]; int i; int filterNameOk=0; int numOfUnknownOptions=0; int enable=1; //does the user want us to enabled or disabled the filter filterToken= strtok(p, filterDelimiters); if(filterToken == NULL) break; p+= strlen(filterToken) + 1; // p points to next filterToken filterName= strtok(filterToken, optionDelimiters); av_log(NULL, AV_LOG_DEBUG, \"pp: %s::%s\\n\", filterToken, filterName); if(*filterName == '-'){ enable=0; filterName++; } for(;;){ //for all options option= strtok(NULL, optionDelimiters); if(option == NULL) break; av_log(NULL, AV_LOG_DEBUG, \"pp: option: %s\\n\", option); if(!strcmp(\"autoq\", option) || !strcmp(\"a\", option)) q= quality; else if(!strcmp(\"nochrom\", option) || !strcmp(\"y\", option)) chrom=0; else if(!strcmp(\"chrom\", option) || !strcmp(\"c\", option)) chrom=1; else if(!strcmp(\"noluma\", option) || !strcmp(\"n\", option)) luma=0; else{ options[numOfUnknownOptions] = option; numOfUnknownOptions++; } if(numOfUnknownOptions >= OPTIONS_ARRAY_SIZE-1) break; } options[numOfUnknownOptions] = NULL; /* replace stuff from the replace Table */ for(i=0; replaceTable[2*i]!=NULL; i++){ if(!strcmp(replaceTable[2*i], filterName)){ int newlen= strlen(replaceTable[2*i + 1]); int plen; int spaceLeft; if(p==NULL) p= temp, *p=0; //last filter else p--, *p=','; //not last filter plen= strlen(p); spaceLeft= p - temp + plen; if(spaceLeft + newlen >= GET_MODE_BUFFER_SIZE - 1){ ppMode->error++; break; } memmove(p + newlen, p, plen+1); memcpy(p, replaceTable[2*i + 1], newlen); filterNameOk=1; } } for(i=0; filters[i].shortName!=NULL; i++){ if( 
!strcmp(filters[i].longName, filterName) || !strcmp(filters[i].shortName, filterName)){ ppMode->lumMode &= ~filters[i].mask; ppMode->chromMode &= ~filters[i].mask; filterNameOk=1; if(!enable) break; // user wants to disable it if(q >= filters[i].minLumQuality && luma) ppMode->lumMode|= filters[i].mask; if(chrom==1 || (chrom==-1 && filters[i].chromDefault)) if(q >= filters[i].minChromQuality) ppMode->chromMode|= filters[i].mask; if(filters[i].mask == LEVEL_FIX){ int o; ppMode->minAllowedY= 16; ppMode->maxAllowedY= 234; for(o=0; options[o]!=NULL; o++){ if( !strcmp(options[o],\"fullyrange\") ||!strcmp(options[o],\"f\")){ ppMode->minAllowedY= 0; ppMode->maxAllowedY= 255; numOfUnknownOptions--; } } } else if(filters[i].mask == TEMP_NOISE_FILTER) { int o; int numOfNoises=0; for(o=0; options[o]!=NULL; o++){ char *tail; ppMode->maxTmpNoise[numOfNoises]= strtol(options[o], &tail, 0); if(tail!=options[o]){ numOfNoises++; numOfUnknownOptions--; if(numOfNoises >= 3) break; } } } else if(filters[i].mask == V_DEBLOCK || filters[i].mask == H_DEBLOCK || filters[i].mask == V_A_DEBLOCK || filters[i].mask == H_A_DEBLOCK){ int o; for(o=0; options[o]!=NULL && o<2; o++){ char *tail; int val= strtol(options[o], &tail, 0); if(tail==options[o]) break; numOfUnknownOptions--; if(o==0) ppMode->baseDcDiff= val; else ppMode->flatnessThreshold= val; } } else if(filters[i].mask == FORCE_QUANT){ int o; ppMode->forcedQuant= 15; for(o=0; options[o]!=NULL && o<1; o++){ char *tail; int val= strtol(options[o], &tail, 0); if(tail==options[o]) break; numOfUnknownOptions--; ppMode->forcedQuant= val; } } } } if(!filterNameOk) ppMode->error++; ppMode->error += numOfUnknownOptions; } av_log(NULL, AV_LOG_DEBUG, \"pp: lumMode=%X, chromMode=%X\\n\", ppMode->lumMode, ppMode->chromMode); if(ppMode->error){ av_log(NULL, AV_LOG_ERROR, \"%d errors in postprocess string \\\"%s\\\"\\n\", ppMode->error, name); av_free(ppMode); return NULL; } return ppMode; }"} {"target": 1, "idx": 16465, "func": "int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index) { BDRVQcowState *s = bs->opaque; uint64_t buf[L1_ENTRIES_PER_SECTOR]; int l1_start_index; int i, ret; l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1); for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) { buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]); } ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT & ~QCOW2_OL_ACTIVE_L1, s->l1_table_offset + 8 * l1_start_index, sizeof(buf)); if (ret < 0) { return ret; } BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index, buf, sizeof(buf)); if (ret < 0) { return ret; } return 0; }"} {"target": 0, "idx": 16483, "func": "static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot) { uintptr_t host_start, host_map_start, host_end; last_bss = TARGET_PAGE_ALIGN(last_bss); /* ??? There is confusion between qemu_real_host_page_size and qemu_host_page_size here and elsewhere in target_mmap, which may lead to the end of the data section mapping from the file not being mapped. At least there was an explicit test and comment for that here, suggesting that \"the file size must be known\". The comment probably pre-dates the introduction of the fstat system call in target_mmap which does in fact find out the size. What isn't clear is if the workaround here is still actually needed. For now, continue with it, but merge it with the \"normal\" mmap that would allocate the bss. 
*/ host_start = (uintptr_t) g2h(elf_bss); host_end = (uintptr_t) g2h(last_bss); host_map_start = (host_start + qemu_real_host_page_size - 1); host_map_start &= -qemu_real_host_page_size; if (host_map_start < host_end) { void *p = mmap((void *)host_map_start, host_end - host_map_start, prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (p == MAP_FAILED) { perror(\"cannot mmap brk\"); exit(-1); } /* Since we didn't use target_mmap, make sure to record the validity of the pages with qemu. */ page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID); } if (host_start < host_map_start) { memset((void *)host_start, 0, host_map_start - host_start); } }"} {"target": 0, "idx": 16498, "func": "static void avc_biwgt_4x4multiple_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height, int32_t log2_denom, int32_t src_weight, int32_t dst_weight, int32_t offset_in) { uint8_t cnt; uint32_t load0, load1, load2, load3; v16i8 src_wgt, dst_wgt, wgt; v16i8 src0, src1, src2, src3; v16i8 dst0, dst1, dst2, dst3; v8i16 temp0, temp1, temp2, temp3; v8i16 denom, offset, add_val; int32_t val = 128 * (src_weight + dst_weight); offset_in = ((offset_in + 1) | 1) << log2_denom; src_wgt = __msa_fill_b(src_weight); dst_wgt = __msa_fill_b(dst_weight); offset = __msa_fill_h(offset_in); denom = __msa_fill_h(log2_denom + 1); add_val = __msa_fill_h(val); offset += add_val; wgt = __msa_ilvev_b(dst_wgt, src_wgt); for (cnt = height / 4; cnt--;) { LOAD_4WORDS_WITH_STRIDE(src, src_stride, load0, load1, load2, load3); src += (4 * src_stride); src0 = (v16i8) __msa_fill_w(load0); src1 = (v16i8) __msa_fill_w(load1); src2 = (v16i8) __msa_fill_w(load2); src3 = (v16i8) __msa_fill_w(load3); LOAD_4WORDS_WITH_STRIDE(dst, dst_stride, load0, load1, load2, load3); dst0 = (v16i8) __msa_fill_w(load0); dst1 = (v16i8) __msa_fill_w(load1); dst2 = (v16i8) __msa_fill_w(load2); dst3 = (v16i8) __msa_fill_w(load3); XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128); XORI_B_4VECS_SB(dst0, dst1, dst2, dst3, dst0, dst1, dst2, dst3, 128); ILVR_B_4VECS_SH(src0, src1, src2, src3, dst0, dst1, dst2, dst3, temp0, temp1, temp2, temp3); temp0 = __msa_dpadd_s_h(offset, wgt, (v16i8) temp0); temp1 = __msa_dpadd_s_h(offset, wgt, (v16i8) temp1); temp2 = __msa_dpadd_s_h(offset, wgt, (v16i8) temp2); temp3 = __msa_dpadd_s_h(offset, wgt, (v16i8) temp3); SRA_4VECS(temp0, temp1, temp2, temp3, temp0, temp1, temp2, temp3, denom); temp0 = CLIP_UNSIGNED_CHAR_H(temp0); temp1 = CLIP_UNSIGNED_CHAR_H(temp1); temp2 = CLIP_UNSIGNED_CHAR_H(temp2); temp3 = CLIP_UNSIGNED_CHAR_H(temp3); PCKEV_B_STORE_4_BYTES_4(temp0, temp1, temp2, temp3, dst, dst_stride); dst += (4 * dst_stride); } }"} {"target": 0, "idx": 16501, "func": "static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1, intptr_t arg2) { int opi, opx; assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); if (type == TCG_TYPE_I32) { opi = LWZ, opx = LWZX; } else { opi = LD, opx = LDX; } tcg_out_mem_long(s, opi, opx, ret, arg1, arg2); }"} {"target": 0, "idx": 16507, "func": "void do_delvm(Monitor *mon, const QDict *qdict) { BlockDriverState *bs, *bs1; Error *err = NULL; const char *name = qdict_get_str(qdict, \"name\"); bs = find_vmstate_bs(); if (!bs) { monitor_printf(mon, \"No block device supports snapshots\\n\"); return; } bs1 = NULL; while ((bs1 = bdrv_next(bs1))) { if (bdrv_can_snapshot(bs1)) { bdrv_snapshot_delete_by_id_or_name(bs, name, &err); if (err) { monitor_printf(mon, \"Error while deleting snapshot on device '%s':\" \" 
%s\\n\", bdrv_get_device_name(bs), error_get_pretty(err)); error_free(err); } } } }"} {"target": 0, "idx": 16521, "func": "void object_property_add(Object *obj, const char *name, const char *type, ObjectPropertyAccessor *get, ObjectPropertyAccessor *set, ObjectPropertyRelease *release, void *opaque, Error **errp) { ObjectProperty *prop; QTAILQ_FOREACH(prop, &obj->properties, node) { if (strcmp(prop->name, name) == 0) { error_setg(errp, \"attempt to add duplicate property '%s'\" \" to object (type '%s')\", name, object_get_typename(obj)); return; } } prop = g_malloc0(sizeof(*prop)); prop->name = g_strdup(name); prop->type = g_strdup(type); prop->get = get; prop->set = set; prop->release = release; prop->opaque = opaque; QTAILQ_INSERT_TAIL(&obj->properties, prop, node); }"} {"target": 0, "idx": 16524, "func": "static void collie_init(MachineState *machine) { const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; StrongARMState *s; DriveInfo *dinfo; MemoryRegion *sysmem = get_system_memory(); if (!cpu_model) { cpu_model = \"sa1110\"; } s = sa1110_init(sysmem, collie_binfo.ram_size, cpu_model); dinfo = drive_get(IF_PFLASH, 0, 0); pflash_cfi01_register(SA_CS0, NULL, \"collie.fl1\", 0x02000000, dinfo ? blk_bs(blk_by_legacy_dinfo(dinfo)) : NULL, (64 * 1024), 512, 4, 0x00, 0x00, 0x00, 0x00, 0); dinfo = drive_get(IF_PFLASH, 0, 1); pflash_cfi01_register(SA_CS1, NULL, \"collie.fl2\", 0x02000000, dinfo ? blk_bs(blk_by_legacy_dinfo(dinfo)) : NULL, (64 * 1024), 512, 4, 0x00, 0x00, 0x00, 0x00, 0); sysbus_create_simple(\"scoop\", 0x40800000, NULL); collie_binfo.kernel_filename = kernel_filename; collie_binfo.kernel_cmdline = kernel_cmdline; collie_binfo.initrd_filename = initrd_filename; collie_binfo.board_id = 0x208; arm_load_kernel(s->cpu, &collie_binfo); }"} {"target": 0, "idx": 16536, "func": "void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h) { POWERPC_TBL_DECLARE(altivec_avg_pixels16_num, 1); #ifdef ALTIVEC_USE_REFERENCE_C_CODE int i; POWERPC_TBL_START_COUNT(altivec_avg_pixels16_num, 1); for(i=0; il)); op_avg(*((uint32_t*)(block+4)),(((const struct unaligned_32 *)(pixels+4))->l)); op_avg(*((uint32_t*)(block+8)),(((const struct unaligned_32 *)(pixels+8))->l)); op_avg(*((uint32_t*)(block+12)),(((const struct unaligned_32 *)(pixels+12))->l)); pixels+=line_size; block +=line_size; } POWERPC_TBL_STOP_COUNT(altivec_avg_pixels16_num, 1); #else /* ALTIVEC_USE_REFERENCE_C_CODE */ register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv; register vector unsigned char perm = vec_lvsl(0, pixels); int i; POWERPC_TBL_START_COUNT(altivec_avg_pixels16_num, 1); for(i=0; ifinished)) { ioreq = LIST_FIRST(&blkdev->finished); send_notify += blk_send_response_one(ioreq); ioreq_release(ioreq); } if (send_notify) xen_be_send_notify(&blkdev->xendev); }"} {"target": 0, "idx": 16566, "func": "static int interface_client_monitors_config(QXLInstance *sin, VDAgentMonitorsConfig *monitors_config) { PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); QXLRom *rom = memory_region_get_ram_ptr(&qxl->rom_bar); int i; unsigned max_outputs = ARRAY_SIZE(rom->client_monitors_config.heads); if (qxl->revision < 4) { trace_qxl_client_monitors_config_unsupported_by_device(qxl->id, qxl->revision); return 0; } /* * Older windows drivers set int_mask to 0 when their ISR is called, * then later set it to ~0. 
So it doesn't relate to the actual interrupts * handled. However, they are old, so clearly they don't support this * interrupt */ if (qxl->ram->int_mask == 0 || qxl->ram->int_mask == ~0 || !(qxl->ram->int_mask & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)) { trace_qxl_client_monitors_config_unsupported_by_guest(qxl->id, qxl->ram->int_mask, monitors_config); return 0; } if (!monitors_config) { return 1; } #if SPICE_SERVER_VERSION >= 0x000c06 /* release 0.12.6 */ /* limit number of outputs based on setting limit */ if (qxl->max_outputs && qxl->max_outputs <= max_outputs) { max_outputs = qxl->max_outputs; } #endif memset(&rom->client_monitors_config, 0, sizeof(rom->client_monitors_config)); rom->client_monitors_config.count = monitors_config->num_of_monitors; /* monitors_config->flags ignored */ if (rom->client_monitors_config.count >= max_outputs) { trace_qxl_client_monitors_config_capped(qxl->id, monitors_config->num_of_monitors, max_outputs); rom->client_monitors_config.count = max_outputs; } for (i = 0 ; i < rom->client_monitors_config.count ; ++i) { VDAgentMonConfig *monitor = &monitors_config->monitors[i]; QXLURect *rect = &rom->client_monitors_config.heads[i]; /* monitor->depth ignored */ rect->left = monitor->x; rect->top = monitor->y; rect->right = monitor->x + monitor->width; rect->bottom = monitor->y + monitor->height; } rom->client_monitors_config_crc = qxl_crc32( (const uint8_t *)&rom->client_monitors_config, sizeof(rom->client_monitors_config)); trace_qxl_client_monitors_config_crc(qxl->id, sizeof(rom->client_monitors_config), rom->client_monitors_config_crc); trace_qxl_interrupt_client_monitors_config(qxl->id, rom->client_monitors_config.count, rom->client_monitors_config.heads); qxl_send_events(qxl, QXL_INTERRUPT_CLIENT_MONITORS_CONFIG); return 1; }"} {"target": 0, "idx": 16585, "func": "static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp) { VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev); /* * virtio pci bar layout used by default. * subclasses can re-arrange things if needed. 
* * region 0 -- virtio legacy io bar * region 1 -- msi-x bar * region 4+5 -- virtio modern memory (64bit) bar * */ proxy->legacy_io_bar = 0; proxy->msix_bar = 1; proxy->modern_io_bar = 2; proxy->modern_mem_bar = 4; proxy->common.offset = 0x0; proxy->common.size = 0x1000; proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG; proxy->isr.offset = 0x1000; proxy->isr.size = 0x1000; proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG; proxy->device.offset = 0x2000; proxy->device.size = 0x1000; proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG; proxy->notify.offset = 0x3000; proxy->notify.size = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX; proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG; proxy->notify_pio.offset = 0x0; proxy->notify_pio.size = 0x4; proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG; /* subclasses can enforce modern, so do this unconditionally */ memory_region_init(&proxy->modern_bar, OBJECT(proxy), \"virtio-pci\", 2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX); memory_region_init_alias(&proxy->modern_cfg, OBJECT(proxy), \"virtio-pci-cfg\", &proxy->modern_bar, 0, memory_region_size(&proxy->modern_bar)); address_space_init(&proxy->modern_as, &proxy->modern_cfg, \"virtio-pci-cfg-as\"); if (pci_is_express(pci_dev) && pci_bus_is_express(pci_dev->bus) && !pci_bus_is_root(pci_dev->bus)) { int pos; pos = pcie_endpoint_cap_init(pci_dev, 0); assert(pos > 0); pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF); assert(pos > 0); /* * Indicates that this function complies with revision 1.2 of the * PCI Power Management Interface Specification. */ pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3); } else { /* * make future invocations of pci_is_express() return false * and pci_config_size() return PCI_CONFIG_SPACE_SIZE. */ pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS; } virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy); if (k->realize) { k->realize(proxy, errp); } }"} {"target": 0, "idx": 16602, "func": "static int cdxl_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *pkt) { CDXLVideoContext *c = avctx->priv_data; AVFrame * const p = &c->frame; int ret, w, h, encoding, format, buf_size = pkt->size; const uint8_t *buf = pkt->data; if (buf_size < 32) return AVERROR_INVALIDDATA; encoding = buf[1] & 7; format = buf[1] & 0xE0; w = AV_RB16(&buf[14]); h = AV_RB16(&buf[16]); c->bpp = buf[19]; c->palette_size = AV_RB16(&buf[20]); c->palette = buf + 32; c->video = c->palette + c->palette_size; c->video_size = buf_size - c->palette_size - 32; if (c->palette_size > 512) return AVERROR_INVALIDDATA; if (buf_size < c->palette_size + 32) return AVERROR_INVALIDDATA; if (c->bpp < 1) return AVERROR_INVALIDDATA; if (c->bpp > 8) { av_log_ask_for_sample(avctx, \"unsupported pixel size: %d\\n\", c->bpp); return AVERROR_PATCHWELCOME; } if (format) { av_log_ask_for_sample(avctx, \"unsupported pixel format: %d\\n\", format); return AVERROR_PATCHWELCOME; } if ((ret = av_image_check_size(w, h, 0, avctx)) < 0) return ret; if (w != avctx->width || h != avctx->height) avcodec_set_dimensions(avctx, w, h); if (c->video_size < FFALIGN(avctx->width, 16) * avctx->height * c->bpp / 8) return AVERROR_INVALIDDATA; if (!encoding && c->palette_size && c->bpp <= 8) { avctx->pix_fmt = PIX_FMT_PAL8; } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) { if (c->palette_size != (1 << (c->bpp - 1))) return AVERROR_INVALIDDATA; avctx->pix_fmt = PIX_FMT_BGR24; } else { av_log_ask_for_sample(avctx, \"unsupported encoding %d and bpp %d\\n\", encoding, c->bpp); return AVERROR_PATCHWELCOME; } 
if (p->data[0]) avctx->release_buffer(avctx, p); p->reference = 0; if ((ret = avctx->get_buffer(avctx, p)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; } p->pict_type = AV_PICTURE_TYPE_I; if (encoding) { av_fast_padded_malloc(&c->new_video, &c->new_video_size, h * w + FF_INPUT_BUFFER_PADDING_SIZE); if (!c->new_video) return AVERROR(ENOMEM); if (c->bpp == 8) cdxl_decode_ham8(c); else cdxl_decode_ham6(c); } else { cdxl_decode_rgb(c); } *data_size = sizeof(AVFrame); *(AVFrame*)data = c->frame; return buf_size; }"} {"target": 1, "idx": 16614, "func": "static int decode_0(PAFVideoDecContext *c, uint8_t *pkt, uint8_t code) { uint32_t opcode_size, offset; uint8_t *dst, *dend, mask = 0, color = 0; const uint8_t *src, *send, *opcodes; int i, j, op = 0; i = bytestream2_get_byte(&c->gb); if (i) { if (code & 0x10) { int align; align = bytestream2_tell(&c->gb) & 3; if (align) bytestream2_skip(&c->gb, 4 - align); } do { int page, val, x, y; val = bytestream2_get_be16(&c->gb); page = val >> 14; x = (val & 0x7F) * 2; y = ((val >> 7) & 0x7F) * 2; dst = c->frame[page] + x + y * c->width; dend = c->frame[page] + c->frame_size; offset = (x & 0x7F) * 2; j = bytestream2_get_le16(&c->gb) + offset; do { offset++; if (dst + 3 * c->width + 4 > dend) return AVERROR_INVALIDDATA; read4x4block(c, dst, c->width); if ((offset & 0x3F) == 0) dst += c->width * 3; dst += 4; } while (offset < j); } while (--i); } dst = c->frame[c->current_frame]; dend = c->frame[c->current_frame] + c->frame_size; do { set_src_position(c, &src, &send); if ((src + 3 * c->width + 4 > send) || (dst + 3 * c->width + 4 > dend)) return AVERROR_INVALIDDATA; copy_block4(dst, src, c->width, c->width, 4); i++; if ((i & 0x3F) == 0) dst += c->width * 3; dst += 4; } while (i < c->video_size / 16); opcode_size = bytestream2_get_le16(&c->gb); bytestream2_skip(&c->gb, 2); if (bytestream2_get_bytes_left(&c->gb) < opcode_size) return AVERROR_INVALIDDATA; opcodes = pkt + bytestream2_tell(&c->gb); bytestream2_skipu(&c->gb, opcode_size); dst = c->frame[c->current_frame]; for (i = 0; i < c->height; i += 4, dst += c->width * 3) for (j = 0; j < c->width; j += 4, dst += 4) { int opcode, k = 0; if (op > opcode_size) return AVERROR_INVALIDDATA; if (j & 4) { opcode = opcodes[op] & 15; op++; } else { opcode = opcodes[op] >> 4; } while (block_sequences[opcode][k]) { offset = c->width * 2; code = block_sequences[opcode][k++]; switch (code) { case 2: offset = 0; case 3: color = bytestream2_get_byte(&c->gb); case 4: mask = bytestream2_get_byte(&c->gb); copy_color_mask(dst + offset, c->width, mask, color); break; case 5: offset = 0; case 6: set_src_position(c, &src, &send); case 7: if (src + offset + c->width + 4 > send) return AVERROR_INVALIDDATA; mask = bytestream2_get_byte(&c->gb); copy_src_mask(dst + offset, c->width, mask, src + offset); break; } } } return 0; }"} {"target": 0, "idx": 16628, "func": "static void save_native_fp_fsave(CPUState *env) { int fptag, i, j; uint16_t fpuc; struct fpstate fp1, *fp = &fp1; asm volatile (\"fsave %0\" : : \"m\" (*fp)); env->fpuc = fp->fpuc; env->fpstt = (fp->fpus >> 11) & 7; env->fpus = fp->fpus & ~0x3800; fptag = fp->fptag; for(i = 0;i < 8; i++) { env->fptags[i] = ((fptag & 3) == 3); fptag >>= 2; } j = env->fpstt; for(i = 0;i < 8; i++) { memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10); j = (j + 1) & 7; } /* we must restore the default rounding state */ fpuc = 0x037f | (env->fpuc & (3 << 10)); asm volatile(\"fldcw %0\" : : \"m\" (fpuc)); }"} {"target": 0, "idx": 16651, "func": "static int 
decode_thread(void *arg) { VideoState *is = arg; AVFormatContext *ic; int err, i, ret, video_index, audio_index, subtitle_index; AVPacket pkt1, *pkt = &pkt1; AVFormatParameters params, *ap = ¶ms; video_index = -1; audio_index = -1; subtitle_index = -1; is->video_stream = -1; is->audio_stream = -1; is->subtitle_stream = -1; global_video_state = is; url_set_interrupt_cb(decode_interrupt_cb); memset(ap, 0, sizeof(*ap)); ap->width = frame_width; ap->height= frame_height; ap->time_base= (AVRational){1, 25}; ap->pix_fmt = frame_pix_fmt; err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap); if (err < 0) { print_error(is->filename, err); ret = -1; goto fail; } is->ic = ic; if(genpts) ic->flags |= AVFMT_FLAG_GENPTS; err = av_find_stream_info(ic); if (err < 0) { fprintf(stderr, \"%s: could not find codec parameters\\n\", is->filename); ret = -1; goto fail; } if(ic->pb) ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end /* if seeking requested, we execute it */ if (start_time != AV_NOPTS_VALUE) { int64_t timestamp; timestamp = start_time; /* add the stream start time */ if (ic->start_time != AV_NOPTS_VALUE) timestamp += ic->start_time; ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD); if (ret < 0) { fprintf(stderr, \"%s: could not seek to position %0.3f\\n\", is->filename, (double)timestamp / AV_TIME_BASE); } } for(i = 0; i < ic->nb_streams; i++) { AVCodecContext *enc = ic->streams[i]->codec; ic->streams[i]->discard = AVDISCARD_ALL; switch(enc->codec_type) { case CODEC_TYPE_AUDIO: if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable) audio_index = i; break; case CODEC_TYPE_VIDEO: if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable) video_index = i; break; case CODEC_TYPE_SUBTITLE: if (wanted_subtitle_stream-- >= 0 && !video_disable) subtitle_index = i; break; default: break; } } if (show_status) { dump_format(ic, 0, is->filename, 0); dump_stream_info(ic); } /* open the streams */ if (audio_index >= 0) { stream_component_open(is, audio_index); } if (video_index >= 0) { stream_component_open(is, video_index); } else { if (!display_disable) is->show_audio = 1; } if (subtitle_index >= 0) { stream_component_open(is, subtitle_index); } if (is->video_stream < 0 && is->audio_stream < 0) { fprintf(stderr, \"%s: could not open codecs\\n\", is->filename); ret = -1; goto fail; } for(;;) { if (is->abort_request) break; if (is->paused != is->last_paused) { is->last_paused = is->paused; if (is->paused) av_read_pause(ic); else av_read_play(ic); } #if CONFIG_RTSP_DEMUXER if (is->paused && !strcmp(ic->iformat->name, \"rtsp\")) { /* wait 10 ms to avoid trying to get another packet */ /* XXX: horrible */ SDL_Delay(10); continue; } #endif if (is->seek_req) { int stream_index= -1; int64_t seek_target= is->seek_pos; if (is-> video_stream >= 0) stream_index= is-> video_stream; else if(is-> audio_stream >= 0) stream_index= is-> audio_stream; else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream; if(stream_index>=0){ seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base); } ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags); if (ret < 0) { fprintf(stderr, \"%s: error while seeking\\n\", is->ic->filename); }else{ if (is->audio_stream >= 0) { packet_queue_flush(&is->audioq); packet_queue_put(&is->audioq, &flush_pkt); } if (is->subtitle_stream >= 0) { packet_queue_flush(&is->subtitleq); packet_queue_put(&is->subtitleq, &flush_pkt); } if (is->video_stream >= 
0) { packet_queue_flush(&is->videoq); packet_queue_put(&is->videoq, &flush_pkt); } } is->seek_req = 0; } /* if the queue are full, no need to read more */ if (is->audioq.size > MAX_AUDIOQ_SIZE || is->videoq.size > MAX_VIDEOQ_SIZE || is->subtitleq.size > MAX_SUBTITLEQ_SIZE) { /* wait 10 ms */ SDL_Delay(10); continue; } if(url_feof(ic->pb)) { av_init_packet(pkt); pkt->data=NULL; pkt->size=0; pkt->stream_index= is->video_stream; packet_queue_put(&is->videoq, pkt); continue; } ret = av_read_frame(ic, pkt); if (ret < 0) { if (ret != AVERROR_EOF && url_ferror(ic->pb) == 0) { SDL_Delay(100); /* wait for user event */ continue; } else break; } if (pkt->stream_index == is->audio_stream) { packet_queue_put(&is->audioq, pkt); } else if (pkt->stream_index == is->video_stream) { packet_queue_put(&is->videoq, pkt); } else if (pkt->stream_index == is->subtitle_stream) { packet_queue_put(&is->subtitleq, pkt); } else { av_free_packet(pkt); } } /* wait until the end */ while (!is->abort_request) { SDL_Delay(100); } ret = 0; fail: /* disable interrupting */ global_video_state = NULL; /* close each stream */ if (is->audio_stream >= 0) stream_component_close(is, is->audio_stream); if (is->video_stream >= 0) stream_component_close(is, is->video_stream); if (is->subtitle_stream >= 0) stream_component_close(is, is->subtitle_stream); if (is->ic) { av_close_input_file(is->ic); is->ic = NULL; /* safety */ } url_set_interrupt_cb(NULL); if (ret != 0) { SDL_Event event; event.type = FF_QUIT_EVENT; event.user.data1 = is; SDL_PushEvent(&event); } return 0; }"} {"target": 1, "idx": 16664, "func": "static int adpcm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; ADPCMDecodeContext *c = avctx->priv_data; ADPCMChannelStatus *cs; int n, m, channel, i; short *samples; const uint8_t *src; int st; /* stereo */ int count1, count2; int nb_samples, coded_samples, ret; nb_samples = get_nb_samples(avctx, buf, buf_size, &coded_samples); if (nb_samples <= 0) { av_log(avctx, AV_LOG_ERROR, \"invalid number of samples in packet\\n\"); } /* get output buffer */ c->frame.nb_samples = nb_samples; if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; } samples = (short *)c->frame.data[0]; /* use coded_samples when applicable */ /* it is always <= nb_samples, so the output buffer will be large enough */ if (coded_samples) { if (coded_samples != nb_samples) av_log(avctx, AV_LOG_WARNING, \"mismatch in coded sample count\\n\"); c->frame.nb_samples = nb_samples = coded_samples; } src = buf; st = avctx->channels == 2 ? 1 : 0; switch(avctx->codec->id) { case CODEC_ID_ADPCM_IMA_QT: /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples). Channel data is interleaved per-chunk. 
*/ for (channel = 0; channel < avctx->channels; channel++) { int16_t predictor; int step_index; cs = &(c->status[channel]); /* (pppppp) (piiiiiii) */ /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */ predictor = AV_RB16(src); step_index = predictor & 0x7F; predictor &= 0xFF80; src += 2; if (cs->step_index == step_index) { int diff = (int)predictor - cs->predictor; if (diff < 0) diff = - diff; if (diff > 0x7f) goto update; } else { update: cs->step_index = step_index; cs->predictor = predictor; } if (cs->step_index > 88){ av_log(avctx, AV_LOG_ERROR, \"ERROR: step_index = %i\\n\", cs->step_index); cs->step_index = 88; } samples = (short *)c->frame.data[0] + channel; for (m = 0; m < 32; m++) { *samples = adpcm_ima_qt_expand_nibble(cs, src[0] & 0x0F, 3); samples += avctx->channels; *samples = adpcm_ima_qt_expand_nibble(cs, src[0] >> 4 , 3); samples += avctx->channels; src ++; } } break; case CODEC_ID_ADPCM_IMA_WAV: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; for(i=0; i<avctx->channels; i++){ cs = &(c->status[i]); cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src); cs->step_index = *src++; if (cs->step_index > 88){ av_log(avctx, AV_LOG_ERROR, \"ERROR: step_index = %i\\n\", cs->step_index); cs->step_index = 88; } if (*src++) av_log(avctx, AV_LOG_ERROR, \"unused byte should be null but is %d!!\\n\", src[-1]); /* unused */ } for (n = (nb_samples - 1) / 8; n > 0; n--) { for (i = 0; i < avctx->channels; i++) { cs = &c->status[i]; for (m = 0; m < 4; m++) { uint8_t v = *src++; *samples = adpcm_ima_expand_nibble(cs, v & 0x0F, 3); samples += avctx->channels; *samples = adpcm_ima_expand_nibble(cs, v >> 4 , 3); samples += avctx->channels; } samples -= 8 * avctx->channels - 1; } samples += 7 * avctx->channels; } break; case CODEC_ID_ADPCM_4XM: for (i = 0; i < avctx->channels; i++) c->status[i].predictor= (int16_t)bytestream_get_le16(&src); for (i = 0; i < avctx->channels; i++) { c->status[i].step_index= (int16_t)bytestream_get_le16(&src); c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88); } for (i = 0; i < avctx->channels; i++) { samples = (short *)c->frame.data[0] + i; cs = &c->status[i]; for (n = nb_samples >> 1; n > 0; n--, src++) { uint8_t v = *src; *samples = adpcm_ima_expand_nibble(cs, v & 0x0F, 4); samples += avctx->channels; *samples = adpcm_ima_expand_nibble(cs, v >> 4 , 4); samples += avctx->channels; } } break; case CODEC_ID_ADPCM_MS: { int block_predictor; if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; block_predictor = av_clip(*src++, 0, 6); c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor]; c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor]; if (st) { block_predictor = av_clip(*src++, 0, 6); c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor]; c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor]; } c->status[0].idelta = (int16_t)bytestream_get_le16(&src); if (st){ c->status[1].idelta = (int16_t)bytestream_get_le16(&src); } c->status[0].sample1 = bytestream_get_le16(&src); if (st) c->status[1].sample1 = bytestream_get_le16(&src); c->status[0].sample2 = bytestream_get_le16(&src); if (st) c->status[1].sample2 = bytestream_get_le16(&src); *samples++ = c->status[0].sample2; if (st) *samples++ = c->status[1].sample2; *samples++ = c->status[0].sample1; if (st) *samples++ = c->status[1].sample1; for(n = (nb_samples - 2) >> (1 - st); n > 0; n--, src++) { *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4 );
*samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F); } break; } case CODEC_ID_ADPCM_IMA_DK4: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; for (channel = 0; channel < avctx->channels; channel++) { cs = &c->status[channel]; cs->predictor = (int16_t)bytestream_get_le16(&src); cs->step_index = *src++; src++; *samples++ = cs->predictor; } for (n = nb_samples >> (1 - st); n > 0; n--, src++) { uint8_t v = *src; *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3); } break; case CODEC_ID_ADPCM_IMA_DK3: { unsigned char last_byte = 0; unsigned char nibble; int decode_top_nibble_next = 0; int end_of_packet = 0; int diff_channel; if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; c->status[0].predictor = (int16_t)AV_RL16(src + 10); c->status[1].predictor = (int16_t)AV_RL16(src + 12); c->status[0].step_index = src[14]; c->status[1].step_index = src[15]; /* sign extend the predictors */ src += 16; diff_channel = c->status[1].predictor; /* the DK3_GET_NEXT_NIBBLE macro issues the break statement when * the buffer is consumed */ while (1) { /* for this algorithm, c->status[0] is the sum channel and * c->status[1] is the diff channel */ /* process the first predictor of the sum channel */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[0], nibble, 3); /* process the diff channel predictor */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[1], nibble, 3); /* process the first pair of stereo PCM samples */ diff_channel = (diff_channel + c->status[1].predictor) / 2; *samples++ = c->status[0].predictor + c->status[1].predictor; *samples++ = c->status[0].predictor - c->status[1].predictor; /* process the second predictor of the sum channel */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[0], nibble, 3); /* process the second pair of stereo PCM samples */ diff_channel = (diff_channel + c->status[1].predictor) / 2; *samples++ = c->status[0].predictor + c->status[1].predictor; *samples++ = c->status[0].predictor - c->status[1].predictor; } break; } case CODEC_ID_ADPCM_IMA_ISS: for (channel = 0; channel < avctx->channels; channel++) { cs = &c->status[channel]; cs->predictor = (int16_t)bytestream_get_le16(&src); cs->step_index = *src++; src++; } for (n = nb_samples >> (1 - st); n > 0; n--, src++) { uint8_t v1, v2; uint8_t v = *src; /* nibbles are swapped for mono */ if (st) { v1 = v >> 4; v2 = v & 0x0F; } else { v2 = v >> 4; v1 = v & 0x0F; } *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3); } break; case CODEC_ID_ADPCM_IMA_WS: while (src < buf + buf_size) { uint8_t v = *src++; *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3); } break; case CODEC_ID_ADPCM_XA: while (buf_size >= 128) { xa_decode(samples, src, &c->status[0], &c->status[1], avctx->channels); src += 128; samples += 28 * 8; buf_size -= 128; } break; case CODEC_ID_ADPCM_IMA_EA_EACS: src += 4; // skip sample count (already read) for (i=0; i<=st; i++) c->status[i].step_index = bytestream_get_le32(&src); for (i=0; i<=st; i++) c->status[i].predictor = bytestream_get_le32(&src); for (n = nb_samples >> (1 - st); n > 0; n--, src++) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], *src>>4, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3); } break; case 
CODEC_ID_ADPCM_IMA_EA_SEAD: for (n = nb_samples >> (1 - st); n > 0; n--, src++) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6); *samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6); } break; case CODEC_ID_ADPCM_EA: { int32_t previous_left_sample, previous_right_sample; int32_t current_left_sample, current_right_sample; int32_t next_left_sample, next_right_sample; int32_t coeff1l, coeff2l, coeff1r, coeff2r; uint8_t shift_left, shift_right; /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces, each coding 28 stereo samples. */ src += 4; // skip sample count (already read) current_left_sample = (int16_t)bytestream_get_le16(&src); previous_left_sample = (int16_t)bytestream_get_le16(&src); current_right_sample = (int16_t)bytestream_get_le16(&src); previous_right_sample = (int16_t)bytestream_get_le16(&src); for (count1 = 0; count1 < nb_samples / 28; count1++) { coeff1l = ea_adpcm_table[ *src >> 4 ]; coeff2l = ea_adpcm_table[(*src >> 4 ) + 4]; coeff1r = ea_adpcm_table[*src & 0x0F]; coeff2r = ea_adpcm_table[(*src & 0x0F) + 4]; src++; shift_left = 20 - (*src >> 4); shift_right = 20 - (*src & 0x0F); src++; for (count2 = 0; count2 < 28; count2++) { next_left_sample = sign_extend(*src >> 4, 4) << shift_left; next_right_sample = sign_extend(*src, 4) << shift_right; src++; next_left_sample = (next_left_sample + (current_left_sample * coeff1l) + (previous_left_sample * coeff2l) + 0x80) >> 8; next_right_sample = (next_right_sample + (current_right_sample * coeff1r) + (previous_right_sample * coeff2r) + 0x80) >> 8; previous_left_sample = current_left_sample; current_left_sample = av_clip_int16(next_left_sample); previous_right_sample = current_right_sample; current_right_sample = av_clip_int16(next_right_sample); *samples++ = (unsigned short)current_left_sample; *samples++ = (unsigned short)current_right_sample; } } if (src - buf == buf_size - 2) src += 2; // Skip terminating 0x0000 break; } case CODEC_ID_ADPCM_EA_MAXIS_XA: { int coeff[2][2], shift[2]; for(channel = 0; channel < avctx->channels; channel++) { for (i=0; i<2; i++) coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i]; shift[channel] = 20 - (*src & 0x0F); src++; } for (count1 = 0; count1 < nb_samples / 2; count1++) { for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */ for(channel = 0; channel < avctx->channels; channel++) { int32_t sample = sign_extend(src[channel] >> i, 4) << shift[channel]; sample = (sample + c->status[channel].sample1 * coeff[channel][0] + c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8; c->status[channel].sample2 = c->status[channel].sample1; c->status[channel].sample1 = av_clip_int16(sample); *samples++ = c->status[channel].sample1; } } src+=avctx->channels; } /* consume whole packet */ src = buf + buf_size; break; } case CODEC_ID_ADPCM_EA_R1: case CODEC_ID_ADPCM_EA_R2: case CODEC_ID_ADPCM_EA_R3: { /* channel numbering 2chan: 0=fl, 1=fr 4chan: 0=fl, 1=rl, 2=fr, 3=rr 6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */ const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3; int32_t previous_sample, current_sample, next_sample; int32_t coeff1, coeff2; uint8_t shift; unsigned int channel; uint16_t *samplesC; const uint8_t *srcC; const uint8_t *src_end = buf + buf_size; int count = 0; src += 4; // skip sample count (already read) for (channel=0; channel<avctx->channels; channel++) { int32_t offset = (big_endian ?
bytestream_get_be32(&src) : bytestream_get_le32(&src)) + (avctx->channels-channel-1) * 4; if ((offset < 0) || (offset >= src_end - src - 4)) break; srcC = src + offset; samplesC = samples + channel; if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) { current_sample = (int16_t)bytestream_get_le16(&srcC); previous_sample = (int16_t)bytestream_get_le16(&srcC); } else { current_sample = c->status[channel].predictor; previous_sample = c->status[channel].prev_sample; } for (count1 = 0; count1 < nb_samples / 28; count1++) { if (*srcC == 0xEE) { /* only seen in R2 and R3 */ srcC++; if (srcC > src_end - 30*2) break; current_sample = (int16_t)bytestream_get_be16(&srcC); previous_sample = (int16_t)bytestream_get_be16(&srcC); for (count2=0; count2<28; count2++) { *samplesC = (int16_t)bytestream_get_be16(&srcC); samplesC += avctx->channels; } } else { coeff1 = ea_adpcm_table[ *srcC>>4 ]; coeff2 = ea_adpcm_table[(*srcC>>4) + 4]; shift = 20 - (*srcC++ & 0x0F); if (srcC > src_end - 14) break; for (count2=0; count2<28; count2++) { if (count2 & 1) next_sample = sign_extend(*srcC++, 4) << shift; else next_sample = sign_extend(*srcC >> 4, 4) << shift; next_sample += (current_sample * coeff1) + (previous_sample * coeff2); next_sample = av_clip_int16(next_sample >> 8); previous_sample = current_sample; current_sample = next_sample; *samplesC = current_sample; samplesC += avctx->channels; } } } if (!count) { count = count1; } else if (count != count1) { av_log(avctx, AV_LOG_WARNING, \"per-channel sample count mismatch\\n\"); count = FFMAX(count, count1); } if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) { c->status[channel].predictor = current_sample; c->status[channel].prev_sample = previous_sample; } } c->frame.nb_samples = count * 28; src = src_end; break; } case CODEC_ID_ADPCM_EA_XAS: for (channel=0; channel<avctx->channels; channel++) { int coeff[2][4], shift[4]; short *s2, *s = &samples[channel]; for (n=0; n<4; n++, s+=32*avctx->channels) { for (i=0; i<2; i++) coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i]; shift[n] = 20 - (src[2] & 0x0F); for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels) s2[0] = (src[0]&0xF0) + (src[1]<<8); } for (m=2; m<32; m+=2) { s = &samples[m*avctx->channels + channel]; for (n=0; n<4; n++, src++, s+=32*avctx->channels) { for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) { int level = sign_extend(*src >> (4 - i), 4) << shift[n]; int pred = s2[-1*avctx->channels] * coeff[0][n] + s2[-2*avctx->channels] * coeff[1][n]; s2[0] = av_clip_int16((level + pred + 0x80) >> 8); } } } } break; case CODEC_ID_ADPCM_IMA_AMV: case CODEC_ID_ADPCM_IMA_SMJPEG: c->status[0].predictor = (int16_t)bytestream_get_le16(&src); c->status[0].step_index = bytestream_get_le16(&src); if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) src+=4; for (n = nb_samples >> (1 - st); n > 0; n--, src++) { char hi, lo; lo = *src & 0x0F; hi = *src >> 4; if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) FFSWAP(char, hi, lo); *samples++ = adpcm_ima_expand_nibble(&c->status[0], lo, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[0], hi, 3); } break; case CODEC_ID_ADPCM_CT: for (n = nb_samples >> (1 - st); n > 0; n--, src++) { uint8_t v = *src; *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 ); *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F); } break; case CODEC_ID_ADPCM_SBPRO_4: case CODEC_ID_ADPCM_SBPRO_3: case CODEC_ID_ADPCM_SBPRO_2: if (!c->status[0].step_index) { /* the first byte is a raw sample */ *samples++ = 128 * (*src++ - 0x80); if (st) *samples++ = 128 * (*src++ - 0x80); c->status[0].step_index = 1;
nb_samples--; } if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) { for (n = nb_samples >> (1 - st); n > 0; n--, src++) { *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] >> 4, 4, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], src[0] & 0x0F, 4, 0); } } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) { for (n = nb_samples / 3; n > 0; n--, src++) { *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] >> 5 , 3, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], (src[0] >> 2) & 0x07, 3, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] & 0x03, 2, 0); } } else { for (n = nb_samples >> (2 - st); n > 0; n--, src++) { *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] >> 6 , 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], (src[0] >> 4) & 0x03, 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], (src[0] >> 2) & 0x03, 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], src[0] & 0x03, 2, 2); } } break; case CODEC_ID_ADPCM_SWF: { GetBitContext gb; const int *table; int k0, signmask, nb_bits, count; int size = buf_size*8; init_get_bits(&gb, buf, size); //read bits & initial values nb_bits = get_bits(&gb, 2)+2; //av_log(NULL,AV_LOG_INFO,\"nb_bits: %d\\n\", nb_bits); table = swf_index_tables[nb_bits-2]; k0 = 1 << (nb_bits-2); signmask = 1 << (nb_bits-1); while (get_bits_count(&gb) <= size - 22*avctx->channels) { for (i = 0; i < avctx->channels; i++) { *samples++ = c->status[i].predictor = get_sbits(&gb, 16); c->status[i].step_index = get_bits(&gb, 6); } for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) { int i; for (i = 0; i < avctx->channels; i++) { // similar to IMA adpcm int delta = get_bits(&gb, nb_bits); int step = ff_adpcm_step_table[c->status[i].step_index]; long vpdiff = 0; // vpdiff = (delta+0.5)*step/4 int k = k0; do { if (delta & k) vpdiff += step; step >>= 1; k >>= 1; } while(k); vpdiff += step; if (delta & signmask) c->status[i].predictor -= vpdiff; else c->status[i].predictor += vpdiff; c->status[i].step_index += table[delta & (~signmask)]; c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88); c->status[i].predictor = av_clip_int16(c->status[i].predictor); *samples++ = c->status[i].predictor; } } } src += buf_size; break; } case CODEC_ID_ADPCM_YAMAHA: for (n = nb_samples >> (1 - st); n > 0; n--, src++) { uint8_t v = *src; *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F); *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 ); } break; case CODEC_ID_ADPCM_THP: { int table[2][16]; int prev[2][2]; int ch; src += 4; // skip channel size src += 4; // skip number of samples (already read) for (i = 0; i < 32; i++) table[0][i] = (int16_t)bytestream_get_be16(&src); /* Initialize the previous sample. */ for (i = 0; i < 4; i++) prev[0][i] = (int16_t)bytestream_get_be16(&src); for (ch = 0; ch <= st; ch++) { samples = (short *)c->frame.data[0] + ch; /* Read in every sample for this channel. */ for (i = 0; i < nb_samples / 14; i++) { int index = (*src >> 4) & 7; unsigned int exp = *src++ & 15; int factor1 = table[ch][index * 2]; int factor2 = table[ch][index * 2 + 1]; /* Decode 14 samples. 
*/ for (n = 0; n < 14; n++) { int32_t sampledat; if(n&1) sampledat = sign_extend(*src++, 4); else sampledat = sign_extend(*src >> 4, 4); sampledat = ((prev[ch][0]*factor1 + prev[ch][1]*factor2) >> 11) + (sampledat << exp); *samples = av_clip_int16(sampledat); prev[ch][1] = prev[ch][0]; prev[ch][0] = *samples++; /* In case of stereo, skip one sample, this sample is for the other channel. */ samples += st; } } } break; } default: return -1; } *got_frame_ptr = 1; *(AVFrame *)data = c->frame; return src - buf; }"} {"target": 1, "idx": 16666, "func": "static void cpu_ioreq_pio(ioreq_t *req) { int i, sign; sign = req->df ? -1 : 1; if (req->dir == IOREQ_READ) { if (!req->data_is_ptr) { req->data = do_inp(req->addr, req->size); } else { uint32_t tmp; for (i = 0; i < req->count; i++) { tmp = do_inp(req->addr, req->size); cpu_physical_memory_write( req->data + (sign * i * (int64_t)req->size), (uint8_t *) &tmp, req->size); } } } else if (req->dir == IOREQ_WRITE) { if (!req->data_is_ptr) { do_outp(req->addr, req->size, req->data); } else { for (i = 0; i < req->count; i++) { uint32_t tmp = 0; cpu_physical_memory_read( req->data + (sign * i * (int64_t)req->size), (uint8_t*) &tmp, req->size); do_outp(req->addr, req->size, tmp); } } } }"} {"target": 1, "idx": 16693, "func": "static char *choose_pixel_fmts(OutputStream *ost) { if (ost->keep_pix_fmt) { if (ost->filter) avfilter_graph_set_auto_convert(ost->filter->graph->graph, AVFILTER_AUTO_CONVERT_NONE); if (ost->st->codec->pix_fmt == PIX_FMT_NONE) return NULL; ost->pix_fmts[0] = ost->st->codec->pix_fmt; return ost->pix_fmts; } if (ost->st->codec->pix_fmt != PIX_FMT_NONE) { return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc, ost->st->codec->pix_fmt))); } else if (ost->enc->pix_fmts) { const enum PixelFormat *p; AVIOContext *s = NULL; uint8_t *ret; int len; if (avio_open_dyn_buf(&s) < 0) exit_program(1); p = ost->enc->pix_fmts; if (ost->st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) { if (ost->st->codec->codec_id == CODEC_ID_MJPEG) { p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE }; } else if (ost->st->codec->codec_id == CODEC_ID_LJPEG) { p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE }; } } for (; *p != PIX_FMT_NONE; p++) avio_printf(s, \"%s:\", av_get_pix_fmt_name(*p)); len = avio_close_dyn_buf(s, &ret); ret[len - 1] = 0; return ret; } else return NULL; }"} {"target": 1, "idx": 16700, "func": "static void mov_metadata_creation_time(AVMetadata **metadata, time_t time) { char buffer[32]; if (time) { time -= 2082844800; /* seconds between 1904-01-01 and Epoch */ strftime(buffer, sizeof(buffer), \"%Y-%m-%d %H:%M:%S\", gmtime(&time)); av_metadata_set2(metadata, \"creation_time\", buffer, 0); } }"} {"target": 1, "idx": 16708, "func": "static uint32_t hpet_ram_readl(void *opaque, target_phys_addr_t addr) { HPETState *s = (HPETState *)opaque; uint64_t cur_tick, index; DPRINTF(\"qemu: Enter hpet_ram_readl at %\" PRIx64 \"\\n\", addr); index = addr; /*address range of all TN regs*/ if (index >= 0x100 && index <= 0x3ff) { uint8_t timer_id = (addr - 0x100) / 0x20; if (timer_id > HPET_NUM_TIMERS - 1) { printf(\"qemu: timer id out of range\\n\"); return 0; } HPETTimer *timer = &s->timer[timer_id]; switch ((addr - 0x100) % 0x20) { case HPET_TN_CFG: return timer->config; case HPET_TN_CFG + 4: // Interrupt capabilities return timer->config >> 32; case 
HPET_TN_CMP: // comparator register return timer->cmp; case HPET_TN_CMP + 4: return timer->cmp >> 32; case HPET_TN_ROUTE: return timer->fsb >> 32; default: DPRINTF(\"qemu: invalid hpet_ram_readl\\n\"); break; } } else { switch (index) { case HPET_ID: return s->capability; case HPET_PERIOD: return s->capability >> 32; case HPET_CFG: return s->config; case HPET_CFG + 4: DPRINTF(\"qemu: invalid HPET_CFG + 4 hpet_ram_readl \\n\"); return 0; case HPET_COUNTER: if (hpet_enabled()) cur_tick = hpet_get_ticks(); else cur_tick = s->hpet_counter; DPRINTF(\"qemu: reading counter = %\" PRIx64 \"\\n\", cur_tick); return cur_tick; case HPET_COUNTER + 4: if (hpet_enabled()) cur_tick = hpet_get_ticks(); else cur_tick = s->hpet_counter; DPRINTF(\"qemu: reading counter + 4 = %\" PRIx64 \"\\n\", cur_tick); return cur_tick >> 32; case HPET_STATUS: return s->isr; default: DPRINTF(\"qemu: invalid hpet_ram_readl\\n\"); break; } } return 0; }"} {"target": 1, "idx": 16717, "func": "void usb_claim_port(USBDevice *dev, Error **errp) { USBBus *bus = usb_bus_from_device(dev); USBPort *port; assert(dev->port == NULL); if (dev->port_path) { QTAILQ_FOREACH(port, &bus->free, next) { if (strcmp(port->path, dev->port_path) == 0) { break; } } if (port == NULL) { error_setg(errp, \"usb port %s (bus %s) not found (in use?)\", dev->port_path, bus->qbus.name); return; } } else { if (bus->nfree == 1 && strcmp(object_get_typename(OBJECT(dev)), \"usb-hub\") != 0) { /* Create a new hub and chain it on */ usb_create_simple(bus, \"usb-hub\"); } if (bus->nfree == 0) { error_setg(errp, \"tried to attach usb device %s to a bus \" \"with no free ports\", dev->product_desc); return; } port = QTAILQ_FIRST(&bus->free); } trace_usb_port_claim(bus->busnr, port->path); QTAILQ_REMOVE(&bus->free, port, next); bus->nfree--; dev->port = port; port->dev = dev; QTAILQ_INSERT_TAIL(&bus->used, port, next); bus->nused++; }"} {"target": 0, "idx": 16723, "func": "static void vmxnet3_update_features(VMXNET3State *s) { uint32_t guest_features; int rxcso_supported; guest_features = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.misc.uptFeatures); rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM); s->rx_vlan_stripping = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXVLAN); s->lro_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_LRO); VMW_CFPRN(\"Features configuration: LRO: %d, RXCSUM: %d, VLANSTRIP: %d\", s->lro_supported, rxcso_supported, s->rx_vlan_stripping); if (s->peer_has_vhdr) { qemu_peer_set_offload(qemu_get_queue(s->nic), rxcso_supported, s->lro_supported, s->lro_supported, 0, 0); } }"} {"target": 0, "idx": 16724, "func": "static int virtio_blk_exit_pci(PCIDevice *pci_dev) { VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev); blockdev_mark_auto_del(proxy->block.dinfo->bdrv); return virtio_exit_pci(pci_dev); }"} {"target": 0, "idx": 16727, "func": "yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes) { const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0]; int i; int A = 0xffff<<14; if (uvalpha < 2048) { for (i = 0; i < dstW; i++) { int Y = (buf0[i]) >> 2; int U = (ubuf0[i] + (-128 << 11)) >> 2; int V = (vbuf0[i] + (-128 << 11)) >> 2; int R, G, B; Y -= c->yuv2rgb_y_offset; Y *= c->yuv2rgb_y_coeff; Y += 1 << 13; if (hasAlpha) { A = abuf0[i] << 11; A += 1 << 13; } R = V * c->yuv2rgb_v2r_coeff; G = V * c->yuv2rgb_v2g_coeff + U * 
c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14); output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14); output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14); if (eightbytes) { output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14); dest += 4; } else { dest += 3; } } } else { const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1]; int A = 0xffff<<14; for (i = 0; i < dstW; i++) { int Y = (buf0[i] ) >> 2; int U = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3; int V = (vbuf0[i] + vbuf1[i] + (-128 << 12)) >> 3; int R, G, B; Y -= c->yuv2rgb_y_offset; Y *= c->yuv2rgb_y_coeff; Y += 1 << 13; if (hasAlpha) { A = abuf0[i] << 11; A += 1 << 13; } R = V * c->yuv2rgb_v2r_coeff; G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14); output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14); output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14); if (eightbytes) { output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14); dest += 4; } else { dest += 3; } } } }"} {"target": 0, "idx": 16728, "func": "static void uhci_async_complete(USBPacket *packet, void *opaque) { UHCIState *s = opaque; UHCIAsync *async = (UHCIAsync *) packet; DPRINTF(\"uhci: async complete. td 0x%x token 0x%x\\n\", async->td, async->token); async->done = 1; uhci_process_frame(s); }"} {"target": 0, "idx": 16735, "func": "static void gen_neon_unzip(int reg, int q, int tmp, int size) { int n; TCGv t0, t1; for (n = 0; n < q + 1; n += 2) { t0 = neon_load_reg(reg, n); t1 = neon_load_reg(reg, n + 1); switch (size) { case 0: gen_neon_unzip_u8(t0, t1); break; case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */ case 2: /* no-op */; break; default: abort(); } neon_store_scratch(tmp + n, t0); neon_store_scratch(tmp + n + 1, t1); } }"} {"target": 0, "idx": 16752, "func": "static void parse_numa_distance(NumaDistOptions *dist, Error **errp) { uint16_t src = dist->src; uint16_t dst = dist->dst; uint8_t val = dist->val; if (src >= MAX_NODES || dst >= MAX_NODES) { error_setg(errp, \"Invalid node %\" PRIu16 \", max possible could be %\" PRIu16, MAX(src, dst), MAX_NODES); return; } if (!numa_info[src].present || !numa_info[dst].present) { error_setg(errp, \"Source/Destination NUMA node is missing. \" \"Please use '-numa node' option to declare it first.\"); return; } if (val < NUMA_DISTANCE_MIN) { error_setg(errp, \"NUMA distance (%\" PRIu8 \") is invalid, \" \"it shouldn't be less than %d.\", val, NUMA_DISTANCE_MIN); return; } if (src == dst && val != NUMA_DISTANCE_MIN) { error_setg(errp, \"Local distance of node %d should be %d.\", src, NUMA_DISTANCE_MIN); return; } numa_info[src].distance[dst] = val; have_numa_distance = true; }"} {"target": 1, "idx": 16755, "func": "static int parse_palette(AVCodecContext *avctx, GetByteContext *gbc, uint32_t *pal, int colors) { int i; for (i = 0; i <= colors; i++) { uint8_t r, g, b; unsigned int idx = bytestream2_get_be16(gbc); /* color index */ if (idx > 255) { av_log(avctx, AV_LOG_WARNING, \"Palette index out of range: %u\\n\", idx); bytestream2_skip(gbc, 6); continue; } r = bytestream2_get_byte(gbc); bytestream2_skip(gbc, 1); g = bytestream2_get_byte(gbc); bytestream2_skip(gbc, 1); b = bytestream2_get_byte(gbc); bytestream2_skip(gbc, 1); pal[idx] = (r << 16) | (g << 8) | b; } return 0; }"} {"target": 1, "idx": 16767, "func": "static int compare_int64(const void *a, const void *b) { int64_t va = *(int64_t *)a, vb = *(int64_t *)b; return va < vb ? 
-1 : va > vb ? +1 : 0; }"} {"target": 1, "idx": 16771, "func": "uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, uint32_t index, int reg) { struct kvm_cpuid2 *cpuid; int i, max; uint32_t ret = 0; uint32_t cpuid_1_edx; if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) { return -1U; } max = 1; while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) { max *= 2; } for (i = 0; i < cpuid->nent; ++i) { if (cpuid->entries[i].function == function && cpuid->entries[i].index == index) { switch (reg) { case R_EAX: ret = cpuid->entries[i].eax; break; case R_EBX: ret = cpuid->entries[i].ebx; break; case R_ECX: ret = cpuid->entries[i].ecx; break; case R_EDX: ret = cpuid->entries[i].edx; switch (function) { case 1: /* KVM before 2.6.30 misreports the following features */ ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA; break; case 0x80000001: /* On Intel, kvm returns cpuid according to the Intel spec, * so add missing bits according to the AMD spec: */ cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX); ret |= cpuid_1_edx & 0x183f7ff; break; } break; } } } qemu_free(cpuid); return ret; }"} {"target": 0, "idx": 16780, "func": "static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr, uint8_t **bufptr, DumpState *s) { GuestPhysBlock *block = *blockptr; hwaddr addr; uint8_t *buf; /* block == NULL means the start of the iteration */ if (!block) { block = QTAILQ_FIRST(&s->guest_phys_blocks.head); *blockptr = block; assert(block->target_start % s->page_size == 0); assert(block->target_end % s->page_size == 0); *pfnptr = paddr_to_pfn(block->target_start); if (bufptr) { *bufptr = block->host_addr; } return true; } *pfnptr = *pfnptr + 1; addr = pfn_to_paddr(*pfnptr); if ((addr >= block->target_start) && (addr + s->page_size <= block->target_end)) { buf = block->host_addr + (addr - block->target_start); } else { /* the next page is in the next block */ block = QTAILQ_NEXT(block, next); *blockptr = block; if (!block) { return false; } assert(block->target_start % s->page_size == 0); assert(block->target_end % s->page_size == 0); *pfnptr = paddr_to_pfn(block->target_start); buf = block->host_addr; } if (bufptr) { *bufptr = buf; } return true; }"} {"target": 0, "idx": 16783, "func": "static coroutine_fn int qcow2_co_writev(BlockDriverState *bs, int64_t sector_num, int remaining_sectors, QEMUIOVector *qiov) { BDRVQcowState *s = bs->opaque; int index_in_cluster; int n_end; int ret; int cur_nr_sectors; /* number of sectors in current iteration */ uint64_t cluster_offset; QEMUIOVector hd_qiov; uint64_t bytes_done = 0; uint8_t *cluster_data = NULL; QCowL2Meta *l2meta; trace_qcow2_writev_start_req(qemu_coroutine_self(), sector_num, remaining_sectors); qemu_iovec_init(&hd_qiov, qiov->niov); s->cluster_cache_offset = -1; /* disable compressed cache */ qemu_co_mutex_lock(&s->lock); while (remaining_sectors != 0) { l2meta = NULL; trace_qcow2_writev_start_part(qemu_coroutine_self()); index_in_cluster = sector_num & (s->cluster_sectors - 1); n_end = index_in_cluster + remaining_sectors; if (s->crypt_method && n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors) { n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors; } ret = qcow2_alloc_cluster_offset(bs, sector_num << 9, index_in_cluster, n_end, &cur_nr_sectors, &cluster_offset, &l2meta); if (ret < 0) { goto fail; } if (l2meta->nb_clusters > 0 && (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS)) { qcow2_mark_dirty(bs); } assert((cluster_offset & 511) == 0); qemu_iovec_reset(&hd_qiov); 
qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_nr_sectors * 512); if (s->crypt_method) { if (!cluster_data) { cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); } assert(hd_qiov.size <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size); qcow2_encrypt_sectors(s, sector_num, cluster_data, cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key); qemu_iovec_reset(&hd_qiov); qemu_iovec_add(&hd_qiov, cluster_data, cur_nr_sectors * 512); } qemu_co_mutex_unlock(&s->lock); BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); trace_qcow2_writev_data(qemu_coroutine_self(), (cluster_offset >> 9) + index_in_cluster); ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + index_in_cluster, cur_nr_sectors, &hd_qiov); qemu_co_mutex_lock(&s->lock); if (ret < 0) { goto fail; } if (l2meta != NULL) { ret = qcow2_alloc_cluster_link_l2(bs, l2meta); if (ret < 0) { goto fail; } run_dependent_requests(s, l2meta); g_free(l2meta); l2meta = NULL; } remaining_sectors -= cur_nr_sectors; sector_num += cur_nr_sectors; bytes_done += cur_nr_sectors * 512; trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_nr_sectors); } ret = 0; fail: if (l2meta != NULL) { run_dependent_requests(s, l2meta); g_free(l2meta); } qemu_co_mutex_unlock(&s->lock); qemu_iovec_destroy(&hd_qiov); qemu_vfree(cluster_data); trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); return ret; }"} {"target": 0, "idx": 16787, "func": "int usb_desc_get_descriptor(USBDevice *dev, int value, uint8_t *dest, size_t len) { const USBDesc *desc = dev->info->usb_desc; uint8_t buf[256]; uint8_t type = value >> 8; uint8_t index = value & 0xff; int ret = -1; switch(type) { case USB_DT_DEVICE: ret = usb_desc_device(&desc->id, desc->full, buf, sizeof(buf)); trace_usb_desc_device(dev->addr, len, ret); break; case USB_DT_CONFIG: if (index < desc->full->bNumConfigurations) { ret = usb_desc_config(desc->full->confs + index, buf, sizeof(buf)); } trace_usb_desc_config(dev->addr, index, len, ret); break; case USB_DT_STRING: ret = usb_desc_string(dev, index, buf, sizeof(buf)); trace_usb_desc_string(dev->addr, index, len, ret); break; default: fprintf(stderr, \"%s: %d unknown type %d (len %zd)\\n\", __FUNCTION__, dev->addr, type, len); break; } if (ret > 0) { if (ret > len) { ret = len; } memcpy(dest, buf, ret); } return ret; }"} {"target": 0, "idx": 16792, "func": "static void connex_init(MachineState *machine) { PXA2xxState *cpu; DriveInfo *dinfo; int be; MemoryRegion *address_space_mem = get_system_memory(); uint32_t connex_rom = 0x01000000; uint32_t connex_ram = 0x04000000; cpu = pxa255_init(address_space_mem, connex_ram); dinfo = drive_get(IF_PFLASH, 0, 0); if (!dinfo && !qtest_enabled()) { fprintf(stderr, \"A flash image must be given with the \" \"'pflash' parameter\\n\"); exit(1); } #ifdef TARGET_WORDS_BIGENDIAN be = 1; #else be = 0; #endif if (!pflash_cfi01_register(0x00000000, NULL, \"connext.rom\", connex_rom, dinfo ? 
blk_bs(blk_by_legacy_dinfo(dinfo)) : NULL, sector_len, connex_rom / sector_len, 2, 0, 0, 0, 0, be)) { fprintf(stderr, \"qemu: Error registering flash memory.\\n\"); exit(1); } /* Interrupt line of NIC is connected to GPIO line 36 */ smc91c111_init(&nd_table[0], 0x04000300, qdev_get_gpio_in(cpu->gpio, 36)); }"} {"target": 0, "idx": 16794, "func": "void aio_set_event_notifier(AioContext *ctx, EventNotifier *notifier, bool is_external, EventNotifierHandler *io_read, AioPollFn *io_poll) { aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external, (IOHandler *)io_read, NULL, io_poll, notifier); }"} {"target": 0, "idx": 16795, "func": "void av_xtea_crypt(AVXTEA *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt) { int i; while (count > 0) { if (decrypt) { xtea_crypt_ecb(ctx, dst, src, decrypt); if (iv) { for (i = 0; i < 8; i++) dst[i] = dst[i] ^ iv[i]; memcpy(iv, src, 8); } } else { if (iv) { for (i = 0; i < 8; i++) dst[i] = src[i] ^ iv[i]; xtea_crypt_ecb(ctx, dst, dst, decrypt); memcpy(iv, dst, 8); } else { xtea_crypt_ecb(ctx, dst, src, decrypt); } } src += 8; dst += 8; count -= 8; } }"} {"target": 0, "idx": 16808, "func": "static void read_chapter(AVFormatContext *s, AVIOContext *pb, int len, char *ttag, ID3v2ExtraMeta **extra_meta) { AVRational time_base = {1, 1000}; uint32_t start, end; AVChapter *chapter; uint8_t *dst = NULL; int taglen; char tag[5]; decode_str(s, pb, 0, &dst, &len); if (len < 16) return; start = avio_rb32(pb); end = avio_rb32(pb); avio_skip(pb, 8); chapter = avpriv_new_chapter(s, s->nb_chapters + 1, time_base, start, end, dst); if (!chapter) { av_free(dst); return; } len -= 16; while (len > 10) { avio_read(pb, tag, 4); tag[4] = 0; taglen = avio_rb32(pb); avio_skip(pb, 2); len -= 10; if (taglen < 0 || taglen > len) { av_free(dst); return; } if (tag[0] == 'T') read_ttag(s, pb, taglen, &chapter->metadata, tag); else avio_skip(pb, taglen); len -= taglen; } ff_metadata_conv(&chapter->metadata, NULL, ff_id3v2_34_metadata_conv); ff_metadata_conv(&chapter->metadata, NULL, ff_id3v2_4_metadata_conv); av_free(dst); }"} {"target": 0, "idx": 16812, "func": "void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr) { unsigned long page_index; int ret; #ifdef _WIN32 DWORD temp; #endif page_index = ram_addr >> TARGET_PAGE_BITS; if (!modified_ram_pages_table[page_index]) { #if 0 printf(\"%d: modify_page=%08lx\\n\", nb_modified_ram_pages, ram_addr); #endif modified_ram_pages_table[page_index] = 1; modified_ram_pages[nb_modified_ram_pages++] = ram_addr; if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) { /* flush */ #ifdef _WIN32 ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES, &nb_modified_ram_pages, sizeof(nb_modified_ram_pages), NULL, 0, &temp, NULL); #else ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES, &nb_modified_ram_pages); #endif kqemu_reset_modified_ram_pages(); } } }"} {"target": 0, "idx": 16829, "func": "static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds, unsigned npfd, int64_t timeout) { if (!ctx->epoll_available) { return false; } if (aio_epoll_enabled(ctx)) { return true; } if (npfd >= EPOLL_ENABLE_THRESHOLD) { if (aio_epoll_try_enable(ctx)) { return true; } else { aio_epoll_disable(ctx); } } return false; }"} {"target": 0, "idx": 16842, "func": "static void aarch64_cpu_register_types(void) { int i; type_register_static(&aarch64_cpu_type_info); for (i = 0; i < ARRAY_SIZE(aarch64_cpus); i++) { aarch64_cpu_register(&aarch64_cpus[i]); } }"} {"target": 0, "idx": 16862, "func": "static void musicpal_lcd_write(void 
*opaque, target_phys_addr_t offset, uint64_t value, unsigned size) { musicpal_lcd_state *s = opaque; switch (offset) { case MP_LCD_IRQCTRL: s->irqctrl = value; break; case MP_LCD_SPICTRL: if (value == MP_LCD_SPI_DATA || value == MP_LCD_SPI_CMD) { s->mode = value; } else { s->mode = MP_LCD_SPI_INVALID; } break; case MP_LCD_INST: if (value >= MP_LCD_INST_SETPAGE0 && value <= MP_LCD_INST_SETPAGE7) { s->page = value - MP_LCD_INST_SETPAGE0; s->page_off = 0; } break; case MP_LCD_DATA: if (s->mode == MP_LCD_SPI_CMD) { if (value >= MP_LCD_INST_SETPAGE0 && value <= MP_LCD_INST_SETPAGE7) { s->page = value - MP_LCD_INST_SETPAGE0; s->page_off = 0; } } else if (s->mode == MP_LCD_SPI_DATA) { s->video_ram[s->page*128 + s->page_off] = value; s->page_off = (s->page_off + 1) & 127; } break; } }"} {"target": 0, "idx": 16870, "func": "static inline uint32_t nvic_gprio_mask(NVICState *s) { return ~0U << (s->prigroup + 1); }"} {"target": 0, "idx": 16879, "func": "int qemu_eventfd(int fds[2]) { #ifdef CONFIG_EVENTFD int ret; ret = eventfd(0, 0); if (ret >= 0) { fds[0] = ret; qemu_set_cloexec(ret); if ((fds[1] = dup(ret)) == -1) { close(ret); return -1; } qemu_set_cloexec(fds[1]); return 0; } if (errno != ENOSYS) { return -1; } #endif return qemu_pipe(fds); }"} {"target": 0, "idx": 16888, "func": "static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset) { return -ENOTSUP; }"} {"target": 0, "idx": 16890, "func": "void HELPER(wsr_ibreakenable)(uint32_t v) { uint32_t change = v ^ env->sregs[IBREAKENABLE]; unsigned i; for (i = 0; i < env->config->nibreak; ++i) { if (change & (1 << i)) { tb_invalidate_phys_page_range( env->sregs[IBREAKA + i], env->sregs[IBREAKA + i] + 1, 0); } } env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1); }"} {"target": 1, "idx": 16924, "func": "static void openrisc_cpu_class_init(ObjectClass *oc, void *data) { OpenRISCCPUClass *occ = OPENRISC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(occ); DeviceClass *dc = DEVICE_CLASS(oc); occ->parent_realize = dc->realize; dc->realize = openrisc_cpu_realizefn; occ->parent_reset = cc->reset; cc->reset = openrisc_cpu_reset; cc->class_by_name = openrisc_cpu_class_by_name; cc->has_work = openrisc_cpu_has_work; cc->do_interrupt = openrisc_cpu_do_interrupt; cc->cpu_exec_interrupt = openrisc_cpu_exec_interrupt; cc->dump_state = openrisc_cpu_dump_state; cc->set_pc = openrisc_cpu_set_pc; cc->gdb_read_register = openrisc_cpu_gdb_read_register; cc->gdb_write_register = openrisc_cpu_gdb_write_register; #ifdef CONFIG_USER_ONLY cc->handle_mmu_fault = openrisc_cpu_handle_mmu_fault; #else cc->get_phys_page_debug = openrisc_cpu_get_phys_page_debug; dc->vmsd = &vmstate_openrisc_cpu; #endif cc->gdb_num_core_regs = 32 + 3; /* * Reason: openrisc_cpu_initfn() calls cpu_exec_init(), which saves * the object in cpus -> dangling pointer after final * object_unref(). 
*/ dc->cannot_destroy_with_object_finalize_yet = true; }"} {"target": 1, "idx": 16930, "func": "static void tcp_accept_incoming_migration(void *opaque) { struct sockaddr_in addr; socklen_t addrlen = sizeof(addr); int s = (intptr_t)opaque; QEMUFile *f; int c; do { c = qemu_accept(s, (struct sockaddr *)&addr, &addrlen); } while (c == -1 && socket_error() == EINTR); qemu_set_fd_handler2(s, NULL, NULL, NULL, NULL); closesocket(s); DPRINTF(\"accepted migration\\n\"); if (c == -1) { fprintf(stderr, \"could not accept migration connection\\n\"); goto out; } f = qemu_fopen_socket(c, \"rb\"); if (f == NULL) { fprintf(stderr, \"could not qemu_fopen socket\\n\"); goto out; } process_incoming_migration(f); return; out: closesocket(c); }"} {"target": 1, "idx": 16933, "func": "static void quorum_copy_qiov(QEMUIOVector *dest, QEMUIOVector *source) { int i; assert(dest->niov == source->niov); assert(dest->size == source->size); for (i = 0; i < source->niov; i++) { assert(dest->iov[i].iov_len == source->iov[i].iov_len); memcpy(dest->iov[i].iov_base, source->iov[i].iov_base, source->iov[i].iov_len); } }"} {"target": 0, "idx": 16984, "func": "static int decode_sgirle8(AVCodecContext *avctx, uint8_t *dst, const uint8_t *src, int src_size, int width, int height, int linesize) { const uint8_t *src_end = src + src_size; int x = 0, y = 0; #define INC_XY(n) \\ x += n; \\ if (x >= width) { \\ y++; \\ if (y >= height) \\ return 0; \\ x = 0; \\ } while (src_end - src >= 2) { uint8_t v = *src++; if (v > 0 && v < 0xC0) { do { int length = FFMIN(v, width - x); memset(dst + y*linesize + x, RGB332_TO_BGR8(*src), length); INC_XY(length); v -= length; } while (v > 0); src++; } else if (v >= 0xC1) { v -= 0xC0; do { int length = FFMIN3(v, width - x, src_end - src); if (src_end - src < length) break; memcpy_rgb332_to_bgr8(dst + y*linesize + x, src, length); INC_XY(length); src += length; v -= length; } while (v > 0); } else { avpriv_request_sample(avctx, \"opcode %d\", v); return AVERROR_PATCHWELCOME; } } return 0; }"} {"target": 0, "idx": 16986, "func": "static int hls_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { HLSContext *c = s->priv_data; int i; int64_t seek_timestamp; int valid_for = -1; if ((flags & AVSEEK_FLAG_BYTE) || !c->variants[0]->playlists[0]->finished) return AVERROR(ENOSYS); seek_timestamp = stream_index < 0 ? timestamp : av_rescale_rnd(timestamp, AV_TIME_BASE, s->streams[stream_index]->time_base.den, flags & AVSEEK_FLAG_BACKWARD ? AV_ROUND_DOWN : AV_ROUND_UP); if (s->duration < seek_timestamp) return AVERROR(EIO); for (i = 0; i < c->n_playlists; i++) { /* check first that the timestamp is valid for some playlist */ struct playlist *pls = c->playlists[i]; int seq_no; if (find_timestamp_in_playlist(c, pls, seek_timestamp, &seq_no)) { /* set segment now so we do not need to search again below */ pls->cur_seq_no = seq_no; valid_for = i; break; } } if (valid_for < 0) return AVERROR(EIO); for (i = 0; i < c->n_playlists; i++) { /* Reset reading */ struct playlist *pls = c->playlists[i]; if (pls->input) { ffurl_close(pls->input); pls->input = NULL; } av_free_packet(&pls->pkt); reset_packet(&pls->pkt); pls->pb.eof_reached = 0; /* Clear any buffered data */ pls->pb.buf_end = pls->pb.buf_ptr = pls->pb.buffer; /* Reset the pos, to let the mpegts demuxer know we've seeked. 
*/ pls->pb.pos = 0; pls->seek_timestamp = seek_timestamp; pls->seek_flags = flags; /* set closest segment seq_no for playlists not handled above */ if (valid_for != i) find_timestamp_in_playlist(c, pls, seek_timestamp, &pls->cur_seq_no); } c->cur_timestamp = seek_timestamp; return 0; }"} {"target": 0, "idx": 16999, "func": "static int assigned_initfn(struct PCIDevice *pci_dev) { AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev); uint8_t e_intx; int r; Error *local_err = NULL; if (!kvm_enabled()) { error_report(\"pci-assign: error: requires KVM support\"); return -1; } if (!dev->host.domain && !dev->host.bus && !dev->host.slot && !dev->host.function) { error_report(\"pci-assign: error: no host device specified\"); return -1; } /* * Set up basic config space access control. Will be further refined during * device initialization. */ assigned_dev_emulate_config_read(dev, 0, PCI_CONFIG_SPACE_SIZE); assigned_dev_direct_config_read(dev, PCI_STATUS, 2); assigned_dev_direct_config_read(dev, PCI_REVISION_ID, 1); assigned_dev_direct_config_read(dev, PCI_CLASS_PROG, 3); assigned_dev_direct_config_read(dev, PCI_CACHE_LINE_SIZE, 1); assigned_dev_direct_config_read(dev, PCI_LATENCY_TIMER, 1); assigned_dev_direct_config_read(dev, PCI_BIST, 1); assigned_dev_direct_config_read(dev, PCI_CARDBUS_CIS, 4); assigned_dev_direct_config_read(dev, PCI_SUBSYSTEM_VENDOR_ID, 2); assigned_dev_direct_config_read(dev, PCI_SUBSYSTEM_ID, 2); assigned_dev_direct_config_read(dev, PCI_CAPABILITY_LIST + 1, 7); assigned_dev_direct_config_read(dev, PCI_MIN_GNT, 1); assigned_dev_direct_config_read(dev, PCI_MAX_LAT, 1); memcpy(dev->emulate_config_write, dev->emulate_config_read, sizeof(dev->emulate_config_read)); get_real_device(dev, &local_err); if (local_err) { qerror_report_err(local_err); error_free(local_err); goto out; } if (assigned_device_pci_cap_init(pci_dev, &local_err) < 0) { qerror_report_err(local_err); error_free(local_err); goto out; } /* intercept MSI-X entry page in the MMIO */ if (dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX) { assigned_dev_register_msix_mmio(dev, &local_err); if (local_err) { qerror_report_err(local_err); error_free(local_err); goto out; } } /* handle real device's MMIO/PIO BARs */ assigned_dev_register_regions(dev->real_device.regions, dev->real_device.region_number, dev, &local_err); if (local_err) { qerror_report_err(local_err); error_free(local_err); goto out; } /* handle interrupt routing */ e_intx = dev->dev.config[PCI_INTERRUPT_PIN] - 1; dev->intpin = e_intx; dev->intx_route.mode = PCI_INTX_DISABLED; dev->intx_route.irq = -1; /* assign device to guest */ r = assign_device(dev); if (r < 0) { goto out; } /* assign legacy INTx to the device */ r = assign_intx(dev); if (r < 0) { goto assigned_out; } assigned_dev_load_option_rom(dev); add_boot_device_path(dev->bootindex, &pci_dev->qdev, NULL); return 0; assigned_out: deassign_device(dev); out: free_assigned_device(dev); return -1; }"} {"target": 0, "idx": 17004, "func": "int xen_be_send_notify(struct XenDevice *xendev) { return xc_evtchn_notify(xendev->evtchndev, xendev->local_port); }"} {"target": 0, "idx": 17009, "func": "static int64_t wrap_timestamp(AVStream *st, int64_t timestamp) { if (st->pts_wrap_behavior != AV_PTS_WRAP_IGNORE && st->pts_wrap_bits < 64 && st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) { if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET && timestamp < st->pts_wrap_reference) return timestamp + (1ULL<pts_wrap_bits); else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET && 
timestamp >= st->pts_wrap_reference) return timestamp - (1ULL<pts_wrap_bits); } return timestamp; }"} {"target": 0, "idx": 17012, "func": "static void ide_atapi_cmd_reply_end(IDEState *s) { int byte_count_limit, size, ret; #ifdef DEBUG_IDE_ATAPI printf(\"reply: tx_size=%d elem_tx_size=%d index=%d\\n\", s->packet_transfer_size, s->elementary_transfer_size, s->io_buffer_index); #endif if (s->packet_transfer_size <= 0) { /* end of transfer */ ide_transfer_stop(s); s->status = READY_STAT; s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD; ide_set_irq(s); #ifdef DEBUG_IDE_ATAPI printf(\"status=0x%x\\n\", s->status); #endif } else { /* see if a new sector must be read */ if (s->lba != -1 && s->io_buffer_index >= s->cd_sector_size) { ret = cd_read_sector(s->bs, s->lba, s->io_buffer, s->cd_sector_size); if (ret < 0) { ide_transfer_stop(s); ide_atapi_io_error(s, ret); return; } s->lba++; s->io_buffer_index = 0; } if (s->elementary_transfer_size > 0) { /* there are some data left to transmit in this elementary transfer */ size = s->cd_sector_size - s->io_buffer_index; if (size > s->elementary_transfer_size) size = s->elementary_transfer_size; ide_transfer_start(s, s->io_buffer + s->io_buffer_index, size, ide_atapi_cmd_reply_end); s->packet_transfer_size -= size; s->elementary_transfer_size -= size; s->io_buffer_index += size; } else { /* a new transfer is needed */ s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO; byte_count_limit = s->lcyl | (s->hcyl << 8); #ifdef DEBUG_IDE_ATAPI printf(\"byte_count_limit=%d\\n\", byte_count_limit); #endif if (byte_count_limit == 0xffff) byte_count_limit--; size = s->packet_transfer_size; if (size > byte_count_limit) { /* byte count limit must be even if this case */ if (byte_count_limit & 1) byte_count_limit--; size = byte_count_limit; } s->lcyl = size; s->hcyl = size >> 8; s->elementary_transfer_size = size; /* we cannot transmit more than one sector at a time */ if (s->lba != -1) { if (size > (s->cd_sector_size - s->io_buffer_index)) size = (s->cd_sector_size - s->io_buffer_index); } ide_transfer_start(s, s->io_buffer + s->io_buffer_index, size, ide_atapi_cmd_reply_end); s->packet_transfer_size -= size; s->elementary_transfer_size -= size; s->io_buffer_index += size; ide_set_irq(s); #ifdef DEBUG_IDE_ATAPI printf(\"status=0x%x\\n\", s->status); #endif } } }"} {"target": 0, "idx": 17021, "func": "static void slavio_led_mem_writes(void *opaque, target_phys_addr_t addr, uint32_t val) { MiscState *s = opaque; uint32_t saddr; saddr = addr & LED_MAXADDR; MISC_DPRINTF(\"Write diagnostic LED reg 0x\" TARGET_FMT_plx \" = %x\\n\", addr, val); switch (saddr) { case 0: s->leds = val; break; default: break; } }"} {"target": 0, "idx": 17030, "func": "int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) { int max_discard; if (!bs->drv) { return -ENOMEDIUM; } else if (bdrv_check_request(bs, sector_num, nb_sectors)) { return -EIO; } else if (bs->read_only) { return -EROFS; } bdrv_reset_dirty(bs, sector_num, nb_sectors); /* Do nothing if disabled. */ if (!(bs->open_flags & BDRV_O_UNMAP)) { return 0; } if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) { return 0; } max_discard = bs->bl.max_discard ? 
bs->bl.max_discard : MAX_DISCARD_DEFAULT; while (nb_sectors > 0) { int ret; int num = nb_sectors; /* align request */ if (bs->bl.discard_alignment && num >= bs->bl.discard_alignment && sector_num % bs->bl.discard_alignment) { if (num > bs->bl.discard_alignment) { num = bs->bl.discard_alignment; } num -= sector_num % bs->bl.discard_alignment; } /* limit request size */ if (num > max_discard) { num = max_discard; } if (bs->drv->bdrv_co_discard) { ret = bs->drv->bdrv_co_discard(bs, sector_num, num); } else { BlockDriverAIOCB *acb; CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self(), }; acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors, bdrv_co_io_em_complete, &co); if (acb == NULL) { return -EIO; } else { qemu_coroutine_yield(); ret = co.ret; } } if (ret) { return ret; } sector_num += num; nb_sectors -= num; } return 0; }"} {"target": 0, "idx": 17043, "func": "static void xlnx_ep108_machine_init(MachineClass *mc) { mc->desc = \"Xilinx ZynqMP EP108 board\"; mc->init = xlnx_ep108_init; mc->block_default_type = IF_IDE; mc->units_per_default_bus = 1; mc->ignore_memory_transaction_failures = true; }"} {"target": 0, "idx": 17048, "func": "static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext, TCGReg rd, TCGReg rn, uint64_t limm) { unsigned h, l, r, c; assert(is_limm(limm)); h = clz64(limm); l = ctz64(limm); if (l == 0) { r = 0; /* form 0....01....1 */ c = ctz64(~limm) - 1; if (h == 0) { r = clz64(~limm); /* form 1..10..01..1 */ c += r; } } else { r = 64 - l; /* form 1....10....0 or 0..01..10..0 */ c = r - h - 1; } if (ext == TCG_TYPE_I32) { r &= 31; c &= 31; } tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c); }"} {"target": 0, "idx": 17072, "func": "void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, bool smm_enabled, qemu_irq sci_irq) { memory_region_init(&pm->io, OBJECT(lpc_pci), \"ich9-pm\", ICH9_PMIO_SIZE); memory_region_set_enabled(&pm->io, false); memory_region_add_subregion(pci_address_space_io(lpc_pci), 0, &pm->io); acpi_pm_tmr_init(&pm->acpi_regs, ich9_pm_update_sci_fn, &pm->io); acpi_pm1_evt_init(&pm->acpi_regs, ich9_pm_update_sci_fn, &pm->io); acpi_pm1_cnt_init(&pm->acpi_regs, &pm->io, pm->disable_s3, pm->disable_s4, pm->s4_val); acpi_gpe_init(&pm->acpi_regs, ICH9_PMIO_GPE0_LEN); memory_region_init_io(&pm->io_gpe, OBJECT(lpc_pci), &ich9_gpe_ops, pm, \"acpi-gpe0\", ICH9_PMIO_GPE0_LEN); memory_region_add_subregion(&pm->io, ICH9_PMIO_GPE0_STS, &pm->io_gpe); memory_region_init_io(&pm->io_smi, OBJECT(lpc_pci), &ich9_smi_ops, pm, \"acpi-smi\", 8); memory_region_add_subregion(&pm->io, ICH9_PMIO_SMI_EN, &pm->io_smi); pm->smm_enabled = smm_enabled; pm->enable_tco = true; acpi_pm_tco_init(&pm->tco_regs, &pm->io); pm->irq = sci_irq; qemu_register_reset(pm_reset, pm); pm->powerdown_notifier.notify = pm_powerdown_req; qemu_register_powerdown_notifier(&pm->powerdown_notifier); legacy_acpi_cpu_hotplug_init(pci_address_space_io(lpc_pci), OBJECT(lpc_pci), &pm->gpe_cpu, ICH9_CPU_HOTPLUG_IO_BASE); if (pm->acpi_memory_hotplug.is_enabled) { acpi_memory_hotplug_init(pci_address_space_io(lpc_pci), OBJECT(lpc_pci), &pm->acpi_memory_hotplug); } }"} {"target": 0, "idx": 17082, "func": "static void handle_sys(DisasContext *s, uint32_t insn, bool isread, unsigned int op0, unsigned int op1, unsigned int op2, unsigned int crn, unsigned int crm, unsigned int rt) { const ARMCPRegInfo *ri; TCGv_i64 tcg_rt; ri = get_arm_cp_reginfo(s->cp_regs, ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)); if (!ri) { /* Unknown register; this might be a guest error or 
a QEMU * unimplemented feature. */ qemu_log_mask(LOG_UNIMP, \"%s access to unsupported AArch64 \" \"system register op0:%d op1:%d crn:%d crm:%d op2:%d\\n\", isread ? \"read\" : \"write\", op0, op1, crn, crm, op2); unallocated_encoding(s); return; } /* Check access permissions */ if (!cp_access_ok(s->current_el, ri, isread)) { unallocated_encoding(s); return; } if (ri->accessfn) { /* Emit code to perform further access permissions checks at * runtime; this may result in an exception. */ TCGv_ptr tmpptr; TCGv_i32 tcg_syn; uint32_t syndrome; gen_a64_set_pc_im(s->pc - 4); tmpptr = tcg_const_ptr(ri); syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); tcg_syn = tcg_const_i32(syndrome); gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn); tcg_temp_free_ptr(tmpptr); tcg_temp_free_i32(tcg_syn); } /* Handle special cases first */ switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) { case ARM_CP_NOP: return; case ARM_CP_NZCV: tcg_rt = cpu_reg(s, rt); if (isread) { gen_get_nzcv(tcg_rt); } else { gen_set_nzcv(tcg_rt); } return; case ARM_CP_CURRENTEL: /* Reads as current EL value from pstate, which is * guaranteed to be constant by the tb flags. */ tcg_rt = cpu_reg(s, rt); tcg_gen_movi_i64(tcg_rt, s->current_el << 2); return; case ARM_CP_DC_ZVA: /* Writes clear the aligned block of memory which rt points into. */ tcg_rt = cpu_reg(s, rt); gen_helper_dc_zva(cpu_env, tcg_rt); return; default: break; } if (use_icount && (ri->type & ARM_CP_IO)) { gen_io_start(); } tcg_rt = cpu_reg(s, rt); if (isread) { if (ri->type & ARM_CP_CONST) { tcg_gen_movi_i64(tcg_rt, ri->resetvalue); } else if (ri->readfn) { TCGv_ptr tmpptr; tmpptr = tcg_const_ptr(ri); gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr); tcg_temp_free_ptr(tmpptr); } else { tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset); } } else { if (ri->type & ARM_CP_CONST) { /* If not forbidden by access permissions, treat as WI */ return; } else if (ri->writefn) { TCGv_ptr tmpptr; tmpptr = tcg_const_ptr(ri); gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt); tcg_temp_free_ptr(tmpptr); } else { tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset); } } if (use_icount && (ri->type & ARM_CP_IO)) { /* I/O operations must end the TB here (whether read or write) */ gen_io_end(); s->is_jmp = DISAS_UPDATE; } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { /* We default to ending the TB on a coprocessor register write, * but allow this to be suppressed by the register definition * (usually only necessary to work around guest bugs). 
*/ s->is_jmp = DISAS_UPDATE; } }"} {"target": 0, "idx": 17085, "func": "static Visitor *visitor_input_test_init_raw(TestInputVisitorData *data, const char *json_string) { return visitor_input_test_init_internal(data, json_string, NULL); }"} {"target": 0, "idx": 17092, "func": "static void do_info_version(Monitor *mon) { monitor_printf(mon, \"%s\\n\", QEMU_VERSION); }"} {"target": 1, "idx": 17101, "func": "static int qcrypto_ivgen_essiv_init(QCryptoIVGen *ivgen, const uint8_t *key, size_t nkey, Error **errp) { uint8_t *salt; size_t nhash; size_t nsalt; QCryptoIVGenESSIV *essiv = g_new0(QCryptoIVGenESSIV, 1); /* Not necessarily the same as nkey */ nsalt = qcrypto_cipher_get_key_len(ivgen->cipher); nhash = qcrypto_hash_digest_len(ivgen->hash); /* Salt must be larger of hash size or key size */ salt = g_new0(uint8_t, MAX(nhash, nsalt)); if (qcrypto_hash_bytes(ivgen->hash, (const gchar *)key, nkey, &salt, &nhash, errp) < 0) { g_free(essiv); return -1; } /* Now potentially truncate salt to match cipher key len */ essiv->cipher = qcrypto_cipher_new(ivgen->cipher, QCRYPTO_CIPHER_MODE_ECB, salt, MIN(nhash, nsalt), errp); if (!essiv->cipher) { g_free(essiv); return -1; } ivgen->private = essiv; return 0; }"} {"target": 1, "idx": 17113, "func": "static void mpeg1_encode_sequence_header(MpegEncContext *s) { unsigned int vbv_buffer_size; unsigned int fps, v; int n; UINT64 time_code; if ((s->picture_number % s->gop_size) == 0) { /* mpeg1 header repeated every gop */ put_header(s, SEQ_START_CODE); /* search closest frame rate */ { int i, dmin, d; s->frame_rate_index = 0; dmin = 0x7fffffff; for(i=1;i<9;i++) { d = abs(s->frame_rate - frame_rate_tab[i]); if (d < dmin) { dmin = d; s->frame_rate_index = i; } } } put_bits(&s->pb, 12, s->width); put_bits(&s->pb, 12, s->height); put_bits(&s->pb, 4, 1); /* 1/1 aspect ratio */ put_bits(&s->pb, 4, s->frame_rate_index); v = s->bit_rate / 400; if (v > 0x3ffff) v = 0x3ffff; put_bits(&s->pb, 18, v); put_bits(&s->pb, 1, 1); /* marker */ /* vbv buffer size: slightly greater than an I frame. We add some margin just in case */ vbv_buffer_size = (3 * s->I_frame_bits) / (2 * 8); put_bits(&s->pb, 10, (vbv_buffer_size + 16383) / 16384); put_bits(&s->pb, 1, 1); /* constrained parameter flag */ put_bits(&s->pb, 1, 0); /* no custom intra matrix */ put_bits(&s->pb, 1, 0); /* no custom non intra matrix */ put_header(s, GOP_START_CODE); put_bits(&s->pb, 1, 0); /* do drop frame */ /* time code : we must convert from the real frame rate to a fake mpeg frame rate in case of low frame rate */ fps = frame_rate_tab[s->frame_rate_index]; time_code = s->fake_picture_number * FRAME_RATE_BASE; s->gop_picture_number = s->fake_picture_number; put_bits(&s->pb, 5, (UINT32)((time_code / (fps * 3600)) % 24)); put_bits(&s->pb, 6, (UINT32)((time_code / (fps * 60)) % 60)); put_bits(&s->pb, 1, 1); put_bits(&s->pb, 6, (UINT32)((time_code / fps) % 60)); put_bits(&s->pb, 6, (UINT32)((time_code % fps) / FRAME_RATE_BASE)); put_bits(&s->pb, 1, 1); /* closed gop */ put_bits(&s->pb, 1, 0); /* broken link */ } if (s->frame_rate < (24 * FRAME_RATE_BASE) && s->picture_number > 0) { /* insert empty P pictures to slow down to the desired frame rate. 
Each fake pictures takes about 20 bytes */ fps = frame_rate_tab[s->frame_rate_index]; n = ((s->picture_number * fps) / s->frame_rate) - 1; while (s->fake_picture_number < n) { mpeg1_skip_picture(s, s->fake_picture_number - s->gop_picture_number); s->fake_picture_number++; } } s->fake_picture_number++; }"} {"target": 0, "idx": 17118, "func": "void ff_put_h264_qpel4_mc33_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_hv_qrt_4w_msa(src + stride - 2, src - (stride * 2) + sizeof(uint8_t), stride, dst, stride, 4); }"} {"target": 0, "idx": 17120, "func": "static av_cold int mpeg_mux_init(AVFormatContext *ctx) { MpegMuxContext *s = ctx->priv_data; int bitrate, i, mpa_id, mpv_id, h264_id, mps_id, ac3_id, dts_id, lpcm_id, j; AVStream *st; StreamInfo *stream; int audio_bitrate; int video_bitrate; s->packet_number = 0; s->is_vcd = (CONFIG_MPEG1VCD_MUXER && ctx->oformat == &ff_mpeg1vcd_muxer); s->is_svcd = (CONFIG_MPEG2SVCD_MUXER && ctx->oformat == &ff_mpeg2svcd_muxer); s->is_mpeg2 = ((CONFIG_MPEG2VOB_MUXER && ctx->oformat == &ff_mpeg2vob_muxer) || (CONFIG_MPEG2DVD_MUXER && ctx->oformat == &ff_mpeg2dvd_muxer) || (CONFIG_MPEG2SVCD_MUXER && ctx->oformat == &ff_mpeg2svcd_muxer)); s->is_dvd = (CONFIG_MPEG2DVD_MUXER && ctx->oformat == &ff_mpeg2dvd_muxer); if (ctx->packet_size) { if (ctx->packet_size < 20 || ctx->packet_size > (1 << 23) + 10) { av_log(ctx, AV_LOG_ERROR, \"Invalid packet size %d\\n\", ctx->packet_size); goto fail; } s->packet_size = ctx->packet_size; } else s->packet_size = 2048; if (ctx->max_delay < 0) /* Not set by the caller */ ctx->max_delay = AV_TIME_BASE*7/10; s->vcd_padding_bytes_written = 0; s->vcd_padding_bitrate_num = 0; s->audio_bound = 0; s->video_bound = 0; mpa_id = AUDIO_ID; ac3_id = AC3_ID; dts_id = DTS_ID; mpv_id = VIDEO_ID; h264_id = H264_ID; mps_id = SUB_ID; lpcm_id = LPCM_ID; for (i = 0; i < ctx->nb_streams; i++) { st = ctx->streams[i]; stream = av_mallocz(sizeof(StreamInfo)); if (!stream) goto fail; st->priv_data = stream; avpriv_set_pts_info(st, 64, 1, 90000); switch (st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if (!s->is_mpeg2 && (st->codec->codec_id == AV_CODEC_ID_AC3 || st->codec->codec_id == AV_CODEC_ID_DTS || st->codec->codec_id == AV_CODEC_ID_PCM_S16BE)) av_log(ctx, AV_LOG_WARNING, \"%s in MPEG-1 system streams is not widely supported, \" \"consider using the vob or the dvd muxer \" \"to force a MPEG-2 program stream.\\n\", avcodec_get_name(st->codec->codec_id)); if (st->codec->codec_id == AV_CODEC_ID_AC3) { stream->id = ac3_id++; } else if (st->codec->codec_id == AV_CODEC_ID_DTS) { stream->id = dts_id++; } else if (st->codec->codec_id == AV_CODEC_ID_PCM_S16BE) { stream->id = lpcm_id++; for (j = 0; j < 4; j++) { if (lpcm_freq_tab[j] == st->codec->sample_rate) break; } if (j == 4) goto fail; if (st->codec->channels > 8) return -1; stream->lpcm_header[0] = 0x0c; stream->lpcm_header[1] = (st->codec->channels - 1) | (j << 4); stream->lpcm_header[2] = 0x80; stream->lpcm_align = st->codec->channels * 2; } else { stream->id = mpa_id++; } /* This value HAS to be used for VCD (see VCD standard, p. IV-7). * Right now it is also used for everything else. 
*/ stream->max_buffer_size = 4 * 1024; s->audio_bound++; break; case AVMEDIA_TYPE_VIDEO: if (st->codec->codec_id == AV_CODEC_ID_H264) stream->id = h264_id++; else stream->id = mpv_id++; if (st->codec->rc_buffer_size) stream->max_buffer_size = 6 * 1024 + st->codec->rc_buffer_size / 8; else { av_log(ctx, AV_LOG_WARNING, \"VBV buffer size not set, using default size of 130KB\\n\" \"If you want the mpeg file to be compliant to some specification\\n\" \"Like DVD, VCD or others, make sure you set the correct buffer size\\n\"); // FIXME: this is probably too small as default stream->max_buffer_size = 230 * 1024; } if (stream->max_buffer_size > 1024 * 8191) { av_log(ctx, AV_LOG_WARNING, \"buffer size %d, too large\\n\", stream->max_buffer_size); stream->max_buffer_size = 1024 * 8191; } s->video_bound++; break; case AVMEDIA_TYPE_SUBTITLE: stream->id = mps_id++; stream->max_buffer_size = 16 * 1024; break; default: return -1; } stream->fifo = av_fifo_alloc(16); if (!stream->fifo) goto fail; } bitrate = 0; audio_bitrate = 0; video_bitrate = 0; for (i = 0; i < ctx->nb_streams; i++) { int codec_rate; st = ctx->streams[i]; stream = (StreamInfo *)st->priv_data; if (st->codec->rc_max_rate || st->codec->codec_type == AVMEDIA_TYPE_VIDEO) codec_rate = st->codec->rc_max_rate; else codec_rate = st->codec->bit_rate; if (!codec_rate) codec_rate = (1 << 21) * 8 * 50 / ctx->nb_streams; bitrate += codec_rate; if ((stream->id & 0xe0) == AUDIO_ID) audio_bitrate += codec_rate; else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) video_bitrate += codec_rate; } if (s->user_mux_rate) { s->mux_rate = (s->user_mux_rate + (8 * 50) - 1) / (8 * 50); } else { /* we increase slightly the bitrate to take into account the * headers. XXX: compute it exactly */ bitrate += bitrate / 20; bitrate += 10000; s->mux_rate = (bitrate + (8 * 50) - 1) / (8 * 50); if (s->mux_rate >= (1<<22)) { av_log(ctx, AV_LOG_WARNING, \"mux rate %d is too large\\n\", s->mux_rate); s->mux_rate = (1<<22) - 1; } } if (s->is_vcd) { int64_t overhead_rate; /* The VCD standard mandates that the mux_rate field is 3528 * (see standard p. IV-6). * The value is actually \"wrong\", i.e. if you calculate * it using the normal formula and the 75 sectors per second transfer * rate you get a different value because the real pack size is 2324, * not 2352. But the standard explicitly specifies that the mux_rate * field in the header must have this value. */ // s->mux_rate = 2352 * 75 / 50; /* = 3528 */ /* The VCD standard states that the muxed stream must be * exactly 75 packs / second (the data rate of a single speed cdrom). * Since the video bitrate (probably 1150000 bits/sec) will be below * the theoretical maximum we have to add some padding packets * to make up for the lower data rate. * (cf. VCD standard p. IV-6 ) */ /* Add the header overhead to the data rate. * 2279 data bytes per audio pack, 2294 data bytes per video pack */ overhead_rate = audio_bitrate * 2294LL * (2324 - 2279); overhead_rate += video_bitrate * 2279LL * (2324 - 2294); /* Add padding so that the full bitrate is 2324*75 bytes/sec */ s->vcd_padding_bitrate_num = (2324LL * 75 * 8 - bitrate) * 2279 * 2294 - overhead_rate; #define VCD_PADDING_BITRATE_DEN (2279 * 2294) } if (s->is_vcd || s->is_mpeg2) /* every packet */ s->pack_header_freq = 1; else /* every 2 seconds */ s->pack_header_freq = 2 * bitrate / s->packet_size / 8; /* the above seems to make pack_header_freq zero sometimes */ if (s->pack_header_freq == 0) s->pack_header_freq = 1; if (s->is_mpeg2) /* every 200 packets. Need to look at the spec. 
*/ s->system_header_freq = s->pack_header_freq * 40; else if (s->is_vcd) /* the standard mandates that there are only two system headers * in the whole file: one in the first packet of each stream. * (see standard p. IV-7 and IV-8) */ s->system_header_freq = 0x7fffffff; else s->system_header_freq = s->pack_header_freq * 5; for (i = 0; i < ctx->nb_streams; i++) { stream = ctx->streams[i]->priv_data; stream->packet_number = 0; } s->system_header_size = get_system_header_size(ctx); s->last_scr = AV_NOPTS_VALUE; return 0; fail: for (i = 0; i < ctx->nb_streams; i++) av_freep(&ctx->streams[i]->priv_data); return AVERROR(ENOMEM); }"} {"target": 0, "idx": 17124, "func": "static void dsputil_init_mmx2(DSPContext *c, AVCodecContext *avctx, int mm_flags) { const int bit_depth = avctx->bits_per_raw_sample; const int high_bit_depth = bit_depth > 8; c->prefetch = prefetch_mmx2; if (!high_bit_depth) { c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2; c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2; c->avg_pixels_tab[0][0] = avg_pixels16_mmx2; c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2; c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2; c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2; c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2; c->avg_pixels_tab[1][0] = avg_pixels8_mmx2; c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2; c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2; } if (!(avctx->flags & CODEC_FLAG_BITEXACT)) { if (!high_bit_depth) { c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2; c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2; c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2; c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2; c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2; c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2; } if (CONFIG_VP3_DECODER && HAVE_YASM) { c->vp3_v_loop_filter = ff_vp3_v_loop_filter_mmx2; c->vp3_h_loop_filter = ff_vp3_h_loop_filter_mmx2; } } if (CONFIG_VP3_DECODER && HAVE_YASM) c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2; if (CONFIG_VP3_DECODER && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) { c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2; c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2; } if (CONFIG_H264QPEL) { SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2, ); SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2, ); SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2, ); SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2, ); SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2, ); SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2, ); if (!high_bit_depth) { SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2, ); SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2, ); SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2, ); SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2, ); SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2, ); SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2, ); } else if (bit_depth == 10) { #if HAVE_YASM #if !ARCH_X86_64 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_); SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_); SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_mmxext, ff_); SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_mmxext, ff_); #endif SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 10_mmxext, ff_); SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 10_mmxext, ff_); #endif } SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2, ); SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2, ); SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2, ); SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2, ); } #if HAVE_YASM if (!high_bit_depth && CONFIG_H264CHROMA) { c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_mmx2_rnd; 
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmx2; c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmx2; c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmx2; } if (bit_depth == 10 && CONFIG_H264CHROMA) { c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmxext; c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmxext; c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmxext; c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmxext; } c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2; c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2; c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2; if (avctx->flags & CODEC_FLAG_BITEXACT) { c->apply_window_int16 = ff_apply_window_int16_mmxext_ba; } else { c->apply_window_int16 = ff_apply_window_int16_mmxext; } #endif }"} {"target": 0, "idx": 17128, "func": "static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx) { VAAPIEncodeContext *ctx = avctx->priv_data; VAEncSequenceParameterBufferH264 *vseq = ctx->codec_sequence_params; VAEncPictureParameterBufferH264 *vpic = ctx->codec_picture_params; VAAPIEncodeH264Context *priv = ctx->priv_data; VAAPIEncodeH264MiscSequenceParams *mseq = &priv->misc_sequence_params; int i; { vseq->seq_parameter_set_id = 0; vseq->level_idc = avctx->level; vseq->max_num_ref_frames = 2; vseq->picture_width_in_mbs = priv->mb_width; vseq->picture_height_in_mbs = priv->mb_height; vseq->seq_fields.bits.chroma_format_idc = 1; vseq->seq_fields.bits.frame_mbs_only_flag = 1; vseq->seq_fields.bits.direct_8x8_inference_flag = 1; vseq->seq_fields.bits.log2_max_frame_num_minus4 = 4; vseq->seq_fields.bits.pic_order_cnt_type = 0; if (ctx->input_width != ctx->aligned_width || ctx->input_height != ctx->aligned_height) { vseq->frame_cropping_flag = 1; vseq->frame_crop_left_offset = 0; vseq->frame_crop_right_offset = (ctx->aligned_width - ctx->input_width) / 2; vseq->frame_crop_top_offset = 0; vseq->frame_crop_bottom_offset = (ctx->aligned_height - ctx->input_height) / 2; } else { vseq->frame_cropping_flag = 0; } vseq->vui_parameters_present_flag = 1; if (avctx->sample_aspect_ratio.num != 0) { vseq->vui_fields.bits.aspect_ratio_info_present_flag = 1; // There is a large enum of these which we could support // individually rather than using the generic X/Y form? if (avctx->sample_aspect_ratio.num == avctx->sample_aspect_ratio.den) { vseq->aspect_ratio_idc = 1; } else { vseq->aspect_ratio_idc = 255; // Extended SAR. vseq->sar_width = avctx->sample_aspect_ratio.num; vseq->sar_height = avctx->sample_aspect_ratio.den; } } if (avctx->color_primaries != AVCOL_PRI_UNSPECIFIED || avctx->color_trc != AVCOL_TRC_UNSPECIFIED || avctx->colorspace != AVCOL_SPC_UNSPECIFIED) { mseq->video_signal_type_present_flag = 1; mseq->video_format = 5; // Unspecified. mseq->video_full_range_flag = 0; mseq->colour_description_present_flag = 1; // These enums are derived from the standard and hence // we can just use the values directly. 
mseq->colour_primaries = avctx->color_primaries; mseq->transfer_characteristics = avctx->color_trc; mseq->matrix_coefficients = avctx->colorspace; } vseq->bits_per_second = avctx->bit_rate; vseq->vui_fields.bits.timing_info_present_flag = 1; if (avctx->framerate.num > 0 && avctx->framerate.den > 0) { vseq->num_units_in_tick = avctx->framerate.num; vseq->time_scale = 2 * avctx->framerate.den; mseq->fixed_frame_rate_flag = 1; } else { vseq->num_units_in_tick = avctx->time_base.num; vseq->time_scale = 2 * avctx->time_base.den; mseq->fixed_frame_rate_flag = 0; } if (ctx->va_rc_mode == VA_RC_CBR) { priv->send_timing_sei = 1; mseq->nal_hrd_parameters_present_flag = 1; mseq->cpb_cnt_minus1 = 0; // Try to scale these to a sensible range so that the // golomb encode of the value is not overlong. mseq->bit_rate_scale = av_clip_uintp2(av_log2(avctx->bit_rate) - 15 - 6, 4); mseq->bit_rate_value_minus1[0] = (avctx->bit_rate >> mseq->bit_rate_scale + 6) - 1; mseq->cpb_size_scale = av_clip_uintp2(av_log2(priv->hrd_params.hrd.buffer_size) - 15 - 4, 4); mseq->cpb_size_value_minus1[0] = (priv->hrd_params.hrd.buffer_size >> mseq->cpb_size_scale + 4) - 1; // CBR mode isn't actually available here, despite naming. mseq->cbr_flag[0] = 0; mseq->initial_cpb_removal_delay_length_minus1 = 23; mseq->cpb_removal_delay_length_minus1 = 23; mseq->dpb_output_delay_length_minus1 = 7; mseq->time_offset_length = 0; // This calculation can easily overflow 32 bits. mseq->initial_cpb_removal_delay = 90000 * (uint64_t)priv->hrd_params.hrd.initial_buffer_fullness / priv->hrd_params.hrd.buffer_size; mseq->initial_cpb_removal_delay_offset = 0; } else { priv->send_timing_sei = 0; mseq->nal_hrd_parameters_present_flag = 0; } vseq->intra_period = ctx->p_per_i * (ctx->b_per_p + 1); vseq->intra_idr_period = vseq->intra_period; vseq->ip_period = ctx->b_per_p + 1; } { vpic->CurrPic.picture_id = VA_INVALID_ID; vpic->CurrPic.flags = VA_PICTURE_H264_INVALID; for (i = 0; i < FF_ARRAY_ELEMS(vpic->ReferenceFrames); i++) { vpic->ReferenceFrames[i].picture_id = VA_INVALID_ID; vpic->ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID; } vpic->coded_buf = VA_INVALID_ID; vpic->pic_parameter_set_id = 0; vpic->seq_parameter_set_id = 0; vpic->num_ref_idx_l0_active_minus1 = 0; vpic->num_ref_idx_l1_active_minus1 = 0; vpic->pic_fields.bits.entropy_coding_mode_flag = ((avctx->profile & 0xff) != 66); vpic->pic_fields.bits.weighted_pred_flag = 0; vpic->pic_fields.bits.weighted_bipred_idc = 0; vpic->pic_fields.bits.transform_8x8_mode_flag = ((avctx->profile & 0xff) >= 100); vpic->pic_init_qp = priv->fixed_qp_idr; } { mseq->profile_idc = avctx->profile & 0xff; if (avctx->profile & FF_PROFILE_H264_CONSTRAINED) mseq->constraint_set1_flag = 1; if (avctx->profile & FF_PROFILE_H264_INTRA) mseq->constraint_set3_flag = 1; } return 0; }"} {"target": 0, "idx": 17133, "func": "void if_start(Slirp *slirp) { uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); bool from_batchq, next_from_batchq; struct mbuf *ifm, *ifm_next, *ifqt; DEBUG_CALL(\"if_start\"); if (slirp->if_start_busy) { return; } slirp->if_start_busy = true; if (slirp->if_fastq.ifq_next != &slirp->if_fastq) { ifm_next = slirp->if_fastq.ifq_next; next_from_batchq = false; } else if (slirp->next_m != &slirp->if_batchq) { /* Nothing on fastq, pick up from batchq via next_m */ ifm_next = slirp->next_m; next_from_batchq = true; } else { ifm_next = NULL; } while (ifm_next) { ifm = ifm_next; from_batchq = next_from_batchq; ifm_next = ifm->ifq_next; if (ifm_next == &slirp->if_fastq) { /* No more packets in fastq, 
switch to batchq */ ifm_next = slirp->next_m; next_from_batchq = true; } if (ifm_next == &slirp->if_batchq) { /* end of batchq */ ifm_next = NULL; } /* Try to send packet unless it already expired */ if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) { /* Packet is delayed due to pending ARP resolution */ continue; } if (ifm == slirp->next_m) { /* Set which packet to send on next iteration */ slirp->next_m = ifm->ifq_next; } /* Remove it from the queue */ ifqt = ifm->ifq_prev; remque(ifm); /* If there are more packets for this session, re-queue them */ if (ifm->ifs_next != ifm) { struct mbuf *next = ifm->ifs_next; insque(next, ifqt); ifs_remque(ifm); if (!from_batchq) { /* Next packet in fastq is from the same session */ ifm_next = next; next_from_batchq = false; } else if (slirp->next_m == &slirp->if_batchq) { /* Set next_m and ifm_next if the session packet is now the * only one on batchq */ slirp->next_m = ifm_next = next; } } /* Update so_queued */ if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) { /* If there's no more queued, reset nqueued */ ifm->ifq_so->so_nqueued = 0; } m_free(ifm); } slirp->if_start_busy = false; }"} {"target": 0, "idx": 17141, "func": "static void handle_ti(ESPState *s) { uint32_t dmalen, minlen; if (s->dma && !s->dma_enabled) { s->dma_cb = handle_ti; return; } dmalen = s->rregs[ESP_TCLO]; dmalen |= s->rregs[ESP_TCMID] << 8; dmalen |= s->rregs[ESP_TCHI] << 16; if (dmalen==0) { dmalen=0x10000; } s->dma_counter = dmalen; if (s->do_cmd) minlen = (dmalen < 32) ? dmalen : 32; else if (s->ti_size < 0) minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size; else minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size; trace_esp_handle_ti(minlen); if (s->dma) { s->dma_left = minlen; s->rregs[ESP_RSTAT] &= ~STAT_TC; esp_do_dma(s); } if (s->do_cmd) { trace_esp_handle_ti_cmd(s->cmdlen); s->ti_size = 0; s->cmdlen = 0; s->do_cmd = 0; do_cmd(s, s->cmdbuf); } }"} {"target": 0, "idx": 17152, "func": "static void lsi53c895a_register_devices(void) { type_register_static(&lsi_info); type_register_static_alias(&lsi_info, \"lsi\"); }"} {"target": 1, "idx": 17172, "func": "static void receive_from_chr_layer(SCLPConsole *scon, const uint8_t *buf, int size) { /* read data must fit into current buffer */ assert(size <= SIZE_BUFFER_VT220 - scon->iov_data_len); /* put byte-stream from character layer into buffer */ memcpy(&scon->iov[scon->iov_bs], buf, size); scon->iov_data_len += size; scon->iov_sclp_rest += size; scon->iov_bs += size; scon->event.event_pending = true; }"} {"target": 1, "idx": 17199, "func": "static void usbredir_do_attach(void *opaque) { USBRedirDevice *dev = opaque; /* In order to work properly with XHCI controllers we need these caps */ if ((dev->dev.port->speedmask & USB_SPEED_MASK_SUPER) && !( usb_redir_cap_ep_info_max_packet_size) && usb_redir_cap_64bits_ids))) { ERROR(\"usb-redir-host lacks capabilities needed for use with XHCI\\n\"); usbredir_reject_device(dev); return; } if (usb_device_attach(&dev->dev) != 0) { WARNING(\"rejecting device due to speed mismatch\\n\"); usbredir_reject_device(dev); } }"} {"target": 0, "idx": 17203, "func": "static av_cold void set_bandwidth(AC3EncodeContext *s, int cutoff) { int ch, bw_code; if (cutoff) { /* calculate bandwidth based on user-specified cutoff frequency */ int fbw_coeffs; cutoff = av_clip(cutoff, 1, s->sample_rate >> 1); fbw_coeffs = cutoff * 2 * AC3_MAX_COEFS / s->sample_rate; bw_code = av_clip((fbw_coeffs - 73) / 3, 0, 60); } else { /* use default bandwidth setting */ /* XXX: should compute the bandwidth 
according to the frame size, so that we avoid annoying high frequency artifacts */ bw_code = 50; } /* set number of coefficients for each channel */ for (ch = 0; ch < s->fbw_channels; ch++) { s->bandwidth_code[ch] = bw_code; s->nb_coefs[ch] = bw_code * 3 + 73; } if (s->lfe_on) s->nb_coefs[s->lfe_channel] = 7; /* LFE channel always has 7 coefs */ }"} {"target": 0, "idx": 17206, "func": "static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused) { #if COMPILE_TEMPLATE_MMX __asm__ volatile( \"movq \"MANGLE(bm01010101)\", %%mm4 \\n\\t\" \"mov %0, %%\"REG_a\" \\n\\t\" \"1: \\n\\t\" \"movq (%1, %%\"REG_a\",4), %%mm0 \\n\\t\" \"movq 8(%1, %%\"REG_a\",4), %%mm1 \\n\\t\" \"psrlw $8, %%mm0 \\n\\t\" \"psrlw $8, %%mm1 \\n\\t\" \"packuswb %%mm1, %%mm0 \\n\\t\" \"movq %%mm0, %%mm1 \\n\\t\" \"psrlw $8, %%mm0 \\n\\t\" \"pand %%mm4, %%mm1 \\n\\t\" \"packuswb %%mm0, %%mm0 \\n\\t\" \"packuswb %%mm1, %%mm1 \\n\\t\" \"movd %%mm0, (%3, %%\"REG_a\") \\n\\t\" \"movd %%mm1, (%2, %%\"REG_a\") \\n\\t\" \"add $4, %%\"REG_a\" \\n\\t\" \" js 1b \\n\\t\" : : \"g\" ((x86_reg)-width), \"r\" (src1+width*4), \"r\" (dstU+width), \"r\" (dstV+width) : \"%\"REG_a ); #else int i; for (i=0; iin_num; i++) { size_t size = MIN(len - offset, elem->in_sg[i].iov_len); cpu_physical_memory_unmap(elem->in_sg[i].iov_base, elem->in_sg[i].iov_len, 1, size); offset += size; } for (i = 0; i < elem->out_num; i++) cpu_physical_memory_unmap(elem->out_sg[i].iov_base, elem->out_sg[i].iov_len, 0, elem->out_sg[i].iov_len); }"} {"target": 0, "idx": 17246, "func": "static int vmdk_reopen_prepare(BDRVReopenState *state, BlockReopenQueue *queue, Error **errp) { BDRVVmdkState *s; int ret = -1; int i; VmdkExtent *e; assert(state != NULL); assert(state->bs != NULL); if (queue == NULL) { error_setg(errp, \"No reopen queue for VMDK extents\"); goto exit; } s = state->bs->opaque; assert(s != NULL); for (i = 0; i < s->num_extents; i++) { e = &s->extents[i]; if (e->file != state->bs->file) { bdrv_reopen_queue(queue, e->file, state->flags); } } ret = 0; exit: return ret; }"} {"target": 0, "idx": 17258, "func": "static inline void tcg_out_goto_label(TCGContext *s, int label_index) { TCGLabel *l = &s->labels[label_index]; if (!l->has_value) { tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, label_index, 0); tcg_out_goto_noaddr(s); } else { tcg_out_goto(s, l->u.value_ptr); } }"} {"target": 0, "idx": 17266, "func": "static void ac97_map (PCIDevice *pci_dev, int region_num, uint32_t addr, uint32_t size, int type) { PCIAC97LinkState *d = (PCIAC97LinkState *) pci_dev; AC97LinkState *s = &d->ac97; if (!region_num) { s->base[0] = addr; register_ioport_read (addr, 256 * 1, 1, nam_readb, d); register_ioport_read (addr, 256 * 2, 2, nam_readw, d); register_ioport_read (addr, 256 * 4, 4, nam_readl, d); register_ioport_write (addr, 256 * 1, 1, nam_writeb, d); register_ioport_write (addr, 256 * 2, 2, nam_writew, d); register_ioport_write (addr, 256 * 4, 4, nam_writel, d); } else { s->base[1] = addr; register_ioport_read (addr, 64 * 1, 1, nabm_readb, d); register_ioport_read (addr, 64 * 2, 2, nabm_readw, d); register_ioport_read (addr, 64 * 4, 4, nabm_readl, d); register_ioport_write (addr, 64 * 1, 1, nabm_writeb, d); register_ioport_write (addr, 64 * 2, 2, nabm_writew, d); register_ioport_write (addr, 64 * 4, 4, nabm_writel, d); } }"} {"target": 0, "idx": 17267, "func": "void vmstate_unregister(DeviceState *dev, const VMStateDescription *vmsd, void *opaque) { SaveStateEntry *se, *new_se; 
QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) { if (se->vmsd == vmsd && se->opaque == opaque) { QTAILQ_REMOVE(&savevm_state.handlers, se, entry); if (se->compat) { g_free(se->compat); } g_free(se); } } }"} {"target": 0, "idx": 17274, "func": "static int xenfb_send_motion(struct XenInput *xenfb, int rel_x, int rel_y, int rel_z) { union xenkbd_in_event event; memset(&event, 0, XENKBD_IN_EVENT_SIZE); event.type = XENKBD_TYPE_MOTION; event.motion.rel_x = rel_x; event.motion.rel_y = rel_y; #if __XEN_LATEST_INTERFACE_VERSION__ >= 0x00030207 event.motion.rel_z = rel_z; #endif return xenfb_kbd_event(xenfb, &event); }"} {"target": 1, "idx": 17280, "func": "static int read_header(AVFormatContext *s, AVFormatParameters *ap) { JVDemuxContext *jv = s->priv_data; AVIOContext *pb = s->pb; AVStream *vst, *ast; int64_t audio_pts = 0; int64_t offset; int i; avio_skip(pb, 80); ast = av_new_stream(s, 0); vst = av_new_stream(s, 1); if (!ast || !vst) return AVERROR(ENOMEM); vst->codec->codec_type = CODEC_TYPE_VIDEO; vst->codec->codec_id = CODEC_ID_JV; vst->codec->codec_tag = 0; /* no fourcc */ vst->codec->width = avio_rl16(pb); vst->codec->height = avio_rl16(pb); vst->nb_frames = ast->nb_index_entries = avio_rl16(pb); av_set_pts_info(vst, 64, avio_rl16(pb), 1000); avio_skip(pb, 4); ast->codec->codec_type = CODEC_TYPE_AUDIO; ast->codec->codec_id = CODEC_ID_PCM_U8; ast->codec->codec_tag = 0; /* no fourcc */ ast->codec->sample_rate = avio_rl16(pb); ast->codec->channels = 1; av_set_pts_info(ast, 64, 1, ast->codec->sample_rate); avio_skip(pb, 10); ast->index_entries = av_malloc(ast->nb_index_entries * sizeof(*ast->index_entries)); if (!ast->index_entries) return AVERROR(ENOMEM); jv->frames = av_malloc(ast->nb_index_entries * sizeof(JVFrame)); if (!jv->frames) return AVERROR(ENOMEM); offset = 0x68 + ast->nb_index_entries * 16; for(i = 0; i < ast->nb_index_entries; i++) { AVIndexEntry *e = ast->index_entries + i; JVFrame *jvf = jv->frames + i; /* total frame size including audio, video, palette data and padding */ e->size = avio_rl32(pb); e->timestamp = i; e->pos = offset; offset += e->size; jvf->audio_size = avio_rl32(pb); jvf->video_size = avio_rl32(pb); jvf->palette_size = avio_r8(pb) ? 768 : 0; if (avio_r8(pb)) av_log(s, AV_LOG_WARNING, \"unsupported audio codec\\n\"); jvf->video_type = avio_r8(pb); avio_skip(pb, 1); e->timestamp = jvf->audio_size ? audio_pts : AV_NOPTS_VALUE; audio_pts += jvf->audio_size; e->flags = jvf->video_type != 1 ? 
AVINDEX_KEYFRAME : 0; } jv->state = JV_AUDIO; return 0; }"} {"target": 0, "idx": 17288, "func": "static int mov_write_colr_tag(AVIOContext *pb, MOVTrack *track) { // Ref (MOV): https://developer.apple.com/library/mac/technotes/tn2162/_index.html#//apple_ref/doc/uid/DTS40013070-CH1-TNTAG9 // Ref (MP4): ISO/IEC 14496-12:2012 if (track->enc->color_primaries == AVCOL_PRI_UNSPECIFIED && track->enc->color_trc == AVCOL_TRC_UNSPECIFIED && track->enc->colorspace == AVCOL_SPC_UNSPECIFIED) { if ((track->enc->width >= 1920 && track->enc->height >= 1080) || (track->enc->width == 1280 && track->enc->height == 720)) { av_log(NULL, AV_LOG_WARNING, \"color primaries unspecified, assuming bt709\\n\"); track->enc->color_primaries = AVCOL_PRI_BT709; } else if (track->enc->width == 720 && track->height == 576) { av_log(NULL, AV_LOG_WARNING, \"color primaries unspecified, assuming bt470bg\\n\"); track->enc->color_primaries = AVCOL_PRI_BT470BG; } else if (track->enc->width == 720 && (track->height == 486 || track->height == 480)) { av_log(NULL, AV_LOG_WARNING, \"color primaries unspecified, assuming smpte170\\n\"); track->enc->color_primaries = AVCOL_PRI_SMPTE170M; } else { av_log(NULL, AV_LOG_WARNING, \"color primaries unspecified, unable to assume anything\\n\"); } switch (track->enc->color_primaries) { case AVCOL_PRI_BT709: track->enc->color_trc = AVCOL_TRC_BT709; track->enc->colorspace = AVCOL_SPC_BT709; break; case AVCOL_PRI_SMPTE170M: case AVCOL_PRI_BT470BG: track->enc->color_trc = AVCOL_TRC_BT709; track->enc->colorspace = AVCOL_SPC_SMPTE170M; break; } } /* We should only ever be called by MOV or MP4. */ av_assert0(track->mode == MODE_MOV || track->mode == MODE_MP4); avio_wb32(pb, 18 + (track->mode == MODE_MP4)); ffio_wfourcc(pb, \"colr\"); if (track->mode == MODE_MP4) ffio_wfourcc(pb, \"nclx\"); else ffio_wfourcc(pb, \"nclc\"); switch (track->enc->color_primaries) { case AVCOL_PRI_BT709: avio_wb16(pb, 1); break; case AVCOL_PRI_SMPTE170M: case AVCOL_PRI_SMPTE240M: avio_wb16(pb, 6); break; case AVCOL_PRI_BT470BG: avio_wb16(pb, 5); break; default: avio_wb16(pb, 2); } switch (track->enc->color_trc) { case AVCOL_TRC_BT709: avio_wb16(pb, 1); break; case AVCOL_TRC_SMPTE170M: avio_wb16(pb, 1); break; // remapped case AVCOL_TRC_SMPTE240M: avio_wb16(pb, 7); break; default: avio_wb16(pb, 2); } switch (track->enc->colorspace) { case AVCOL_TRC_BT709: avio_wb16(pb, 1); break; case AVCOL_SPC_BT470BG: case AVCOL_PRI_SMPTE170M: avio_wb16(pb, 6); break; case AVCOL_PRI_SMPTE240M: avio_wb16(pb, 7); break; default: avio_wb16(pb, 2); } if (track->mode == MODE_MP4) { int full_range = track->enc->color_range == AVCOL_RANGE_JPEG; avio_w8(pb, full_range << 7); return 19; } else { return 18; } }"} {"target": 0, "idx": 17301, "func": "static int slirp_socket_load(QEMUFile *f, struct socket *so) { if (tcp_attach(so) < 0) return -ENOMEM; so->so_urgc = qemu_get_be32(f); so->so_ffamily = qemu_get_be16(f); switch (so->so_ffamily) { case AF_INET: so->so_faddr.s_addr = qemu_get_be32(f); so->so_fport = qemu_get_be16(f); break; default: error_report( \"so_ffamily unknown, unable to restore so_faddr and so_lport\\n\"); } so->so_lfamily = qemu_get_be16(f); switch (so->so_lfamily) { case AF_INET: so->so_laddr.s_addr = qemu_get_be32(f); so->so_lport = qemu_get_be16(f); break; default: error_report( \"so_ffamily unknown, unable to restore so_laddr and so_lport\\n\"); } so->so_iptos = qemu_get_byte(f); so->so_emu = qemu_get_byte(f); so->so_type = qemu_get_byte(f); so->so_state = qemu_get_be32(f); if (slirp_sbuf_load(f, &so->so_rcv) < 0) return 
-ENOMEM; if (slirp_sbuf_load(f, &so->so_snd) < 0) return -ENOMEM; slirp_tcp_load(f, so->so_tcpcb); return 0; }"} {"target": 0, "idx": 17307, "func": "static int qdev_print_devinfo(DeviceInfo *info, char *dest, int len) { int pos = 0; pos += snprintf(dest+pos, len-pos, \"name \\\"%s\\\", bus %s\", info->name, info->bus_info->name); if (info->alias) pos += snprintf(dest+pos, len-pos, \", alias \\\"%s\\\"\", info->alias); if (info->desc) pos += snprintf(dest+pos, len-pos, \", desc \\\"%s\\\"\", info->desc); if (info->no_user) pos += snprintf(dest+pos, len-pos, \", no-user\"); return pos; }"} {"target": 0, "idx": 17308, "func": "static int get_last_needed_nal(H264Context *h, const uint8_t *buf, int buf_size) { int next_avc = h->is_avc ? 0 : buf_size; int nal_index = 0; int buf_index = 0; int nals_needed = 0; while(1) { int nalsize = 0; int dst_length, bit_length, consumed; const uint8_t *ptr; if (buf_index >= next_avc) { nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index); if (nalsize < 0) break; next_avc = buf_index + nalsize; } else { buf_index = find_start_code(buf, buf_size, buf_index, next_avc); if (buf_index >= buf_size) break; } ptr = ff_h264_decode_nal(h, buf + buf_index, &dst_length, &consumed, next_avc - buf_index); if (ptr == NULL || dst_length < 0) return AVERROR_INVALIDDATA; buf_index += consumed; bit_length = get_bit_length(h, buf, ptr, dst_length, buf_index, next_avc); nal_index++; /* packets can sometimes contain multiple PPS/SPS, * e.g. two PAFF field pictures in one packet, or a demuxer * which splits NALs strangely if so, when frame threading we * can't start the next thread until we've read all of them */ switch (h->nal_unit_type) { case NAL_SPS: case NAL_PPS: nals_needed = nal_index; break; case NAL_DPA: case NAL_IDR_SLICE: case NAL_SLICE: init_get_bits(&h->gb, ptr, bit_length); if (!get_ue_golomb(&h->gb)) nals_needed = nal_index; } } return nals_needed; }"} {"target": 0, "idx": 17314, "func": "e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, const E1000E_RxRing *rxr, const E1000E_RSSInfo *rss_info) { PCIDevice *d = core->owner; dma_addr_t base; uint8_t desc[E1000_MAX_RX_DESC_LEN]; size_t desc_size; size_t desc_offset = 0; size_t iov_ofs = 0; struct iovec *iov = net_rx_pkt_get_iovec(pkt); size_t size = net_rx_pkt_get_total_len(pkt); size_t total_size = size + e1000x_fcs_len(core->mac); const E1000E_RingInfo *rxi; size_t ps_hdr_len = 0; bool do_ps = e1000e_do_ps(core, pkt, &ps_hdr_len); rxi = rxr->i; do { hwaddr ba[MAX_PS_BUFFERS]; e1000e_ba_state bastate = { { 0 } }; bool is_last = false; bool is_first = true; desc_size = total_size - desc_offset; if (desc_size > core->rx_desc_buf_size) { desc_size = core->rx_desc_buf_size; } base = e1000e_ring_head_descr(core, rxi); pci_dma_read(d, base, &desc, core->rx_desc_len); trace_e1000e_rx_descr(rxi->idx, base, core->rx_desc_len); e1000e_read_rx_descr(core, desc, &ba); if (ba[0]) { if (desc_offset < size) { static const uint32_t fcs_pad; size_t iov_copy; size_t copy_size = size - desc_offset; if (copy_size > core->rx_desc_buf_size) { copy_size = core->rx_desc_buf_size; } /* For PS mode copy the packet header first */ if (do_ps) { if (is_first) { size_t ps_hdr_copied = 0; do { iov_copy = MIN(ps_hdr_len - ps_hdr_copied, iov->iov_len - iov_ofs); e1000e_write_hdr_to_rx_buffers(core, &ba, &bastate, iov->iov_base, iov_copy); copy_size -= iov_copy; ps_hdr_copied += iov_copy; iov_ofs += iov_copy; if (iov_ofs == iov->iov_len) { iov++; iov_ofs = 0; } } while (ps_hdr_copied < ps_hdr_len); is_first = false; } else { /* 
Leave buffer 0 of each descriptor except first */ /* empty as per spec 7.1.5.1 */ e1000e_write_hdr_to_rx_buffers(core, &ba, &bastate, NULL, 0); } } /* Copy packet payload */ while (copy_size) { iov_copy = MIN(copy_size, iov->iov_len - iov_ofs); e1000e_write_to_rx_buffers(core, &ba, &bastate, iov->iov_base + iov_ofs, iov_copy); copy_size -= iov_copy; iov_ofs += iov_copy; if (iov_ofs == iov->iov_len) { iov++; iov_ofs = 0; } } if (desc_offset + desc_size >= total_size) { /* Simulate FCS checksum presence in the last descriptor */ e1000e_write_to_rx_buffers(core, &ba, &bastate, (const char *) &fcs_pad, e1000x_fcs_len(core->mac)); } } desc_offset += desc_size; if (desc_offset >= total_size) { is_last = true; } } else { /* as per intel docs; skip descriptors with null buf addr */ trace_e1000e_rx_null_descriptor(); } e1000e_write_rx_descr(core, desc, is_last ? core->rx_pkt : NULL, rss_info, do_ps ? ps_hdr_len : 0, &bastate.written); pci_dma_write(d, base, &desc, core->rx_desc_len); e1000e_ring_advance(core, rxi, core->rx_desc_len / E1000_MIN_RX_DESC_LEN); } while (desc_offset < total_size); e1000e_update_rx_stats(core, size, total_size); }"} {"target": 0, "idx": 17325, "func": "static int cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h) { int sx = 0, sy = 0; int dx = 0, dy = 0; int depth = 0; int notify = 0; /* make sure to only copy if it's a plain copy ROP */ if (*s->cirrus_rop == cirrus_bitblt_rop_fwd_src || *s->cirrus_rop == cirrus_bitblt_rop_bkwd_src) { int width, height; depth = s->vga.get_bpp(&s->vga) / 8; if (!depth) { return 0; } s->vga.get_resolution(&s->vga, &width, &height); /* extra x, y */ sx = (src % ABS(s->cirrus_blt_srcpitch)) / depth; sy = (src / ABS(s->cirrus_blt_srcpitch)); dx = (dst % ABS(s->cirrus_blt_dstpitch)) / depth; dy = (dst / ABS(s->cirrus_blt_dstpitch)); /* normalize width */ w /= depth; /* if we're doing a backward copy, we have to adjust our x/y to be the upper left corner (instead of the lower right corner) */ if (s->cirrus_blt_dstpitch < 0) { sx -= (s->cirrus_blt_width / depth) - 1; dx -= (s->cirrus_blt_width / depth) - 1; sy -= s->cirrus_blt_height - 1; dy -= s->cirrus_blt_height - 1; } /* are we in the visible portion of memory? 
*/ if (sx >= 0 && sy >= 0 && dx >= 0 && dy >= 0 && (sx + w) <= width && (sy + h) <= height && (dx + w) <= width && (dy + h) <= height) { notify = 1; } } (*s->cirrus_rop) (s, s->vga.vram_ptr + s->cirrus_blt_dstaddr, s->vga.vram_ptr + s->cirrus_blt_srcaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch, s->cirrus_blt_width, s->cirrus_blt_height); if (notify) { dpy_gfx_update(s->vga.con, dx, dy, s->cirrus_blt_width / depth, s->cirrus_blt_height); } /* we don't have to notify the display that this portion has changed since qemu_console_copy implies this */ cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); return 1; }"} {"target": 1, "idx": 17328, "func": "static int write_l1_entry(BlockDriverState *bs, int l1_index) { BDRVQcowState *s = bs->opaque; uint64_t buf[L1_ENTRIES_PER_SECTOR]; int l1_start_index; int i, ret; l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1); for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) { buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]); } BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); ret = bdrv_pwrite(bs->file, s->l1_table_offset + 8 * l1_start_index, buf, sizeof(buf)); if (ret < 0) { return ret; } return 0; }"} {"target": 1, "idx": 17329, "func": "int vhost_dev_init(struct vhost_dev *hdev, void *opaque, VhostBackendType backend_type) { uint64_t features; int i, r; if (vhost_set_backend_type(hdev, backend_type) < 0) { close((uintptr_t)opaque); return -1; } if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) { close((uintptr_t)opaque); return -errno; } r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL); if (r < 0) { goto fail; } r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features); if (r < 0) { goto fail; } for (i = 0; i < hdev->nvqs; ++i) { r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i); if (r < 0) { goto fail_vq; } } hdev->features = features; hdev->memory_listener = (MemoryListener) { .begin = vhost_begin, .commit = vhost_commit, .region_add = vhost_region_add, .region_del = vhost_region_del, .region_nop = vhost_region_nop, .log_start = vhost_log_start, .log_stop = vhost_log_stop, .log_sync = vhost_log_sync, .log_global_start = vhost_log_global_start, .log_global_stop = vhost_log_global_stop, .eventfd_add = vhost_eventfd_add, .eventfd_del = vhost_eventfd_del, .priority = 10 }; hdev->migration_blocker = NULL; if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) { error_setg(&hdev->migration_blocker, \"Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.\"); migrate_add_blocker(hdev->migration_blocker); } hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions)); hdev->n_mem_sections = 0; hdev->mem_sections = NULL; hdev->log = NULL; hdev->log_size = 0; hdev->log_enabled = false; hdev->started = false; hdev->memory_changed = false; memory_listener_register(&hdev->memory_listener, &address_space_memory); return 0; fail_vq: while (--i >= 0) { vhost_virtqueue_cleanup(hdev->vqs + i); } fail: r = -errno; hdev->vhost_ops->vhost_backend_cleanup(hdev); QLIST_REMOVE(hdev, entry); return r; }"} {"target": 1, "idx": 17337, "func": "int bdrv_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors) { BlockDriver *drv = bs->drv; if (!drv) return -ENOMEDIUM; if (drv->bdrv_pread) { int ret, len; len = nb_sectors * 512; ret = drv->bdrv_pread(bs, sector_num * 512, buf, len); if (ret < 0) return ret; else if (ret != len) return -EINVAL; else { bs->rd_bytes += (unsigned) len; bs->rd_ops ++; return 0; } } else { return 
drv->bdrv_read(bs, sector_num, buf, nb_sectors); } }"} {"target": 1, "idx": 17341, "func": "static int make_cdt24_entry(int p1, int p2, int16_t *cdt) { int r, b; b = cdt[p2]; r = cdt[p1]<<16; return (b+r) << 1; }"} {"target": 1, "idx": 17350, "func": "static AVFrame *do_psnr(AVFilterContext *ctx, AVFrame *main, const AVFrame *ref) { PSNRContext *s = ctx->priv; double comp_mse[4], mse = 0; int j, c; AVDictionary **metadata = avpriv_frame_get_metadatap(main); s->compute_mse(s, (const uint8_t **)main->data, main->linesize, (const uint8_t **)ref->data, ref->linesize, main->width, main->height, comp_mse); for (j = 0; j < s->nb_components; j++) mse += comp_mse[j] * s->planeweight[j]; s->min_mse = FFMIN(s->min_mse, mse); s->max_mse = FFMAX(s->max_mse, mse); s->mse += mse; for (j = 0; j < s->nb_components; j++) s->mse_comp[j] += comp_mse[j]; s->nb_frames++; for (j = 0; j < s->nb_components; j++) { c = s->is_rgb ? s->rgba_map[j] : j; set_meta(metadata, \"lavfi.psnr.mse.\", s->comps[j], comp_mse[c]); set_meta(metadata, \"lavfi.psnr.psnr.\", s->comps[j], get_psnr(comp_mse[c], 1, s->max[c])); } set_meta(metadata, \"lavfi.psnr.mse_avg\", 0, mse); set_meta(metadata, \"lavfi.psnr.psnr_avg\", 0, get_psnr(mse, 1, s->average_max)); if (s->stats_file) { fprintf(s->stats_file, \"n:%\"PRId64\" mse_avg:%0.2f \", s->nb_frames, mse); for (j = 0; j < s->nb_components; j++) { c = s->is_rgb ? s->rgba_map[j] : j; fprintf(s->stats_file, \"mse_%c:%0.2f \", s->comps[j], comp_mse[c]); } for (j = 0; j < s->nb_components; j++) { c = s->is_rgb ? s->rgba_map[j] : j; fprintf(s->stats_file, \"psnr_%c:%0.2f \", s->comps[j], get_psnr(comp_mse[c], 1, s->max[c])); } fprintf(s->stats_file, \"\\n\"); } return main; }"} {"target": 1, "idx": 17353, "func": "static void matroska_add_index_entries(MatroskaDemuxContext *matroska) { EbmlList *index_list; MatroskaIndex *index; int index_scale = 1; int i, j; if (matroska->ctx->flags & AVFMT_FLAG_IGNIDX) return; index_list = &matroska->index; index = index_list->elem; if (index_list->nb_elem && index[0].time > 1E14 / matroska->time_scale) { av_log(matroska->ctx, AV_LOG_WARNING, \"Working around broken index.\\n\"); index_scale = matroska->time_scale; } for (i = 0; i < index_list->nb_elem; i++) { EbmlList *pos_list = &index[i].pos; MatroskaIndexPos *pos = pos_list->elem; for (j = 0; j < pos_list->nb_elem; j++) { MatroskaTrack *track = matroska_find_track_by_num(matroska, pos[j].track); if (track && track->stream) av_add_index_entry(track->stream, pos[j].pos + matroska->segment_start, index[i].time / index_scale, 0, 0, AVINDEX_KEYFRAME); } } }"} {"target": 0, "idx": 17357, "func": "static inline void downmix_3f_2r_to_mono(float *samples) { int i; for (i = 0; i < 256; i++) { samples[i] += (samples[i + 256] + samples[i + 512] + samples[i + 768] + samples[i + 1024]); samples[i + 256] = samples[i + 512] = samples[i + 768] = samples[i + 1024] = 0; } }"} {"target": 1, "idx": 17362, "func": "static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) { X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); /* Currently Intel IOMMU IR only support \"kernel-irqchip={off|split}\" */ if (x86_iommu->intr_supported && kvm_irqchip_in_kernel() && !kvm_irqchip_is_split()) { error_setg(errp, \"Intel Interrupt Remapping cannot work with \" \"kernel-irqchip=on, please use 'split|off'.\"); return false; } if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu->intr_supported) { error_setg(errp, \"eim=on cannot be selected without intremap=on\"); return false; } if (s->intr_eim == ON_OFF_AUTO_AUTO) { s->intr_eim = 
x86_iommu->intr_supported ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; } return true; }"} {"target": 1, "idx": 17370, "func": "static int no_init_in (HWVoiceIn *hw, struct audsettings *as) { audio_pcm_init_info (&hw->info, as); hw->samples = 1024; return 0; }"} {"target": 1, "idx": 17381, "func": "static int nsv_read_chunk(AVFormatContext *s, int fill_header) { NSVContext *nsv = s->priv_data; AVIOContext *pb = s->pb; AVStream *st[2] = {NULL, NULL}; NSVStream *nst; AVPacket *pkt; int i, err = 0; uint8_t auxcount; /* number of aux metadata, also 4 bits of vsize */ uint32_t vsize; uint16_t asize; uint16_t auxsize; if (nsv->ahead[0].data || nsv->ahead[1].data) return 0; //-1; /* hey! eat what you've in your plate first! */ null_chunk_retry: if (pb->eof_reached) return -1; for (i = 0; i < NSV_MAX_RESYNC_TRIES && nsv->state < NSV_FOUND_NSVS && !err; i++) err = nsv_resync(s); if (err < 0) return err; if (nsv->state == NSV_FOUND_NSVS) err = nsv_parse_NSVs_header(s); if (err < 0) return err; if (nsv->state != NSV_HAS_READ_NSVS && nsv->state != NSV_FOUND_BEEF) return -1; auxcount = avio_r8(pb); vsize = avio_rl16(pb); asize = avio_rl16(pb); vsize = (vsize << 4) | (auxcount >> 4); auxcount &= 0x0f; av_log(s, AV_LOG_TRACE, \"NSV CHUNK %\"PRIu8\" aux, %\"PRIu32\" bytes video, %\"PRIu16\" bytes audio\\n\", auxcount, vsize, asize); /* skip aux stuff */ for (i = 0; i < auxcount; i++) { uint32_t av_unused auxtag; auxsize = avio_rl16(pb); auxtag = avio_rl32(pb); avio_skip(pb, auxsize); vsize -= auxsize + sizeof(uint16_t) + sizeof(uint32_t); /* that's becoming brain-dead */ } if (pb->eof_reached) return -1; if (!vsize && !asize) { nsv->state = NSV_UNSYNC; goto null_chunk_retry; } /* map back streams to v,a */ if (s->nb_streams > 0) st[s->streams[0]->id] = s->streams[0]; if (s->nb_streams > 1) st[s->streams[1]->id] = s->streams[1]; if (vsize && st[NSV_ST_VIDEO]) { nst = st[NSV_ST_VIDEO]->priv_data; pkt = &nsv->ahead[NSV_ST_VIDEO]; av_get_packet(pb, pkt, vsize); pkt->stream_index = st[NSV_ST_VIDEO]->index;//NSV_ST_VIDEO; pkt->dts = nst->frame_offset; pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */ for (i = 0; i < FFMIN(8, vsize); i++) av_log(s, AV_LOG_TRACE, \"NSV video: [%d] = %02\"PRIx8\"\\n\", i, pkt->data[i]); } if(st[NSV_ST_VIDEO]) ((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset++; if (asize && st[NSV_ST_AUDIO]) { nst = st[NSV_ST_AUDIO]->priv_data; pkt = &nsv->ahead[NSV_ST_AUDIO]; /* read raw audio specific header on the first audio chunk... */ /* on ALL audio chunks ?? seems so! */ if (asize && st[NSV_ST_AUDIO]->codecpar->codec_tag == MKTAG('P', 'C', 'M', ' ')/* && fill_header*/) { uint8_t bps; uint8_t channels; uint16_t samplerate; bps = avio_r8(pb); channels = avio_r8(pb); samplerate = avio_rl16(pb); if (!channels || !samplerate) return AVERROR_INVALIDDATA; asize-=4; av_log(s, AV_LOG_TRACE, \"NSV RAWAUDIO: bps %\"PRIu8\", nchan %\"PRIu8\", srate %\"PRIu16\"\\n\", bps, channels, samplerate); if (fill_header) { st[NSV_ST_AUDIO]->need_parsing = AVSTREAM_PARSE_NONE; /* we know everything */ if (bps != 16) { av_log(s, AV_LOG_TRACE, \"NSV AUDIO bit/sample != 16 (%\"PRIu8\")!!!\\n\", bps); } bps /= channels; // ??? if (bps == 8) st[NSV_ST_AUDIO]->codecpar->codec_id = AV_CODEC_ID_PCM_U8; samplerate /= 4;/* UGH ??? 
XXX */ channels = 1; st[NSV_ST_AUDIO]->codecpar->channels = channels; st[NSV_ST_AUDIO]->codecpar->sample_rate = samplerate; av_log(s, AV_LOG_TRACE, \"NSV RAWAUDIO: bps %\"PRIu8\", nchan %\"PRIu8\", srate %\"PRIu16\"\\n\", bps, channels, samplerate); } } av_get_packet(pb, pkt, asize); pkt->stream_index = st[NSV_ST_AUDIO]->index;//NSV_ST_AUDIO; pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */ if( nsv->state == NSV_HAS_READ_NSVS && st[NSV_ST_VIDEO] ) { /* on a nsvs frame we have new information on a/v sync */ pkt->dts = (((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset-1); pkt->dts *= (int64_t)1000 * nsv->framerate.den; pkt->dts += (int64_t)nsv->avsync * nsv->framerate.num; av_log(s, AV_LOG_TRACE, \"NSV AUDIO: sync:%\"PRId16\", dts:%\"PRId64, nsv->avsync, pkt->dts); } nst->frame_offset++; } nsv->state = NSV_UNSYNC; return 0; }"} {"target": 1, "idx": 17411, "func": "static int block_save_complete(QEMUFile *f, void *opaque) { int ret; DPRINTF(\"Enter save live complete submitted %d transferred %d\\n\", block_mig_state.submitted, block_mig_state.transferred); ret = flush_blks(f); if (ret) { return ret; } blk_mig_reset_dirty_cursor(); /* we know for sure that save bulk is completed and all async read completed */ blk_mig_lock(); assert(block_mig_state.submitted == 0); blk_mig_unlock(); do { ret = blk_mig_save_dirty_block(f, 0); if (ret < 0) { return ret; } } while (ret == 0); /* report completion */ qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS); DPRINTF(\"Block migration completed\\n\"); qemu_put_be64(f, BLK_MIG_FLAG_EOS); blk_mig_cleanup(); return 0; }"} {"target": 1, "idx": 17412, "func": "matroska_parse_block(MatroskaDemuxContext *matroska, uint8_t *data, int size, int64_t pos, uint64_t cluster_time, uint64_t duration, int is_keyframe, int is_bframe) { int res = 0; int track; AVStream *st; AVPacket *pkt; uint8_t *origdata = data; int16_t block_time; uint32_t *lace_size = NULL; int n, flags, laces = 0; uint64_t num; int stream_index; /* first byte(s): tracknum */ if ((n = matroska_ebmlnum_uint(data, size, &num)) < 0) { av_log(matroska->ctx, AV_LOG_ERROR, \"EBML block data error\\n\"); av_free(origdata); return res; } data += n; size -= n; /* fetch track from num */ track = matroska_find_track_by_num(matroska, num); if (size <= 3 || track < 0 || track >= matroska->num_tracks) { av_log(matroska->ctx, AV_LOG_INFO, \"Invalid stream %d or size %u\\n\", track, size); av_free(origdata); return res; } stream_index = matroska->tracks[track]->stream_index; if (stream_index < 0 || stream_index >= matroska->ctx->nb_streams) { av_free(origdata); return res; } st = matroska->ctx->streams[stream_index]; if (st->discard >= AVDISCARD_ALL) { av_free(origdata); return res; } if (duration == AV_NOPTS_VALUE) duration = matroska->tracks[track]->default_duration / matroska->time_scale; /* block_time (relative to cluster time) */ block_time = AV_RB16(data); data += 2; flags = *data++; size -= 3; if (is_keyframe == -1) is_keyframe = flags & 0x80 ? 
PKT_FLAG_KEY : 0; if (matroska->skip_to_keyframe) { if (!is_keyframe || st != matroska->skip_to_stream) { av_free(origdata); return res; } matroska->skip_to_keyframe = 0; } switch ((flags & 0x06) >> 1) { case 0x0: /* no lacing */ laces = 1; lace_size = av_mallocz(sizeof(int)); lace_size[0] = size; break; case 0x1: /* xiph lacing */ case 0x2: /* fixed-size lacing */ case 0x3: /* EBML lacing */ assert(size>0); // size <=3 is checked before size-=3 above laces = (*data) + 1; data += 1; size -= 1; lace_size = av_mallocz(laces * sizeof(int)); switch ((flags & 0x06) >> 1) { case 0x1: /* xiph lacing */ { uint8_t temp; uint32_t total = 0; for (n = 0; res == 0 && n < laces - 1; n++) { while (1) { if (size == 0) { res = -1; break; } temp = *data; lace_size[n] += temp; data += 1; size -= 1; if (temp != 0xff) break; } total += lace_size[n]; } lace_size[n] = size - total; break; } case 0x2: /* fixed-size lacing */ for (n = 0; n < laces; n++) lace_size[n] = size / laces; break; case 0x3: /* EBML lacing */ { uint32_t total; n = matroska_ebmlnum_uint(data, size, &num); if (n < 0) { av_log(matroska->ctx, AV_LOG_INFO, \"EBML block data error\\n\"); break; } data += n; size -= n; total = lace_size[0] = num; for (n = 1; res == 0 && n < laces - 1; n++) { int64_t snum; int r; r = matroska_ebmlnum_sint (data, size, &snum); if (r < 0) { av_log(matroska->ctx, AV_LOG_INFO, \"EBML block data error\\n\"); break; } data += r; size -= r; lace_size[n] = lace_size[n - 1] + snum; total += lace_size[n]; } lace_size[n] = size - total; break; } } break; } if (res == 0) { uint64_t timecode = AV_NOPTS_VALUE; if (cluster_time != (uint64_t)-1 && (block_time >= 0 || cluster_time >= -block_time)) timecode = cluster_time + block_time; for (n = 0; n < laces; n++) { if (st->codec->codec_id == CODEC_ID_RA_288 || st->codec->codec_id == CODEC_ID_COOK || st->codec->codec_id == CODEC_ID_ATRAC3) { MatroskaAudioTrack *audiotrack = (MatroskaAudioTrack *)matroska->tracks[track]; int a = st->codec->block_align; int sps = audiotrack->sub_packet_size; int cfs = audiotrack->coded_framesize; int h = audiotrack->sub_packet_h; int y = audiotrack->sub_packet_cnt; int w = audiotrack->frame_size; int x; if (!audiotrack->pkt_cnt) { if (st->codec->codec_id == CODEC_ID_RA_288) for (x=0; xbuf+x*2*w+y*cfs, data+x*cfs, cfs); else for (x=0; xbuf+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), data+x*sps, sps); if (++audiotrack->sub_packet_cnt >= h) { audiotrack->sub_packet_cnt = 0; audiotrack->pkt_cnt = h*w / a; } } while (audiotrack->pkt_cnt) { pkt = av_mallocz(sizeof(AVPacket)); av_new_packet(pkt, a); memcpy(pkt->data, audiotrack->buf + a * (h*w / a - audiotrack->pkt_cnt--), a); pkt->pos = pos; pkt->stream_index = stream_index; matroska_queue_packet(matroska, pkt); } } else { int result, offset = 0, ilen, olen, pkt_size = lace_size[n]; uint8_t *pkt_data = data; if (matroska->tracks[track]->encoding_scope & 1) { switch (matroska->tracks[track]->encoding_algo) { case MATROSKA_TRACK_ENCODING_COMP_HEADERSTRIP: offset = matroska->tracks[track]->encoding_settings_len; break; case MATROSKA_TRACK_ENCODING_COMP_LZO: pkt_data = NULL; do { ilen = lace_size[n]; olen = pkt_size *= 3; pkt_data = av_realloc(pkt_data, pkt_size+LZO_OUTPUT_PADDING); result = lzo1x_decode(pkt_data, &olen, data, &ilen); } while (result==LZO_OUTPUT_FULL && pkt_size<10000000); if (result) { continue; } pkt_size -= olen; break; #ifdef CONFIG_ZLIB case MATROSKA_TRACK_ENCODING_COMP_ZLIB: { z_stream zstream = {0}; pkt_data = NULL; if (inflateInit(&zstream) != Z_OK) continue; zstream.next_in = data; 
zstream.avail_in = lace_size[n]; do { pkt_size *= 3; pkt_data = av_realloc(pkt_data, pkt_size); zstream.avail_out = pkt_size - zstream.total_out; zstream.next_out = pkt_data + zstream.total_out; result = inflate(&zstream, Z_NO_FLUSH); } while (result==Z_OK && pkt_size<10000000); pkt_size = zstream.total_out; inflateEnd(&zstream); if (result != Z_STREAM_END) { continue; } break; } #endif #ifdef CONFIG_BZLIB case MATROSKA_TRACK_ENCODING_COMP_BZLIB: { bz_stream bzstream = {0}; pkt_data = NULL; if (BZ2_bzDecompressInit(&bzstream, 0, 0) != BZ_OK) continue; bzstream.next_in = data; bzstream.avail_in = lace_size[n]; do { pkt_size *= 3; pkt_data = av_realloc(pkt_data, pkt_size); bzstream.avail_out = pkt_size - bzstream.total_out_lo32; bzstream.next_out = pkt_data + bzstream.total_out_lo32; result = BZ2_bzDecompress(&bzstream); } while (result==BZ_OK && pkt_size<10000000); pkt_size = bzstream.total_out_lo32; BZ2_bzDecompressEnd(&bzstream); if (result != BZ_STREAM_END) { continue; } break; } #endif } } pkt = av_mallocz(sizeof(AVPacket)); /* XXX: prevent data copy... */ if (av_new_packet(pkt, pkt_size+offset) < 0) { av_free(pkt); res = AVERROR(ENOMEM); n = laces-1; break; } if (offset) memcpy (pkt->data, matroska->tracks[track]->encoding_settings, offset); memcpy (pkt->data+offset, pkt_data, pkt_size); if (n == 0) pkt->flags = is_keyframe; pkt->stream_index = stream_index; pkt->pts = timecode; pkt->pos = pos; pkt->duration = duration; matroska_queue_packet(matroska, pkt); } if (timecode != AV_NOPTS_VALUE) timecode = duration ? timecode + duration : AV_NOPTS_VALUE; data += lace_size[n]; } } av_free(lace_size); av_free(origdata); return res; }"} {"target": 1, "idx": 17413, "func": "void ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size) { c->high = 255; c->bits = -16; c->buffer = buf; c->end = buf + buf_size; c->code_word = bytestream_get_be24(&c->buffer); }"} {"target": 1, "idx": 17428, "func": "int vring_pop(VirtIODevice *vdev, Vring *vring, VirtQueueElement **p_elem) { struct vring_desc desc; unsigned int i, head, found = 0, num = vring->vr.num; uint16_t avail_idx, last_avail_idx; VirtQueueElement *elem = NULL; int ret; /* If there was a fatal error then refuse operation */ if (vring->broken) { ret = -EFAULT; goto out; } /* Check it isn't doing very strange things with descriptor numbers. */ last_avail_idx = vring->last_avail_idx; avail_idx = vring->vr.avail->idx; barrier(); /* load indices now and not again later */ if (unlikely((uint16_t)(avail_idx - last_avail_idx) > num)) { error_report(\"Guest moved used index from %u to %u\", last_avail_idx, avail_idx); ret = -EFAULT; goto out; } /* If there's nothing new since last we looked. */ if (avail_idx == last_avail_idx) { ret = -EAGAIN; goto out; } /* Only get avail ring entries after they have been exposed by guest. */ smp_rmb(); /* Grab the next descriptor number they're advertising, and increment * the index we've seen. */ head = vring->vr.avail->ring[last_avail_idx % num]; elem = g_slice_new(VirtQueueElement); elem->index = head; elem->in_num = elem->out_num = 0; /* If their number is silly, that's an error. 
*/ if (unlikely(head >= num)) { error_report(\"Guest says index %u > %u is available\", head, num); ret = -EFAULT; goto out; } if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) { vring_avail_event(&vring->vr) = vring->vr.avail->idx; } i = head; do { if (unlikely(i >= num)) { error_report(\"Desc index is %u > %u, head = %u\", i, num, head); ret = -EFAULT; goto out; } if (unlikely(++found > num)) { error_report(\"Loop detected: last one at %u vq size %u head %u\", i, num, head); ret = -EFAULT; goto out; } desc = vring->vr.desc[i]; /* Ensure descriptor is loaded before accessing fields */ barrier(); if (desc.flags & VRING_DESC_F_INDIRECT) { ret = get_indirect(vring, elem, &desc); if (ret < 0) { goto out; } continue; } ret = get_desc(vring, elem, &desc); if (ret < 0) { goto out; } i = desc.next; } while (desc.flags & VRING_DESC_F_NEXT); /* On success, increment avail index. */ vring->last_avail_idx++; *p_elem = elem; return head; out: assert(ret < 0); if (ret == -EFAULT) { vring->broken = true; } if (elem) { vring_unmap_element(elem); g_slice_free(VirtQueueElement, elem); } *p_elem = NULL; return ret; }"} {"target": 0, "idx": 17439, "func": "static int ftp_connect_control_connection(URLContext *h) { char buf[CONTROL_BUFFER_SIZE], opts_format[20]; int err; AVDictionary *opts = NULL; FTPContext *s = h->priv_data; const int connect_codes[] = {220, 0}; s->conn_control_block_flag = 0; if (!s->conn_control) { ff_url_join(buf, sizeof(buf), \"tcp\", NULL, s->hostname, s->server_control_port, NULL); if (s->rw_timeout != -1) { snprintf(opts_format, sizeof(opts_format), \"%d\", s->rw_timeout); av_dict_set(&opts, \"timeout\", opts_format, 0); } /* if option is not given, don't pass it and let tcp use its own default */ err = ffurl_open(&s->conn_control, buf, AVIO_FLAG_READ_WRITE, &s->conn_control_interrupt_cb, &opts); av_dict_free(&opts); if (err < 0) { av_log(h, AV_LOG_ERROR, \"Cannot open control connection\\n\"); return err; } /* consume all messages from server */ if (!ftp_status(s, NULL, connect_codes)) { av_log(h, AV_LOG_ERROR, \"FTP server not ready for new users\\n\"); err = AVERROR(EACCES); return err; } if ((err = ftp_auth(s)) < 0) { av_log(h, AV_LOG_ERROR, \"FTP authentication failed\\n\"); return err; } if ((err = ftp_type(s)) < 0) { av_dlog(h, \"Set content type failed\\n\"); return err; } } return 0; }"} {"target": 1, "idx": 17449, "func": "void qpci_memread(QPCIDevice *dev, void *data, void *buf, size_t len) { uintptr_t addr = (uintptr_t)data; g_assert(addr >= QPCI_PIO_LIMIT); dev->bus->memread(dev->bus, addr, buf, len); }"} {"target": 0, "idx": 17452, "func": "static int is_intra_more_likely(ERContext *s) { int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y; if (!s->last_pic.f || !s->last_pic.f->data[0]) return 1; // no previous frame available -> use spatial prediction undamaged_count = 0; for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; const int error = s->error_status_table[mb_xy]; if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR))) undamaged_count++; } if (s->avctx->codec_id == AV_CODEC_ID_H264 && s->ref_count <= 0) return 1; if (undamaged_count < 5) return 0; // almost all MBs damaged -> use temporal prediction #if FF_API_XVMC FF_DISABLE_DEPRECATION_WARNINGS // prevent dsp.sad() check, that requires access to the image if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) return 1; FF_ENABLE_DEPRECATION_WARNINGS #endif /* FF_API_XVMC */ skip_amount = FFMAX(undamaged_count / 50, 
1); // check only up to 50 MBs is_intra_likely = 0; j = 0; for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int error; const int mb_xy = mb_x + mb_y * s->mb_stride; error = s->error_status_table[mb_xy]; if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR)) continue; // skip damaged j++; // skip a few to speed things up if ((j % skip_amount) != 0) continue; if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) { int *linesize = s->cur_pic.f->linesize; uint8_t *mb_ptr = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; uint8_t *last_mb_ptr = s->last_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; if (s->avctx->codec_id == AV_CODEC_ID_H264) { // FIXME } else { ff_thread_await_progress(s->last_pic.tf, mb_y, 0); } is_intra_likely += s->mecc->sad[0](NULL, last_mb_ptr, mb_ptr, linesize[0], 16); is_intra_likely -= s->mecc->sad[0](NULL, last_mb_ptr, last_mb_ptr + linesize[0] * 16, linesize[0], 16); } else { if (IS_INTRA(s->cur_pic.mb_type[mb_xy])) is_intra_likely++; else is_intra_likely--; } } } return is_intra_likely > 0; }"} {"target": 1, "idx": 17480, "func": "void Release(void *ctx) { ContextInfo *ci; ci = (ContextInfo *) ctx; if (ci->cache) { imlib_context_set_image(ci->cache->image); imlib_free_image(); av_free(ci->cache); } if (ctx) { if (ci->imageOverlaid) { imlib_context_set_image(ci->imageOverlaid); imlib_free_image(); } ff_eval_free(ci->expr_x); ff_eval_free(ci->expr_y); ff_eval_free(ci->expr_R); ff_eval_free(ci->expr_G); ff_eval_free(ci->expr_B); sws_freeContext(ci->toRGB_convert_ctx); sws_freeContext(ci->fromRGB_convert_ctx); av_free(ctx); } }"} {"target": 1, "idx": 17482, "func": "void qemu_aio_flush(void) { AioHandler *node; int ret; do { ret = 0; /* * If there are pending emulated aio start them now so flush * will be able to return 1. 
*/ qemu_aio_wait(); LIST_FOREACH(node, &aio_handlers, node) { ret |= node->io_flush(node->opaque); } } while (ret > 0); }"} {"target": 1, "idx": 17513, "func": "static void quantize_and_encode_band_cost_ZERO_mips(struct AACEncContext *s, PutBitContext *pb, const float *in, float *out, const float *scaled, int size, int scale_idx, int cb, const float lambda, const float uplim, int *bits, const float ROUNDING) { int i; if (bits) *bits = 0; if (out) { for (i = 0; i < size; i += 4) { out[i ] = 0.0f; out[i+1] = 0.0f; out[i+2] = 0.0f; out[i+3] = 0.0f; } } }"} {"target": 1, "idx": 17518, "func": "static int xen_platform_initfn(PCIDevice *dev) { PCIXenPlatformState *d = XEN_PLATFORM(dev); uint8_t *pci_conf; pci_conf = dev->config; pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY); pci_config_set_prog_interface(pci_conf, 0); pci_conf[PCI_INTERRUPT_PIN] = 1; platform_ioport_bar_setup(d); pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->bar); /* reserve 16MB mmio address for share memory*/ platform_mmio_setup(d); pci_register_bar(dev, 1, PCI_BASE_ADDRESS_MEM_PREFETCH, &d->mmio_bar); platform_fixed_ioport_init(d); return 0; }"} {"target": 0, "idx": 17534, "func": "static void mmap_release_buffer(AVPacket *pkt) { struct v4l2_buffer buf; int res, fd; struct buff_data *buf_descriptor = pkt->priv; if (pkt->data == NULL) return; memset(&buf, 0, sizeof(struct v4l2_buffer)); buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; buf.index = buf_descriptor->index; fd = buf_descriptor->fd; av_free(buf_descriptor); res = ioctl(fd, VIDIOC_QBUF, &buf); if (res < 0) av_log(NULL, AV_LOG_ERROR, \"ioctl(VIDIOC_QBUF): %s\\n\", strerror(errno)); pkt->data = NULL; pkt->size = 0; }"} {"target": 1, "idx": 17548, "func": "static void coroutine_fn c1_fn(void *opaque) { Coroutine *c2 = opaque; qemu_coroutine_enter(c2, NULL); }"} {"target": 1, "idx": 17553, "func": "static int write_target_commit(BlockDriverState *bs, int64_t sector_num, const uint8_t* buffer, int nb_sectors) { BDRVVVFATState* s = bs->opaque; return try_commit(s); }"} {"target": 1, "idx": 17554, "func": "static void show_packets(AVFormatContext *fmt_ctx) { AVPacket pkt; av_init_packet(&pkt); probe_array_header(\"packets\", 0); while (!av_read_frame(fmt_ctx, &pkt)) show_packet(fmt_ctx, &pkt); probe_array_footer(\"packets\", 0); }"} {"target": 0, "idx": 17576, "func": "static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size) { unsigned i; /* Check that there is free space left in a buffer */ if (!(s->prnsts & SDHC_SPACE_AVAILABLE)) { ERRPRINT(\"Can't write to data buffer: buffer full\\n\"); return; } for (i = 0; i < size; i++) { s->fifo_buffer[s->data_count] = value & 0xFF; s->data_count++; value >>= 8; if (s->data_count >= (s->blksize & 0x0fff)) { DPRINT_L2(\"write buffer filled with %u bytes of data\\n\", s->data_count); s->data_count = 0; s->prnsts &= ~SDHC_SPACE_AVAILABLE; if (s->prnsts & SDHC_DOING_WRITE) { SDHCI_GET_CLASS(s)->write_block_to_card(s); } } } }"} {"target": 0, "idx": 17581, "func": "static uint64_t omap_ulpd_pm_read(void *opaque, target_phys_addr_t addr, unsigned size) { struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; uint16_t ret; if (size != 2) { return omap_badwidth_read16(opaque, addr); } switch (addr) { case 0x14: /* IT_STATUS */ ret = s->ulpd_pm_regs[addr >> 2]; s->ulpd_pm_regs[addr >> 2] = 0; qemu_irq_lower(s->irq[1][OMAP_INT_GAUGE_32K]); return ret; case 0x18: /* Reserved */ case 0x1c: /* Reserved */ case 0x20: /* Reserved */ case 0x28: /* Reserved 
*/ case 0x2c: /* Reserved */ OMAP_BAD_REG(addr); case 0x00: /* COUNTER_32_LSB */ case 0x04: /* COUNTER_32_MSB */ case 0x08: /* COUNTER_HIGH_FREQ_LSB */ case 0x0c: /* COUNTER_HIGH_FREQ_MSB */ case 0x10: /* GAUGING_CTRL */ case 0x24: /* SETUP_ANALOG_CELL3_ULPD1 */ case 0x30: /* CLOCK_CTRL */ case 0x34: /* SOFT_REQ */ case 0x38: /* COUNTER_32_FIQ */ case 0x3c: /* DPLL_CTRL */ case 0x40: /* STATUS_REQ */ /* XXX: check clk::usecount state for every clock */ case 0x48: /* LOCL_TIME */ case 0x4c: /* APLL_CTRL */ case 0x50: /* POWER_CTRL */ return s->ulpd_pm_regs[addr >> 2]; } OMAP_BAD_REG(addr); return 0; }"} {"target": 0, "idx": 17587, "func": "static int milkymist_memcard_init(SysBusDevice *dev) { MilkymistMemcardState *s = MILKYMIST_MEMCARD(dev); DriveInfo *dinfo; BlockDriverState *bs; dinfo = drive_get_next(IF_SD); bs = dinfo ? blk_bs(blk_by_legacy_dinfo(dinfo)) : NULL; s->card = sd_init(bs, false); if (s->card == NULL) { return -1; } s->enabled = bs && bdrv_is_inserted(bs); memory_region_init_io(&s->regs_region, OBJECT(s), &memcard_mmio_ops, s, \"milkymist-memcard\", R_MAX * 4); sysbus_init_mmio(dev, &s->regs_region); return 0; }"} {"target": 0, "idx": 17596, "func": "static void visitor_output_setup_internal(TestOutputVisitorData *data, bool human) { data->human = human; data->sov = string_output_visitor_new(human); g_assert(data->sov); data->ov = string_output_get_visitor(data->sov); g_assert(data->ov); }"} {"target": 0, "idx": 17597, "func": "static int get_riff(AVFormatContext *s, AVIOContext *pb) { AVIContext *avi = s->priv_data; char header[8]; int i; /* check RIFF header */ avio_read(pb, header, 4); avi->riff_end = avio_rl32(pb); /* RIFF chunk size */ avi->riff_end += avio_tell(pb); /* RIFF chunk end */ avio_read(pb, header+4, 4); for(i=0; avi_headers[i][0]; i++) if(!memcmp(header, avi_headers[i], 8)) break; if(!avi_headers[i][0]) return -1; if(header[7] == 0x19) av_log(s, AV_LOG_INFO, \"This file has been generated by a totally broken muxer.\\n\"); return 0; }"} {"target": 1, "idx": 17611, "func": "_eth_get_rss_ex_src_addr(const struct iovec *pkt, int pkt_frags, size_t dsthdr_offset, struct ip6_ext_hdr *ext_hdr, struct in6_address *src_addr) { size_t bytes_left = (ext_hdr->ip6r_len + 1) * 8 - sizeof(*ext_hdr); struct ip6_option_hdr opthdr; size_t opt_offset = dsthdr_offset + sizeof(*ext_hdr); while (bytes_left > sizeof(opthdr)) { size_t input_size = iov_size(pkt, pkt_frags); size_t bytes_read, optlen; if (input_size < opt_offset) { return false; } bytes_read = iov_to_buf(pkt, pkt_frags, opt_offset, &opthdr, sizeof(opthdr)); if (bytes_read != sizeof(opthdr)) { return false; } optlen = (opthdr.type == IP6_OPT_PAD1) ? 
1 : (opthdr.len + sizeof(opthdr)); if (optlen > bytes_left) { return false; } if (opthdr.type == IP6_OPT_HOME) { size_t input_size = iov_size(pkt, pkt_frags); if (input_size < opt_offset + sizeof(opthdr)) { return false; } bytes_read = iov_to_buf(pkt, pkt_frags, opt_offset + sizeof(opthdr), src_addr, sizeof(*src_addr)); return bytes_read == sizeof(src_addr); } opt_offset += optlen; bytes_left -= optlen; } return false; }"} {"target": 0, "idx": 17615, "func": "static int altivec_uyvy_rgb32 (SwsContext *c, unsigned char **in, int *instrides, int srcSliceY, int srcSliceH, unsigned char **oplanes, int *outstrides) { int w = c->srcW; int h = srcSliceH; int i,j; vector unsigned char uyvy; vector signed short Y,U,V; vector signed short vx,ux,uvx; vector signed short R0,G0,B0,R1,G1,B1; vector unsigned char R,G,B; vector unsigned char *out; ubyte *img; img = in[0]; out = (vector unsigned char *)(oplanes[0]+srcSliceY*outstrides[0]); for (i=0;iopaque; uint64_t start, last, cluster_offset, k; if (size <= 0) return; start = start_of_cluster(s, offset); last = start_of_cluster(s, offset + size - 1); for(cluster_offset = start; cluster_offset <= last; cluster_offset += s->cluster_size) { k = cluster_offset >> s->cluster_bits; if (k >= refcount_table_size) { fprintf(stderr, \"Warning: cluster offset=0x%\" PRIx64 \" is after \" \"the end of the image file, can't properly check refcounts.\\n\", cluster_offset); res->check_errors++; } else { if (++refcount_table[k] == 0) { fprintf(stderr, \"ERROR: overflow cluster offset=0x%\" PRIx64 \"\\n\", cluster_offset); res->corruptions++; } } } }"} {"target": 0, "idx": 17641, "func": "void stw_phys(target_phys_addr_t addr, uint32_t val) { uint16_t v = tswap16(val); cpu_physical_memory_write(addr, (const uint8_t *)&v, 2); }"} {"target": 0, "idx": 17653, "func": "static void gen_branch(DisasContext *ctx, int insn_bytes) { if (ctx->hflags & MIPS_HFLAG_BMASK) { int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK; /* Branches completion */ ctx->hflags &= ~MIPS_HFLAG_BMASK; ctx->bstate = BS_BRANCH; save_cpu_state(ctx, 0); /* FIXME: Need to clear can_do_io. 
*/ switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) { case MIPS_HFLAG_FBNSLOT: MIPS_DEBUG(\"forbidden slot\"); gen_goto_tb(ctx, 0, ctx->pc + insn_bytes); break; case MIPS_HFLAG_B: /* unconditional branch */ MIPS_DEBUG(\"unconditional branch\"); if (proc_hflags & MIPS_HFLAG_BX) { tcg_gen_xori_i32(hflags, hflags, MIPS_HFLAG_M16); } gen_goto_tb(ctx, 0, ctx->btarget); break; case MIPS_HFLAG_BL: /* blikely taken case */ MIPS_DEBUG(\"blikely branch taken\"); gen_goto_tb(ctx, 0, ctx->btarget); break; case MIPS_HFLAG_BC: /* Conditional branch */ MIPS_DEBUG(\"conditional branch\"); { int l1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1); gen_goto_tb(ctx, 1, ctx->pc + insn_bytes); gen_set_label(l1); gen_goto_tb(ctx, 0, ctx->btarget); } break; case MIPS_HFLAG_BR: /* unconditional branch to register */ MIPS_DEBUG(\"branch to register\"); if (ctx->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { TCGv t0 = tcg_temp_new(); TCGv_i32 t1 = tcg_temp_new_i32(); tcg_gen_andi_tl(t0, btarget, 0x1); tcg_gen_trunc_tl_i32(t1, t0); tcg_temp_free(t0); tcg_gen_andi_i32(hflags, hflags, ~(uint32_t)MIPS_HFLAG_M16); tcg_gen_shli_i32(t1, t1, MIPS_HFLAG_M16_SHIFT); tcg_gen_or_i32(hflags, hflags, t1); tcg_temp_free_i32(t1); tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1); } else { tcg_gen_mov_tl(cpu_PC, btarget); } if (ctx->singlestep_enabled) { save_cpu_state(ctx, 0); gen_helper_0e0i(raise_exception, EXCP_DEBUG); } tcg_gen_exit_tb(0); break; default: MIPS_DEBUG(\"unknown branch\"); break; } } }"} {"target": 0, "idx": 17664, "func": "void slirp_cleanup(Slirp *slirp) { TAILQ_REMOVE(&slirp_instances, slirp, entry); unregister_savevm(\"slirp\", slirp); qemu_free(slirp->tftp_prefix); qemu_free(slirp->bootp_filename); qemu_free(slirp); }"} {"target": 0, "idx": 17670, "func": "void scsi_req_abort(SCSIRequest *req, int status) { if (!req->enqueued) { return; } scsi_req_ref(req); scsi_req_dequeue(req); req->io_canceled = true; if (req->ops->cancel_io) { req->ops->cancel_io(req); } scsi_req_complete(req, status); scsi_req_unref(req); }"} {"target": 0, "idx": 17678, "func": "rdt_free_extradata (PayloadContext *rdt) { int i; for (i = 0; i < MAX_STREAMS; i++) if (rdt->rmst[i]) { ff_rm_free_rmstream(rdt->rmst[i]); av_freep(&rdt->rmst[i]); } if (rdt->rmctx) av_close_input_stream(rdt->rmctx); av_freep(&rdt->mlti_data); av_free(rdt); }"} {"target": 0, "idx": 17682, "func": "build_dsdt(GArray *table_data, GArray *linker, AcpiPmInfo *pm, AcpiMiscInfo *misc, PcPciInfo *pci, MachineState *machine) { CrsRangeEntry *entry; Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs; GPtrArray *mem_ranges = g_ptr_array_new_with_free_func(crs_range_free); GPtrArray *io_ranges = g_ptr_array_new_with_free_func(crs_range_free); PCMachineState *pcms = PC_MACHINE(machine); uint32_t nr_mem = machine->ram_slots; int root_bus_limit = 0xFF; PCIBus *bus = NULL; int i; dsdt = init_aml_allocator(); /* Reserve space for header */ acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader)); build_dbg_aml(dsdt); if (misc->is_piix4) { sb_scope = aml_scope(\"_SB\"); dev = aml_device(\"PCI0\"); aml_append(dev, aml_name_decl(\"_HID\", aml_eisaid(\"PNP0A03\"))); aml_append(dev, aml_name_decl(\"_ADR\", aml_int(0))); aml_append(dev, aml_name_decl(\"_UID\", aml_int(1))); aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); build_hpet_aml(dsdt); build_piix4_pm(dsdt); build_piix4_isa_bridge(dsdt); build_isa_devices_aml(dsdt); build_piix4_pci_hotplug(dsdt); build_piix4_pci0_int(dsdt); } else { sb_scope = aml_scope(\"_SB\"); aml_append(sb_scope, 
aml_operation_region(\"PCST\", AML_SYSTEM_IO, aml_int(0xae00), 0x0c)); aml_append(sb_scope, aml_operation_region(\"PCSB\", AML_SYSTEM_IO, aml_int(0xae0c), 0x01)); field = aml_field(\"PCSB\", AML_ANY_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); aml_append(field, aml_named_field(\"PCIB\", 8)); aml_append(sb_scope, field); aml_append(dsdt, sb_scope); sb_scope = aml_scope(\"_SB\"); dev = aml_device(\"PCI0\"); aml_append(dev, aml_name_decl(\"_HID\", aml_eisaid(\"PNP0A08\"))); aml_append(dev, aml_name_decl(\"_CID\", aml_eisaid(\"PNP0A03\"))); aml_append(dev, aml_name_decl(\"_ADR\", aml_int(0))); aml_append(dev, aml_name_decl(\"_UID\", aml_int(1))); aml_append(dev, aml_name_decl(\"SUPP\", aml_int(0))); aml_append(dev, aml_name_decl(\"CTRL\", aml_int(0))); aml_append(dev, build_q35_osc_method()); aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); build_hpet_aml(dsdt); build_q35_isa_bridge(dsdt); build_isa_devices_aml(dsdt); build_q35_pci0_int(dsdt); } build_legacy_cpu_hotplug_aml(dsdt, machine, pm->cpu_hp_io_base); build_memory_hotplug_aml(dsdt, nr_mem, pm->mem_hp_io_base, pm->mem_hp_io_len); scope = aml_scope(\"_GPE\"); { aml_append(scope, aml_name_decl(\"_HID\", aml_string(\"ACPI0006\"))); if (misc->is_piix4) { method = aml_method(\"_E01\", 0, AML_NOTSERIALIZED); aml_append(method, aml_acquire(aml_name(\"\\\\_SB.PCI0.BLCK\"), 0xFFFF)); aml_append(method, aml_call0(\"\\\\_SB.PCI0.PCNT\")); aml_append(method, aml_release(aml_name(\"\\\\_SB.PCI0.BLCK\"))); aml_append(scope, method); } method = aml_method(\"_E03\", 0, AML_NOTSERIALIZED); aml_append(method, aml_call0(MEMORY_HOTPLUG_HANDLER_PATH)); aml_append(scope, method); } aml_append(dsdt, scope); bus = PC_MACHINE(machine)->bus; if (bus) { QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); uint8_t numa_node = pci_bus_numa_node(bus); /* look only for expander root buses */ if (!pci_bus_is_root(bus)) { continue; } if (bus_num < root_bus_limit) { root_bus_limit = bus_num - 1; } scope = aml_scope(\"\\\\_SB\"); dev = aml_device(\"PC%.02X\", bus_num); aml_append(dev, aml_name_decl(\"_UID\", aml_int(bus_num))); aml_append(dev, aml_name_decl(\"_HID\", aml_eisaid(\"PNP0A03\"))); aml_append(dev, aml_name_decl(\"_BBN\", aml_int(bus_num))); if (numa_node != NUMA_NODE_UNASSIGNED) { aml_append(dev, aml_name_decl(\"_PXM\", aml_int(numa_node))); } aml_append(dev, build_prt(false)); crs = build_crs(PCI_HOST_BRIDGE(BUS(bus)->parent), io_ranges, mem_ranges); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(scope, dev); aml_append(dsdt, scope); } } scope = aml_scope(\"\\\\_SB.PCI0\"); /* build PCI0._CRS */ crs = aml_resource_template(); aml_append(crs, aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, 0x0000, 0x0, root_bus_limit, 0x0000, root_bus_limit + 1)); aml_append(crs, aml_io(AML_DECODE16, 0x0CF8, 0x0CF8, 0x01, 0x08)); aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, 0x0000, 0x0CF7, 0x0000, 0x0CF8)); crs_replace_with_free_ranges(io_ranges, 0x0D00, 0xFFFF); for (i = 0; i < io_ranges->len; i++) { entry = g_ptr_array_index(io_ranges, i); aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, entry->base, entry->limit, 0x0000, entry->limit - entry->base + 1)); } aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, 0x000A0000, 0x000BFFFF, 0, 0x00020000)); crs_replace_with_free_ranges(mem_ranges, pci->w32.begin, pci->w32.end - 1); for (i = 0; i < mem_ranges->len; 
i++) { entry = g_ptr_array_index(mem_ranges, i); aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0, entry->base, entry->limit, 0, entry->limit - entry->base + 1)); } if (pci->w64.begin) { aml_append(crs, aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, pci->w64.begin, pci->w64.end - 1, 0, pci->w64.end - pci->w64.begin)); } if (misc->tpm_version != TPM_VERSION_UNSPEC) { aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE, TPM_TIS_ADDR_SIZE, AML_READ_WRITE)); } aml_append(scope, aml_name_decl(\"_CRS\", crs)); /* reserve GPE0 block resources */ dev = aml_device(\"GPE0\"); aml_append(dev, aml_name_decl(\"_HID\", aml_string(\"PNP0A06\"))); aml_append(dev, aml_name_decl(\"_UID\", aml_string(\"GPE0 resources\"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl(\"_STA\", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->gpe0_blk, pm->gpe0_blk, 1, pm->gpe0_blk_len) ); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(scope, dev); g_ptr_array_free(io_ranges, true); g_ptr_array_free(mem_ranges, true); /* reserve PCIHP resources */ if (pm->pcihp_io_len) { dev = aml_device(\"PHPR\"); aml_append(dev, aml_name_decl(\"_HID\", aml_string(\"PNP0A06\"))); aml_append(dev, aml_name_decl(\"_UID\", aml_string(\"PCI Hotplug resources\"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl(\"_STA\", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->pcihp_io_base, pm->pcihp_io_base, 1, pm->pcihp_io_len) ); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(scope, dev); } aml_append(dsdt, scope); /* create S3_ / S4_ / S5_ packages if necessary */ scope = aml_scope(\"\\\\\"); if (!pm->s3_disabled) { pkg = aml_package(4); aml_append(pkg, aml_int(1)); /* PM1a_CNT.SLP_TYP */ aml_append(pkg, aml_int(1)); /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl(\"_S3\", pkg)); } if (!pm->s4_disabled) { pkg = aml_package(4); aml_append(pkg, aml_int(pm->s4_val)); /* PM1a_CNT.SLP_TYP */ /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ aml_append(pkg, aml_int(pm->s4_val)); aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl(\"_S4\", pkg)); } pkg = aml_package(4); aml_append(pkg, aml_int(0)); /* PM1a_CNT.SLP_TYP */ aml_append(pkg, aml_int(0)); /* PM1b_CNT.SLP_TYP not impl. */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl(\"_S5\", pkg)); aml_append(dsdt, scope); /* create fw_cfg node, unconditionally */ { /* when using port i/o, the 8-bit data register *always* overlaps * with half of the 16-bit control register. Hence, the total size * of the i/o region used is FW_CFG_CTL_SIZE; when using DMA, the * DMA control register is located at FW_CFG_DMA_IO_BASE + 4 */ uint8_t io_size = object_property_get_bool(OBJECT(pcms->fw_cfg), \"dma_enabled\", NULL) ? 
ROUND_UP(FW_CFG_CTL_SIZE, 4) + sizeof(dma_addr_t) : FW_CFG_CTL_SIZE; scope = aml_scope(\"\\\\_SB.PCI0\"); dev = aml_device(\"FWCF\"); aml_append(dev, aml_name_decl(\"_HID\", aml_string(\"QEMU0002\"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl(\"_STA\", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, FW_CFG_IO_BASE, FW_CFG_IO_BASE, 0x01, io_size) ); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(scope, dev); aml_append(dsdt, scope); } if (misc->applesmc_io_base) { scope = aml_scope(\"\\\\_SB.PCI0.ISA\"); dev = aml_device(\"SMC\"); aml_append(dev, aml_name_decl(\"_HID\", aml_eisaid(\"APP0001\"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl(\"_STA\", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, misc->applesmc_io_base, misc->applesmc_io_base, 0x01, APPLESMC_MAX_DATA_LENGTH) ); aml_append(crs, aml_irq_no_flags(6)); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(scope, dev); aml_append(dsdt, scope); } if (misc->pvpanic_port) { scope = aml_scope(\"\\\\_SB.PCI0.ISA\"); dev = aml_device(\"PEVT\"); aml_append(dev, aml_name_decl(\"_HID\", aml_string(\"QEMU0001\"))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, misc->pvpanic_port, misc->pvpanic_port, 1, 1) ); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(dev, aml_operation_region(\"PEOR\", AML_SYSTEM_IO, aml_int(misc->pvpanic_port), 1)); field = aml_field(\"PEOR\", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); aml_append(field, aml_named_field(\"PEPT\", 8)); aml_append(dev, field); /* device present, functioning, decoding, shown in UI */ aml_append(dev, aml_name_decl(\"_STA\", aml_int(0xF))); method = aml_method(\"RDPT\", 0, AML_NOTSERIALIZED); aml_append(method, aml_store(aml_name(\"PEPT\"), aml_local(0))); aml_append(method, aml_return(aml_local(0))); aml_append(dev, method); method = aml_method(\"WRPT\", 1, AML_NOTSERIALIZED); aml_append(method, aml_store(aml_arg(0), aml_name(\"PEPT\"))); aml_append(dev, method); aml_append(scope, dev); aml_append(dsdt, scope); } sb_scope = aml_scope(\"\\\\_SB\"); { build_memory_devices(sb_scope, nr_mem, pm->mem_hp_io_base, pm->mem_hp_io_len); { Object *pci_host; PCIBus *bus = NULL; pci_host = acpi_get_i386_pci_host(); if (pci_host) { bus = PCI_HOST_BRIDGE(pci_host)->bus; } if (bus) { Aml *scope = aml_scope(\"PCI0\"); /* Scan all PCI buses. Generate tables to support hotplug. 
*/ build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en); if (misc->tpm_version != TPM_VERSION_UNSPEC) { dev = aml_device(\"ISA.TPM\"); aml_append(dev, aml_name_decl(\"_HID\", aml_eisaid(\"PNP0C31\"))); aml_append(dev, aml_name_decl(\"_STA\", aml_int(0xF))); crs = aml_resource_template(); aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE, TPM_TIS_ADDR_SIZE, AML_READ_WRITE)); /* FIXME: TPM_TIS_IRQ=5 conflicts with PNP0C0F irqs, Rewrite to take IRQ from TPM device model and fix default IRQ value there to use some unused IRQ */ /* aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); */ aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(scope, dev); } aml_append(sb_scope, scope); } } aml_append(dsdt, sb_scope); } /* copy AML table into ACPI tables blob and patch header there */ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); build_header(linker, table_data, (void *)(table_data->data + table_data->len - dsdt->buf->len), \"DSDT\", dsdt->buf->len, 1, NULL, NULL); free_aml_allocator(); }"} {"target": 0, "idx": 17699, "func": "static void arm_cpu_do_interrupt_aarch64(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; unsigned int new_el = env->exception.target_el; target_ulong addr = env->cp15.vbar_el[new_el]; unsigned int new_mode = aarch64_pstate_mode(new_el, true); if (arm_current_el(env) < new_el) { if (env->aarch64) { addr += 0x400; } else { addr += 0x600; } } else if (pstate_read(env) & PSTATE_SP) { addr += 0x200; } switch (cs->exception_index) { case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: env->cp15.far_el[new_el] = env->exception.vaddress; qemu_log_mask(CPU_LOG_INT, \"...with FAR 0x%\" PRIx64 \"\\n\", env->cp15.far_el[new_el]); /* fall through */ case EXCP_BKPT: case EXCP_UDEF: case EXCP_SWI: case EXCP_HVC: case EXCP_HYP_TRAP: case EXCP_SMC: env->cp15.esr_el[new_el] = env->exception.syndrome; break; case EXCP_IRQ: case EXCP_VIRQ: addr += 0x80; break; case EXCP_FIQ: case EXCP_VFIQ: addr += 0x100; break; case EXCP_SEMIHOST: qemu_log_mask(CPU_LOG_INT, \"...handling as semihosting call 0x%\" PRIx64 \"\\n\", env->xregs[0]); env->xregs[0] = do_arm_semihosting(env); return; default: cpu_abort(cs, \"Unhandled exception 0x%x\\n\", cs->exception_index); } if (is_a64(env)) { env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env); aarch64_save_sp(env, arm_current_el(env)); env->elr_el[new_el] = env->pc; } else { env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env); if (!env->thumb) { env->cp15.esr_el[new_el] |= 1 << 25; } env->elr_el[new_el] = env->regs[15]; aarch64_sync_32_to_64(env); env->condexec_bits = 0; } qemu_log_mask(CPU_LOG_INT, \"...with ELR 0x%\" PRIx64 \"\\n\", env->elr_el[new_el]); pstate_write(env, PSTATE_DAIF | new_mode); env->aarch64 = 1; aarch64_restore_sp(env, new_el); env->pc = addr; qemu_log_mask(CPU_LOG_INT, \"...to EL%d PC 0x%\" PRIx64 \" PSTATE 0x%x\\n\", new_el, env->pc, pstate_read(env)); }"} {"target": 0, "idx": 17700, "func": "static bool pc_machine_get_nvdimm(Object *obj, Error **errp) { PCMachineState *pcms = PC_MACHINE(obj); return pcms->nvdimm; }"} {"target": 0, "idx": 17710, "func": "build_header(GArray *linker, GArray *table_data, AcpiTableHeader *h, const char *sig, int len, uint8_t rev, const char *oem_table_id) { memcpy(&h->signature, sig, 4); h->length = cpu_to_le32(len); h->revision = rev; memcpy(h->oem_id, ACPI_BUILD_APPNAME6, 6); if (oem_table_id) { strncpy((char *)h->oem_table_id, oem_table_id, sizeof(h->oem_table_id)); } else { memcpy(h->oem_table_id, ACPI_BUILD_APPNAME4, 
4); memcpy(h->oem_table_id + 4, sig, 4); } h->oem_revision = cpu_to_le32(1); memcpy(h->asl_compiler_id, ACPI_BUILD_APPNAME4, 4); h->asl_compiler_revision = cpu_to_le32(1); h->checksum = 0; /* Checksum to be filled in by Guest linker */ bios_linker_loader_add_checksum(linker, ACPI_BUILD_TABLE_FILE, table_data->data, h, len, &h->checksum); }"} {"target": 0, "idx": 17723, "func": "static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale) { int v; int i = 0; uint8_t state[CONTEXT_SIZE]; memset(state, 128, sizeof(state)); for (v = 0; i < 128; v++) { unsigned len = get_symbol(c, state, 0) + 1; if (len > 128 - i) return AVERROR_INVALIDDATA; while (len--) { quant_table[i] = scale * v; i++; } } for (i = 1; i < 128; i++) quant_table[256 - i] = -quant_table[i]; quant_table[128] = -quant_table[127]; return 2 * v - 1; }"} {"target": 0, "idx": 17735, "func": "static int tb_unreliable(AVCodecContext *c) { if (c->time_base.den >= 101L * c->time_base.num || c->time_base.den < 5L * c->time_base.num || // c->codec_tag == AV_RL32(\"DIVX\") || // c->codec_tag == AV_RL32(\"XVID\") || c->codec_id == AV_CODEC_ID_MPEG2VIDEO || c->codec_id == AV_CODEC_ID_H264) return 1; return 0; }"} {"target": 1, "idx": 17737, "func": "int swr_init(struct SwrContext *s){ s->in_buffer_index= 0; s->in_buffer_count= 0; s->resample_in_constraint= 0; free_temp(&s->postin); free_temp(&s->midbuf); free_temp(&s->preout); free_temp(&s->in_buffer); swri_audio_convert_free(&s-> in_convert); swri_audio_convert_free(&s->out_convert); swri_audio_convert_free(&s->full_convert); s-> in.planar= av_sample_fmt_is_planar(s-> in_sample_fmt); s->out.planar= av_sample_fmt_is_planar(s->out_sample_fmt); s-> in_sample_fmt= av_get_alt_sample_fmt(s-> in_sample_fmt, 0); s->out_sample_fmt= av_get_alt_sample_fmt(s->out_sample_fmt, 0); if(s-> in_sample_fmt >= AV_SAMPLE_FMT_NB){ av_log(s, AV_LOG_ERROR, \"Requested sample format %s is invalid\\n\", av_get_sample_fmt_name(s->in_sample_fmt)); return AVERROR(EINVAL); } if(s->out_sample_fmt >= AV_SAMPLE_FMT_NB){ av_log(s, AV_LOG_ERROR, \"Requested sample format %s is invalid\\n\", av_get_sample_fmt_name(s->out_sample_fmt)); return AVERROR(EINVAL); } if( s->int_sample_fmt != AV_SAMPLE_FMT_S16 &&s->int_sample_fmt != AV_SAMPLE_FMT_FLT){ av_log(s, AV_LOG_ERROR, \"Requested sample format %s is not supported internally, only float & S16 is supported\\n\", av_get_sample_fmt_name(s->int_sample_fmt)); return AVERROR(EINVAL); } //FIXME should we allow/support using FLT on material that doesnt need it ? 
if(s->in_sample_fmt <= AV_SAMPLE_FMT_S16 || s->int_sample_fmt==AV_SAMPLE_FMT_S16){ s->int_sample_fmt= AV_SAMPLE_FMT_S16; }else s->int_sample_fmt= AV_SAMPLE_FMT_FLT; if (s->out_sample_rate!=s->in_sample_rate || (s->flags & SWR_FLAG_RESAMPLE)){ s->resample = swri_resample_init(s->resample, s->out_sample_rate, s->in_sample_rate, 16, 10, 0, 0.8); }else swri_resample_free(&s->resample); if(s->int_sample_fmt != AV_SAMPLE_FMT_S16 && s->resample){ av_log(s, AV_LOG_ERROR, \"Resampling only supported with internal s16 currently\\n\"); //FIXME return -1; } if(!s->used_ch_count) s->used_ch_count= s->in.ch_count; if(s->used_ch_count && s-> in_ch_layout && s->used_ch_count != av_get_channel_layout_nb_channels(s-> in_ch_layout)){ av_log(s, AV_LOG_WARNING, \"Input channel layout has a different number of channels than the number of used channels, ignoring layout\\n\"); s-> in_ch_layout= 0; } if(!s-> in_ch_layout) s-> in_ch_layout= av_get_default_channel_layout(s->used_ch_count); if(!s->out_ch_layout) s->out_ch_layout= av_get_default_channel_layout(s->out.ch_count); s->rematrix= s->out_ch_layout !=s->in_ch_layout || s->rematrix_volume!=1.0; #define RSC 1 //FIXME finetune if(!s-> in.ch_count) s-> in.ch_count= av_get_channel_layout_nb_channels(s-> in_ch_layout); if(!s->used_ch_count) s->used_ch_count= s->in.ch_count; if(!s->out.ch_count) s->out.ch_count= av_get_channel_layout_nb_channels(s->out_ch_layout); av_assert0(s-> in.ch_count); av_assert0(s->used_ch_count); av_assert0(s->out.ch_count); s->resample_first= RSC*s->out.ch_count/s->in.ch_count - RSC < s->out_sample_rate/(float)s-> in_sample_rate - 1.0; s-> in.bps= av_get_bytes_per_sample(s-> in_sample_fmt); s->int_bps= av_get_bytes_per_sample(s->int_sample_fmt); s->out.bps= av_get_bytes_per_sample(s->out_sample_fmt); if(!s->resample && !s->rematrix && !s->channel_map){ s->full_convert = swri_audio_convert_alloc(s->out_sample_fmt, s-> in_sample_fmt, s-> in.ch_count, NULL, 0); return 0; } s->in_convert = swri_audio_convert_alloc(s->int_sample_fmt, s-> in_sample_fmt, s->used_ch_count, s->channel_map, 0); s->out_convert= swri_audio_convert_alloc(s->out_sample_fmt, s->int_sample_fmt, s->out.ch_count, NULL, 0); s->postin= s->in; s->preout= s->out; s->midbuf= s->in; s->in_buffer= s->in; if(s->channel_map){ s->postin.ch_count= s->midbuf.ch_count= s->in_buffer.ch_count= s->used_ch_count; } if(!s->resample_first){ s->midbuf.ch_count= s->out.ch_count; s->in_buffer.ch_count = s->out.ch_count; } s->in_buffer.bps = s->postin.bps = s->midbuf.bps = s->preout.bps = s->int_bps; s->in_buffer.planar = s->postin.planar = s->midbuf.planar = s->preout.planar = 1; if(s->rematrix) return swri_rematrix_init(s); return 0; }"} {"target": 1, "idx": 17742, "func": "static int svq1_decode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; int i; MPV_decode_defaults(s); s->avctx = avctx; s->width = (avctx->width+3)&~3; s->height = (avctx->height+3)&~3; s->codec_id= avctx->codec->id; avctx->pix_fmt = PIX_FMT_YUV410P; avctx->has_b_frames= 1; // not true, but DP frames and these behave like unidirectional b frames s->flags= avctx->flags; if (MPV_common_init(s) < 0) return -1; init_vlc(&svq1_block_type, 2, 4, &svq1_block_type_vlc[0][1], 2, 1, &svq1_block_type_vlc[0][0], 2, 1); init_vlc(&svq1_motion_component, 7, 33, &mvtab[0][1], 2, 1, &mvtab[0][0], 2, 1); for (i = 0; i < 6; i++) { init_vlc(&svq1_intra_multistage[i], 3, 8, &svq1_intra_multistage_vlc[i][0][1], 2, 1, &svq1_intra_multistage_vlc[i][0][0], 2, 1); init_vlc(&svq1_inter_multistage[i], 3, 8, 
&svq1_inter_multistage_vlc[i][0][1], 2, 1, &svq1_inter_multistage_vlc[i][0][0], 2, 1); } init_vlc(&svq1_intra_mean, 8, 256, &svq1_intra_mean_vlc[0][1], 4, 2, &svq1_intra_mean_vlc[0][0], 4, 2); init_vlc(&svq1_inter_mean, 9, 512, &svq1_inter_mean_vlc[0][1], 4, 2, &svq1_inter_mean_vlc[0][0], 4, 2); return 0; }"} {"target": 1, "idx": 17752, "func": "static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb, AVStream *st, RMStream *ast, int read_all) { char buf[256]; uint32_t version; int ret; /* ra type header */ version = avio_rb16(pb); /* version */ if (version == 3) { unsigned bytes_per_minute; int header_size = avio_rb16(pb); int64_t startpos = avio_tell(pb); avio_skip(pb, 8); bytes_per_minute = avio_rb16(pb); avio_skip(pb, 4); rm_read_metadata(s, 0); if ((startpos + header_size) >= avio_tell(pb) + 2) { // fourcc (should always be \"lpcJ\") avio_r8(pb); get_str8(pb, buf, sizeof(buf)); } // Skip extra header crap (this should never happen) if ((startpos + header_size) > avio_tell(pb)) avio_skip(pb, header_size + startpos - avio_tell(pb)); if (bytes_per_minute) st->codec->bit_rate = 8LL * bytes_per_minute / 60; st->codec->sample_rate = 8000; st->codec->channels = 1; st->codec->channel_layout = AV_CH_LAYOUT_MONO; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = AV_CODEC_ID_RA_144; ast->deint_id = DEINT_ID_INT0; } else { int flavor, sub_packet_h, coded_framesize, sub_packet_size; int codecdata_length; unsigned bytes_per_minute; /* old version (4) */ avio_skip(pb, 2); /* unused */ avio_rb32(pb); /* .ra4 */ avio_rb32(pb); /* data size */ avio_rb16(pb); /* version2 */ avio_rb32(pb); /* header size */ flavor= avio_rb16(pb); /* add codec info / flavor */ ast->coded_framesize = coded_framesize = avio_rb32(pb); /* coded frame size */ avio_rb32(pb); /* ??? */ bytes_per_minute = avio_rb32(pb); if (version == 4) { if (bytes_per_minute) st->codec->bit_rate = 8LL * bytes_per_minute / 60; } avio_rb32(pb); /* ??? */ ast->sub_packet_h = sub_packet_h = avio_rb16(pb); /* 1 */ st->codec->block_align= avio_rb16(pb); /* frame size */ ast->sub_packet_size = sub_packet_size = avio_rb16(pb); /* sub packet size */ avio_rb16(pb); /* ??? 
*/ if (version == 5) { avio_rb16(pb); avio_rb16(pb); avio_rb16(pb); } st->codec->sample_rate = avio_rb16(pb); avio_rb32(pb); st->codec->channels = avio_rb16(pb); if (version == 5) { ast->deint_id = avio_rl32(pb); avio_read(pb, buf, 4); buf[4] = 0; } else { get_str8(pb, buf, sizeof(buf)); /* desc */ ast->deint_id = AV_RL32(buf); get_str8(pb, buf, sizeof(buf)); /* desc */ } st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_tag = AV_RL32(buf); st->codec->codec_id = ff_codec_get_id(ff_rm_codec_tags, st->codec->codec_tag); switch (st->codec->codec_id) { case AV_CODEC_ID_AC3: st->need_parsing = AVSTREAM_PARSE_FULL; break; case AV_CODEC_ID_RA_288: st->codec->extradata_size= 0; ast->audio_framesize = st->codec->block_align; st->codec->block_align = coded_framesize; break; case AV_CODEC_ID_COOK: st->need_parsing = AVSTREAM_PARSE_HEADERS; case AV_CODEC_ID_ATRAC3: case AV_CODEC_ID_SIPR: if (read_all) { codecdata_length = 0; } else { avio_rb16(pb); avio_r8(pb); if (version == 5) avio_r8(pb); codecdata_length = avio_rb32(pb); if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){ av_log(s, AV_LOG_ERROR, \"codecdata_length too large\\n\"); return -1; } } ast->audio_framesize = st->codec->block_align; if (st->codec->codec_id == AV_CODEC_ID_SIPR) { if (flavor > 3) { av_log(s, AV_LOG_ERROR, \"bad SIPR file flavor %d\\n\", flavor); return -1; } st->codec->block_align = ff_sipr_subpk_size[flavor]; } else { if(sub_packet_size <= 0){ av_log(s, AV_LOG_ERROR, \"sub_packet_size is invalid\\n\"); return -1; } st->codec->block_align = ast->sub_packet_size; } if ((ret = rm_read_extradata(pb, st->codec, codecdata_length)) < 0) return ret; break; case AV_CODEC_ID_AAC: avio_rb16(pb); avio_r8(pb); if (version == 5) avio_r8(pb); codecdata_length = avio_rb32(pb); if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){ av_log(s, AV_LOG_ERROR, \"codecdata_length too large\\n\"); return -1; } if (codecdata_length >= 1) { avio_r8(pb); if ((ret = rm_read_extradata(pb, st->codec, codecdata_length - 1)) < 0) return ret; } break; default: av_strlcpy(st->codec->codec_name, buf, sizeof(st->codec->codec_name)); } if (ast->deint_id == DEINT_ID_INT4 || ast->deint_id == DEINT_ID_GENR || ast->deint_id == DEINT_ID_SIPR) { if (st->codec->block_align <= 0 || ast->audio_framesize * sub_packet_h > (unsigned)INT_MAX || ast->audio_framesize * sub_packet_h < st->codec->block_align) return AVERROR_INVALIDDATA; if (av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h) < 0) return AVERROR(ENOMEM); } switch (ast->deint_id) { case DEINT_ID_INT4: if (ast->coded_framesize > ast->audio_framesize || sub_packet_h <= 1 || ast->coded_framesize * sub_packet_h > (2 + (sub_packet_h & 1)) * ast->audio_framesize) return AVERROR_INVALIDDATA; break; case DEINT_ID_GENR: if (ast->sub_packet_size <= 0 || ast->sub_packet_size > ast->audio_framesize) return AVERROR_INVALIDDATA; break; case DEINT_ID_SIPR: case DEINT_ID_INT0: case DEINT_ID_VBRS: case DEINT_ID_VBRF: break; default: av_log(s, AV_LOG_ERROR, \"Unknown interleaver %X\\n\", ast->deint_id); return AVERROR_INVALIDDATA; } if (read_all) { avio_r8(pb); avio_r8(pb); avio_r8(pb); rm_read_metadata(s, 0); } } return 0; }"} {"target": 1, "idx": 17765, "func": "int show_license(void *optctx, const char *opt, const char *arg) { printf( #if CONFIG_NONFREE \"This version of %s has nonfree parts compiled in.\\n\" \"Therefore it is not legally redistributable.\\n\", program_name #elif CONFIG_GPLV3 \"%s is free software; you can redistribute it and/or 
modify\\n\" \"it under the terms of the GNU General Public License as published by\\n\" \"the Free Software Foundation; either version 3 of the License, or\\n\" \"(at your option) any later version.\\n\" \"\\n\" \"%s is distributed in the hope that it will be useful,\\n\" \"but WITHOUT ANY WARRANTY; without even the implied warranty of\\n\" \"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n\" \"GNU General Public License for more details.\\n\" \"\\n\" \"You should have received a copy of the GNU General Public License\\n\" \"along with %s. If not, see .\\n\", program_name, program_name, program_name #elif CONFIG_GPL \"%s is free software; you can redistribute it and/or modify\\n\" \"it under the terms of the GNU General Public License as published by\\n\" \"the Free Software Foundation; either version 2 of the License, or\\n\" \"(at your option) any later version.\\n\" \"\\n\" \"%s is distributed in the hope that it will be useful,\\n\" \"but WITHOUT ANY WARRANTY; without even the implied warranty of\\n\" \"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n\" \"GNU General Public License for more details.\\n\" \"\\n\" \"You should have received a copy of the GNU General Public License\\n\" \"along with %s; if not, write to the Free Software\\n\" \"Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\\n\", program_name, program_name, program_name #elif CONFIG_LGPLV3 \"%s is free software; you can redistribute it and/or modify\\n\" \"it under the terms of the GNU Lesser General Public License as published by\\n\" \"the Free Software Foundation; either version 3 of the License, or\\n\" \"(at your option) any later version.\\n\" \"\\n\" \"%s is distributed in the hope that it will be useful,\\n\" \"but WITHOUT ANY WARRANTY; without even the implied warranty of\\n\" \"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n\" \"GNU Lesser General Public License for more details.\\n\" \"\\n\" \"You should have received a copy of the GNU Lesser General Public License\\n\" \"along with %s. If not, see .\\n\", program_name, program_name, program_name #else \"%s is free software; you can redistribute it and/or\\n\" \"modify it under the terms of the GNU Lesser General Public\\n\" \"License as published by the Free Software Foundation; either\\n\" \"version 2.1 of the License, or (at your option) any later version.\\n\" \"\\n\" \"%s is distributed in the hope that it will be useful,\\n\" \"but WITHOUT ANY WARRANTY; without even the implied warranty of\\n\" \"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\\n\" \"Lesser General Public License for more details.\\n\" \"\\n\" \"You should have received a copy of the GNU Lesser General Public\\n\" \"License along with %s; if not, write to the Free Software\\n\" \"Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\\n\", program_name, program_name, program_name #endif ); return 0; }"} {"target": 1, "idx": 17766, "func": "int ff_rtsp_make_setup_request(AVFormatContext *s, const char *host, int port, int lower_transport, const char *real_challenge) { RTSPState *rt = s->priv_data; int rtx = 0, j, i, err, interleave = 0, port_off; RTSPStream *rtsp_st; RTSPMessageHeader reply1, *reply = &reply1; char cmd[2048]; const char *trans_pref; if (rt->transport == RTSP_TRANSPORT_RDT) trans_pref = \"x-pn-tng\"; else trans_pref = \"RTP/AVP\"; /* default timeout: 1 minute */ rt->timeout = 60; /* for each stream, make the setup request */ /* XXX: we assume the same server is used for the control of each * RTSP stream */ /* Choose a random starting offset within the first half of the * port range, to allow for a number of ports to try even if the offset * happens to be at the end of the random range. */ port_off = av_get_random_seed() % ((rt->rtp_port_max - rt->rtp_port_min)/2); /* even random offset */ port_off -= port_off & 0x01; for (j = rt->rtp_port_min + port_off, i = 0; i < rt->nb_rtsp_streams; ++i) { char transport[2048]; /* * WMS serves all UDP data over a single connection, the RTX, which * isn't necessarily the first in the SDP but has to be the first * to be set up, else the second/third SETUP will fail with a 461. */ if (lower_transport == RTSP_LOWER_TRANSPORT_UDP && rt->server_type == RTSP_SERVER_WMS) { if (i == 0) { /* rtx first */ for (rtx = 0; rtx < rt->nb_rtsp_streams; rtx++) { int len = strlen(rt->rtsp_streams[rtx]->control_url); if (len >= 4 && !strcmp(rt->rtsp_streams[rtx]->control_url + len - 4, \"/rtx\")) break; } if (rtx == rt->nb_rtsp_streams) return -1; /* no RTX found */ rtsp_st = rt->rtsp_streams[rtx]; } else rtsp_st = rt->rtsp_streams[i > rtx ? i : i - 1]; } else rtsp_st = rt->rtsp_streams[i]; /* RTP/UDP */ if (lower_transport == RTSP_LOWER_TRANSPORT_UDP) { char buf[256]; if (rt->server_type == RTSP_SERVER_WMS && i > 1) { port = reply->transports[0].client_port_min; goto have_port; } /* first try in specified port range */ while (j <= rt->rtp_port_max) { ff_url_join(buf, sizeof(buf), \"rtp\", NULL, host, -1, \"?localport=%d\", j); /* we will use two ports per rtp stream (rtp and rtcp) */ j += 2; if (!ffurl_open(&rtsp_st->rtp_handle, buf, AVIO_FLAG_READ_WRITE, &s->interrupt_callback, NULL)) goto rtp_opened; } av_log(s, AV_LOG_ERROR, \"Unable to open an input RTP port\\n\"); err = AVERROR(EIO); goto fail; rtp_opened: port = ff_rtp_get_local_rtp_port(rtsp_st->rtp_handle); have_port: snprintf(transport, sizeof(transport) - 1, \"%s/UDP;\", trans_pref); if (rt->server_type != RTSP_SERVER_REAL) av_strlcat(transport, \"unicast;\", sizeof(transport)); av_strlcatf(transport, sizeof(transport), \"client_port=%d\", port); if (rt->transport == RTSP_TRANSPORT_RTP && !(rt->server_type == RTSP_SERVER_WMS && i > 0)) av_strlcatf(transport, sizeof(transport), \"-%d\", port + 1); } /* RTP/TCP */ else if (lower_transport == RTSP_LOWER_TRANSPORT_TCP) { /* For WMS streams, the application streams are only used for * UDP. When trying to set it up for TCP streams, the server * will return an error. Therefore, we skip those streams. 
*/ if (rt->server_type == RTSP_SERVER_WMS && (rtsp_st->stream_index < 0 || s->streams[rtsp_st->stream_index]->codec->codec_type == AVMEDIA_TYPE_DATA)) continue; snprintf(transport, sizeof(transport) - 1, \"%s/TCP;\", trans_pref); if (rt->transport != RTSP_TRANSPORT_RDT) av_strlcat(transport, \"unicast;\", sizeof(transport)); av_strlcatf(transport, sizeof(transport), \"interleaved=%d-%d\", interleave, interleave + 1); interleave += 2; } else if (lower_transport == RTSP_LOWER_TRANSPORT_UDP_MULTICAST) { snprintf(transport, sizeof(transport) - 1, \"%s/UDP;multicast\", trans_pref); } if (s->oformat) { av_strlcat(transport, \";mode=receive\", sizeof(transport)); } else if (rt->server_type == RTSP_SERVER_REAL || rt->server_type == RTSP_SERVER_WMS) av_strlcat(transport, \";mode=play\", sizeof(transport)); snprintf(cmd, sizeof(cmd), \"Transport: %s\\r\\n\", transport); if (rt->accept_dynamic_rate) av_strlcat(cmd, \"x-Dynamic-Rate: 0\\r\\n\", sizeof(cmd)); if (i == 0 && rt->server_type == RTSP_SERVER_REAL && CONFIG_RTPDEC) { char real_res[41], real_csum[9]; ff_rdt_calc_response_and_checksum(real_res, real_csum, real_challenge); av_strlcatf(cmd, sizeof(cmd), \"If-Match: %s\\r\\n\" \"RealChallenge2: %s, sd=%s\\r\\n\", rt->session_id, real_res, real_csum); } ff_rtsp_send_cmd(s, \"SETUP\", rtsp_st->control_url, cmd, reply, NULL); if (reply->status_code == 461 /* Unsupported protocol */ && i == 0) { err = 1; goto fail; } else if (reply->status_code != RTSP_STATUS_OK || reply->nb_transports != 1) { err = AVERROR_INVALIDDATA; goto fail; } /* XXX: same protocol for all streams is required */ if (i > 0) { if (reply->transports[0].lower_transport != rt->lower_transport || reply->transports[0].transport != rt->transport) { err = AVERROR_INVALIDDATA; goto fail; } } else { rt->lower_transport = reply->transports[0].lower_transport; rt->transport = reply->transports[0].transport; } /* Fail if the server responded with another lower transport mode * than what we requested. */ if (reply->transports[0].lower_transport != lower_transport) { av_log(s, AV_LOG_ERROR, \"Nonmatching transport in server reply\\n\"); err = AVERROR_INVALIDDATA; goto fail; } switch(reply->transports[0].lower_transport) { case RTSP_LOWER_TRANSPORT_TCP: rtsp_st->interleaved_min = reply->transports[0].interleaved_min; rtsp_st->interleaved_max = reply->transports[0].interleaved_max; break; case RTSP_LOWER_TRANSPORT_UDP: { char url[1024], options[30] = \"\"; if (rt->rtsp_flags & RTSP_FLAG_FILTER_SRC) av_strlcpy(options, \"?connect=1\", sizeof(options)); /* Use source address if specified */ if (reply->transports[0].source[0]) { ff_url_join(url, sizeof(url), \"rtp\", NULL, reply->transports[0].source, reply->transports[0].server_port_min, \"%s\", options); } else { ff_url_join(url, sizeof(url), \"rtp\", NULL, host, reply->transports[0].server_port_min, \"%s\", options); } if (!(rt->server_type == RTSP_SERVER_WMS && i > 1) && ff_rtp_set_remote_url(rtsp_st->rtp_handle, url) < 0) { err = AVERROR_INVALIDDATA; goto fail; } /* Try to initialize the connection state in a * potential NAT router by sending dummy packets. * RTP/RTCP dummy packets are used for RDT, too. 
*/ if (!(rt->server_type == RTSP_SERVER_WMS && i > 1) && s->iformat && CONFIG_RTPDEC) ff_rtp_send_punch_packets(rtsp_st->rtp_handle); break; } case RTSP_LOWER_TRANSPORT_UDP_MULTICAST: { char url[1024], namebuf[50], optbuf[20] = \"\"; struct sockaddr_storage addr; int port, ttl; if (reply->transports[0].destination.ss_family) { addr = reply->transports[0].destination; port = reply->transports[0].port_min; ttl = reply->transports[0].ttl; } else { addr = rtsp_st->sdp_ip; port = rtsp_st->sdp_port; ttl = rtsp_st->sdp_ttl; } if (ttl > 0) snprintf(optbuf, sizeof(optbuf), \"?ttl=%d\", ttl); getnameinfo((struct sockaddr*) &addr, sizeof(addr), namebuf, sizeof(namebuf), NULL, 0, NI_NUMERICHOST); ff_url_join(url, sizeof(url), \"rtp\", NULL, namebuf, port, \"%s\", optbuf); if (ffurl_open(&rtsp_st->rtp_handle, url, AVIO_FLAG_READ_WRITE, &s->interrupt_callback, NULL) < 0) { err = AVERROR_INVALIDDATA; goto fail; } break; } } if ((err = rtsp_open_transport_ctx(s, rtsp_st))) goto fail; } if (reply->timeout > 0) rt->timeout = reply->timeout; if (rt->server_type == RTSP_SERVER_REAL) rt->need_subscription = 1; return 0; fail: ff_rtsp_undo_setup(s); return err; }"} {"target": 0, "idx": 17786, "func": "static int parse_fmtp(AVFormatContext *s, AVStream *stream, PayloadContext *data, const char *attr, const char *value) { AVCodecParameters *par = stream->codecpar; int res, i; if (!strcmp(attr, \"config\")) { res = parse_fmtp_config(par, value); if (res < 0) return res; } if (par->codec_id == AV_CODEC_ID_AAC) { /* Looking for a known attribute */ for (i = 0; attr_names[i].str; ++i) { if (!av_strcasecmp(attr, attr_names[i].str)) { if (attr_names[i].type == ATTR_NAME_TYPE_INT) { *(int *)((char *)data+ attr_names[i].offset) = atoi(value); } else if (attr_names[i].type == ATTR_NAME_TYPE_STR) *(char **)((char *)data+ attr_names[i].offset) = av_strdup(value); } } } return 0; }"} {"target": 0, "idx": 17787, "func": "static int ffserver_set_int_param(int *dest, const char *value, int factor, int min, int max, FFServerConfig *config, const char *error_msg, ...) 
{ int tmp; char *tailp; if (!value || !value[0]) goto error; errno = 0; tmp = strtol(value, &tailp, 0); if (tmp < min || tmp > max) goto error; if (factor) { if (FFABS(tmp) > INT_MAX / FFABS(factor)) goto error; tmp *= factor; } if (tailp[0] || errno) goto error; if (dest) *dest = tmp; return 0; error: if (config) { va_list vl; va_start(vl, error_msg); vreport_config_error(config->filename, config->line_num, AV_LOG_ERROR, &config->errors, error_msg, vl); va_end(vl); } return AVERROR(EINVAL); }"} {"target": 0, "idx": 17792, "func": "static int64_t find_tag(AVIOContext *pb, uint32_t tag1) { unsigned int tag; int64_t size; for (;;) { if (url_feof(pb)) return AVERROR_EOF; size = next_tag(pb, &tag); if (tag == tag1) break; wav_seek_tag(pb, size, SEEK_CUR); } return size; }"} {"target": 0, "idx": 17803, "func": "av_cold void ff_float_dsp_init_x86(AVFloatDSPContext *fdsp) { int cpu_flags = av_get_cpu_flags(); #if HAVE_6REGS && HAVE_INLINE_ASM if (INLINE_AMD3DNOWEXT(cpu_flags)) { fdsp->vector_fmul_window = vector_fmul_window_3dnowext; } if (INLINE_SSE(cpu_flags)) { fdsp->vector_fmul_window = vector_fmul_window_sse; } #endif if (EXTERNAL_SSE(cpu_flags)) { fdsp->vector_fmul = ff_vector_fmul_sse; fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_sse; fdsp->vector_fmul_scalar = ff_vector_fmul_scalar_sse; fdsp->vector_fmul_add = ff_vector_fmul_add_sse; fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_sse; fdsp->scalarproduct_float = ff_scalarproduct_float_sse; fdsp->butterflies_float = ff_butterflies_float_sse; } if (EXTERNAL_SSE2(cpu_flags)) { fdsp->vector_dmul_scalar = ff_vector_dmul_scalar_sse2; } if (EXTERNAL_AVX(cpu_flags)) { fdsp->vector_fmul = ff_vector_fmul_avx; fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_avx; fdsp->vector_dmul_scalar = ff_vector_dmul_scalar_avx; fdsp->vector_fmul_add = ff_vector_fmul_add_avx; fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_avx; } }"} {"target": 1, "idx": 17805, "func": "static uint64_t omap2_inth_read(void *opaque, target_phys_addr_t addr, unsigned size) { struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque; int offset = addr; int bank_no, line_no; struct omap_intr_handler_bank_s *bank = NULL; if ((offset & 0xf80) == 0x80) { bank_no = (offset & 0x60) >> 5; if (bank_no < s->nbanks) { offset &= ~0x60; bank = &s->bank[bank_no]; } } switch (offset) { case 0x00: /* INTC_REVISION */ return s->revision; case 0x10: /* INTC_SYSCONFIG */ return (s->autoidle >> 2) & 1; case 0x14: /* INTC_SYSSTATUS */ return 1; /* RESETDONE */ case 0x40: /* INTC_SIR_IRQ */ return s->sir_intr[0]; case 0x44: /* INTC_SIR_FIQ */ return s->sir_intr[1]; case 0x48: /* INTC_CONTROL */ return (!s->mask) << 2; /* GLOBALMASK */ case 0x4c: /* INTC_PROTECTION */ case 0x50: /* INTC_IDLE */ return s->autoidle & 3; /* Per-bank registers */ case 0x80: /* INTC_ITR */ return bank->inputs; case 0x84: /* INTC_MIR */ return bank->mask; case 0x88: /* INTC_MIR_CLEAR */ case 0x8c: /* INTC_MIR_SET */ case 0x90: /* INTC_ISR_SET */ return bank->swi; case 0x94: /* INTC_ISR_CLEAR */ case 0x98: /* INTC_PENDING_IRQ */ return bank->irqs & ~bank->mask & ~bank->fiq; case 0x9c: /* INTC_PENDING_FIQ */ return bank->irqs & ~bank->mask & bank->fiq; /* Per-line registers */ case 0x100 ... 
0x300: /* INTC_ILR */ bank_no = (offset - 0x100) >> 7; if (bank_no > s->nbanks) break; bank = &s->bank[bank_no]; line_no = (offset & 0x7f) >> 2; return (bank->priority[line_no] << 2) | ((bank->fiq >> line_no) & 1); } }"} {"target": 0, "idx": 17807, "func": "static inline void get_limits(MpegEncContext *s, int *range, int *xmin, int *ymin, int *xmax, int *ymax, int f_code) { *range = 8 * (1 << (f_code - 1)); /* XXX: temporary kludge to avoid overflow for msmpeg4 */ if (s->out_format == FMT_H263 && !s->h263_msmpeg4) *range *= 2; if (s->unrestricted_mv) { *xmin = -16; *ymin = -16; if (s->h263_plus) *range *= 2; if(s->avctx->codec->id!=CODEC_ID_MPEG4){ *xmax = s->mb_width*16; *ymax = s->mb_height*16; }else { *xmax = s->width; *ymax = s->height; } } else { *xmin = 0; *ymin = 0; *xmax = s->mb_width*16 - 16; *ymax = s->mb_height*16 - 16; } }"} {"target": 0, "idx": 17808, "func": "static int device_try_init(AVFormatContext *ctx, enum AVPixelFormat pix_fmt, int *width, int *height, uint32_t *desired_format, enum AVCodecID *codec_id) { int ret, i; *desired_format = avpriv_fmt_ff2v4l(pix_fmt, ctx->video_codec_id); if (*desired_format) { ret = device_init(ctx, width, height, *desired_format); if (ret < 0) { *desired_format = 0; if (ret != AVERROR(EINVAL)) return ret; } } if (!*desired_format) { for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) { if (ctx->video_codec_id == AV_CODEC_ID_NONE || avpriv_fmt_conversion_table[i].codec_id == ctx->video_codec_id) { av_log(ctx, AV_LOG_DEBUG, \"Trying to set codec:%s pix_fmt:%s\\n\", avcodec_get_name(avpriv_fmt_conversion_table[i].codec_id), (char *)av_x_if_null(av_get_pix_fmt_name(avpriv_fmt_conversion_table[i].ff_fmt), \"none\")); *desired_format = avpriv_fmt_conversion_table[i].v4l2_fmt; ret = device_init(ctx, width, height, *desired_format); if (ret >= 0) break; else if (ret != AVERROR(EINVAL)) return ret; *desired_format = 0; } } if (*desired_format == 0) { av_log(ctx, AV_LOG_ERROR, \"Cannot find a proper format for \" \"codec '%s' (id %d), pixel format '%s' (id %d)\\n\", avcodec_get_name(ctx->video_codec_id), ctx->video_codec_id, (char *)av_x_if_null(av_get_pix_fmt_name(pix_fmt), \"none\"), pix_fmt); ret = AVERROR(EINVAL); } } *codec_id = avpriv_fmt_v4l2codec(*desired_format); av_assert0(*codec_id != AV_CODEC_ID_NONE); return ret; }"} {"target": 1, "idx": 17833, "func": "static void lsi_soft_reset(LSIState *s) { lsi_request *p; DPRINTF(\"Reset\\n\"); s->carry = 0; s->msg_action = 0; s->msg_len = 0; s->waiting = 0; s->dsa = 0; s->dnad = 0; s->dbc = 0; s->temp = 0; memset(s->scratch, 0, sizeof(s->scratch)); s->istat0 = 0; s->istat1 = 0; s->dcmd = 0x40; s->dstat = LSI_DSTAT_DFE; s->dien = 0; s->sist0 = 0; s->sist1 = 0; s->sien0 = 0; s->sien1 = 0; s->mbox0 = 0; s->mbox1 = 0; s->dfifo = 0; s->ctest2 = LSI_CTEST2_DACK; s->ctest3 = 0; s->ctest4 = 0; s->ctest5 = 0; s->ccntl0 = 0; s->ccntl1 = 0; s->dsp = 0; s->dsps = 0; s->dmode = 0; s->dcntl = 0; s->scntl0 = 0xc0; s->scntl1 = 0; s->scntl2 = 0; s->scntl3 = 0; s->sstat0 = 0; s->sstat1 = 0; s->scid = 7; s->sxfer = 0; s->socl = 0; s->sdid = 0; s->ssid = 0; s->stest1 = 0; s->stest2 = 0; s->stest3 = 0; s->sidl = 0; s->stime0 = 0; s->respid0 = 0x80; s->respid1 = 0; s->mmrs = 0; s->mmws = 0; s->sfs = 0; s->drs = 0; s->sbms = 0; s->dbms = 0; s->dnad64 = 0; s->pmjad1 = 0; s->pmjad2 = 0; s->rbc = 0; s->ua = 0; s->ia = 0; s->sbc = 0; s->csbc = 0; s->sbr = 0; while (!QTAILQ_EMPTY(&s->queue)) { p = QTAILQ_FIRST(&s->queue); QTAILQ_REMOVE(&s->queue, p, next); g_free(p); } if (s->current) { 
g_free(s->current); s->current = NULL; } }"} {"target": 1, "idx": 17877, "func": "static void mclms_predict(WmallDecodeCtx *s, int icoef, int *pred) { int ich, i; int order = s->mclms_order; int num_channels = s->num_channels; for (ich = 0; ich < num_channels; ich++) { pred[ich] = 0; if (!s->is_channel_coded[ich]) continue; for (i = 0; i < order * num_channels; i++) pred[ich] += s->mclms_prevvalues[i + s->mclms_recent] * s->mclms_coeffs[i + order * num_channels * ich]; for (i = 0; i < ich; i++) pred[ich] += s->channel_residues[i][icoef] * s->mclms_coeffs_cur[i + num_channels * ich]; pred[ich] += 1 << s->mclms_scaling - 1; pred[ich] >>= s->mclms_scaling; s->channel_residues[ich][icoef] += pred[ich]; } }"} {"target": 1, "idx": 17883, "func": "static void vnc_dpy_copy(DisplayState *ds, int src_x, int src_y, int dst_x, int dst_y, int w, int h) { VncDisplay *vd = ds->opaque; VncState *vs = vd->clients; while (vs != NULL) { if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) vnc_copy(vs, src_x, src_y, dst_x, dst_y, w, h); else /* TODO */ vnc_update(vs, dst_x, dst_y, w, h); vs = vs->next; } }"} {"target": 1, "idx": 17885, "func": "static int emulated_exitfn(CCIDCardState *base) { EmulatedState *card = DO_UPCAST(EmulatedState, base, base); VEvent *vevent = vevent_new(VEVENT_LAST, NULL, NULL); vevent_queue_vevent(vevent); /* stop vevent thread */ qemu_mutex_lock(&card->apdu_thread_quit_mutex); card->quit_apdu_thread = 1; /* stop handle_apdu thread */ qemu_cond_signal(&card->handle_apdu_cond); qemu_cond_wait(&card->apdu_thread_quit_cond, &card->apdu_thread_quit_mutex); /* handle_apdu thread stopped, can destroy all of it's mutexes */ qemu_cond_destroy(&card->handle_apdu_cond); qemu_cond_destroy(&card->apdu_thread_quit_cond); qemu_mutex_destroy(&card->apdu_thread_quit_mutex); qemu_mutex_destroy(&card->handle_apdu_mutex); qemu_mutex_destroy(&card->vreader_mutex); qemu_mutex_destroy(&card->event_list_mutex); return 0; }"} {"target": 1, "idx": 17893, "func": "static inline int get_chroma_qp(H264Context *h, int qscale){ return h->pps.chroma_qp_table[qscale & 0xff]; }"} {"target": 1, "idx": 17904, "func": "int AAC_RENAME(ff_ps_read_data)(AVCodecContext *avctx, GetBitContext *gb_host, PSContext *ps, int bits_left) { int e; int bit_count_start = get_bits_count(gb_host); int header; int bits_consumed; GetBitContext gbc = *gb_host, *gb = &gbc; header = get_bits1(gb); if (header) { //enable_ps_header ps->enable_iid = get_bits1(gb); if (ps->enable_iid) { int iid_mode = get_bits(gb, 3); if (iid_mode > 5) { av_log(avctx, AV_LOG_ERROR, \"iid_mode %d is reserved.\\n\", iid_mode); goto err; } ps->nr_iid_par = nr_iidicc_par_tab[iid_mode]; ps->iid_quant = iid_mode > 2; ps->nr_ipdopd_par = nr_iidopd_par_tab[iid_mode]; } ps->enable_icc = get_bits1(gb); if (ps->enable_icc) { ps->icc_mode = get_bits(gb, 3); if (ps->icc_mode > 5) { av_log(avctx, AV_LOG_ERROR, \"icc_mode %d is reserved.\\n\", ps->icc_mode); goto err; } ps->nr_icc_par = nr_iidicc_par_tab[ps->icc_mode]; } ps->enable_ext = get_bits1(gb); } ps->frame_class = get_bits1(gb); ps->num_env_old = ps->num_env; ps->num_env = num_env_tab[ps->frame_class][get_bits(gb, 2)]; ps->border_position[0] = -1; if (ps->frame_class) { for (e = 1; e <= ps->num_env; e++) ps->border_position[e] = get_bits(gb, 5); } else for (e = 1; e <= ps->num_env; e++) ps->border_position[e] = (e * numQMFSlots >> ff_log2_tab[ps->num_env]) - 1; if (ps->enable_iid) { for (e = 0; e < ps->num_env; e++) { int dt = get_bits1(gb); if (read_iid_data(avctx, gb, ps, ps->iid_par, huff_iid[2*dt+ps->iid_quant], e, 
dt)) goto err; } } else memset(ps->iid_par, 0, sizeof(ps->iid_par)); if (ps->enable_icc) for (e = 0; e < ps->num_env; e++) { int dt = get_bits1(gb); if (read_icc_data(avctx, gb, ps, ps->icc_par, dt ? huff_icc_dt : huff_icc_df, e, dt)) goto err; } else memset(ps->icc_par, 0, sizeof(ps->icc_par)); if (ps->enable_ext) { int cnt = get_bits(gb, 4); if (cnt == 15) { cnt += get_bits(gb, 8); } cnt *= 8; while (cnt > 7) { int ps_extension_id = get_bits(gb, 2); cnt -= 2 + ps_read_extension_data(gb, ps, ps_extension_id); } if (cnt < 0) { av_log(avctx, AV_LOG_ERROR, \"ps extension overflow %d\\n\", cnt); goto err; } skip_bits(gb, cnt); } ps->enable_ipdopd &= !PS_BASELINE; //Fix up envelopes if (!ps->num_env || ps->border_position[ps->num_env] < numQMFSlots - 1) { //Create a fake envelope int source = ps->num_env ? ps->num_env - 1 : ps->num_env_old - 1; int b; if (source >= 0 && source != ps->num_env) { if (ps->enable_iid) { memcpy(ps->iid_par+ps->num_env, ps->iid_par+source, sizeof(ps->iid_par[0])); } if (ps->enable_icc) { memcpy(ps->icc_par+ps->num_env, ps->icc_par+source, sizeof(ps->icc_par[0])); } if (ps->enable_ipdopd) { memcpy(ps->ipd_par+ps->num_env, ps->ipd_par+source, sizeof(ps->ipd_par[0])); memcpy(ps->opd_par+ps->num_env, ps->opd_par+source, sizeof(ps->opd_par[0])); } } if (ps->enable_iid){ for (b = 0; b < ps->nr_iid_par; b++) { if (FFABS(ps->iid_par[ps->num_env][b]) > 7 + 8 * ps->iid_quant) { av_log(avctx, AV_LOG_ERROR, \"iid_par invalid\\n\"); goto err; } } } if (ps->enable_icc){ for (b = 0; b < ps->nr_iid_par; b++) { if (ps->icc_par[ps->num_env][b] > 7U) { av_log(avctx, AV_LOG_ERROR, \"icc_par invalid\\n\"); goto err; } } } ps->num_env++; ps->border_position[ps->num_env] = numQMFSlots - 1; } ps->is34bands_old = ps->is34bands; if (!PS_BASELINE && (ps->enable_iid || ps->enable_icc)) ps->is34bands = (ps->enable_iid && ps->nr_iid_par == 34) || (ps->enable_icc && ps->nr_icc_par == 34); //Baseline if (!ps->enable_ipdopd) { memset(ps->ipd_par, 0, sizeof(ps->ipd_par)); memset(ps->opd_par, 0, sizeof(ps->opd_par)); } if (header) ps->start = 1; bits_consumed = get_bits_count(gb) - bit_count_start; if (bits_consumed <= bits_left) { skip_bits_long(gb_host, bits_consumed); return bits_consumed; } av_log(avctx, AV_LOG_ERROR, \"Expected to read %d PS bits actually read %d.\\n\", bits_left, bits_consumed); err: ps->start = 0; skip_bits_long(gb_host, bits_left); memset(ps->iid_par, 0, sizeof(ps->iid_par)); memset(ps->icc_par, 0, sizeof(ps->icc_par)); memset(ps->ipd_par, 0, sizeof(ps->ipd_par)); memset(ps->opd_par, 0, sizeof(ps->opd_par)); return bits_left; }"} {"target": 1, "idx": 17908, "func": "static int read_gab2_sub(AVFormatContext *s, AVStream *st, AVPacket *pkt) { if (pkt->size >= 7 && pkt->size < INT_MAX - AVPROBE_PADDING_SIZE && !strcmp(pkt->data, \"GAB2\") && AV_RL16(pkt->data + 5) == 2) { uint8_t desc[256]; int score = AVPROBE_SCORE_EXTENSION, ret; AVIStream *ast = st->priv_data; AVInputFormat *sub_demuxer; AVRational time_base; int size; AVIOContext *pb = avio_alloc_context(pkt->data + 7, pkt->size - 7, 0, NULL, NULL, NULL, NULL); AVProbeData pd; unsigned int desc_len = avio_rl32(pb); if (desc_len > pb->buf_end - pb->buf_ptr) goto error; ret = avio_get_str16le(pb, desc_len, desc, sizeof(desc)); avio_skip(pb, desc_len - ret); if (*desc) av_dict_set(&st->metadata, \"title\", desc, 0); avio_rl16(pb); /* flags? 
*/ avio_rl32(pb); /* data size */ size = pb->buf_end - pb->buf_ptr; pd = (AVProbeData) { .buf = av_mallocz(size + AVPROBE_PADDING_SIZE), .buf_size = size }; if (!pd.buf) goto error; memcpy(pd.buf, pb->buf_ptr, size); sub_demuxer = av_probe_input_format2(&pd, 1, &score); av_freep(&pd.buf); if (!sub_demuxer) goto error; if (!(ast->sub_ctx = avformat_alloc_context())) goto error; ast->sub_ctx->pb = pb; av_assert0(!ast->sub_ctx->codec_whitelist && !ast->sub_ctx->format_whitelist); ast->sub_ctx-> codec_whitelist = av_strdup(s->codec_whitelist); ast->sub_ctx->format_whitelist = av_strdup(s->format_whitelist); if (!avformat_open_input(&ast->sub_ctx, \"\", sub_demuxer, NULL)) { ff_read_packet(ast->sub_ctx, &ast->sub_pkt); *st->codec = *ast->sub_ctx->streams[0]->codec; ast->sub_ctx->streams[0]->codec->extradata = NULL; time_base = ast->sub_ctx->streams[0]->time_base; avpriv_set_pts_info(st, 64, time_base.num, time_base.den); } ast->sub_buffer = pkt->data; memset(pkt, 0, sizeof(*pkt)); return 1; error: av_freep(&pb); } return 0; }"} {"target": 1, "idx": 17909, "func": "void qvirtio_pci_device_enable(QVirtioPCIDevice *d) { qpci_device_enable(d->pdev); d->addr = qpci_iomap(d->pdev, 0, NULL); g_assert(d->addr != NULL); }"} {"target": 0, "idx": 17913, "func": "static int movie_request_frame(AVFilterLink *outlink) { AVFilterBufferRef *outpicref; MovieContext *movie = outlink->src->priv; int ret; if (movie->is_done) return AVERROR_EOF; if ((ret = movie_get_frame(outlink)) < 0) return ret; outpicref = avfilter_ref_buffer(movie->picref, ~0); ff_start_frame(outlink, outpicref); ff_draw_slice(outlink, 0, outlink->h, 1); ff_end_frame(outlink); avfilter_unref_buffer(movie->picref); movie->picref = NULL; return 0; }"} {"target": 1, "idx": 17929, "func": "static void process_client(AVIOContext *client, const char *in_uri) { AVIOContext *input = NULL; uint8_t buf[1024]; int ret, n, reply_code; uint8_t *resource = NULL; while ((ret = avio_handshake(client)) > 0) { av_opt_get(client, \"resource\", AV_OPT_SEARCH_CHILDREN, &resource); // check for strlen(resource) is necessary, because av_opt_get() // may return empty string. 
if (resource && strlen(resource)) break; } if (ret < 0) goto end; av_log(client, AV_LOG_TRACE, \"resource=%p\\n\", resource); if (resource && resource[0] == '/' && !strcmp((resource + 1), in_uri)) { reply_code = 200; } else { reply_code = AVERROR_HTTP_NOT_FOUND; } if ((ret = av_opt_set_int(client, \"reply_code\", reply_code, AV_OPT_SEARCH_CHILDREN)) < 0) { av_log(client, AV_LOG_ERROR, \"Failed to set reply_code: %s.\\n\", av_err2str(ret)); goto end; } av_log(client, AV_LOG_TRACE, \"Set reply code to %d\\n\", reply_code); while ((ret = avio_handshake(client)) > 0); if (ret < 0) goto end; fprintf(stderr, \"Handshake performed.\\n\"); if (reply_code != 200) goto end; fprintf(stderr, \"Opening input file.\\n\"); if ((ret = avio_open2(&input, in_uri, AVIO_FLAG_READ, NULL, NULL)) < 0) { av_log(input, AV_LOG_ERROR, \"Failed to open input: %s: %s.\\n\", in_uri, av_err2str(ret)); goto end; } for(;;) { n = avio_read(input, buf, sizeof(buf)); if (n < 0) { if (n == AVERROR_EOF) break; av_log(input, AV_LOG_ERROR, \"Error reading from input: %s.\\n\", av_err2str(n)); break; } avio_write(client, buf, n); avio_flush(client); } end: fprintf(stderr, \"Flushing client\\n\"); avio_flush(client); fprintf(stderr, \"Closing client\\n\"); avio_close(client); fprintf(stderr, \"Closing input\\n\"); avio_close(input); }"} {"target": 1, "idx": 17932, "func": "static inline void pred_direct_motion(H264Context * const h, int *mb_type){ MpegEncContext * const s = &h->s; const int mb_xy = s->mb_x + s->mb_y*s->mb_stride; const int b8_xy = 2*s->mb_x + 2*s->mb_y*h->b8_stride; const int b4_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride; const int mb_type_col = h->ref_list[1][0].mb_type[mb_xy]; const int16_t (*l1mv0)[2] = (const int16_t (*)[2]) &h->ref_list[1][0].motion_val[0][b4_xy]; const int16_t (*l1mv1)[2] = (const int16_t (*)[2]) &h->ref_list[1][0].motion_val[1][b4_xy]; const int8_t *l1ref0 = &h->ref_list[1][0].ref_index[0][b8_xy]; const int8_t *l1ref1 = &h->ref_list[1][0].ref_index[1][b8_xy]; const int is_b8x8 = IS_8X8(*mb_type); int sub_mb_type; int i8, i4; if(IS_8X8(mb_type_col) && !h->sps.direct_8x8_inference_flag){ /* FIXME save sub mb types from previous frames (or derive from MVs) * so we know exactly what block size to use */ sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */ *mb_type = MB_TYPE_8x8|MB_TYPE_L0L1; }else if(!is_b8x8 && (IS_16X16(mb_type_col) || IS_INTRA(mb_type_col))){ sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */ *mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */ }else{ sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */ *mb_type = MB_TYPE_8x8|MB_TYPE_L0L1; } if(!is_b8x8) *mb_type |= MB_TYPE_DIRECT2; tprintf(\"mb_type = %08x, sub_mb_type = %08x, is_b8x8 = %d, mb_type_col = %08x\\n\", *mb_type, sub_mb_type, is_b8x8, mb_type_col); if(h->direct_spatial_mv_pred){ int ref[2]; int mv[2][2]; int list; /* ref = min(neighbors) */ for(list=0; list<2; list++){ int refa = h->ref_cache[list][scan8[0] - 1]; int refb = h->ref_cache[list][scan8[0] - 8]; int refc = h->ref_cache[list][scan8[0] - 8 + 4]; if(refc == -2) refc = h->ref_cache[list][scan8[0] - 8 - 1]; ref[list] = refa; if(ref[list] < 0 || (refb < ref[list] && refb >= 0)) ref[list] = refb; if(ref[list] < 0 || (refc < ref[list] && refc >= 0)) ref[list] = refc; if(ref[list] < 0) ref[list] = -1; } if(ref[0] < 0 && ref[1] < 0){ ref[0] = ref[1] = 0; mv[0][0] = mv[0][1] = mv[1][0] = mv[1][1] = 0; }else{ for(list=0; list<2; list++){ 
if(ref[list] >= 0) pred_motion(h, 0, 4, list, ref[list], &mv[list][0], &mv[list][1]); else mv[list][0] = mv[list][1] = 0; } } if(ref[1] < 0){ *mb_type &= ~MB_TYPE_P0L1; sub_mb_type &= ~MB_TYPE_P0L1; }else if(ref[0] < 0){ *mb_type &= ~MB_TYPE_P0L0; sub_mb_type &= ~MB_TYPE_P0L0; } if(IS_16X16(*mb_type)){ fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref[0], 1); fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, ref[1], 1); if(!IS_INTRA(mb_type_col) && ( (l1ref0[0] == 0 && ABS(l1mv0[0][0]) <= 1 && ABS(l1mv0[0][1]) <= 1) || (l1ref0[0] < 0 && l1ref1[0] == 0 && ABS(l1mv1[0][0]) <= 1 && ABS(l1mv1[0][1]) <= 1 && (h->x264_build>33 || !h->x264_build)))){ if(ref[0] > 0) fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4); else fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4); if(ref[1] > 0) fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv[1][0],mv[1][1]), 4); else fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4); }else{ fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4); fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv[1][0],mv[1][1]), 4); } }else{ for(i8=0; i8<4; i8++){ const int x8 = i8&1; const int y8 = i8>>1; if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8])) continue; h->sub_mb_type[i8] = sub_mb_type; fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mv[0][0],mv[0][1]), 4); fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mv[1][0],mv[1][1]), 4); fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref[0], 1); fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, ref[1], 1); /* col_zero_flag */ if(!IS_INTRA(mb_type_col) && ( l1ref0[x8 + y8*h->b8_stride] == 0 || (l1ref0[x8 + y8*h->b8_stride] < 0 && l1ref1[x8 + y8*h->b8_stride] == 0 && (h->x264_build>33 || !h->x264_build)))){ const int16_t (*l1mv)[2]= l1ref0[x8 + y8*h->b8_stride] == 0 ? l1mv0 : l1mv1; for(i4=0; i4<4; i4++){ const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride]; if(ABS(mv_col[0]) <= 1 && ABS(mv_col[1]) <= 1){ if(ref[0] == 0) *(uint32_t*)h->mv_cache[0][scan8[i8*4+i4]] = 0; if(ref[1] == 0) *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] = 0; } } } } } }else{ /* direct temporal mv pred */ if(IS_16X16(*mb_type)){ fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1); if(IS_INTRA(mb_type_col)){ fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1); fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, 0, 4); fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, 0, 4); }else{ const int ref0 = l1ref0[0] >= 0 ? h->map_col_to_list0[0][l1ref0[0]] : h->map_col_to_list0[1][l1ref1[0]]; const int dist_scale_factor = h->dist_scale_factor[ref0]; const int16_t *mv_col = l1ref0[0] >= 0 ? 
l1mv0[0] : l1mv1[0]; int mv_l0[2]; mv_l0[0] = (dist_scale_factor * mv_col[0] + 128) >> 8; mv_l0[1] = (dist_scale_factor * mv_col[1] + 128) >> 8; fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref0, 1); fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0],mv_l0[1]), 4); fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]), 4); } }else{ for(i8=0; i8<4; i8++){ const int x8 = i8&1; const int y8 = i8>>1; int ref0, dist_scale_factor; const int16_t (*l1mv)[2]= l1mv0; if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8])) continue; h->sub_mb_type[i8] = sub_mb_type; if(IS_INTRA(mb_type_col)){ fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1); fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1); fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4); fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4); continue; } ref0 = l1ref0[x8 + y8*h->b8_stride]; if(ref0 >= 0) ref0 = h->map_col_to_list0[0][ref0]; else{ ref0 = h->map_col_to_list0[1][l1ref1[x8 + y8*h->b8_stride]]; l1mv= l1mv1; } dist_scale_factor = h->dist_scale_factor[ref0]; fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1); fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1); for(i4=0; i4<4; i4++){ const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride]; int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]]; mv_l0[0] = (dist_scale_factor * mv_col[0] + 128) >> 8; mv_l0[1] = (dist_scale_factor * mv_col[1] + 128) >> 8; *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] = pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]); } } } } }"} {"target": 0, "idx": 17953, "func": "static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVQEDState *s = bs->opaque; QEDHeader le_header; int64_t file_size; int ret; s->bs = bs; QSIMPLEQ_INIT(&s->allocating_write_reqs); ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header)); if (ret < 0) { return ret; } qed_header_le_to_cpu(&le_header, &s->header); if (s->header.magic != QED_MAGIC) { error_setg(errp, \"Image not in QED format\"); return -EINVAL; } if (s->header.features & ~QED_FEATURE_MASK) { /* image uses unsupported feature bits */ char buf[64]; snprintf(buf, sizeof(buf), \"%\" PRIx64, s->header.features & ~QED_FEATURE_MASK); error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE, bdrv_get_device_name(bs), \"QED\", buf); return -ENOTSUP; } if (!qed_is_cluster_size_valid(s->header.cluster_size)) { return -EINVAL; } /* Round down file size to the last cluster */ file_size = bdrv_getlength(bs->file); if (file_size < 0) { return file_size; } s->file_size = qed_start_of_cluster(s, file_size); if (!qed_is_table_size_valid(s->header.table_size)) { return -EINVAL; } if (!qed_is_image_size_valid(s->header.image_size, s->header.cluster_size, s->header.table_size)) { return -EINVAL; } if (!qed_check_table_offset(s, s->header.l1_table_offset)) { return -EINVAL; } s->table_nelems = (s->header.cluster_size * s->header.table_size) / sizeof(uint64_t); s->l2_shift = ffs(s->header.cluster_size) - 1; s->l2_mask = s->table_nelems - 1; s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1; /* Header size calculation must not overflow uint32_t */ if (s->header.header_size > UINT32_MAX / s->header.cluster_size) { return -EINVAL; } if ((s->header.features & QED_F_BACKING_FILE)) { if ((uint64_t)s->header.backing_filename_offset + s->header.backing_filename_size > s->header.cluster_size * s->header.header_size) { return -EINVAL; } ret = qed_read_string(bs->file, 
s->header.backing_filename_offset, s->header.backing_filename_size, bs->backing_file, sizeof(bs->backing_file)); if (ret < 0) { return ret; } if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) { pstrcpy(bs->backing_format, sizeof(bs->backing_format), \"raw\"); } } /* Reset unknown autoclear feature bits. This is a backwards * compatibility mechanism that allows images to be opened by older * programs, which \"knock out\" unknown feature bits. When an image is * opened by a newer program again it can detect that the autoclear * feature is no longer valid. */ if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 && !bdrv_is_read_only(bs->file) && !(flags & BDRV_O_INCOMING)) { s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK; ret = qed_write_header_sync(s); if (ret) { return ret; } /* From here on only known autoclear feature bits are valid */ bdrv_flush(bs->file); } s->l1_table = qed_alloc_table(s); qed_init_l2_cache(&s->l2_cache); ret = qed_read_l1_table_sync(s); if (ret) { goto out; } /* If image was not closed cleanly, check consistency */ if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) { /* Read-only images cannot be fixed. There is no risk of corruption * since write operations are not possible. Therefore, allow * potentially inconsistent images to be opened read-only. This can * aid data recovery from an otherwise inconsistent image. */ if (!bdrv_is_read_only(bs->file) && !(flags & BDRV_O_INCOMING)) { BdrvCheckResult result = {0}; ret = qed_check(s, &result, true); if (ret) { goto out; } } } bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs)); out: if (ret) { qed_free_l2_cache(&s->l2_cache); qemu_vfree(s->l1_table); } return ret; }"} {"target": 0, "idx": 17954, "func": "void ff_put_h264_qpel8_mc31_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_hv_qrt_8w_msa(src - 2, src - (stride * 2) + sizeof(uint8_t), stride, dst, stride, 8); }"} {"target": 0, "idx": 17972, "func": "static CharDriverState *qemu_chr_open_pipe(const char *id, ChardevBackend *backend, ChardevReturn *ret, Error **errp) { ChardevHostdev *opts = backend->u.pipe; const char *filename = opts->device; CharDriverState *chr; WinCharState *s; chr = qemu_chr_alloc(); s = g_new0(WinCharState, 1); chr->opaque = s; chr->chr_write = win_chr_write; chr->chr_close = win_chr_close; if (win_chr_pipe_init(chr, filename, errp) < 0) { g_free(s); g_free(chr); return NULL; } return chr; }"} {"target": 0, "idx": 17981, "func": "uint32_t HELPER(clz)(uint32_t x) { int count; for (count = 32; x; count--) x >>= 1; return count; }"} {"target": 0, "idx": 18015, "func": "static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info) { CPUPPCState *env = &cpu->env; CPUState *cs = CPU(cpu); memset(info, 0, sizeof(*info)); /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so * need to \"guess\" what the supported page sizes are. * * For that to work we make a few assumptions: * * - If KVM_CAP_PPC_GET_PVINFO is supported we are running \"PR\" * KVM which only supports 4K and 16M pages, but supports them * regardless of the backing store characteritics. We also don't * support 1T segments. * * This is safe as if HV KVM ever supports that capability or PR * KVM grows supports for more page/segment sizes, those versions * will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we * will not hit this fallback * * - Else we are running HV KVM. This means we only support page * sizes that fit in the backing store. 
Additionally we only * advertize 64K pages if the processor is ARCH 2.06 and we assume * P7 encodings for the SLB and hash table. Here too, we assume * support for any newer processor will mean a kernel that * implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit * this fallback. */ if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO)) { /* No flags */ info->flags = 0; info->slb_size = 64; /* Standard 4k base page size segment */ info->sps[0].page_shift = 12; info->sps[0].slb_enc = 0; info->sps[0].enc[0].page_shift = 12; info->sps[0].enc[0].pte_enc = 0; /* Standard 16M large page size segment */ info->sps[1].page_shift = 24; info->sps[1].slb_enc = SLB_VSID_L; info->sps[1].enc[0].page_shift = 24; info->sps[1].enc[0].pte_enc = 0; } else { int i = 0; /* HV KVM has backing store size restrictions */ info->flags = KVM_PPC_PAGE_SIZES_REAL; if (env->mmu_model & POWERPC_MMU_1TSEG) { info->flags |= KVM_PPC_1T_SEGMENTS; } if (env->mmu_model == POWERPC_MMU_2_06 || env->mmu_model == POWERPC_MMU_2_07) { info->slb_size = 32; } else { info->slb_size = 64; } /* Standard 4k base page size segment */ info->sps[i].page_shift = 12; info->sps[i].slb_enc = 0; info->sps[i].enc[0].page_shift = 12; info->sps[i].enc[0].pte_enc = 0; i++; /* 64K on MMU 2.06 and later */ if (env->mmu_model == POWERPC_MMU_2_06 || env->mmu_model == POWERPC_MMU_2_07) { info->sps[i].page_shift = 16; info->sps[i].slb_enc = 0x110; info->sps[i].enc[0].page_shift = 16; info->sps[i].enc[0].pte_enc = 1; i++; } /* Standard 16M large page size segment */ info->sps[i].page_shift = 24; info->sps[i].slb_enc = SLB_VSID_L; info->sps[i].enc[0].page_shift = 24; info->sps[i].enc[0].pte_enc = 0; } }"} {"target": 0, "idx": 18020, "func": "void helper_iret_protected(int shift) { helper_ret_protected(shift, 1, 0); }"} {"target": 0, "idx": 18038, "func": "void qemu_bh_schedule(QEMUBH *bh) { AioContext *ctx; ctx = bh->ctx; bh->idle = 0; /* The memory barrier implicit in atomic_xchg makes sure that: * 1. idle & any writes needed by the callback are done before the * locations are read in the aio_bh_poll. * 2. ctx is loaded before scheduled is set and the callback has a chance * to execute. 
*/ if (atomic_xchg(&bh->scheduled, 1) == 0) { aio_notify(ctx); } }"} {"target": 0, "idx": 18044, "func": "static SpiceTimer *timer_add(SpiceTimerFunc func, void *opaque) { SpiceTimer *timer; timer = qemu_mallocz(sizeof(*timer)); timer->timer = qemu_new_timer(rt_clock, func, opaque); QTAILQ_INSERT_TAIL(&timers, timer, next); return timer; }"} {"target": 0, "idx": 18067, "func": "static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, unsigned int queue_no, unsigned int vector, MSIMessage msg) { VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no); EventNotifier *n = virtio_queue_get_guest_notifier(vq); VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; int ret; if (irqfd->users == 0) { ret = kvm_irqchip_add_msi_route(kvm_state, msg); if (ret < 0) { return ret; } irqfd->virq = ret; } irqfd->users++; ret = kvm_irqchip_add_irq_notifier(kvm_state, n, irqfd->virq); if (ret < 0) { if (--irqfd->users == 0) { kvm_irqchip_release_virq(kvm_state, irqfd->virq); } return ret; } virtio_queue_set_guest_notifier_fd_handler(vq, true, true); return 0; }"} {"target": 1, "idx": 18077, "func": "static int output_packet(InputStream *ist, OutputStream *ost_table, int nb_ostreams, const AVPacket *pkt) { int ret = 0, i; int got_output; int64_t pkt_pts = AV_NOPTS_VALUE; AVPacket avpkt; if (ist->next_dts == AV_NOPTS_VALUE) ist->next_dts = ist->dts; if (ist->next_pts == AV_NOPTS_VALUE) ist->next_pts = ist->pts; if (pkt == NULL) { /* EOF handling */ av_init_packet(&avpkt); avpkt.data = NULL; avpkt.size = 0; goto handle_eof; } else { avpkt = *pkt; } if (pkt->dts != AV_NOPTS_VALUE) { ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed) ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); } if(pkt->pts != AV_NOPTS_VALUE) pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q); // while we have more to decode or while the decoder did output something on EOF while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) { int duration; handle_eof: ist->pts = ist->next_pts; ist->dts = ist->next_dts; if (avpkt.size && avpkt.size != pkt->size) { av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING, \"Multiple frames in a packet from stream %d\\n\", pkt->stream_index); ist->showed_multi_packet_warning = 1; } switch (ist->st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ret = transcode_audio (ist, &avpkt, &got_output); break; case AVMEDIA_TYPE_VIDEO: ret = transcode_video (ist, &avpkt, &got_output, &pkt_pts); if (avpkt.duration) { duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q); } else if(ist->st->codec->time_base.num != 0) { int ticks= ist->st->parser ? 
ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame; duration = ((int64_t)AV_TIME_BASE * ist->st->codec->time_base.num * ticks) / ist->st->codec->time_base.den; } else duration = 0; if(ist->dts != AV_NOPTS_VALUE && duration) { ist->next_dts += duration; }else ist->next_dts = AV_NOPTS_VALUE; if (got_output) ist->next_pts += duration; //FIXME the duration is not correct in some cases break; case AVMEDIA_TYPE_SUBTITLE: ret = transcode_subtitles(ist, &avpkt, &got_output); break; default: return -1; } if (ret < 0) return ret; avpkt.dts= avpkt.pts= AV_NOPTS_VALUE; // touch data and size only if not EOF if (pkt) { if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO) ret = avpkt.size; avpkt.data += ret; avpkt.size -= ret; } if (!got_output) { continue; } } /* handle stream copy */ if (!ist->decoding_needed) { rate_emu_sleep(ist); ist->dts = ist->next_dts; switch (ist->st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) / ist->st->codec->sample_rate; break; case AVMEDIA_TYPE_VIDEO: if (pkt->duration) { ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); } else if(ist->st->codec->time_base.num != 0) { int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame; ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->time_base.num * ticks) / ist->st->codec->time_base.den; } break; } ist->pts = ist->dts; ist->next_pts = ist->next_dts; } for (i = 0; pkt && i < nb_ostreams; i++) { OutputStream *ost = &ost_table[i]; if (!check_output_constraints(ist, ost) || ost->encoding_needed) continue; do_streamcopy(ist, ost, pkt); } return 0; }"} {"target": 0, "idx": 18095, "func": "static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce, const float lambda) { int q, w, w2, g, start = 0; int i, j; int idx; TrellisPath paths[TRELLIS_STAGES][TRELLIS_STATES]; int bandaddr[TRELLIS_STAGES]; int minq; float mincost; float q0f = FLT_MAX, q1f = 0.0f, qnrgf = 0.0f; int q0, q1, qcnt = 0; for (i = 0; i < 1024; i++) { float t = fabsf(sce->coeffs[i]); if (t > 0.0f) { q0f = FFMIN(q0f, t); q1f = FFMAX(q1f, t); qnrgf += t*t; qcnt++; } } if (!qcnt) { memset(sce->sf_idx, 0, sizeof(sce->sf_idx)); memset(sce->zeroes, 1, sizeof(sce->zeroes)); return; } //minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped q0 = av_clip_uint8(log2(q0f)*4 - 69 + SCALE_ONE_POS - SCALE_DIV_512); //maximum scalefactor index is when maximum coefficient after quantizing is still not zero q1 = av_clip_uint8(log2(q1f)*4 + 6 + SCALE_ONE_POS - SCALE_DIV_512); //av_log(NULL, AV_LOG_ERROR, \"q0 %d, q1 %d\\n\", q0, q1); if (q1 - q0 > 60) { int q0low = q0; int q1high = q1; //minimum scalefactor index is when maximum nonzero coefficient after quantizing is not clipped int qnrg = av_clip_uint8(log2(sqrt(qnrgf/qcnt))*4 - 31 + SCALE_ONE_POS - SCALE_DIV_512); q1 = qnrg + 30; q0 = qnrg - 30; //av_log(NULL, AV_LOG_ERROR, \"q0 %d, q1 %d\\n\", q0, q1); if (q0 < q0low) { q1 += q0low - q0; q0 = q0low; } else if (q1 > q1high) { q0 -= q1 - q1high; q1 = q1high; } } //av_log(NULL, AV_LOG_ERROR, \"q0 %d, q1 %d\\n\", q0, q1); for (i = 0; i < TRELLIS_STATES; i++) { paths[0][i].cost = 0.0f; paths[0][i].prev = -1; } for (j = 1; j < TRELLIS_STAGES; j++) { for (i = 0; i < TRELLIS_STATES; i++) { paths[j][i].cost = INFINITY; paths[j][i].prev = -2; } } idx = 1; abs_pow34_v(s->scoefs, sce->coeffs, 1024); for (w = 0; w < sce->ics.num_windows; w += 
sce->ics.group_len[w]) { start = w*128; for (g = 0; g < sce->ics.num_swb; g++) { const float *coefs = sce->coeffs + start; float qmin, qmax; int nz = 0; bandaddr[idx] = w * 16 + g; qmin = INT_MAX; qmax = 0.0f; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g]; if (band->energy <= band->threshold || band->threshold == 0.0f) { sce->zeroes[(w+w2)*16+g] = 1; continue; } sce->zeroes[(w+w2)*16+g] = 0; nz = 1; for (i = 0; i < sce->ics.swb_sizes[g]; i++) { float t = fabsf(coefs[w2*128+i]); if (t > 0.0f) qmin = FFMIN(qmin, t); qmax = FFMAX(qmax, t); } } if (nz) { int minscale, maxscale; float minrd = INFINITY; //minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped minscale = av_clip_uint8(log2(qmin)*4 - 69 + SCALE_ONE_POS - SCALE_DIV_512); //maximum scalefactor index is when maximum coefficient after quantizing is still not zero maxscale = av_clip_uint8(log2(qmax)*4 + 6 + SCALE_ONE_POS - SCALE_DIV_512); minscale = av_clip(minscale - q0, 0, TRELLIS_STATES - 1); maxscale = av_clip(maxscale - q0, 0, TRELLIS_STATES); for (q = minscale; q < maxscale; q++) { float dist = 0; int cb = find_min_book(sce->sf_idx[w*16+g], sce->ics.group_len[w], sce->ics.swb_sizes[g], s->scoefs+start); for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g]; dist += quantize_band_cost(s, coefs + w2*128, s->scoefs + start + w2*128, sce->ics.swb_sizes[g], q + q0, cb, lambda / band->threshold, INFINITY, NULL); } minrd = FFMIN(minrd, dist); for (i = 0; i < q1 - q0; i++) { float cost; if (isinf(paths[idx - 1][i].cost)) continue; cost = paths[idx - 1][i].cost + dist + ff_aac_scalefactor_bits[q - i + SCALE_DIFF_ZERO]; if (cost < paths[idx][q].cost) { paths[idx][q].cost = cost; paths[idx][q].prev = i; } } } } else { for (q = 0; q < q1 - q0; q++) { if (!isinf(paths[idx - 1][q].cost)) { paths[idx][q].cost = paths[idx - 1][q].cost + 1; paths[idx][q].prev = q; continue; } for (i = 0; i < q1 - q0; i++) { float cost; if (isinf(paths[idx - 1][i].cost)) continue; cost = paths[idx - 1][i].cost + ff_aac_scalefactor_bits[q - i + SCALE_DIFF_ZERO]; if (cost < paths[idx][q].cost) { paths[idx][q].cost = cost; paths[idx][q].prev = i; } } } } sce->zeroes[w*16+g] = !nz; start += sce->ics.swb_sizes[g]; idx++; } } idx--; mincost = paths[idx][0].cost; minq = 0; for (i = 1; i < TRELLIS_STATES; i++) { if (paths[idx][i].cost < mincost) { mincost = paths[idx][i].cost; minq = i; } } while (idx) { sce->sf_idx[bandaddr[idx]] = minq + q0; minq = paths[idx][minq].prev; idx--; } //set the same quantizers inside window groups for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) for (g = 0; g < sce->ics.num_swb; g++) for (w2 = 1; w2 < sce->ics.group_len[w]; w2++) sce->sf_idx[(w+w2)*16+g] = sce->sf_idx[w*16+g]; }"} {"target": 0, "idx": 18138, "func": "static int create_fixed_disk(int fd, uint8_t *buf, int64_t total_size) { int ret = -EIO; /* Add footer to total size */ total_size += 512; if (ftruncate(fd, total_size) != 0) { ret = -errno; goto fail; } if (lseek(fd, -512, SEEK_END) < 0) { goto fail; } if (write(fd, buf, HEADER_SIZE) != HEADER_SIZE) { goto fail; } ret = 0; fail: return ret; }"} {"target": 0, "idx": 18142, "func": "static int local_readdir_r(FsContext *ctx, V9fsFidOpenState *fs, struct dirent *entry, struct dirent **result) { return readdir_r(fs->dir, entry, result); }"} {"target": 0, "idx": 18150, "func": "static int zrle_send_framebuffer_update(VncState 
*vs, int x, int y, int w, int h) { bool be = !!(vs->clientds.flags & QEMU_BIG_ENDIAN_FLAG); size_t bytes; int zywrle_level; if (vs->zrle.type == VNC_ENCODING_ZYWRLE) { if (!vs->vd->lossy || vs->tight.quality < 0 || vs->tight.quality == 9) { zywrle_level = 0; vs->zrle.type = VNC_ENCODING_ZRLE; } else if (vs->tight.quality < 3) { zywrle_level = 3; } else if (vs->tight.quality < 6) { zywrle_level = 2; } else { zywrle_level = 1; } } else { zywrle_level = 0; } vnc_zrle_start(vs); switch(vs->clientds.pf.bytes_per_pixel) { case 1: zrle_encode_8ne(vs, x, y, w, h, zywrle_level); break; case 2: if (vs->clientds.pf.gmax > 0x1F) { if (be) { zrle_encode_16be(vs, x, y, w, h, zywrle_level); } else { zrle_encode_16le(vs, x, y, w, h, zywrle_level); } } else { if (be) { zrle_encode_15be(vs, x, y, w, h, zywrle_level); } else { zrle_encode_15le(vs, x, y, w, h, zywrle_level); } } break; case 4: { bool fits_in_ls3bytes; bool fits_in_ms3bytes; fits_in_ls3bytes = ((vs->clientds.pf.rmax << vs->clientds.pf.rshift) < (1 << 24) && (vs->clientds.pf.gmax << vs->clientds.pf.gshift) < (1 << 24) && (vs->clientds.pf.bmax << vs->clientds.pf.bshift) < (1 << 24)); fits_in_ms3bytes = (vs->clientds.pf.rshift > 7 && vs->clientds.pf.gshift > 7 && vs->clientds.pf.bshift > 7); if ((fits_in_ls3bytes && !be) || (fits_in_ms3bytes && be)) { if (be) { zrle_encode_24abe(vs, x, y, w, h, zywrle_level); } else { zrle_encode_24ale(vs, x, y, w, h, zywrle_level); } } else if ((fits_in_ls3bytes && be) || (fits_in_ms3bytes && !be)) { if (be) { zrle_encode_24bbe(vs, x, y, w, h, zywrle_level); } else { zrle_encode_24ble(vs, x, y, w, h, zywrle_level); } } else { if (be) { zrle_encode_32be(vs, x, y, w, h, zywrle_level); } else { zrle_encode_32le(vs, x, y, w, h, zywrle_level); } } } break; } vnc_zrle_stop(vs); bytes = zrle_compress_data(vs, Z_DEFAULT_COMPRESSION); vnc_framebuffer_update(vs, x, y, w, h, vs->zrle.type); vnc_write_u32(vs, bytes); vnc_write(vs, vs->zrle.zlib.buffer, vs->zrle.zlib.offset); return 1; }"} {"target": 0, "idx": 18159, "func": "static uint64_t mmio_ide_status_read(void *opaque, target_phys_addr_t addr, unsigned size) { MMIOState *s= opaque; return ide_status_read(&s->bus, 0); }"} {"target": 0, "idx": 18161, "func": "void tlb_flush_page(CPUState *env, target_ulong addr) { int i; #if defined(DEBUG_TLB) printf(\"tlb_flush_page: \" TARGET_FMT_lx \"\\n\", addr); #endif /* must reset current TB so that interrupts cannot modify the links while we are modifying them */ env->current_tb = NULL; addr &= TARGET_PAGE_MASK; i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); tlb_flush_entry(&env->tlb_table[0][i], addr); tlb_flush_entry(&env->tlb_table[1][i], addr); #if (NB_MMU_MODES >= 3) tlb_flush_entry(&env->tlb_table[2][i], addr); #if (NB_MMU_MODES == 4) tlb_flush_entry(&env->tlb_table[3][i], addr); #endif #endif tlb_flush_jmp_cache(env, addr); #ifdef USE_KQEMU if (env->kqemu_enabled) { kqemu_flush_page(env, addr); } #endif }"} {"target": 0, "idx": 18171, "func": "void ff_slice_thread_free(AVCodecContext *avctx) { ThreadContext *c = avctx->thread_opaque; int i; pthread_mutex_lock(&c->current_job_lock); c->done = 1; pthread_cond_broadcast(&c->current_job_cond); pthread_mutex_unlock(&c->current_job_lock); for (i=0; i<c->thread_count; i++) pthread_join(c->workers[i], NULL); pthread_mutex_destroy(&c->current_job_lock); pthread_cond_destroy(&c->current_job_cond); pthread_cond_destroy(&c->last_job_cond); av_free(c->workers); av_freep(&avctx->thread_opaque); }"} {"target": 1, "idx": 18175, "func": "static int ass_decode_frame(AVCodecContext 
*avctx, void *data, int *got_sub_ptr, AVPacket *avpkt) { const char *ptr = avpkt->data; int len, size = avpkt->size; while (size > 0) { ASSDialog *dialog = ff_ass_split_dialog(avctx->priv_data, ptr, 0, NULL); int duration = dialog->end - dialog->start; len = ff_ass_add_rect(data, ptr, 0, duration, 1); if (len < 0) return len; ptr += len; size -= len; } *got_sub_ptr = avpkt->size > 0; return avpkt->size; }"} {"target": 1, "idx": 18187, "func": "static int init_blk_migration(QEMUFile *f) { BlockDriverState *bs; BlkMigDevState *bmds; int64_t sectors; BdrvNextIterator it; int i, num_bs = 0; struct { BlkMigDevState *bmds; BlockDriverState *bs; } *bmds_bs; Error *local_err = NULL; int ret; block_mig_state.submitted = 0; block_mig_state.read_done = 0; block_mig_state.transferred = 0; block_mig_state.total_sector_sum = 0; block_mig_state.prev_progress = -1; block_mig_state.bulk_completed = 0; block_mig_state.zero_blocks = migrate_zero_blocks(); for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { num_bs++; } bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs)); for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) { if (bdrv_is_read_only(bs)) { continue; } sectors = bdrv_nb_sectors(bs); if (sectors <= 0) { ret = sectors; goto out; } bmds = g_new0(BlkMigDevState, 1); bmds->blk = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL); bmds->blk_name = g_strdup(bdrv_get_device_name(bs)); bmds->bulk_completed = 0; bmds->total_sectors = sectors; bmds->completed_sectors = 0; bmds->shared_base = migrate_use_block_incremental(); assert(i < num_bs); bmds_bs[i].bmds = bmds; bmds_bs[i].bs = bs; block_mig_state.total_sector_sum += sectors; if (bmds->shared_base) { DPRINTF(\"Start migration for %s with shared base image\\n\", bdrv_get_device_name(bs)); } else { DPRINTF(\"Start full migration for %s\\n\", bdrv_get_device_name(bs)); } QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry); } /* Can only insert new BDSes now because doing so while iterating block * devices may end up in a deadlock (iterating the new BDSes, too). 
*/ for (i = 0; i < num_bs; i++) { BlkMigDevState *bmds = bmds_bs[i].bmds; BlockDriverState *bs = bmds_bs[i].bs; if (bmds) { ret = blk_insert_bs(bmds->blk, bs, &local_err); if (ret < 0) { error_report_err(local_err); goto out; } alloc_aio_bitmap(bmds); error_setg(&bmds->blocker, \"block device is in use by migration\"); bdrv_op_block_all(bs, bmds->blocker); } } ret = 0; out: g_free(bmds_bs); return ret; }"} {"target": 1, "idx": 18194, "func": "static int find_dirty_height(VncState *vs, int y, int last_x, int x) { int h; for (h = 1; h < (vs->serverds.height - y); h++) { int tmp_x; if (!vnc_get_bit(vs->dirty_row[y + h], last_x)) break; for (tmp_x = last_x; tmp_x < x; tmp_x++) vnc_clear_bit(vs->dirty_row[y + h], tmp_x); } return h; }"} {"target": 1, "idx": 18195, "func": "static void test_dispatch_cmd_failure(void) { QDict *req = qdict_new(); QObject *resp; qdict_put_obj(req, \"execute\", QOBJECT(qstring_from_str(\"user_def_cmd2\"))); resp = qmp_dispatch(QOBJECT(req)); assert(resp != NULL); assert(qdict_haskey(qobject_to_qdict(resp), \"error\")); qobject_decref(resp); QDECREF(req); /* check that with extra arguments it throws an error */ req = qdict_new(); qdict_put(args, \"a\", qint_from_int(66)); qdict_put(req, \"arguments\", args); qdict_put_obj(req, \"execute\", QOBJECT(qstring_from_str(\"user_def_cmd\"))); resp = qmp_dispatch(QOBJECT(req)); assert(resp != NULL); assert(qdict_haskey(qobject_to_qdict(resp), \"error\")); qobject_decref(resp); QDECREF(req); }"} {"target": 0, "idx": 18200, "func": "static void add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, int flags) { AVIndexEntry *entries, *ie; entries = av_fast_realloc(st->index_entries, &st->index_entries_allocated_size, (st->nb_index_entries + 1) * sizeof(AVIndexEntry)); if (entries) { st->index_entries = entries; ie = &entries[st->nb_index_entries++]; ie->pos = pos; ie->timestamp = timestamp; ie->flags = flags; } }"} {"target": 1, "idx": 18202, "func": "static int rv10_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MpegEncContext *s = avctx->priv_data; AVFrame *pict = data; int i, ret; int slice_count; const uint8_t *slices_hdr = NULL; av_dlog(avctx, \"*****frame %d size=%d\\n\", avctx->frame_number, buf_size); /* no supplementary picture */ if (buf_size == 0) { return 0; } if (!avctx->slice_count) { slice_count = (*buf++) + 1; buf_size--; if (!slice_count || buf_size <= 8 * slice_count) { av_log(avctx, AV_LOG_ERROR, \"Invalid slice count: %d.\\n\", slice_count); return AVERROR_INVALIDDATA; } slices_hdr = buf + 4; buf += 8 * slice_count; buf_size -= 8 * slice_count; } else slice_count = avctx->slice_count; for (i = 0; i < slice_count; i++) { unsigned offset = get_slice_offset(avctx, slices_hdr, i); int size, size2; if (offset >= buf_size) return AVERROR_INVALIDDATA; if (i + 1 == slice_count) size = buf_size - offset; else size = get_slice_offset(avctx, slices_hdr, i + 1) - offset; if (i + 2 >= slice_count) size2 = buf_size - offset; else size2 = get_slice_offset(avctx, slices_hdr, i + 2) - offset; if (size <= 0 || size2 <= 0 || offset + FFMAX(size, size2) > buf_size) return AVERROR_INVALIDDATA; if ((ret = rv10_decode_packet(avctx, buf + offset, size, size2)) < 0) return ret; if (ret > 8 * size) i++; } if (s->current_picture_ptr != NULL && s->mb_y >= s->mb_height) { ff_er_frame_end(&s->er); ff_MPV_frame_end(s); if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 
0) return ret; ff_print_debug_info(s, s->current_picture_ptr); } else if (s->last_picture_ptr != NULL) { if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0) return ret; ff_print_debug_info(s, s->last_picture_ptr); } if (s->last_picture_ptr || s->low_delay) { *got_frame = 1; } // so we can detect if frame_end was not called (find some nicer solution...) s->current_picture_ptr = NULL; } return avpkt->size; }"} {"target": 1, "idx": 18203, "func": "ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb, AVStream *st, RMStream *rst, int codec_data_size) { unsigned int v; int size; int64_t codec_pos; int ret; avpriv_set_pts_info(st, 64, 1, 1000); codec_pos = avio_tell(pb); v = avio_rb32(pb); if (v == MKTAG(0xfd, 'a', 'r', '.')) { /* ra type header */ if (rm_read_audio_stream_info(s, pb, st, rst, 0)) return -1; } else if (v == MKBETAG('L', 'S', 'D', ':')) { avio_seek(pb, -4, SEEK_CUR); if ((ret = rm_read_extradata(pb, st->codec, codec_data_size)) < 0) return ret; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_tag = AV_RL32(st->codec->extradata); st->codec->codec_id = ff_codec_get_id(ff_rm_codec_tags, st->codec->codec_tag); } else { int fps; if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) { fail1: av_log(st->codec, AV_LOG_ERROR, \"Unsupported video codec\\n\"); goto skip; } st->codec->codec_tag = avio_rl32(pb); st->codec->codec_id = ff_codec_get_id(ff_rm_codec_tags, st->codec->codec_tag); // av_log(s, AV_LOG_DEBUG, \"%X %X\\n\", st->codec->codec_tag, MKTAG('R', 'V', '2', '0')); if (st->codec->codec_id == CODEC_ID_NONE) goto fail1; st->codec->width = avio_rb16(pb); st->codec->height = avio_rb16(pb); avio_skip(pb, 2); // looks like bits per sample avio_skip(pb, 4); // always zero? st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS; fps = avio_rb32(pb); if ((ret = rm_read_extradata(pb, st->codec, codec_data_size - (avio_tell(pb) - codec_pos))) < 0) return ret; av_reduce(&st->r_frame_rate.den, &st->r_frame_rate.num, 0x10000, fps, (1 << 30) - 1); st->avg_frame_rate = st->r_frame_rate; } skip: /* skip codec info */ size = avio_tell(pb) - codec_pos; avio_skip(pb, codec_data_size - size); return 0; }"} {"target": 1, "idx": 18211, "func": "static void put_uint64(QEMUFile *f, void *pv, size_t size) { uint64_t *v = pv; qemu_put_be64s(f, v); }"} {"target": 0, "idx": 18220, "func": "QJSON *qjson_new(void) { QJSON *json = QJSON(object_new(TYPE_QJSON)); return json; }"} {"target": 0, "idx": 18231, "func": "void kqemu_record_dump(void) { PCRecord **pr, *r; int i, h; FILE *f; int64_t total, sum; pr = malloc(sizeof(PCRecord *) * nb_pc_records); i = 0; total = 0; for(h = 0; h < PC_REC_HASH_SIZE; h++) { for(r = pc_rec_hash[h]; r != NULL; r = r->next) { pr[i++] = r; total += r->count; } } qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp); f = fopen(\"/tmp/kqemu.stats\", \"w\"); if (!f) { perror(\"/tmp/kqemu.stats\"); exit(1); } fprintf(f, \"total: %\" PRId64 \"\\n\", total); sum = 0; for(i = 0; i < nb_pc_records; i++) { r = pr[i]; sum += r->count; fprintf(f, \"%08lx: %\" PRId64 \" %0.2f%% %0.2f%%\\n\", r->pc, r->count, (double)r->count / (double)total * 100.0, (double)sum / (double)total * 100.0); } fclose(f); free(pr); kqemu_record_flush(); }"} {"target": 0, "idx": 18254, "func": "static void pcnet_receive(void *opaque, const uint8_t *buf, size_t size) { PCNetState *s = opaque; int is_padr = 0, is_bcast = 0, is_ladr = 0; uint8_t buf1[60]; int remaining; int crc_err = 0; if (CSR_DRX(s) || CSR_STOP(s) || CSR_SPND(s) || !size) return; #ifdef 
PCNET_DEBUG printf(\"pcnet_receive size=%d\\n\", size); #endif /* if too small buffer, then expand it */ if (size < MIN_BUF_SIZE) { memcpy(buf1, buf, size); memset(buf1 + size, 0, MIN_BUF_SIZE - size); buf = buf1; size = MIN_BUF_SIZE; } if (CSR_PROM(s) || (is_padr=padr_match(s, buf, size)) || (is_bcast=padr_bcast(s, buf, size)) || (is_ladr=ladr_match(s, buf, size))) { pcnet_rdte_poll(s); if (!(CSR_CRST(s) & 0x8000) && s->rdra) { struct pcnet_RMD rmd; int rcvrc = CSR_RCVRC(s)-1,i; target_phys_addr_t nrda; for (i = CSR_RCVRL(s)-1; i > 0; i--, rcvrc--) { if (rcvrc <= 1) rcvrc = CSR_RCVRL(s); nrda = s->rdra + (CSR_RCVRL(s) - rcvrc) * (BCR_SWSTYLE(s) ? 16 : 8 ); RMDLOAD(&rmd, nrda); if (GET_FIELD(rmd.status, RMDS, OWN)) { #ifdef PCNET_DEBUG_RMD printf(\"pcnet - scan buffer: RCVRC=%d PREV_RCVRC=%d\\n\", rcvrc, CSR_RCVRC(s)); #endif CSR_RCVRC(s) = rcvrc; pcnet_rdte_poll(s); break; } } } if (!(CSR_CRST(s) & 0x8000)) { #ifdef PCNET_DEBUG_RMD printf(\"pcnet - no buffer: RCVRC=%d\\n\", CSR_RCVRC(s)); #endif s->csr[0] |= 0x1000; /* Set MISS flag */ CSR_MISSC(s)++; } else { uint8_t *src = s->buffer; target_phys_addr_t crda = CSR_CRDA(s); struct pcnet_RMD rmd; int pktcount = 0; if (!s->looptest) { memcpy(src, buf, size); /* no need to compute the CRC */ src[size] = 0; src[size + 1] = 0; src[size + 2] = 0; src[size + 3] = 0; size += 4; } else if (s->looptest == PCNET_LOOPTEST_CRC || !CSR_DXMTFCS(s) || size < MIN_BUF_SIZE+4) { uint32_t fcs = ~0; uint8_t *p = src; while (p != &src[size]) CRC(fcs, *p++); *(uint32_t *)p = htonl(fcs); size += 4; } else { uint32_t fcs = ~0; uint8_t *p = src; while (p != &src[size-4]) CRC(fcs, *p++); crc_err = (*(uint32_t *)p != htonl(fcs)); } #ifdef PCNET_DEBUG_MATCH PRINT_PKTHDR(buf); #endif RMDLOAD(&rmd, PHYSADDR(s,crda)); /*if (!CSR_LAPPEN(s))*/ SET_FIELD(&rmd.status, RMDS, STP, 1); #define PCNET_RECV_STORE() do { \\ int count = MIN(4096 - GET_FIELD(rmd.buf_length, RMDL, BCNT),remaining); \\ target_phys_addr_t rbadr = PHYSADDR(s, rmd.rbadr); \\ s->phys_mem_write(s->dma_opaque, rbadr, src, count, CSR_BSWP(s)); \\ src += count; remaining -= count; \\ SET_FIELD(&rmd.status, RMDS, OWN, 0); \\ RMDSTORE(&rmd, PHYSADDR(s,crda)); \\ pktcount++; \\ } while (0) remaining = size; PCNET_RECV_STORE(); if ((remaining > 0) && CSR_NRDA(s)) { target_phys_addr_t nrda = CSR_NRDA(s); #ifdef PCNET_DEBUG_RMD PRINT_RMD(&rmd); #endif RMDLOAD(&rmd, PHYSADDR(s,nrda)); if (GET_FIELD(rmd.status, RMDS, OWN)) { crda = nrda; PCNET_RECV_STORE(); #ifdef PCNET_DEBUG_RMD PRINT_RMD(&rmd); #endif if ((remaining > 0) && (nrda=CSR_NNRD(s))) { RMDLOAD(&rmd, PHYSADDR(s,nrda)); if (GET_FIELD(rmd.status, RMDS, OWN)) { crda = nrda; PCNET_RECV_STORE(); } } } } #undef PCNET_RECV_STORE RMDLOAD(&rmd, PHYSADDR(s,crda)); if (remaining == 0) { SET_FIELD(&rmd.msg_length, RMDM, MCNT, size); SET_FIELD(&rmd.status, RMDS, ENP, 1); SET_FIELD(&rmd.status, RMDS, PAM, !CSR_PROM(s) && is_padr); SET_FIELD(&rmd.status, RMDS, LFAM, !CSR_PROM(s) && is_ladr); SET_FIELD(&rmd.status, RMDS, BAM, !CSR_PROM(s) && is_bcast); if (crc_err) { SET_FIELD(&rmd.status, RMDS, CRC, 1); SET_FIELD(&rmd.status, RMDS, ERR, 1); } } else { SET_FIELD(&rmd.status, RMDS, OFLO, 1); SET_FIELD(&rmd.status, RMDS, BUFF, 1); SET_FIELD(&rmd.status, RMDS, ERR, 1); } RMDSTORE(&rmd, PHYSADDR(s,crda)); s->csr[0] |= 0x0400; #ifdef PCNET_DEBUG printf(\"RCVRC=%d CRDA=0x%08x BLKS=%d\\n\", CSR_RCVRC(s), PHYSADDR(s,CSR_CRDA(s)), pktcount); #endif #ifdef PCNET_DEBUG_RMD PRINT_RMD(&rmd); #endif while (pktcount--) { if (CSR_RCVRC(s) <= 1) CSR_RCVRC(s) = CSR_RCVRL(s); else 
CSR_RCVRC(s)--; } pcnet_rdte_poll(s); } } pcnet_poll(s); pcnet_update_irq(s); }"} {"target": 0, "idx": 18267, "func": "static void nbd_reply_ready(void *opaque) { NbdClientSession *s = opaque; uint64_t i; int ret; if (s->reply.handle == 0) { /* No reply already in flight. Fetch a header. It is possible * that another thread has done the same thing in parallel, so * the socket is not readable anymore. */ ret = nbd_receive_reply(s->sock, &s->reply); if (ret == -EAGAIN) { return; } if (ret < 0) { s->reply.handle = 0; goto fail; } } /* There's no need for a mutex on the receive side, because the * handler acts as a synchronization point and ensures that only * one coroutine is called until the reply finishes. */ i = HANDLE_TO_INDEX(s, s->reply.handle); if (i >= MAX_NBD_REQUESTS) { goto fail; } if (s->recv_coroutine[i]) { qemu_coroutine_enter(s->recv_coroutine[i], NULL); return; } fail: nbd_teardown_connection(s); }"} {"target": 0, "idx": 18268, "func": "static void ffm_write_data(AVFormatContext *s, const uint8_t *buf, int size, int64_t pts, int header) { FFMContext *ffm = s->priv_data; int len; if (header && ffm->frame_offset == 0) { ffm->frame_offset = ffm->packet_ptr - ffm->packet + FFM_HEADER_SIZE; ffm->pts = pts; } /* write as many packets as needed */ while (size > 0) { len = ffm->packet_end - ffm->packet_ptr; if (len > size) len = size; memcpy(ffm->packet_ptr, buf, len); ffm->packet_ptr += len; buf += len; size -= len; if (ffm->packet_ptr >= ffm->packet_end) { /* special case : no pts in packet : we leave the current one */ if (ffm->pts == 0) ffm->pts = pts; flush_packet(s); } } }"} {"target": 0, "idx": 18272, "func": "static inline void vmsvga_cursor_define(struct vmsvga_state_s *s, struct vmsvga_cursor_definition_s *c) { QEMUCursor *qc; int i, pixels; qc = cursor_alloc(c->width, c->height); qc->hot_x = c->hot_x; qc->hot_y = c->hot_y; switch (c->bpp) { case 1: cursor_set_mono(qc, 0xffffff, 0x000000, (void*)c->image, 1, (void*)c->mask); #ifdef DEBUG cursor_print_ascii_art(qc, \"vmware/mono\"); #endif break; case 32: /* fill alpha channel from mask, set color to zero */ cursor_set_mono(qc, 0x000000, 0x000000, (void*)c->mask, 1, (void*)c->mask); /* add in rgb values */ pixels = c->width * c->height; for (i = 0; i < pixels; i++) { qc->data[i] |= c->image[i] & 0xffffff; } #ifdef DEBUG cursor_print_ascii_art(qc, \"vmware/32bit\"); #endif break; default: fprintf(stderr, \"%s: unhandled bpp %d, using fallback cursor\\n\", __FUNCTION__, c->bpp); cursor_put(qc); qc = cursor_builtin_left_ptr(); } dpy_cursor_define(s->vga.ds, qc); cursor_put(qc); }"} {"target": 0, "idx": 18279, "func": "static void FUNCC(pred4x4_horizontal_add)(uint8_t *_pix, const int16_t *_block, ptrdiff_t stride) { int i; pixel *pix = (pixel*)_pix; const dctcoef *block = (const dctcoef*)_block; stride >>= sizeof(pixel)-1; for(i=0; i<4; i++){ pixel v = pix[-1]; pix[0]= v += block[0]; pix[1]= v += block[1]; pix[2]= v += block[2]; pix[3]= v + block[3]; pix+= stride; block+= 4; } }"} {"target": 0, "idx": 18282, "func": "static void set_proc_name(const char *s) { #ifdef __linux__ char name[16]; if (!s) return; name[sizeof(name) - 1] = 0; strncpy(name, s, sizeof(name)); /* Could rewrite argv[0] too, but that's a bit more complicated. This simple way is enough for `top'. 
*/ prctl(PR_SET_NAME, name); #endif }"} {"target": 1, "idx": 18285, "func": "int nbd_client_session_co_flush(NbdClientSession *client) { struct nbd_request request; struct nbd_reply reply; ssize_t ret; if (!(client->nbdflags & NBD_FLAG_SEND_FLUSH)) { return 0; } request.type = NBD_CMD_FLUSH; if (client->nbdflags & NBD_FLAG_SEND_FUA) { request.type |= NBD_CMD_FLAG_FUA; } request.from = 0; request.len = 0; nbd_coroutine_start(client, &request); ret = nbd_co_send_request(client, &request, NULL, 0); if (ret < 0) { reply.error = -ret; } else { nbd_co_receive_reply(client, &request, &reply, NULL, 0); } nbd_coroutine_end(client, &request); return -reply.error; }"} {"target": 0, "idx": 18304, "func": "int av_opencl_buffer_read(uint8_t *dst_buf, cl_mem src_cl_buf, size_t buf_size) { cl_int status; void *mapped = clEnqueueMapBuffer(gpu_env.command_queue, src_cl_buf, CL_TRUE,CL_MAP_READ, 0, buf_size, 0, NULL, NULL, &status); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, \"Could not map OpenCL buffer: %s\\n\", opencl_errstr(status)); return AVERROR_EXTERNAL; } memcpy(dst_buf, mapped, buf_size); status = clEnqueueUnmapMemObject(gpu_env.command_queue, src_cl_buf, mapped, 0, NULL, NULL); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, \"Could not unmap OpenCL buffer: %s\\n\", opencl_errstr(status)); return AVERROR_EXTERNAL; } return 0; }"} {"target": 1, "idx": 18311, "func": "static int mov_read_stsc(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; MOVStreamContext *sc; unsigned int i, entries; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; avio_r8(pb); /* version */ avio_rb24(pb); /* flags */ entries = avio_rb32(pb); av_log(c->fc, AV_LOG_TRACE, \"track[%i].stsc.entries = %i\\n\", c->fc->nb_streams-1, entries); if (!entries) return 0; if (entries >= UINT_MAX / sizeof(*sc->stsc_data)) return AVERROR_INVALIDDATA; sc->stsc_data = av_malloc(entries * sizeof(*sc->stsc_data)); if (!sc->stsc_data) return AVERROR(ENOMEM); for (i = 0; i < entries && !pb->eof_reached; i++) { sc->stsc_data[i].first = avio_rb32(pb); sc->stsc_data[i].count = avio_rb32(pb); sc->stsc_data[i].id = avio_rb32(pb); if (sc->stsc_data[i].id > sc->stsd_count) return AVERROR_INVALIDDATA; } sc->stsc_count = i; if (pb->eof_reached) return AVERROR_EOF; return 0; }"} {"target": 0, "idx": 18323, "func": "static int avi_read_tag(AVFormatContext *s, AVStream *st, uint32_t tag, uint32_t size) { AVIOContext *pb = s->pb; char key[5] = { 0 }; char *value; size += (size & 1); if (size == UINT_MAX) return AVERROR(EINVAL); value = av_malloc(size + 1); if (!value) return AVERROR(ENOMEM); avio_read(pb, value, size); value[size] = 0; AV_WL32(key, tag); return av_dict_set(st ? 
&st->metadata : &s->metadata, key, value, AV_DICT_DONT_STRDUP_VAL); }"} {"target": 0, "idx": 18324, "func": "static void hybrid_analysis(float out[91][32][2], float in[5][44][2], float L[2][38][64], int is34, int len) { int i, j; for (i = 0; i < 5; i++) { for (j = 0; j < 38; j++) { in[i][j+6][0] = L[0][j][i]; in[i][j+6][1] = L[1][j][i]; } } if (is34) { hybrid4_8_12_cx(in[0], out, f34_0_12, 12, len); hybrid4_8_12_cx(in[1], out+12, f34_1_8, 8, len); hybrid4_8_12_cx(in[2], out+20, f34_2_4, 4, len); hybrid4_8_12_cx(in[3], out+24, f34_2_4, 4, len); hybrid4_8_12_cx(in[4], out+28, f34_2_4, 4, len); for (i = 0; i < 59; i++) { for (j = 0; j < len; j++) { out[i+32][j][0] = L[0][j][i+5]; out[i+32][j][1] = L[1][j][i+5]; } } } else { hybrid6_cx(in[0], out, f20_0_8, len); hybrid2_re(in[1], out+6, g1_Q2, len, 1); hybrid2_re(in[2], out+8, g1_Q2, len, 0); for (i = 0; i < 61; i++) { for (j = 0; j < len; j++) { out[i+10][j][0] = L[0][j][i+3]; out[i+10][j][1] = L[1][j][i+3]; } } } //update in_buf for (i = 0; i < 5; i++) { memcpy(in[i], in[i]+32, 6 * sizeof(in[i][0])); } }"} {"target": 1, "idx": 18326, "func": "void qemu_file_set_rate_limit(QEMUFile *f, int64_t limit) { f->xfer_limit = limit; }"} {"target": 1, "idx": 18329, "func": "static void qobject_input_type_bool(Visitor *v, const char *name, bool *obj, Error **errp) { QObjectInputVisitor *qiv = to_qiv(v); QObject *qobj = qobject_input_get_object(qiv, name, true, errp); QBool *qbool; if (!qobj) { return; } qbool = qobject_to_qbool(qobj); if (!qbool) { error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : \"null\", \"boolean\"); return; } *obj = qbool_get_bool(qbool); }"} {"target": 0, "idx": 18344, "func": "int kvm_arch_get_registers(CPUState *cs) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; struct kvm_one_reg reg; struct kvm_sregs sregs; struct kvm_regs regs; int i, r; /* get the PSW */ env->psw.addr = cs->kvm_run->psw_addr; env->psw.mask = cs->kvm_run->psw_mask; /* the GPRS */ if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) { for (i = 0; i < 16; i++) { env->regs[i] = cs->kvm_run->s.regs.gprs[i]; } } else { r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs); if (r < 0) { return r; } for (i = 0; i < 16; i++) { env->regs[i] = regs.gprs[i]; } } /* The ACRS and CRS */ if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS && cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) { for (i = 0; i < 16; i++) { env->aregs[i] = cs->kvm_run->s.regs.acrs[i]; env->cregs[i] = cs->kvm_run->s.regs.crs[i]; } } else { r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs); if (r < 0) { return r; } for (i = 0; i < 16; i++) { env->aregs[i] = sregs.acrs[i]; env->cregs[i] = sregs.crs[i]; } } /* The prefix */ if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) { env->psa = cs->kvm_run->s.regs.prefix; } /* One Regs */ reg.id = KVM_REG_S390_CPU_TIMER; reg.addr = (__u64)&(env->cputm); r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); if (r < 0) { return r; } reg.id = KVM_REG_S390_CLOCK_COMP; reg.addr = (__u64)&(env->ckc); r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); if (r < 0) { return r; } reg.id = KVM_REG_S390_TODPR; reg.addr = (__u64)&(env->todpr); r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); if (r < 0) { return r; } if (cap_async_pf) { reg.id = KVM_REG_S390_PFTOKEN; reg.addr = (__u64)&(env->pfault_token); r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); if (r < 0) { return r; } reg.id = KVM_REG_S390_PFCOMPARE; reg.addr = (__u64)&(env->pfault_compare); r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); if (r < 0) { return r; } reg.id = KVM_REG_S390_PFSELECT; 
reg.addr = (__u64)&(env->pfault_select); r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); if (r < 0) { return r; } } return 0; }"} {"target": 0, "idx": 18345, "func": "int select_watchdog(const char *p) { WatchdogTimerModel *model; QemuOpts *opts; /* -watchdog ? lists available devices and exits cleanly. */ if (strcmp(p, \"?\") == 0) { LIST_FOREACH(model, &watchdog_list, entry) { fprintf(stderr, \"\\t%s\\t%s\\n\", model->wdt_name, model->wdt_description); } return 2; } LIST_FOREACH(model, &watchdog_list, entry) { if (strcasecmp(model->wdt_name, p) == 0) { /* add the device */ opts = qemu_opts_create(&qemu_device_opts, NULL, 0); qemu_opt_set(opts, \"driver\", p); return 0; } } fprintf(stderr, \"Unknown -watchdog device. Supported devices are:\\n\"); LIST_FOREACH(model, &watchdog_list, entry) { fprintf(stderr, \"\\t%s\\t%s\\n\", model->wdt_name, model->wdt_description); } return 1; }"} {"target": 0, "idx": 18351, "func": "static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq) { VirtIOBalloon *s = VIRTIO_BALLOON(vdev); VirtQueueElement elem; MemoryRegionSection section; while (virtqueue_pop(vq, &elem)) { size_t offset = 0; uint32_t pfn; while (iov_to_buf(elem.out_sg, elem.out_num, offset, &pfn, 4) == 4) { ram_addr_t pa; ram_addr_t addr; int p = virtio_ldl_p(vdev, &pfn); pa = (ram_addr_t) p << VIRTIO_BALLOON_PFN_SHIFT; offset += 4; /* FIXME: remove get_system_memory(), but how? */ section = memory_region_find(get_system_memory(), pa, 1); if (!int128_nz(section.size) || !memory_region_is_ram(section.mr)) continue; trace_virtio_balloon_handle_output(memory_region_name(section.mr), pa); /* Using memory_region_get_ram_ptr is bending the rules a bit, but should be OK because we only want a single page. */ addr = section.offset_within_region; balloon_page(memory_region_get_ram_ptr(section.mr) + addr, !!(vq == s->dvq)); memory_region_unref(section.mr); } virtqueue_push(vq, &elem, offset); virtio_notify(vdev, vq); } }"} {"target": 0, "idx": 18371, "func": "static av_cold int vorbis_decode_init(AVCodecContext *avccontext) { vorbis_context *vc = avccontext->priv_data; uint8_t *headers = avccontext->extradata; int headers_len = avccontext->extradata_size; uint8_t *header_start[3]; int header_len[3]; GetBitContext *gb = &vc->gb; int hdr_type, ret; vc->avccontext = avccontext; ff_dsputil_init(&vc->dsp, avccontext); ff_fmt_convert_init(&vc->fmt_conv, avccontext); if (avccontext->request_sample_fmt == AV_SAMPLE_FMT_FLT) { avccontext->sample_fmt = AV_SAMPLE_FMT_FLT; vc->scale_bias = 1.0f; } else { avccontext->sample_fmt = AV_SAMPLE_FMT_S16; vc->scale_bias = 32768.0f; } if (!headers_len) { av_log(avccontext, AV_LOG_ERROR, \"Extradata missing.\\n\"); return AVERROR_INVALIDDATA; } if ((ret = avpriv_split_xiph_headers(headers, headers_len, 30, header_start, header_len)) < 0) { av_log(avccontext, AV_LOG_ERROR, \"Extradata corrupt.\\n\"); return ret; } init_get_bits(gb, header_start[0], header_len[0]*8); hdr_type = get_bits(gb, 8); if (hdr_type != 1) { av_log(avccontext, AV_LOG_ERROR, \"First header is not the id header.\\n\"); return AVERROR_INVALIDDATA; } if ((ret = vorbis_parse_id_hdr(vc))) { av_log(avccontext, AV_LOG_ERROR, \"Id header corrupt.\\n\"); vorbis_free(vc); return ret; } init_get_bits(gb, header_start[2], header_len[2]*8); hdr_type = get_bits(gb, 8); if (hdr_type != 5) { av_log(avccontext, AV_LOG_ERROR, \"Third header is not the setup header.\\n\"); vorbis_free(vc); return AVERROR_INVALIDDATA; } if ((ret = vorbis_parse_setup_hdr(vc))) { av_log(avccontext, AV_LOG_ERROR, \"Setup header 
corrupt.\\n\"); vorbis_free(vc); return ret; } if (vc->audio_channels > 8) avccontext->channel_layout = 0; else avccontext->channel_layout = ff_vorbis_channel_layouts[vc->audio_channels - 1]; avccontext->channels = vc->audio_channels; avccontext->sample_rate = vc->audio_samplerate; avccontext->frame_size = FFMIN(vc->blocksize[0], vc->blocksize[1]) >> 2; avcodec_get_frame_defaults(&vc->frame); avccontext->coded_frame = &vc->frame; return 0; }"} {"target": 1, "idx": 18384, "func": "static void blk_send(QEMUFile *f, BlkMigBlock * blk) { int len; uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK; if (block_mig_state.zero_blocks && buffer_is_zero(blk->buf, BLOCK_SIZE)) { flags |= BLK_MIG_FLAG_ZERO_BLOCK; } /* sector number and flags */ qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS) | flags); /* device name */ len = strlen(bdrv_get_device_name(blk->bmds->bs)); qemu_put_byte(f, len); qemu_put_buffer(f, (uint8_t *)bdrv_get_device_name(blk->bmds->bs), len); /* if a block is zero we need to flush here since the network * bandwidth is now a lot higher than the storage device bandwidth. * thus if we queue zero blocks we slow down the migration */ if (flags & BLK_MIG_FLAG_ZERO_BLOCK) { qemu_fflush(f); return; } qemu_put_buffer(f, blk->buf, BLOCK_SIZE); }"} {"target": 1, "idx": 18409, "func": "int64_t qmp_guest_fsfreeze_freeze(Error **err) { int ret = 0, i = 0; FsMountList mounts; struct FsMount *mount; Error *local_err = NULL; int fd; slog(\"guest-fsfreeze called\"); execute_fsfreeze_hook(FSFREEZE_HOOK_FREEZE, &local_err); if (error_is_set(&local_err)) { error_propagate(err, local_err); return -1; } QTAILQ_INIT(&mounts); build_fs_mount_list(&mounts, &local_err); if (error_is_set(&local_err)) { error_propagate(err, local_err); return -1; } /* cannot risk guest agent blocking itself on a write in this state */ ga_set_frozen(ga_state); QTAILQ_FOREACH(mount, &mounts, next) { fd = qemu_open(mount->dirname, O_RDONLY); if (fd == -1) { error_setg_errno(err, errno, \"failed to open %s\", mount->dirname); goto error; } /* we try to cull filesytems we know won't work in advance, but other * filesytems may not implement fsfreeze for less obvious reasons. * these will report EOPNOTSUPP. we simply ignore these when tallying * the number of frozen filesystems. * * any other error means a failure to freeze a filesystem we * expect to be freezable, so return an error in those cases * and return system to thawed state. 
*/ ret = ioctl(fd, FIFREEZE); if (ret == -1) { if (errno != EOPNOTSUPP) { error_setg_errno(err, errno, \"failed to freeze %s\", mount->dirname); close(fd); goto error; } } else { i++; } close(fd); } free_fs_mount_list(&mounts); return i; error: free_fs_mount_list(&mounts); qmp_guest_fsfreeze_thaw(NULL); return 0; }"} {"target": 1, "idx": 18412, "func": "int qemu_devtree_setprop_string(void *fdt, const char *node_path, const char *property, const char *string) { int offset; offset = fdt_path_offset(fdt, node_path); if (offset < 0) return offset; return fdt_setprop_string(fdt, offset, property, string); }"} {"target": 1, "idx": 18426, "func": "static uint32_t calc_optimal_rice_params(RiceContext *rc, int porder, uint32_t *sums, int n, int pred_order) { int i; int k, cnt, part; uint32_t all_bits; part = (1 << porder); all_bits = 4 * part; cnt = (n >> porder) - pred_order; for (i = 0; i < part; i++) { k = find_optimal_param(sums[i], cnt); rc->params[i] = k; all_bits += rice_encode_count(sums[i], cnt, k); cnt = n >> porder; } rc->porder = porder; return all_bits; }"} {"target": 1, "idx": 18437, "func": "static void external_snapshot_commit(BlkActionState *common) { ExternalSnapshotState *state = DO_UPCAST(ExternalSnapshotState, common, common); bdrv_set_aio_context(state->new_bs, state->aio_context); /* This removes our old bs and adds the new bs */ bdrv_append(state->new_bs, state->old_bs); /* We don't need (or want) to use the transactional * bdrv_reopen_multiple() across all the entries at once, because we * don't want to abort all of them if one of them fails the reopen */ bdrv_reopen(state->old_bs, state->old_bs->open_flags & ~BDRV_O_RDWR, NULL); }"} {"target": 1, "idx": 18438, "func": "int av_image_alloc(uint8_t *pointers[4], int linesizes[4], int w, int h, enum PixelFormat pix_fmt, int align) { int i, ret; uint8_t *buf; if ((ret = av_image_check_size(w, h, 0, NULL)) < 0) return ret; if ((ret = av_image_fill_linesizes(linesizes, pix_fmt, w)) < 0) return ret; for (i = 0; i < 4; i++) linesizes[i] = FFALIGN(linesizes[i], align); if ((ret = av_image_fill_pointers(pointers, pix_fmt, h, NULL, linesizes)) < 0) return ret; buf = av_malloc(ret + align); if (!buf) return AVERROR(ENOMEM); if ((ret = av_image_fill_pointers(pointers, pix_fmt, h, buf, linesizes)) < 0) { av_free(buf); return ret; } if (av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_PAL) ff_set_systematic_pal2((uint32_t*)pointers[1], pix_fmt); return ret; }"} {"target": 1, "idx": 18439, "func": "static void rc4030_dma_as_update_one(rc4030State *s, int index, uint32_t frame) { if (index < MAX_TL_ENTRIES) { memory_region_set_enabled(&s->dma_mrs[index], false); } if (!frame) { return; } if (index >= MAX_TL_ENTRIES) { qemu_log_mask(LOG_UNIMP, \"rc4030: trying to use too high \" \"translation table entry %d (max allowed=%d)\", index, MAX_TL_ENTRIES); return; } memory_region_set_alias_offset(&s->dma_mrs[index], frame); memory_region_set_enabled(&s->dma_mrs[index], true); }"} {"target": 1, "idx": 18447, "func": "static bool virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev, VirtQueue *vq) { VirtIOSCSI *s = (VirtIOSCSI *)vdev; assert(s->ctx && s->dataplane_started); return virtio_scsi_handle_cmd_vq(s, vq); }"} {"target": 1, "idx": 18457, "func": "USBDevice *usb_host_device_open(const char *devname) { struct usb_device_info bus_info, dev_info; USBDevice *d = NULL; USBHostDevice *dev; char ctlpath[PATH_MAX + 1]; char buspath[PATH_MAX + 1]; int bfd, dfd, bus, address, i; int ugendebug = UGEN_DEBUG_LEVEL; if (usb_host_find_device(&bus, &address, 
devname) < 0) return NULL; snprintf(buspath, PATH_MAX, \"/dev/usb%d\", bus); bfd = open(buspath, O_RDWR); if (bfd < 0) { #ifdef DEBUG printf(\"usb_host_device_open: failed to open usb bus - %s\\n\", strerror(errno)); #endif return NULL; } bus_info.udi_addr = address; if (ioctl(bfd, USB_DEVICEINFO, &bus_info) < 0) { #ifdef DEBUG printf(\"usb_host_device_open: failed to grab bus information - %s\\n\", strerror(errno)); #endif return NULL; } #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) snprintf(ctlpath, PATH_MAX, \"/dev/%s\", bus_info.udi_devnames[0]); #else snprintf(ctlpath, PATH_MAX, \"/dev/%s.00\", bus_info.udi_devnames[0]); #endif dfd = open(ctlpath, O_RDWR); if (dfd < 0) { dfd = open(ctlpath, O_RDONLY); if (dfd < 0) { #ifdef DEBUG printf(\"usb_host_device_open: failed to open usb device %s - %s\\n\", ctlpath, strerror(errno)); #endif } } if (dfd >= 0) { if (ioctl(dfd, USB_GET_DEVICEINFO, &dev_info) < 0) { #ifdef DEBUG printf(\"usb_host_device_open: failed to grab device info - %s\\n\", strerror(errno)); #endif goto fail; } d = usb_create(NULL /* FIXME */, \"usb-host\"); dev = DO_UPCAST(USBHostDevice, dev, d); if (dev_info.udi_speed == 1) dev->dev.speed = USB_SPEED_LOW - 1; else dev->dev.speed = USB_SPEED_FULL - 1; if (strncmp(dev_info.udi_product, \"product\", 7) != 0) pstrcpy(dev->dev.product_desc, sizeof(dev->dev.product_desc), dev_info.udi_product); else snprintf(dev->dev.product_desc, sizeof(dev->dev.product_desc), \"host:%s\", devname); pstrcpy(dev->devpath, sizeof(dev->devpath), \"/dev/\"); pstrcat(dev->devpath, sizeof(dev->devpath), dev_info.udi_devnames[0]); /* Mark the endpoints as not yet open */ for (i = 0; i < USB_MAX_ENDPOINTS; i++) dev->ep_fd[i] = -1; ioctl(dfd, USB_SETDEBUG, &ugendebug); return (USBDevice *)dev; } fail: return NULL; }"} {"target": 0, "idx": 18462, "func": "static av_cold int twin_decode_init(AVCodecContext *avctx) { int ret; TwinContext *tctx = avctx->priv_data; int isampf, ibps; tctx->avctx = avctx; avctx->sample_fmt = AV_SAMPLE_FMT_FLTP; if (!avctx->extradata || avctx->extradata_size < 12) { av_log(avctx, AV_LOG_ERROR, \"Missing or incomplete extradata\\n\"); return AVERROR_INVALIDDATA; } avctx->channels = AV_RB32(avctx->extradata ) + 1; avctx->bit_rate = AV_RB32(avctx->extradata + 4) * 1000; isampf = AV_RB32(avctx->extradata + 8); if (isampf < 8 || isampf > 44) { av_log(avctx, AV_LOG_ERROR, \"Unsupported sample rate\\n\"); return AVERROR_INVALIDDATA; } switch (isampf) { case 44: avctx->sample_rate = 44100; break; case 22: avctx->sample_rate = 22050; break; case 11: avctx->sample_rate = 11025; break; default: avctx->sample_rate = isampf * 1000; break; } if (avctx->channels <= 0 || avctx->channels > CHANNELS_MAX) { av_log(avctx, AV_LOG_ERROR, \"Unsupported number of channels: %i\\n\", avctx->channels); return -1; } avctx->channel_layout = avctx->channels == 1 ? 
AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO; ibps = avctx->bit_rate / (1000 * avctx->channels); if (ibps > 255) { av_log(avctx, AV_LOG_ERROR, \"unsupported per channel bitrate %dkbps\\n\", ibps); return AVERROR_INVALIDDATA; } switch ((isampf << 8) + ibps) { case (8 <<8) + 8: tctx->mtab = &mode_08_08; break; case (11<<8) + 8: tctx->mtab = &mode_11_08; break; case (11<<8) + 10: tctx->mtab = &mode_11_10; break; case (16<<8) + 16: tctx->mtab = &mode_16_16; break; case (22<<8) + 20: tctx->mtab = &mode_22_20; break; case (22<<8) + 24: tctx->mtab = &mode_22_24; break; case (22<<8) + 32: tctx->mtab = &mode_22_32; break; case (44<<8) + 40: tctx->mtab = &mode_44_40; break; case (44<<8) + 48: tctx->mtab = &mode_44_48; break; default: av_log(avctx, AV_LOG_ERROR, \"This version does not support %d kHz - %d kbit/s/ch mode.\\n\", isampf, isampf); return -1; } ff_dsputil_init(&tctx->dsp, avctx); avpriv_float_dsp_init(&tctx->fdsp, avctx->flags & CODEC_FLAG_BITEXACT); if ((ret = init_mdct_win(tctx))) { av_log(avctx, AV_LOG_ERROR, \"Error initializing MDCT\\n\"); twin_decode_close(avctx); return ret; } init_bitstream_params(tctx); memset_float(tctx->bark_hist[0][0], 0.1, FF_ARRAY_ELEMS(tctx->bark_hist)); avcodec_get_frame_defaults(&tctx->frame); avctx->coded_frame = &tctx->frame; return 0; }"} {"target": 1, "idx": 18474, "func": "static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata) { AVFormatContext *is = ifile->ctx; AVFormatContext *os = ofile->ctx; int i; for (i = 0; i < is->nb_chapters; i++) { AVChapter *in_ch = is->chapters[i], *out_ch; int64_t ts_off = av_rescale_q(ofile->start_time - ifile->ts_offset, AV_TIME_BASE_Q, in_ch->time_base); int64_t rt = (ofile->recording_time == INT64_MAX) ? INT64_MAX : av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base); if (in_ch->end < ts_off) continue; if (rt != INT64_MAX && in_ch->start > rt + ts_off) break; out_ch = av_mallocz(sizeof(AVChapter)); if (!out_ch) return AVERROR(ENOMEM); out_ch->id = in_ch->id; out_ch->time_base = in_ch->time_base; out_ch->start = FFMAX(0, in_ch->start - ts_off); out_ch->end = FFMIN(rt, in_ch->end - ts_off); if (copy_metadata) av_dict_copy(&out_ch->metadata, in_ch->metadata, 0); os->nb_chapters++; os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters); if (!os->chapters) return AVERROR(ENOMEM); os->chapters[os->nb_chapters - 1] = out_ch; } return 0; }"} {"target": 1, "idx": 18477, "func": "int trace_record_start(TraceBufferRecord *rec, TraceEventID event, size_t datasize) { unsigned int idx, rec_off, old_idx, new_idx; uint32_t rec_len = sizeof(TraceRecord) + datasize; uint64_t event_u64 = event; uint64_t timestamp_ns = get_clock(); do { old_idx = g_atomic_int_get(&trace_idx); smp_rmb(); new_idx = old_idx + rec_len; if (new_idx - writeout_idx > TRACE_BUF_LEN) { /* Trace Buffer Full, Event dropped ! 
*/ g_atomic_int_inc(&dropped_events); return -ENOSPC; } } while (!g_atomic_int_compare_and_exchange(&trace_idx, old_idx, new_idx)); idx = old_idx % TRACE_BUF_LEN; rec_off = idx; rec_off = write_to_buffer(rec_off, &event_u64, sizeof(event_u64)); rec_off = write_to_buffer(rec_off, &timestamp_ns, sizeof(timestamp_ns)); rec_off = write_to_buffer(rec_off, &rec_len, sizeof(rec_len)); rec_off = write_to_buffer(rec_off, &trace_pid, sizeof(trace_pid)); rec->tbuf_idx = idx; rec->rec_off = (idx + sizeof(TraceRecord)) % TRACE_BUF_LEN; return 0; }"} {"target": 1, "idx": 18493, "func": "static float64 roundAndPackFloat64( flag zSign, int16 zExp, uint64_t zSig STATUS_PARAM) { int8 roundingMode; flag roundNearestEven; int16 roundIncrement, roundBits; flag isTiny; roundingMode = STATUS(float_rounding_mode); roundNearestEven = ( roundingMode == float_round_nearest_even ); roundIncrement = 0x200; if ( ! roundNearestEven ) { if ( roundingMode == float_round_to_zero ) { roundIncrement = 0; } else { roundIncrement = 0x3FF; if ( zSign ) { if ( roundingMode == float_round_up ) roundIncrement = 0; } else { if ( roundingMode == float_round_down ) roundIncrement = 0; } } } roundBits = zSig & 0x3FF; if ( 0x7FD <= (uint16_t) zExp ) { if ( ( 0x7FD < zExp ) || ( ( zExp == 0x7FD ) && ( (int64_t) ( zSig + roundIncrement ) < 0 ) ) ) { float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR); return packFloat64( zSign, 0x7FF, - ( roundIncrement == 0 )); } if ( zExp < 0 ) { if ( STATUS(flush_to_zero) ) return packFloat64( zSign, 0, 0 ); isTiny = ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) || ( zExp < -1 ) || ( zSig + roundIncrement < LIT64( 0x8000000000000000 ) ); shift64RightJamming( zSig, - zExp, &zSig ); zExp = 0; roundBits = zSig & 0x3FF; if ( isTiny && roundBits ) float_raise( float_flag_underflow STATUS_VAR); } } if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact; zSig = ( zSig + roundIncrement )>>10; zSig &= ~ ( ( ( roundBits ^ 0x200 ) == 0 ) & roundNearestEven ); if ( zSig == 0 ) zExp = 0; return packFloat64( zSign, zExp, zSig ); }"} {"target": 1, "idx": 18512, "func": "static void xen_remap_bucket(MapCacheEntry *entry, hwaddr size, hwaddr address_index) { uint8_t *vaddr_base; xen_pfn_t *pfns; int *err; unsigned int i; hwaddr nb_pfn = size >> XC_PAGE_SHIFT; trace_xen_remap_bucket(address_index); pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t)); err = g_malloc0(nb_pfn * sizeof (int)); if (entry->vaddr_base != NULL) { if (munmap(entry->vaddr_base, entry->size) != 0) { perror(\"unmap fails\"); exit(-1); } } g_free(entry->valid_mapping); entry->valid_mapping = NULL; for (i = 0; i < nb_pfn; i++) { pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i; } vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE, pfns, err, nb_pfn); if (vaddr_base == NULL) { perror(\"xc_map_foreign_bulk\"); exit(-1); } entry->vaddr_base = vaddr_base; entry->paddr_index = address_index; entry->size = size; entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) * BITS_TO_LONGS(size >> XC_PAGE_SHIFT)); bitmap_zero(entry->valid_mapping, nb_pfn); for (i = 0; i < nb_pfn; i++) { if (!err[i]) { bitmap_set(entry->valid_mapping, i, 1); } } g_free(pfns); g_free(err); }"} {"target": 1, "idx": 18513, "func": "void usb_packet_complete(USBDevice *dev, USBPacket *p) { USBEndpoint *ep = p->ep; int ret; assert(p->state == USB_PACKET_ASYNC); assert(QTAILQ_FIRST(&ep->queue) == p); usb_packet_set_state(p, USB_PACKET_COMPLETE); QTAILQ_REMOVE(&ep->queue, p, queue); 
dev->port->ops->complete(dev->port, p); while (!QTAILQ_EMPTY(&ep->queue)) { p = QTAILQ_FIRST(&ep->queue); if (p->state == USB_PACKET_ASYNC) { break; } assert(p->state == USB_PACKET_QUEUED); ret = usb_process_one(p); if (ret == USB_RET_ASYNC) { usb_packet_set_state(p, USB_PACKET_ASYNC); break; } p->result = ret; usb_packet_set_state(p, USB_PACKET_COMPLETE); QTAILQ_REMOVE(&ep->queue, p, queue); dev->port->ops->complete(dev->port, p); } }"} {"target": 0, "idx": 18518, "func": "static void dyn_buf_write(void *opaque, UINT8 *buf, int buf_size) { DynBuffer *d = opaque; int new_size, new_allocated_size; UINT8 *new_buffer; /* reallocate buffer if needed */ new_size = d->pos + buf_size; new_allocated_size = d->allocated_size; while (new_size > new_allocated_size) { if (!new_allocated_size) new_allocated_size = new_size; else new_allocated_size = (new_allocated_size * 3) / 2; } if (new_allocated_size > d->allocated_size) { new_buffer = av_malloc(new_allocated_size); if (!new_buffer) return; memcpy(new_buffer, d->buffer, d->size); av_free(d->buffer); d->buffer = new_buffer; d->allocated_size = new_allocated_size; } memcpy(d->buffer + d->pos, buf, buf_size); d->pos = new_size; if (d->pos > d->size) d->size = d->pos; }"} {"target": 0, "idx": 18523, "func": "static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]){ int y, h_size; if(c->srcFormat == PIX_FMT_YUV422P){ srcStride[1] *= 2; srcStride[2] *= 2; } h_size= (c->dstW+7)&~7; if(h_size*3 > dstStride[0]) h_size-=8; __asm__ __volatile__ (\"pxor %mm4, %mm4;\" /* zero mm4 */ ); for (y= 0; y>1)*srcStride[1]; uint8_t *_pv = src[2] + (y>>1)*srcStride[2]; long index= -h_size/2; /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8 pixels in each iteration */ __asm__ __volatile__ ( /* load data for start of next scan line */ \"movd (%2, %0), %%mm0;\" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ \"movd (%3, %0), %%mm1;\" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ \"movq (%5, %0, 2), %%mm6;\" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ // \".balign 16 \\n\\t\" \"1: \\n\\t\" YUV2RGB /* mm0=B, %%mm2=G, %%mm1=R */ #ifdef HAVE_MMX2 \"movq \"MANGLE(M24A)\", %%mm4 \\n\\t\" \"movq \"MANGLE(M24C)\", %%mm7 \\n\\t\" \"pshufw $0x50, %%mm0, %%mm5 \\n\\t\" /* B3 B2 B3 B2 B1 B0 B1 B0 */ \"pshufw $0x50, %%mm2, %%mm3 \\n\\t\" /* G3 G2 G3 G2 G1 G0 G1 G0 */ \"pshufw $0x00, %%mm1, %%mm6 \\n\\t\" /* R1 R0 R1 R0 R1 R0 R1 R0 */ \"pand %%mm4, %%mm5 \\n\\t\" /* B2 B1 B0 */ \"pand %%mm4, %%mm3 \\n\\t\" /* G2 G1 G0 */ \"pand %%mm7, %%mm6 \\n\\t\" /* R1 R0 */ \"psllq $8, %%mm3 \\n\\t\" /* G2 G1 G0 */ \"por %%mm5, %%mm6 \\n\\t\" \"por %%mm3, %%mm6 \\n\\t\" MOVNTQ\" %%mm6, (%1) \\n\\t\" \"psrlq $8, %%mm2 \\n\\t\" /* 00 G7 G6 G5 G4 G3 G2 G1 */ \"pshufw $0xA5, %%mm0, %%mm5 \\n\\t\" /* B5 B4 B5 B4 B3 B2 B3 B2 */ \"pshufw $0x55, %%mm2, %%mm3 \\n\\t\" /* G4 G3 G4 G3 G4 G3 G4 G3 */ \"pshufw $0xA5, %%mm1, %%mm6 \\n\\t\" /* R5 R4 R5 R4 R3 R2 R3 R2 */ \"pand \"MANGLE(M24B)\", %%mm5 \\n\\t\" /* B5 B4 B3 */ \"pand %%mm7, %%mm3 \\n\\t\" /* G4 G3 */ \"pand %%mm4, %%mm6 \\n\\t\" /* R4 R3 R2 */ \"por %%mm5, %%mm3 \\n\\t\" /* B5 G4 B4 G3 B3 */ \"por %%mm3, %%mm6 \\n\\t\" MOVNTQ\" %%mm6, 8(%1) \\n\\t\" \"pshufw $0xFF, %%mm0, %%mm5 \\n\\t\" /* B7 B6 B7 B6 B7 B6 B6 B7 */ \"pshufw $0xFA, %%mm2, %%mm3 \\n\\t\" /* 00 G7 00 G7 G6 G5 G6 G5 */ \"pshufw $0xFA, %%mm1, %%mm6 \\n\\t\" /* R7 R6 R7 R6 R5 R4 R5 R4 */ \"movd 4 (%2, %0), %%mm0;\" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ \"pand %%mm7, %%mm5 \\n\\t\" /* B7 
B6 */ \"pand %%mm4, %%mm3 \\n\\t\" /* G7 G6 G5 */ \"pand \"MANGLE(M24B)\", %%mm6 \\n\\t\" /* R7 R6 R5 */ \"movd 4 (%3, %0), %%mm1;\" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ \\ \"por %%mm5, %%mm3 \\n\\t\" \"por %%mm3, %%mm6 \\n\\t\" MOVNTQ\" %%mm6, 16(%1) \\n\\t\" \"movq 8 (%5, %0, 2), %%mm6;\" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ \"pxor %%mm4, %%mm4 \\n\\t\" #else \"pxor %%mm4, %%mm4 \\n\\t\" \"movq %%mm0, %%mm5 \\n\\t\" /* B */ \"movq %%mm1, %%mm6 \\n\\t\" /* R */ \"punpcklbw %%mm2, %%mm0 \\n\\t\" /* GBGBGBGB 0 */ \"punpcklbw %%mm4, %%mm1 \\n\\t\" /* 0R0R0R0R 0 */ \"punpckhbw %%mm2, %%mm5 \\n\\t\" /* GBGBGBGB 2 */ \"punpckhbw %%mm4, %%mm6 \\n\\t\" /* 0R0R0R0R 2 */ \"movq %%mm0, %%mm7 \\n\\t\" /* GBGBGBGB 0 */ \"movq %%mm5, %%mm3 \\n\\t\" /* GBGBGBGB 2 */ \"punpcklwd %%mm1, %%mm7 \\n\\t\" /* 0RGB0RGB 0 */ \"punpckhwd %%mm1, %%mm0 \\n\\t\" /* 0RGB0RGB 1 */ \"punpcklwd %%mm6, %%mm5 \\n\\t\" /* 0RGB0RGB 2 */ \"punpckhwd %%mm6, %%mm3 \\n\\t\" /* 0RGB0RGB 3 */ \"movq %%mm7, %%mm2 \\n\\t\" /* 0RGB0RGB 0 */ \"movq %%mm0, %%mm6 \\n\\t\" /* 0RGB0RGB 1 */ \"movq %%mm5, %%mm1 \\n\\t\" /* 0RGB0RGB 2 */ \"movq %%mm3, %%mm4 \\n\\t\" /* 0RGB0RGB 3 */ \"psllq $40, %%mm7 \\n\\t\" /* RGB00000 0 */ \"psllq $40, %%mm0 \\n\\t\" /* RGB00000 1 */ \"psllq $40, %%mm5 \\n\\t\" /* RGB00000 2 */ \"psllq $40, %%mm3 \\n\\t\" /* RGB00000 3 */ \"punpckhdq %%mm2, %%mm7 \\n\\t\" /* 0RGBRGB0 0 */ \"punpckhdq %%mm6, %%mm0 \\n\\t\" /* 0RGBRGB0 1 */ \"punpckhdq %%mm1, %%mm5 \\n\\t\" /* 0RGBRGB0 2 */ \"punpckhdq %%mm4, %%mm3 \\n\\t\" /* 0RGBRGB0 3 */ \"psrlq $8, %%mm7 \\n\\t\" /* 00RGBRGB 0 */ \"movq %%mm0, %%mm6 \\n\\t\" /* 0RGBRGB0 1 */ \"psllq $40, %%mm0 \\n\\t\" /* GB000000 1 */ \"por %%mm0, %%mm7 \\n\\t\" /* GBRGBRGB 0 */ MOVNTQ\" %%mm7, (%1) \\n\\t\" \"movd 4 (%2, %0), %%mm0;\" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ \"psrlq $24, %%mm6 \\n\\t\" /* 0000RGBR 1 */ \"movq %%mm5, %%mm1 \\n\\t\" /* 0RGBRGB0 2 */ \"psllq $24, %%mm5 \\n\\t\" /* BRGB0000 2 */ \"por %%mm5, %%mm6 \\n\\t\" /* BRGBRGBR 1 */ MOVNTQ\" %%mm6, 8(%1) \\n\\t\" \"movq 8 (%5, %0, 2), %%mm6;\" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ \"psrlq $40, %%mm1 \\n\\t\" /* 000000RG 2 */ \"psllq $8, %%mm3 \\n\\t\" /* RGBRGB00 3 */ \"por %%mm3, %%mm1 \\n\\t\" /* RGBRGBRG 2 */ MOVNTQ\" %%mm1, 16(%1) \\n\\t\" \"movd 4 (%3, %0), %%mm1;\" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ \"pxor %%mm4, %%mm4 \\n\\t\" #endif \"add $24, %1 \\n\\t\" \"add $4, %0 \\n\\t\" \" js 1b \\n\\t\" : \"+r\" (index), \"+r\" (_image) : \"r\" (_pu - index), \"r\" (_pv - index), \"r\"(&c->redDither), \"r\" (_py - 2*index) ); } __asm__ __volatile__ (EMMS); return srcSliceH; }"} {"target": 0, "idx": 18529, "func": "static int hex_to_data(uint8_t *data, const char *p) { int c, len, v; len = 0; v = 1; for(;;) { skip_spaces(&p); if (*p == '\\0') break; c = toupper((unsigned char)*p++); if (c >= '0' && c <= '9') c = c - '0'; else if (c >= 'A' && c <= 'F') c = c - 'A' + 10; else break; v = (v << 4) | c; if (v & 0x100) { if (data) data[len] = v; len++; v = 1; } } return len; }"} {"target": 0, "idx": 18545, "func": "static int RENAME(dct_quantize)(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow) { x86_reg last_non_zero_p1; int level=0, q; //=0 is because gcc says uninitialized ... const uint16_t *qmat, *bias; LOCAL_ALIGNED_16(int16_t, temp_block, [64]); av_assert2((7&(int)(&temp_block[0])) == 0); //did gcc align it correctly? //s->fdct (block); RENAME_FDCT(ff_fdct)(block); // cannot be anything else ... 
if(s->dct_error_sum) s->denoise_dct(s, block); if (s->mb_intra) { int dummy; if (n < 4){ q = s->y_dc_scale; bias = s->q_intra_matrix16[qscale][1]; qmat = s->q_intra_matrix16[qscale][0]; }else{ q = s->c_dc_scale; bias = s->q_chroma_intra_matrix16[qscale][1]; qmat = s->q_chroma_intra_matrix16[qscale][0]; } /* note: block[0] is assumed to be positive */ if (!s->h263_aic) { __asm__ volatile ( \"mul %%ecx \\n\\t\" : \"=d\" (level), \"=a\"(dummy) : \"a\" ((block[0]>>2) + q), \"c\" (ff_inverse[q<<1]) ); } else /* For AIC we skip quant/dequant of INTRADC */ level = (block[0] + 4)>>3; block[0]=0; //avoid fake overflow // temp_block[0] = (block[0] + (q >> 1)) / q; last_non_zero_p1 = 1; } else { last_non_zero_p1 = 0; bias = s->q_inter_matrix16[qscale][1]; qmat = s->q_inter_matrix16[qscale][0]; } if((s->out_format == FMT_H263 || s->out_format == FMT_H261) && s->mpeg_quant==0){ __asm__ volatile( \"movd %%\"FF_REG_a\", \"MM\"3 \\n\\t\" // last_non_zero_p1 SPREADW(MM\"3\") \"pxor \"MM\"7, \"MM\"7 \\n\\t\" // 0 \"pxor \"MM\"4, \"MM\"4 \\n\\t\" // 0 MOVQ\" (%2), \"MM\"5 \\n\\t\" // qmat[0] \"pxor \"MM\"6, \"MM\"6 \\n\\t\" \"psubw (%3), \"MM\"6 \\n\\t\" // -bias[0] \"mov $-128, %%\"FF_REG_a\" \\n\\t\" \".p2align 4 \\n\\t\" \"1: \\n\\t\" MOVQ\" (%1, %%\"FF_REG_a\"), \"MM\"0 \\n\\t\" // block[i] SAVE_SIGN(MM\"1\", MM\"0\") // ABS(block[i]) \"psubusw \"MM\"6, \"MM\"0 \\n\\t\" // ABS(block[i]) + bias[0] \"pmulhw \"MM\"5, \"MM\"0 \\n\\t\" // (ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16 \"por \"MM\"0, \"MM\"4 \\n\\t\" RESTORE_SIGN(MM\"1\", MM\"0\") // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i]) MOVQ\" \"MM\"0, (%5, %%\"FF_REG_a\") \\n\\t\" \"pcmpeqw \"MM\"7, \"MM\"0 \\n\\t\" // out==0 ? 0xFF : 0x00 MOVQ\" (%4, %%\"FF_REG_a\"), \"MM\"1 \\n\\t\" MOVQ\" \"MM\"7, (%1, %%\"FF_REG_a\") \\n\\t\" // 0 \"pandn \"MM\"1, \"MM\"0 \\n\\t\" PMAXW(MM\"0\", MM\"3\") \"add $\"MMREG_WIDTH\", %%\"FF_REG_a\" \\n\\t\" \" js 1b \\n\\t\" PMAX(MM\"3\", MM\"0\") \"movd \"MM\"3, %%\"FF_REG_a\" \\n\\t\" \"movzbl %%al, %%eax \\n\\t\" // last_non_zero_p1 : \"+a\" (last_non_zero_p1) : \"r\" (block+64), \"r\" (qmat), \"r\" (bias), \"r\" (inv_zigzag_direct16 + 64), \"r\" (temp_block + 64) XMM_CLOBBERS_ONLY(\"%xmm0\", \"%xmm1\", \"%xmm2\", \"%xmm3\", \"%xmm4\", \"%xmm5\", \"%xmm6\", \"%xmm7\") ); }else{ // FMT_H263 __asm__ volatile( \"movd %%\"FF_REG_a\", \"MM\"3 \\n\\t\" // last_non_zero_p1 SPREADW(MM\"3\") \"pxor \"MM\"7, \"MM\"7 \\n\\t\" // 0 \"pxor \"MM\"4, \"MM\"4 \\n\\t\" // 0 \"mov $-128, %%\"FF_REG_a\" \\n\\t\" \".p2align 4 \\n\\t\" \"1: \\n\\t\" MOVQ\" (%1, %%\"FF_REG_a\"), \"MM\"0 \\n\\t\" // block[i] SAVE_SIGN(MM\"1\", MM\"0\") // ABS(block[i]) MOVQ\" (%3, %%\"FF_REG_a\"), \"MM\"6 \\n\\t\" // bias[0] \"paddusw \"MM\"6, \"MM\"0 \\n\\t\" // ABS(block[i]) + bias[0] MOVQ\" (%2, %%\"FF_REG_a\"), \"MM\"5 \\n\\t\" // qmat[i] \"pmulhw \"MM\"5, \"MM\"0 \\n\\t\" // (ABS(block[i])*qmat[0] + bias[0]*qmat[0])>>16 \"por \"MM\"0, \"MM\"4 \\n\\t\" RESTORE_SIGN(MM\"1\", MM\"0\") // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i]) MOVQ\" \"MM\"0, (%5, %%\"FF_REG_a\") \\n\\t\" \"pcmpeqw \"MM\"7, \"MM\"0 \\n\\t\" // out==0 ? 
0xFF : 0x00 MOVQ\" (%4, %%\"FF_REG_a\"), \"MM\"1 \\n\\t\" MOVQ\" \"MM\"7, (%1, %%\"FF_REG_a\") \\n\\t\" // 0 \"pandn \"MM\"1, \"MM\"0 \\n\\t\" PMAXW(MM\"0\", MM\"3\") \"add $\"MMREG_WIDTH\", %%\"FF_REG_a\" \\n\\t\" \" js 1b \\n\\t\" PMAX(MM\"3\", MM\"0\") \"movd \"MM\"3, %%\"FF_REG_a\" \\n\\t\" \"movzbl %%al, %%eax \\n\\t\" // last_non_zero_p1 : \"+a\" (last_non_zero_p1) : \"r\" (block+64), \"r\" (qmat+64), \"r\" (bias+64), \"r\" (inv_zigzag_direct16 + 64), \"r\" (temp_block + 64) XMM_CLOBBERS_ONLY(\"%xmm0\", \"%xmm1\", \"%xmm2\", \"%xmm3\", \"%xmm4\", \"%xmm5\", \"%xmm6\", \"%xmm7\") ); } __asm__ volatile( \"movd %1, \"MM\"1 \\n\\t\" // max_qcoeff SPREADW(MM\"1\") \"psubusw \"MM\"1, \"MM\"4 \\n\\t\" \"packuswb \"MM\"4, \"MM\"4 \\n\\t\" #if COMPILE_TEMPLATE_SSE2 \"packsswb \"MM\"4, \"MM\"4 \\n\\t\" #endif \"movd \"MM\"4, %0 \\n\\t\" // *overflow : \"=g\" (*overflow) : \"g\" (s->max_qcoeff) ); if(s->mb_intra) block[0]= level; else block[0]= temp_block[0]; if (s->idsp.perm_type == FF_IDCT_PERM_SIMPLE) { if(last_non_zero_p1 <= 1) goto end; block[0x08] = temp_block[0x01]; block[0x10] = temp_block[0x08]; block[0x20] = temp_block[0x10]; if(last_non_zero_p1 <= 4) goto end; block[0x18] = temp_block[0x09]; block[0x04] = temp_block[0x02]; block[0x09] = temp_block[0x03]; if(last_non_zero_p1 <= 7) goto end; block[0x14] = temp_block[0x0A]; block[0x28] = temp_block[0x11]; block[0x12] = temp_block[0x18]; block[0x02] = temp_block[0x20]; if(last_non_zero_p1 <= 11) goto end; block[0x1A] = temp_block[0x19]; block[0x24] = temp_block[0x12]; block[0x19] = temp_block[0x0B]; block[0x01] = temp_block[0x04]; block[0x0C] = temp_block[0x05]; if(last_non_zero_p1 <= 16) goto end; block[0x11] = temp_block[0x0C]; block[0x29] = temp_block[0x13]; block[0x16] = temp_block[0x1A]; block[0x0A] = temp_block[0x21]; block[0x30] = temp_block[0x28]; block[0x22] = temp_block[0x30]; block[0x38] = temp_block[0x29]; block[0x06] = temp_block[0x22]; if(last_non_zero_p1 <= 24) goto end; block[0x1B] = temp_block[0x1B]; block[0x21] = temp_block[0x14]; block[0x1C] = temp_block[0x0D]; block[0x05] = temp_block[0x06]; block[0x0D] = temp_block[0x07]; block[0x15] = temp_block[0x0E]; block[0x2C] = temp_block[0x15]; block[0x13] = temp_block[0x1C]; if(last_non_zero_p1 <= 32) goto end; block[0x0B] = temp_block[0x23]; block[0x34] = temp_block[0x2A]; block[0x2A] = temp_block[0x31]; block[0x32] = temp_block[0x38]; block[0x3A] = temp_block[0x39]; block[0x26] = temp_block[0x32]; block[0x39] = temp_block[0x2B]; block[0x03] = temp_block[0x24]; if(last_non_zero_p1 <= 40) goto end; block[0x1E] = temp_block[0x1D]; block[0x25] = temp_block[0x16]; block[0x1D] = temp_block[0x0F]; block[0x2D] = temp_block[0x17]; block[0x17] = temp_block[0x1E]; block[0x0E] = temp_block[0x25]; block[0x31] = temp_block[0x2C]; block[0x2B] = temp_block[0x33]; if(last_non_zero_p1 <= 48) goto end; block[0x36] = temp_block[0x3A]; block[0x3B] = temp_block[0x3B]; block[0x23] = temp_block[0x34]; block[0x3C] = temp_block[0x2D]; block[0x07] = temp_block[0x26]; block[0x1F] = temp_block[0x1F]; block[0x0F] = temp_block[0x27]; block[0x35] = temp_block[0x2E]; if(last_non_zero_p1 <= 56) goto end; block[0x2E] = temp_block[0x35]; block[0x33] = temp_block[0x3C]; block[0x3E] = temp_block[0x3D]; block[0x27] = temp_block[0x36]; block[0x3D] = temp_block[0x2F]; block[0x2F] = temp_block[0x37]; block[0x37] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F]; }else if(s->idsp.perm_type == FF_IDCT_PERM_LIBMPEG2){ if(last_non_zero_p1 <= 1) goto end; block[0x04] = temp_block[0x01]; block[0x08] = temp_block[0x08]; 
block[0x10] = temp_block[0x10]; if(last_non_zero_p1 <= 4) goto end; block[0x0C] = temp_block[0x09]; block[0x01] = temp_block[0x02]; block[0x05] = temp_block[0x03]; if(last_non_zero_p1 <= 7) goto end; block[0x09] = temp_block[0x0A]; block[0x14] = temp_block[0x11]; block[0x18] = temp_block[0x18]; block[0x20] = temp_block[0x20]; if(last_non_zero_p1 <= 11) goto end; block[0x1C] = temp_block[0x19]; block[0x11] = temp_block[0x12]; block[0x0D] = temp_block[0x0B]; block[0x02] = temp_block[0x04]; block[0x06] = temp_block[0x05]; if(last_non_zero_p1 <= 16) goto end; block[0x0A] = temp_block[0x0C]; block[0x15] = temp_block[0x13]; block[0x19] = temp_block[0x1A]; block[0x24] = temp_block[0x21]; block[0x28] = temp_block[0x28]; block[0x30] = temp_block[0x30]; block[0x2C] = temp_block[0x29]; block[0x21] = temp_block[0x22]; if(last_non_zero_p1 <= 24) goto end; block[0x1D] = temp_block[0x1B]; block[0x12] = temp_block[0x14]; block[0x0E] = temp_block[0x0D]; block[0x03] = temp_block[0x06]; block[0x07] = temp_block[0x07]; block[0x0B] = temp_block[0x0E]; block[0x16] = temp_block[0x15]; block[0x1A] = temp_block[0x1C]; if(last_non_zero_p1 <= 32) goto end; block[0x25] = temp_block[0x23]; block[0x29] = temp_block[0x2A]; block[0x34] = temp_block[0x31]; block[0x38] = temp_block[0x38]; block[0x3C] = temp_block[0x39]; block[0x31] = temp_block[0x32]; block[0x2D] = temp_block[0x2B]; block[0x22] = temp_block[0x24]; if(last_non_zero_p1 <= 40) goto end; block[0x1E] = temp_block[0x1D]; block[0x13] = temp_block[0x16]; block[0x0F] = temp_block[0x0F]; block[0x17] = temp_block[0x17]; block[0x1B] = temp_block[0x1E]; block[0x26] = temp_block[0x25]; block[0x2A] = temp_block[0x2C]; block[0x35] = temp_block[0x33]; if(last_non_zero_p1 <= 48) goto end; block[0x39] = temp_block[0x3A]; block[0x3D] = temp_block[0x3B]; block[0x32] = temp_block[0x34]; block[0x2E] = temp_block[0x2D]; block[0x23] = temp_block[0x26]; block[0x1F] = temp_block[0x1F]; block[0x27] = temp_block[0x27]; block[0x2B] = temp_block[0x2E]; if(last_non_zero_p1 <= 56) goto end; block[0x36] = temp_block[0x35]; block[0x3A] = temp_block[0x3C]; block[0x3E] = temp_block[0x3D]; block[0x33] = temp_block[0x36]; block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37]; block[0x3B] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F]; }else{ if(last_non_zero_p1 <= 1) goto end; block[0x01] = temp_block[0x01]; block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10]; if(last_non_zero_p1 <= 4) goto end; block[0x09] = temp_block[0x09]; block[0x02] = temp_block[0x02]; block[0x03] = temp_block[0x03]; if(last_non_zero_p1 <= 7) goto end; block[0x0A] = temp_block[0x0A]; block[0x11] = temp_block[0x11]; block[0x18] = temp_block[0x18]; block[0x20] = temp_block[0x20]; if(last_non_zero_p1 <= 11) goto end; block[0x19] = temp_block[0x19]; block[0x12] = temp_block[0x12]; block[0x0B] = temp_block[0x0B]; block[0x04] = temp_block[0x04]; block[0x05] = temp_block[0x05]; if(last_non_zero_p1 <= 16) goto end; block[0x0C] = temp_block[0x0C]; block[0x13] = temp_block[0x13]; block[0x1A] = temp_block[0x1A]; block[0x21] = temp_block[0x21]; block[0x28] = temp_block[0x28]; block[0x30] = temp_block[0x30]; block[0x29] = temp_block[0x29]; block[0x22] = temp_block[0x22]; if(last_non_zero_p1 <= 24) goto end; block[0x1B] = temp_block[0x1B]; block[0x14] = temp_block[0x14]; block[0x0D] = temp_block[0x0D]; block[0x06] = temp_block[0x06]; block[0x07] = temp_block[0x07]; block[0x0E] = temp_block[0x0E]; block[0x15] = temp_block[0x15]; block[0x1C] = temp_block[0x1C]; if(last_non_zero_p1 <= 32) goto end; block[0x23] = 
temp_block[0x23]; block[0x2A] = temp_block[0x2A]; block[0x31] = temp_block[0x31]; block[0x38] = temp_block[0x38]; block[0x39] = temp_block[0x39]; block[0x32] = temp_block[0x32]; block[0x2B] = temp_block[0x2B]; block[0x24] = temp_block[0x24]; if(last_non_zero_p1 <= 40) goto end; block[0x1D] = temp_block[0x1D]; block[0x16] = temp_block[0x16]; block[0x0F] = temp_block[0x0F]; block[0x17] = temp_block[0x17]; block[0x1E] = temp_block[0x1E]; block[0x25] = temp_block[0x25]; block[0x2C] = temp_block[0x2C]; block[0x33] = temp_block[0x33]; if(last_non_zero_p1 <= 48) goto end; block[0x3A] = temp_block[0x3A]; block[0x3B] = temp_block[0x3B]; block[0x34] = temp_block[0x34]; block[0x2D] = temp_block[0x2D]; block[0x26] = temp_block[0x26]; block[0x1F] = temp_block[0x1F]; block[0x27] = temp_block[0x27]; block[0x2E] = temp_block[0x2E]; if(last_non_zero_p1 <= 56) goto end; block[0x35] = temp_block[0x35]; block[0x3C] = temp_block[0x3C]; block[0x3D] = temp_block[0x3D]; block[0x36] = temp_block[0x36]; block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37]; block[0x3E] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F]; } end: return last_non_zero_p1 - 1; }"} {"target": 1, "idx": 18556, "func": "static void qdm2_fft_decode_tones (QDM2Context *q, int duration, GetBitContext *gb, int b) { int channel, stereo, phase, exp; int local_int_4, local_int_8, stereo_phase, local_int_10; int local_int_14, stereo_exp, local_int_20, local_int_28; int n, offset; local_int_4 = 0; local_int_28 = 0; local_int_20 = 2; local_int_8 = (4 - duration); local_int_10 = 1 << (q->group_order - duration - 1); offset = 1; while (1) { if (q->superblocktype_2_3) { while ((n = qdm2_get_vlc(gb, &vlc_tab_fft_tone_offset[local_int_8], 1, 2)) < 2) { offset = 1; if (n == 0) { local_int_4 += local_int_10; local_int_28 += (1 << local_int_8); } else { local_int_4 += 8*local_int_10; local_int_28 += (8 << local_int_8); } } offset += (n - 2); } else { offset += qdm2_get_vlc(gb, &vlc_tab_fft_tone_offset[local_int_8], 1, 2); while (offset >= (local_int_10 - 1)) { offset += (1 - (local_int_10 - 1)); local_int_4 += local_int_10; local_int_28 += (1 << local_int_8); } } if (local_int_4 >= q->group_size) local_int_14 = (offset >> local_int_8); if (q->nb_channels > 1) { channel = get_bits1(gb); stereo = get_bits1(gb); } else { channel = 0; stereo = 0; } exp = qdm2_get_vlc(gb, (b ? &fft_level_exp_vlc : &fft_level_exp_alt_vlc), 0, 2); exp += q->fft_level_exp[fft_level_index_table[local_int_14]]; exp = (exp < 0) ? 
0 : exp; phase = get_bits(gb, 3); stereo_exp = 0; stereo_phase = 0; if (stereo) { stereo_exp = (exp - qdm2_get_vlc(gb, &fft_stereo_exp_vlc, 0, 1)); stereo_phase = (phase - qdm2_get_vlc(gb, &fft_stereo_phase_vlc, 0, 1)); if (stereo_phase < 0) stereo_phase += 8; } if (q->frequency_range > (local_int_14 + 1)) { int sub_packet = (local_int_20 + local_int_28); qdm2_fft_init_coefficient(q, sub_packet, offset, duration, channel, exp, phase); if (stereo) qdm2_fft_init_coefficient(q, sub_packet, offset, duration, (1 - channel), stereo_exp, stereo_phase); } offset++; } }"} {"target": 1, "idx": 18559, "func": "static void cuda_receive_packet(CUDAState *s, const uint8_t *data, int len) { uint8_t obuf[16] = { CUDA_PACKET, 0, data[0] }; int autopoll; uint32_t ti; switch(data[0]) { case CUDA_AUTOPOLL: autopoll = (data[1] != 0); if (autopoll != s->autopoll) { s->autopoll = autopoll; if (autopoll) { timer_mod(s->adb_poll_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ)); } else { timer_del(s->adb_poll_timer); } } cuda_send_packet_to_host(s, obuf, 3); break; case CUDA_GET_6805_ADDR: cuda_send_packet_to_host(s, obuf, 3); break; case CUDA_SET_TIME: ti = (((uint32_t)data[1]) << 24) + (((uint32_t)data[2]) << 16) + (((uint32_t)data[3]) << 8) + data[4]; s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec()); cuda_send_packet_to_host(s, obuf, 3); break; case CUDA_GET_TIME: ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec()); obuf[3] = ti >> 24; obuf[4] = ti >> 16; obuf[5] = ti >> 8; obuf[6] = ti; cuda_send_packet_to_host(s, obuf, 7); break; case CUDA_FILE_SERVER_FLAG: case CUDA_SET_DEVICE_LIST: case CUDA_SET_AUTO_RATE: case CUDA_SET_POWER_MESSAGES: cuda_send_packet_to_host(s, obuf, 3); break; case CUDA_POWERDOWN: cuda_send_packet_to_host(s, obuf, 3); qemu_system_shutdown_request(); break; case CUDA_RESET_SYSTEM: cuda_send_packet_to_host(s, obuf, 3); qemu_system_reset_request(); break; case CUDA_COMBINED_FORMAT_IIC: obuf[1] = 0x5; break; case CUDA_GET_SET_IIC: if (len == 4) { cuda_send_packet_to_host(s, obuf, 3); } else { } break; default: break; } }"} {"target": 1, "idx": 18562, "func": "static void bitmap_free(Qcow2Bitmap *bm) { g_free(bm->name); g_free(bm);"} {"target": 1, "idx": 18578, "func": "static int vp6_parse_coeff(VP56Context *s) { VP56RangeCoder *c = s->ccp; VP56Model *model = s->modelp; uint8_t *permute = s->idct_scantable; uint8_t *model1, *model2, *model3; int coeff, sign, coeff_idx; int b, i, cg, idx, ctx; int pt = 0; /* plane type (0 for Y, 1 for U or V) */ if (c->end >= c->buffer && c->bits >= 0) { av_log(s->avctx, AV_LOG_ERROR, \"End of AC stream reached in vp6_parse_coeff\\n\"); return AVERROR_INVALIDDATA; } for (b=0; b<6; b++) { int ct = 1; /* code type */ int run = 1; if (b > 3) pt = 1; ctx = s->left_block[ff_vp56_b6to4[b]].not_null_dc + s->above_blocks[s->above_block_idx[b]].not_null_dc; model1 = model->coeff_dccv[pt]; model2 = model->coeff_dcct[pt][ctx]; coeff_idx = 0; for (;;) { if ((coeff_idx>1 && ct==0) || vp56_rac_get_prob_branchy(c, model2[0])) { /* parse a coeff */ if (vp56_rac_get_prob_branchy(c, model2[2])) { if (vp56_rac_get_prob_branchy(c, model2[3])) { idx = vp56_rac_get_tree(c, ff_vp56_pc_tree, model1); coeff = ff_vp56_coeff_bias[idx+5]; for (i=ff_vp56_coeff_bit_length[idx]; i>=0; i--) coeff += vp56_rac_get_prob(c, ff_vp56_coeff_parse_table[idx][i]) << i; } else { if (vp56_rac_get_prob_branchy(c, model2[4])) coeff = 3 + vp56_rac_get_prob(c, model1[5]); else coeff = 2; } ct = 2; } 
else { ct = 1; coeff = 1; } sign = vp56_rac_get(c); coeff = (coeff ^ -sign) + sign; if (coeff_idx) coeff *= s->dequant_ac; idx = model->coeff_index_to_pos[coeff_idx]; s->block_coeff[b][permute[idx]] = coeff; run = 1; } else { /* parse a run */ ct = 0; if (coeff_idx > 0) { if (!vp56_rac_get_prob_branchy(c, model2[1])) break; model3 = model->coeff_runv[coeff_idx >= 6]; run = vp56_rac_get_tree(c, vp6_pcr_tree, model3); if (!run) for (run=9, i=0; i<6; i++) run += vp56_rac_get_prob(c, model3[i+8]) << i; } } coeff_idx += run; if (coeff_idx >= 64) break; cg = vp6_coeff_groups[coeff_idx]; model1 = model2 = model->coeff_ract[pt][ct][cg]; } s->left_block[ff_vp56_b6to4[b]].not_null_dc = s->above_blocks[s->above_block_idx[b]].not_null_dc = !!s->block_coeff[b][0]; } return 0; }"} {"target": 0, "idx": 18583, "func": "static int ftp_passive_mode_epsv(FTPContext *s) { char *res = NULL, *start = NULL, *end = NULL; int i; static const char d = '|'; static const char *command = \"EPSV\\r\\n\"; static const int epsv_codes[] = {229, 0}; if (ftp_send_command(s, command, epsv_codes, &res) != 229 || !res) goto fail; for (i = 0; res[i]; ++i) { if (res[i] == '(') { start = res + i + 1; } else if (res[i] == ')') { end = res + i; break; } } if (!start || !end) goto fail; *end = '\\0'; if (strlen(start) < 5) goto fail; if (start[0] != d || start[1] != d || start[2] != d || end[-1] != d) goto fail; start += 3; end[-1] = '\\0'; s->server_data_port = atoi(start); av_dlog(s, \"Server data port: %d\\n\", s->server_data_port); av_free(res); return 0; fail: av_free(res); s->server_data_port = -1; return AVERROR(ENOSYS); }"} {"target": 0, "idx": 18596, "func": "static void qemu_enqueue_packet(VLANClientState *sender, const uint8_t *buf, int size, NetPacketSent *sent_cb) { VLANPacket *packet; packet = qemu_malloc(sizeof(VLANPacket) + size); packet->sender = sender; packet->size = size; packet->sent_cb = sent_cb; memcpy(packet->data, buf, size); TAILQ_INSERT_TAIL(&sender->vlan->send_queue, packet, entry); }"} {"target": 0, "idx": 18597, "func": "static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env) { /* TO FIX */ return 0; }"} {"target": 0, "idx": 18598, "func": "uint16_t net_checksum_tcpudp(uint16_t length, uint16_t proto, uint8_t *addrs, uint8_t *buf) { uint32_t sum = 0; sum += net_checksum_add(length, buf); // payload sum += net_checksum_add(8, addrs); // src + dst address sum += proto + length; // protocol & length return net_checksum_finish(sum); }"} {"target": 0, "idx": 18614, "func": "void cpu_loop_exit(CPUState *env1) { env1->current_tb = NULL; longjmp(env1->jmp_env, 1); }"} {"target": 0, "idx": 18619, "func": "void ppc_hw_interrupt (CPUPPCState *env) { int raised = 0; #if 1 if (loglevel & CPU_LOG_INT) { fprintf(logfile, \"%s: %p pending %08x req %08x me %d ee %d\\n\", __func__, env, env->pending_interrupts, env->interrupt_request, msr_me, msr_ee); } #endif /* Raise it */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) { /* External reset / critical input */ /* XXX: critical input should be handled another way. * This code is not correct ! 
*/ env->exception_index = EXCP_RESET; env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET); raised = 1; } if (raised == 0 && msr_me != 0) { /* Machine check exception */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) { env->exception_index = EXCP_MACHINE_CHECK; env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK); raised = 1; } } if (raised == 0 && msr_ee != 0) { #if defined(TARGET_PPC64H) /* PowerPC 64 with hypervisor mode support */ /* Hypervisor decrementer exception */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) { env->exception_index = EXCP_HDECR; env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); raised = 1; } else #endif /* Decrementer exception */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) { env->exception_index = EXCP_DECR; env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR); raised = 1; /* Programmable interval timer on embedded PowerPC */ } else if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) { env->exception_index = EXCP_40x_PIT; env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT); raised = 1; /* Fixed interval timer on embedded PowerPC */ } else if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) { env->exception_index = EXCP_40x_FIT; env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT); raised = 1; /* Watchdog timer on embedded PowerPC */ } else if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) { env->exception_index = EXCP_40x_WATCHDOG; env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT); raised = 1; /* External interrupt */ } else if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) { env->exception_index = EXCP_EXTERNAL; /* Taking an external interrupt does not clear the external * interrupt status */ #if 0 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_EXT); #endif raised = 1; #if 0 // TODO /* Thermal interrupt */ } else if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) { env->exception_index = EXCP_970_THRM; env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM); raised = 1; #endif } #if 0 // TODO /* External debug exception */ } else if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) { env->exception_index = EXCP_xxx; env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG); raised = 1; #endif } if (raised != 0) { env->error_code = 0; do_interrupt(env); } }"} {"target": 0, "idx": 18620, "func": "gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, int search_pc) { uint16_t *gen_opc_end; uint32_t pc_start; int j, lj; struct DisasContext ctx; struct DisasContext *dc = &ctx; uint32_t next_page_start, org_flags; target_ulong npc; int num_insns; int max_insns; qemu_log_try_set_file(stderr); pc_start = tb->pc; dc->env = env; dc->tb = tb; org_flags = dc->synced_flags = dc->tb_flags = tb->flags; gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; dc->is_jmp = DISAS_NEXT; dc->jmp = 0; dc->delayed_branch = !!(dc->tb_flags & D_FLAG); dc->pc = pc_start; dc->singlestep_enabled = env->singlestep_enabled; dc->cpustate_changed = 0; dc->abort_at_next_insn = 0; dc->nr_nops = 0; if (pc_start & 3) cpu_abort(env, \"Microblaze: unaligned PC=%x\\n\", pc_start); if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { #if !SIM_COMPAT qemu_log(\"--------------\\n\"); log_cpu_state(env, 0); #endif } next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; lj = -1; num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) max_insns = CF_COUNT_MASK; gen_icount_start(); do { #if SIM_COMPAT if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc); gen_helper_debug(); } 
#endif check_breakpoint(env, dc); if (search_pc) { j = gen_opc_ptr - gen_opc_buf; if (lj < j) { lj++; while (lj < j) gen_opc_instr_start[lj++] = 0; } gen_opc_pc[lj] = dc->pc; gen_opc_instr_start[lj] = 1; gen_opc_icount[lj] = num_insns; } /* Pretty disas. */ LOG_DIS(\"%8.8x:\\t\", dc->pc); if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) gen_io_start(); dc->clear_imm = 1; decode(dc); if (dc->clear_imm) dc->tb_flags &= ~IMM_FLAG; dc->pc += 4; num_insns++; if (dc->delayed_branch) { dc->delayed_branch--; if (!dc->delayed_branch) { if (dc->tb_flags & DRTI_FLAG) do_rti(dc); if (dc->tb_flags & DRTB_FLAG) do_rtb(dc); if (dc->tb_flags & DRTE_FLAG) do_rte(dc); /* Clear the delay slot flag. */ dc->tb_flags &= ~D_FLAG; /* If it is a direct jump, try direct chaining. */ if (dc->jmp != JMP_DIRECT) { eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc)); dc->is_jmp = DISAS_JUMP; } break; } } if (env->singlestep_enabled) break; } while (!dc->is_jmp && !dc->cpustate_changed && gen_opc_ptr < gen_opc_end && !singlestep && (dc->pc < next_page_start) && num_insns < max_insns); npc = dc->pc; if (dc->jmp == JMP_DIRECT) { if (dc->tb_flags & D_FLAG) { dc->is_jmp = DISAS_UPDATE; tcg_gen_movi_tl(cpu_SR[SR_PC], npc); sync_jmpstate(dc); } else npc = dc->jmp_pc; } if (tb->cflags & CF_LAST_IO) gen_io_end(); /* Force an update if the per-tb cpu state has changed. */ if (dc->is_jmp == DISAS_NEXT && (dc->cpustate_changed || org_flags != dc->tb_flags)) { dc->is_jmp = DISAS_UPDATE; tcg_gen_movi_tl(cpu_SR[SR_PC], npc); } t_sync_flags(dc); if (unlikely(env->singlestep_enabled)) { t_gen_raise_exception(dc, EXCP_DEBUG); if (dc->is_jmp == DISAS_NEXT) tcg_gen_movi_tl(cpu_SR[SR_PC], npc); } else { switch(dc->is_jmp) { case DISAS_NEXT: gen_goto_tb(dc, 1, npc); break; default: case DISAS_JUMP: case DISAS_UPDATE: /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(0); break; case DISAS_TB_JUMP: /* nothing more to generate */ break; } } gen_icount_end(tb, num_insns); *gen_opc_ptr = INDEX_op_end; if (search_pc) { j = gen_opc_ptr - gen_opc_buf; lj++; while (lj <= j) gen_opc_instr_start[lj++] = 0; } else { tb->size = dc->pc - pc_start; tb->icount = num_insns; } #ifdef DEBUG_DISAS #if !SIM_COMPAT if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log(\"\\n\"); #if DISAS_GNU log_target_disas(pc_start, dc->pc - pc_start, 0); #endif qemu_log(\"\\nisize=%d osize=%td\\n\", dc->pc - pc_start, gen_opc_ptr - gen_opc_buf); } #endif #endif assert(!dc->abort_at_next_insn); }"} {"target": 1, "idx": 18624, "func": "static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w) { long i; for (i = 0; i <= w - sizeof(long); i += sizeof(long)) { long a = *(long *)(src1 + i); long b = *(long *)(src2 + i); *(long *)(dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80); } for (; i < w; i++) dst[i] = src1[i] + src2[i]; }"} {"target": 1, "idx": 18629, "func": "static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix, bool *rebuild, uint16_t **refcount_table, int64_t *nb_clusters) { BDRVQcowState *s = bs->opaque; int64_t i; QCowSnapshot *sn; int ret; if (!*refcount_table) { int64_t old_size = 0; ret = realloc_refcount_array(s, refcount_table, &old_size, *nb_clusters); if (ret < 0) { res->check_errors++; return ret; } } /* header */ ret = inc_refcounts(bs, res, refcount_table, nb_clusters, 0, s->cluster_size); if (ret < 0) { return ret; } /* current L1 table */ ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, s->l1_table_offset, s->l1_size, 
CHECK_FRAG_INFO); if (ret < 0) { return ret; } /* snapshots */ for (i = 0; i < s->nb_snapshots; i++) { sn = s->snapshots + i; ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, sn->l1_table_offset, sn->l1_size, 0); if (ret < 0) { return ret; } } ret = inc_refcounts(bs, res, refcount_table, nb_clusters, s->snapshots_offset, s->snapshots_size); if (ret < 0) { return ret; } /* refcount data */ ret = inc_refcounts(bs, res, refcount_table, nb_clusters, s->refcount_table_offset, s->refcount_table_size * sizeof(uint64_t)); if (ret < 0) { return ret; } return check_refblocks(bs, res, fix, rebuild, refcount_table, nb_clusters); }"} {"target": 1, "idx": 18640, "func": "static void ne2000_ioport_write(void *opaque, uint32_t addr, uint32_t val) { NE2000State *s = opaque; int offset, page; addr &= 0xf; #ifdef DEBUG_NE2000 printf(\"NE2000: write addr=0x%x val=0x%02x\\n\", addr, val); #endif if (addr == E8390_CMD) { /* control register */ s->cmd = val; if (val & E8390_START) { s->isr &= ~ENISR_RESET; /* test specific case: zero length transfert */ if ((val & (E8390_RREAD | E8390_RWRITE)) && s->rcnt == 0) { s->isr |= ENISR_RDC; ne2000_update_irq(s); } if (val & E8390_TRANS) { qemu_send_packet(s->nd, s->mem + (s->tpsr << 8), s->tcnt); /* signal end of transfert */ s->tsr = ENTSR_PTX; s->isr |= ENISR_TX; ne2000_update_irq(s); } } } else { page = s->cmd >> 6; offset = addr | (page << 4); switch(offset) { case EN0_STARTPG: s->start = val << 8; break; case EN0_STOPPG: s->stop = val << 8; break; case EN0_BOUNDARY: s->boundary = val; break; case EN0_IMR: s->imr = val; ne2000_update_irq(s); break; case EN0_TPSR: s->tpsr = val; break; case EN0_TCNTLO: s->tcnt = (s->tcnt & 0xff00) | val; break; case EN0_TCNTHI: s->tcnt = (s->tcnt & 0x00ff) | (val << 8); break; case EN0_RSARLO: s->rsar = (s->rsar & 0xff00) | val; break; case EN0_RSARHI: s->rsar = (s->rsar & 0x00ff) | (val << 8); break; case EN0_RCNTLO: s->rcnt = (s->rcnt & 0xff00) | val; break; case EN0_RCNTHI: s->rcnt = (s->rcnt & 0x00ff) | (val << 8); break; case EN0_DCFG: s->dcfg = val; break; case EN0_ISR: s->isr &= ~(val & 0x7f); ne2000_update_irq(s); break; case EN1_PHYS ... EN1_PHYS + 5: s->phys[offset - EN1_PHYS] = val; break; case EN1_CURPAG: s->curpag = val; break; case EN1_MULT ... EN1_MULT + 7: s->mult[offset - EN1_MULT] = val; break; } } }"} {"target": 0, "idx": 18646, "func": "static int vc1_filter_line(uint8_t* src, int stride, int pq){ int a0, a1, a2, a3, d, clip, filt3 = 0; uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; a0 = (2*(src[-2*stride] - src[ 1*stride]) - 5*(src[-1*stride] - src[ 0*stride]) + 4) >> 3; if(FFABS(a0) < pq){ a1 = (2*(src[-4*stride] - src[-1*stride]) - 5*(src[-3*stride] - src[-2*stride]) + 4) >> 3; a2 = (2*(src[ 0*stride] - src[ 3*stride]) - 5*(src[ 1*stride] - src[ 2*stride]) + 4) >> 3; a3 = FFMIN(FFABS(a1), FFABS(a2)); if(a3 < FFABS(a0)){ d = 5 * ((a0 >=0 ? 
a3 : -a3) - a0) / 8; clip = (src[-1*stride] - src[ 0*stride])/2; if(clip){ filt3 = 1; if(clip > 0) d = av_clip(d, 0, clip); else d = av_clip(d, clip, 0); src[-1*stride] = cm[src[-1*stride] - d]; src[ 0*stride] = cm[src[ 0*stride] + d]; } } } return filt3; }"} {"target": 0, "idx": 18648, "func": "static int rle_unpack(const unsigned char *src, unsigned char *dest, int src_count, int src_size, int dest_len) { unsigned char *pd; int i, l; unsigned char *dest_end = dest + dest_len; GetByteContext gb; bytestream2_init(&gb, src, src_size); pd = dest; if (src_count & 1) { if (bytestream2_get_bytes_left(&gb) < 1) return 0; *pd++ = bytestream2_get_byteu(&gb); } src_count >>= 1; i = 0; do { if (bytestream2_get_bytes_left(&gb) < 1) break; l = bytestream2_get_byteu(&gb); if (l & 0x80) { l = (l & 0x7F) * 2; if (dest_end - pd < l || bytestream2_get_bytes_left(&gb) < l) return bytestream2_tell(&gb); bytestream2_get_bufferu(&gb, pd, l); pd += l; } else { if (dest_end - pd < i || bytestream2_get_bytes_left(&gb) < 2) return bytestream2_tell(&gb); for (i = 0; i < l; i++) { *pd++ = bytestream2_get_byteu(&gb); *pd++ = bytestream2_get_byteu(&gb); } bytestream2_skip(&gb, 2); } i += l; } while (i < src_count); return bytestream2_tell(&gb); }"} {"target": 0, "idx": 18649, "func": "static int recheck_discard_flags(AVFormatContext *s, int first) { HLSContext *c = s->priv_data; int i, changed = 0; /* Check if any new streams are needed */ for (i = 0; i < c->n_playlists; i++) c->playlists[i]->cur_needed = 0; for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; struct playlist *pls = c->playlists[s->streams[i]->id]; if (st->discard < AVDISCARD_ALL) pls->cur_needed = 1; } for (i = 0; i < c->n_playlists; i++) { struct playlist *pls = c->playlists[i]; if (pls->cur_needed && !pls->needed) { pls->needed = 1; changed = 1; pls->cur_seq_no = select_cur_seq_no(c, pls); pls->pb.eof_reached = 0; if (c->cur_timestamp != AV_NOPTS_VALUE) { /* catch up */ pls->seek_timestamp = c->cur_timestamp; pls->seek_flags = AVSEEK_FLAG_ANY; pls->seek_stream_index = -1; } av_log(s, AV_LOG_INFO, \"Now receiving playlist %d, segment %d\\n\", i, pls->cur_seq_no); } else if (first && !pls->cur_needed && pls->needed) { if (pls->input) ff_format_io_close(pls->parent, &pls->input); pls->needed = 0; changed = 1; av_log(s, AV_LOG_INFO, \"No longer receiving playlist %d\\n\", i); } } return changed; }"} {"target": 0, "idx": 18673, "func": "static int tpm_passthrough_unix_transfer(int tpm_fd, const TPMLocality *locty_data) { return tpm_passthrough_unix_tx_bufs(tpm_fd, locty_data->w_buffer.buffer, locty_data->w_offset, locty_data->r_buffer.buffer, locty_data->r_buffer.size); }"} {"target": 0, "idx": 18675, "func": "void do_tw (int flags) { if (!likely(!(((int32_t)T0 < (int32_t)T1 && (flags & 0x10)) || ((int32_t)T0 > (int32_t)T1 && (flags & 0x08)) || ((int32_t)T0 == (int32_t)T1 && (flags & 0x04)) || ((uint32_t)T0 < (uint32_t)T1 && (flags & 0x02)) || ((uint32_t)T0 > (uint32_t)T1 && (flags & 0x01))))) { do_raise_exception_err(EXCP_PROGRAM, EXCP_TRAP); } }"} {"target": 1, "idx": 18696, "func": "void sh4_translate_init(void) { int i; static const char * const gregnames[24] = { \"R0_BANK0\", \"R1_BANK0\", \"R2_BANK0\", \"R3_BANK0\", \"R4_BANK0\", \"R5_BANK0\", \"R6_BANK0\", \"R7_BANK0\", \"R8\", \"R9\", \"R10\", \"R11\", \"R12\", \"R13\", \"R14\", \"R15\", \"R0_BANK1\", \"R1_BANK1\", \"R2_BANK1\", \"R3_BANK1\", \"R4_BANK1\", \"R5_BANK1\", \"R6_BANK1\", \"R7_BANK1\" }; static const char * const fregnames[32] = { \"FPR0_BANK0\", \"FPR1_BANK0\", 
\"FPR2_BANK0\", \"FPR3_BANK0\", \"FPR4_BANK0\", \"FPR5_BANK0\", \"FPR6_BANK0\", \"FPR7_BANK0\", \"FPR8_BANK0\", \"FPR9_BANK0\", \"FPR10_BANK0\", \"FPR11_BANK0\", \"FPR12_BANK0\", \"FPR13_BANK0\", \"FPR14_BANK0\", \"FPR15_BANK0\", \"FPR0_BANK1\", \"FPR1_BANK1\", \"FPR2_BANK1\", \"FPR3_BANK1\", \"FPR4_BANK1\", \"FPR5_BANK1\", \"FPR6_BANK1\", \"FPR7_BANK1\", \"FPR8_BANK1\", \"FPR9_BANK1\", \"FPR10_BANK1\", \"FPR11_BANK1\", \"FPR12_BANK1\", \"FPR13_BANK1\", \"FPR14_BANK1\", \"FPR15_BANK1\", }; for (i = 0; i < 24; i++) { cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, gregs[i]), gregnames[i]); } memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv)); cpu_pc = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, pc), \"PC\"); cpu_sr = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, sr), \"SR\"); cpu_sr_m = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, sr_m), \"SR_M\"); cpu_sr_q = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, sr_q), \"SR_Q\"); cpu_sr_t = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, sr_t), \"SR_T\"); cpu_ssr = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, ssr), \"SSR\"); cpu_spc = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, spc), \"SPC\"); cpu_gbr = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, gbr), \"GBR\"); cpu_vbr = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, vbr), \"VBR\"); cpu_sgr = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, sgr), \"SGR\"); cpu_dbr = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, dbr), \"DBR\"); cpu_mach = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, mach), \"MACH\"); cpu_macl = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, macl), \"MACL\"); cpu_pr = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, pr), \"PR\"); cpu_fpscr = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, fpscr), \"FPSCR\"); cpu_fpul = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, fpul), \"FPUL\"); cpu_flags = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, flags), \"_flags_\"); cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, delayed_pc), \"_delayed_pc_\"); cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, delayed_cond), \"_delayed_cond_\"); cpu_ldst = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, ldst), \"_ldst_\"); for (i = 0; i < 32; i++) cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env, offsetof(CPUSH4State, fregs[i]), fregnames[i]); }"} {"target": 1, "idx": 18700, "func": "AVStream *add_audio_stream(AVFormatContext *oc, int codec_id) { AVCodec *codec; AVCodecContext *c; AVStream *st; st = av_new_stream(oc, 1); if (!st) { fprintf(stderr, \"Could not alloc stream\\n\"); exit(1); } /* find the MP2 encoder */ codec = avcodec_find_encoder(codec_id); if (!codec) { fprintf(stderr, \"codec not found\\n\"); exit(1); } c = &st->codec; c->codec_type = CODEC_TYPE_AUDIO; /* put sample parameters */ c->bit_rate = 64000; c->sample_rate = 44100; c->channels = 2; /* open it */ if (avcodec_open(c, codec) < 0) { fprintf(stderr, \"could not open codec\\n\"); exit(1); } /* init signal generator */ t = 0; tincr = 2 * M_PI * 440.0 / c->sample_rate; audio_outbuf_size = 10000; audio_outbuf = malloc(audio_outbuf_size); /* ugly hack for PCM codecs (will be removed ASAP with new PCM support to compute the input frame size in samples */ if (c->frame_size <= 1) { audio_input_frame_size = audio_outbuf_size / c->channels; switch(st->codec.codec_id) { case CODEC_ID_PCM_S16LE: case CODEC_ID_PCM_S16BE: case 
CODEC_ID_PCM_U16LE: case CODEC_ID_PCM_U16BE: audio_input_frame_size >>= 1; break; default: break; } } else { audio_input_frame_size = c->frame_size; } samples = malloc(audio_input_frame_size * 2 * c->channels); return st; }"} {"target": 1, "idx": 18719, "func": "static av_cold int libopenjpeg_encode_init(AVCodecContext *avctx) { LibOpenJPEGContext *ctx = avctx->priv_data; int err = AVERROR(ENOMEM); opj_set_default_encoder_parameters(&ctx->enc_params); ctx->enc_params.cp_rsiz = ctx->profile; ctx->enc_params.mode = !!avctx->global_quality; ctx->enc_params.cp_cinema = ctx->cinema_mode; ctx->enc_params.prog_order = ctx->prog_order; ctx->enc_params.numresolution = ctx->numresolution; ctx->enc_params.cp_disto_alloc = ctx->disto_alloc; ctx->enc_params.cp_fixed_alloc = ctx->fixed_alloc; ctx->enc_params.cp_fixed_quality = ctx->fixed_quality; ctx->enc_params.tcp_numlayers = ctx->numlayers; ctx->enc_params.tcp_rates[0] = FFMAX(avctx->compression_level, 0) * 2; if (ctx->cinema_mode > 0) { cinema_parameters(&ctx->enc_params); } ctx->compress = opj_create_compress(ctx->format); if (!ctx->compress) { av_log(avctx, AV_LOG_ERROR, \"Error creating the compressor\\n\"); return AVERROR(ENOMEM); } ctx->image = mj2_create_image(avctx, &ctx->enc_params); if (!ctx->image) { av_log(avctx, AV_LOG_ERROR, \"Error creating the mj2 image\\n\"); err = AVERROR(EINVAL); goto fail; } avctx->coded_frame = av_frame_alloc(); if (!avctx->coded_frame) { av_log(avctx, AV_LOG_ERROR, \"Error allocating coded frame\\n\"); goto fail; } memset(&ctx->event_mgr, 0, sizeof(opj_event_mgr_t)); ctx->event_mgr.info_handler = info_callback; ctx->event_mgr.error_handler = error_callback; ctx->event_mgr.warning_handler = warning_callback; opj_set_event_mgr((opj_common_ptr) ctx->compress, &ctx->event_mgr, avctx); return 0; fail: opj_destroy_compress(ctx->compress); ctx->compress = NULL; opj_image_destroy(ctx->image); ctx->image = NULL; av_freep(&avctx->coded_frame); return err; }"} {"target": 0, "idx": 18724, "func": "static inline unsigned int msi_nr_vectors(uint16_t flags) { return 1U << ((flags & PCI_MSI_FLAGS_QSIZE) >> (ffs(PCI_MSI_FLAGS_QSIZE) - 1)); }"} {"target": 0, "idx": 18727, "func": "static void openrisc_sim_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; OpenRISCCPU *cpu = NULL; MemoryRegion *ram; int n; if (!cpu_model) { cpu_model = \"or1200\"; } for (n = 0; n < smp_cpus; n++) { cpu = OPENRISC_CPU(cpu_generic_init(TYPE_OPENRISC_CPU, cpu_model)); qemu_register_reset(main_cpu_reset, cpu); main_cpu_reset(cpu); } ram = g_malloc(sizeof(*ram)); memory_region_init_ram(ram, NULL, \"openrisc.ram\", ram_size, &error_fatal); memory_region_add_subregion(get_system_memory(), 0, ram); cpu_openrisc_pic_init(cpu); cpu_openrisc_clock_init(cpu); serial_mm_init(get_system_memory(), 0x90000000, 0, cpu->env.irq[2], 115200, serial_hds[0], DEVICE_NATIVE_ENDIAN); if (nd_table[0].used) { openrisc_sim_net_init(get_system_memory(), 0x92000000, 0x92000400, cpu->env.irq[4], nd_table); } cpu_openrisc_load_kernel(ram_size, kernel_filename, cpu); }"} {"target": 0, "idx": 18741, "func": "static void ohci_reset(void *opaque) { OHCIState *ohci = opaque; OHCIPort *port; int i; ohci_bus_stop(ohci); ohci->ctl = 0; ohci->old_ctl = 0; ohci->status = 0; ohci->intr_status = 0; ohci->intr = OHCI_INTR_MIE; ohci->hcca = 0; ohci->ctrl_head = ohci->ctrl_cur = 0; ohci->bulk_head = ohci->bulk_cur = 0; ohci->per_cur = 0; ohci->done = 0; ohci->done_count = 7; 
/* FSMPS is marked TBD in OCHI 1.0, what gives ffs? * I took the value linux sets ... */ ohci->fsmps = 0x2778; ohci->fi = 0x2edf; ohci->fit = 0; ohci->frt = 0; ohci->frame_number = 0; ohci->pstart = 0; ohci->lst = OHCI_LS_THRESH; ohci->rhdesc_a = OHCI_RHA_NPS | ohci->num_ports; ohci->rhdesc_b = 0x0; /* Impl. specific */ ohci->rhstatus = 0; for (i = 0; i < ohci->num_ports; i++) { port = &ohci->rhport[i]; port->ctrl = 0; if (port->port.dev) { usb_attach(&port->port, port->port.dev); } } if (ohci->async_td) { usb_cancel_packet(&ohci->usb_packet); ohci->async_td = 0; } DPRINTF(\"usb-ohci: Reset %s\\n\", ohci->name); }"} {"target": 0, "idx": 18744, "func": "uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2) { CPU_DoubleU farg1, farg2; farg1.ll = arg1; farg2.ll = arg2; if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) && float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) { /* Magnitude subtraction of infinities */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d))) { /* sNaN subtraction */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); } farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status); } return farg1.ll; }"} {"target": 1, "idx": 18759, "func": "static int net_socket_connect_init(NetClientState *peer, const char *model, const char *name, const char *host_str) { NetSocketState *s; int fd, connected, ret; struct sockaddr_in saddr; if (parse_host_port(&saddr, host_str) < 0) return -1; fd = qemu_socket(PF_INET, SOCK_STREAM, 0); if (fd < 0) { perror(\"socket\"); return -1; } qemu_set_nonblock(fd); connected = 0; for(;;) { ret = connect(fd, (struct sockaddr *)&saddr, sizeof(saddr)); if (ret < 0) { if (errno == EINTR || errno == EWOULDBLOCK) { /* continue */ } else if (errno == EINPROGRESS || errno == EALREADY || errno == EINVAL) { break; } else { perror(\"connect\"); closesocket(fd); return -1; } } else { connected = 1; break; } } s = net_socket_fd_init(peer, model, name, fd, connected); if (!s) return -1; snprintf(s->nc.info_str, sizeof(s->nc.info_str), \"socket: connect to %s:%d\", inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); return 0; }"} {"target": 1, "idx": 18773, "func": "static int64_t load_kernel (void) { int64_t kernel_entry, kernel_high; long initrd_size; ram_addr_t initrd_offset; int big_endian; uint32_t *prom_buf; long prom_size; int prom_index = 0; uint64_t (*xlate_to_kseg0) (void *opaque, uint64_t addr); #ifdef TARGET_WORDS_BIGENDIAN big_endian = 1; #else big_endian = 0; #endif if (load_elf(loaderparams.kernel_filename, cpu_mips_kseg0_to_phys, NULL, (uint64_t *)&kernel_entry, NULL, (uint64_t *)&kernel_high, big_endian, ELF_MACHINE, 1) < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", loaderparams.kernel_filename); exit(1); } /* Sanity check where the kernel has been linked */ if (kvm_enabled()) { if (kernel_entry & 0x80000000ll) { error_report(\"KVM guest kernels must be linked in useg. \" \"Did you forget to enable CONFIG_KVM_GUEST?\"); exit(1); } xlate_to_kseg0 = cpu_mips_kvm_um_phys_to_kseg0; } else { if (!(kernel_entry & 0x80000000ll)) { error_report(\"KVM guest kernels aren't supported with TCG. 
\" \"Did you unintentionally enable CONFIG_KVM_GUEST?\"); exit(1); } xlate_to_kseg0 = cpu_mips_phys_to_kseg0; } /* load initrd */ initrd_size = 0; initrd_offset = 0; if (loaderparams.initrd_filename) { initrd_size = get_image_size (loaderparams.initrd_filename); if (initrd_size > 0) { initrd_offset = (kernel_high + ~INITRD_PAGE_MASK) & INITRD_PAGE_MASK; if (initrd_offset + initrd_size > ram_size) { fprintf(stderr, \"qemu: memory too small for initial ram disk '%s'\\n\", loaderparams.initrd_filename); exit(1); } initrd_size = load_image_targphys(loaderparams.initrd_filename, initrd_offset, ram_size - initrd_offset); } if (initrd_size == (target_ulong) -1) { fprintf(stderr, \"qemu: could not load initial ram disk '%s'\\n\", loaderparams.initrd_filename); exit(1); } } /* Setup prom parameters. */ prom_size = ENVP_NB_ENTRIES * (sizeof(int32_t) + ENVP_ENTRY_SIZE); prom_buf = g_malloc(prom_size); prom_set(prom_buf, prom_index++, \"%s\", loaderparams.kernel_filename); if (initrd_size > 0) { prom_set(prom_buf, prom_index++, \"rd_start=0x%\" PRIx64 \" rd_size=%li %s\", xlate_to_kseg0(NULL, initrd_offset), initrd_size, loaderparams.kernel_cmdline); } else { prom_set(prom_buf, prom_index++, \"%s\", loaderparams.kernel_cmdline); } prom_set(prom_buf, prom_index++, \"memsize\"); prom_set(prom_buf, prom_index++, \"%i\", MIN(loaderparams.ram_size, 256 << 20)); prom_set(prom_buf, prom_index++, \"modetty0\"); prom_set(prom_buf, prom_index++, \"38400n8r\"); prom_set(prom_buf, prom_index++, NULL); rom_add_blob_fixed(\"prom\", prom_buf, prom_size, cpu_mips_kseg0_to_phys(NULL, ENVP_ADDR)); return kernel_entry; }"} {"target": 0, "idx": 18781, "func": "static void test_validate_fail_union_anon(TestInputVisitorData *data, const void *unused) { UserDefAnonUnion *tmp = NULL; Visitor *v; Error *errp = NULL; v = validate_test_init(data, \"3.14\"); visit_type_UserDefAnonUnion(v, &tmp, NULL, &errp); g_assert(error_is_set(&errp)); qapi_free_UserDefAnonUnion(tmp); }"} {"target": 0, "idx": 18799, "func": "static int vhost_virtqueue_init(struct vhost_dev *dev, struct vhost_virtqueue *vq, int n) { struct vhost_vring_file file = { .index = n, }; int r = event_notifier_init(&vq->masked_notifier, 0); if (r < 0) { return r; } file.fd = event_notifier_get_fd(&vq->masked_notifier); r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_CALL, &file); if (r) { r = -errno; goto fail_call; } return 0; fail_call: event_notifier_cleanup(&vq->masked_notifier); return r; }"} {"target": 1, "idx": 18812, "func": "static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer) { AVFilterContext *ctx = inlink->dst; ATempoContext *atempo = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; int ret = 0; int n_in = src_buffer->nb_samples; int n_out = (int)(0.5 + ((double)n_in) / atempo->tempo); const uint8_t *src = src_buffer->data[0]; const uint8_t *src_end = src + n_in * atempo->stride; while (src < src_end) { if (!atempo->dst_buffer) { atempo->dst_buffer = ff_get_audio_buffer(outlink, n_out); if (!atempo->dst_buffer) return AVERROR(ENOMEM); av_frame_copy_props(atempo->dst_buffer, src_buffer); atempo->dst = atempo->dst_buffer->data[0]; atempo->dst_end = atempo->dst + n_out * atempo->stride; } yae_apply(atempo, &src, src_end, &atempo->dst, atempo->dst_end); if (atempo->dst == atempo->dst_end) { int n_samples = ((atempo->dst - atempo->dst_buffer->data[0]) / atempo->stride); ret = push_samples(atempo, outlink, n_samples); if (ret < 0) goto end; } } atempo->nsamples_in += n_in; end: av_frame_free(&src_buffer); return ret; }"} {"target": 1, "idx": 
18819, "func": "struct vhost_net *vhost_net_init(VhostNetOptions *options) { int r; bool backend_kernel = options->backend_type == VHOST_BACKEND_TYPE_KERNEL; struct vhost_net *net = g_malloc(sizeof *net); if (!options->net_backend) { fprintf(stderr, \"vhost-net requires net backend to be setup\\n\"); goto fail; } if (backend_kernel) { r = vhost_net_get_fd(options->net_backend); if (r < 0) { goto fail; } net->dev.backend_features = qemu_has_vnet_hdr(options->net_backend) ? 0 : (1ULL << VHOST_NET_F_VIRTIO_NET_HDR); net->backend = r; } else { net->dev.backend_features = 0; net->backend = -1; } net->nc = options->net_backend; net->dev.nvqs = 2; net->dev.vqs = net->vqs; net->dev.vq_index = net->nc->queue_index; r = vhost_dev_init(&net->dev, options->opaque, options->backend_type, options->force); if (r < 0) { goto fail; } if (backend_kernel) { if (!qemu_has_vnet_hdr_len(options->net_backend, sizeof(struct virtio_net_hdr_mrg_rxbuf))) { net->dev.features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF); } if (~net->dev.features & net->dev.backend_features) { fprintf(stderr, \"vhost lacks feature mask %\" PRIu64 \" for backend\\n\", (uint64_t)(~net->dev.features & net->dev.backend_features)); vhost_dev_cleanup(&net->dev); goto fail; } } /* Set sane init value. Override when guest acks. */ vhost_net_ack_features(net, 0); return net; fail: g_free(net); return NULL; }"} {"target": 1, "idx": 18820, "func": "static USBDevice *usb_msd_init(const char *filename) { static int nr=0; char id[8]; QemuOpts *opts; DriveInfo *dinfo; USBDevice *dev; int fatal_error; const char *p1; char fmt[32]; /* parse -usbdevice disk: syntax into drive opts */ snprintf(id, sizeof(id), \"usb%d\", nr++); opts = qemu_opts_create(&qemu_drive_opts, id, 0); p1 = strchr(filename, ':'); if (p1++) { const char *p2; if (strstart(filename, \"format=\", &p2)) { int len = MIN(p1 - p2, sizeof(fmt)); pstrcpy(fmt, len, p2); qemu_opt_set(opts, \"format\", fmt); } else if (*filename != ':') { printf(\"unrecognized USB mass-storage option %s\\n\", filename); filename = p1; if (!*filename) { printf(\"block device specification needed\\n\"); qemu_opt_set(opts, \"file\", filename); qemu_opt_set(opts, \"if\", \"none\"); /* create host drive */ dinfo = drive_init(opts, NULL, &fatal_error); if (!dinfo) { qemu_opts_del(opts); /* create guest device */ dev = usb_create(NULL /* FIXME */, \"usb-storage\"); qdev_prop_set_drive(&dev->qdev, \"drive\", dinfo); if (qdev_init(&dev->qdev) < 0) return dev;"} {"target": 1, "idx": 18830, "func": "static inline void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, const int thirdpel, int weighted, qpel_mc_func (*qpel_mc)[16], h264_chroma_mc_func (*chroma_mc)) { MpegEncContext *s = &r->s; uint8_t *Y, *U, *V, *srcY, *srcU, *srcV; int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y; int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off; int is16x16 = 1; if(thirdpel){ int chroma_mx, chroma_my; mx = (s->current_picture_ptr->f.motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24); my = (s->current_picture_ptr->f.motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24); lx = (s->current_picture_ptr->f.motion_val[dir][mv_pos][0] + (3 << 24)) % 3; ly = (s->current_picture_ptr->f.motion_val[dir][mv_pos][1] + (3 << 24)) % 3; chroma_mx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] / 2; chroma_my = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] / 2; umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24); umy = 
(chroma_my + (3 << 24)) / 3 - (1 << 24); uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3]; uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3]; }else{ int cx, cy; mx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] >> 2; my = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] >> 2; lx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] & 3; ly = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] & 3; cx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] / 2; cy = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] / 2; umx = cx >> 2; umy = cy >> 2; uvmx = (cx & 3) << 1; uvmy = (cy & 3) << 1; //due to some flaw RV40 uses the same MC compensation routine for H2V2 and H3V3 if(uvmx == 6 && uvmy == 6) uvmx = uvmy = 4; } dxy = ly*4 + lx; srcY = dir ? s->next_picture_ptr->f.data[0] : s->last_picture_ptr->f.data[0]; srcU = dir ? s->next_picture_ptr->f.data[1] : s->last_picture_ptr->f.data[1]; srcV = dir ? s->next_picture_ptr->f.data[2] : s->last_picture_ptr->f.data[2]; src_x = s->mb_x * 16 + xoff + mx; src_y = s->mb_y * 16 + yoff + my; uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx; uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy; srcY += src_y * s->linesize + src_x; srcU += uvsrc_y * s->uvlinesize + uvsrc_x; srcV += uvsrc_y * s->uvlinesize + uvsrc_x; if( (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 || (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4){ uint8_t *uvbuf = s->edge_emu_buffer + 22 * s->linesize; srcY -= 2 + 2*s->linesize; s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, (width<<3)+6, (height<<3)+6, src_x - 2, src_y - 2, s->h_edge_pos, s->v_edge_pos); srcY = s->edge_emu_buffer + 2 + 2*s->linesize; s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, (width<<2)+1, (height<<2)+1, uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1); s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, (width<<2)+1, (height<<2)+1, uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1); srcU = uvbuf; srcV = uvbuf + 16; } if(!weighted){ Y = s->dest[0] + xoff + yoff *s->linesize; U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize; V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize; }else{ Y = r->tmp_b_block_y [dir] + xoff + yoff *s->linesize; U = r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*s->uvlinesize; V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize; } if(block_type == RV34_MB_P_16x8){ qpel_mc[1][dxy](Y, srcY, s->linesize); Y += 8; srcY += 8; }else if(block_type == RV34_MB_P_8x16){ qpel_mc[1][dxy](Y, srcY, s->linesize); Y += 8 * s->linesize; srcY += 8 * s->linesize; } is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16); qpel_mc[!is16x16][dxy](Y, srcY, s->linesize); chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy); chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy); }"} {"target": 1, "idx": 18833, "func": "void migrate_fd_connect(MigrationState *s) { s->state = MIG_STATE_SETUP; trace_migrate_set_state(MIG_STATE_SETUP); /* This is a best 1st approximation. 
ns to ms */ s->expected_downtime = max_downtime/1000000; s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s); qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO); qemu_thread_create(&s->thread, migration_thread, s, QEMU_THREAD_JOINABLE); notifier_list_notify(&migration_state_notifiers, s); }"} {"target": 1, "idx": 18838, "func": "static void ppc405cr_clk_setup (ppc405cr_cpc_t *cpc) { uint64_t VCO_out, PLL_out; uint32_t CPU_clk, TMR_clk, SDRAM_clk, PLB_clk, OPB_clk, EXT_clk, UART_clk; int M, D0, D1, D2; D0 = ((cpc->pllmr >> 26) & 0x3) + 1; /* CBDV */ if (cpc->pllmr & 0x80000000) { D1 = (((cpc->pllmr >> 20) - 1) & 0xF) + 1; /* FBDV */ D2 = 8 - ((cpc->pllmr >> 16) & 0x7); /* FWDVA */ M = D0 * D1 * D2; VCO_out = cpc->sysclk * M; if (VCO_out < 400000000 || VCO_out > 800000000) { /* PLL cannot lock */ cpc->pllmr &= ~0x80000000; goto bypass_pll; } PLL_out = VCO_out / D2; } else { /* Bypass PLL */ bypass_pll: M = D0; PLL_out = cpc->sysclk * M; } CPU_clk = PLL_out; if (cpc->cr1 & 0x00800000) TMR_clk = cpc->sysclk; /* Should have a separate clock */ else TMR_clk = CPU_clk; PLB_clk = CPU_clk / D0; SDRAM_clk = PLB_clk; D0 = ((cpc->pllmr >> 10) & 0x3) + 1; OPB_clk = PLB_clk / D0; D0 = ((cpc->pllmr >> 24) & 0x3) + 2; EXT_clk = PLB_clk / D0; D0 = ((cpc->cr0 >> 1) & 0x1F) + 1; UART_clk = CPU_clk / D0; /* Setup CPU clocks */ clk_setup(&cpc->clk_setup[PPC405CR_CPU_CLK], CPU_clk); /* Setup time-base clock */ clk_setup(&cpc->clk_setup[PPC405CR_TMR_CLK], TMR_clk); /* Setup PLB clock */ clk_setup(&cpc->clk_setup[PPC405CR_PLB_CLK], PLB_clk); /* Setup SDRAM clock */ clk_setup(&cpc->clk_setup[PPC405CR_SDRAM_CLK], SDRAM_clk); /* Setup OPB clock */ clk_setup(&cpc->clk_setup[PPC405CR_OPB_CLK], OPB_clk); /* Setup external clock */ clk_setup(&cpc->clk_setup[PPC405CR_EXT_CLK], EXT_clk); /* Setup UART clock */ clk_setup(&cpc->clk_setup[PPC405CR_UART_CLK], UART_clk); }"} {"target": 0, "idx": 18853, "func": "static uint64_t cmd646_cmd_read(void *opaque, target_phys_addr_t addr, unsigned size) { CMD646BAR *cmd646bar = opaque; if (addr != 2 || size != 1) { return ((uint64_t)1 << (size * 8)) - 1; } return ide_status_read(cmd646bar->bus, addr + 2); }"} {"target": 0, "idx": 18892, "func": "static uint64_t hpet_ram_read(void *opaque, target_phys_addr_t addr, unsigned size) { HPETState *s = opaque; uint64_t cur_tick, index; DPRINTF(\"qemu: Enter hpet_ram_readl at %\" PRIx64 \"\\n\", addr); index = addr; /*address range of all TN regs*/ if (index >= 0x100 && index <= 0x3ff) { uint8_t timer_id = (addr - 0x100) / 0x20; HPETTimer *timer = &s->timer[timer_id]; if (timer_id > s->num_timers) { DPRINTF(\"qemu: timer id out of range\\n\"); return 0; } switch ((addr - 0x100) % 0x20) { case HPET_TN_CFG: return timer->config; case HPET_TN_CFG + 4: // Interrupt capabilities return timer->config >> 32; case HPET_TN_CMP: // comparator register return timer->cmp; case HPET_TN_CMP + 4: return timer->cmp >> 32; case HPET_TN_ROUTE: return timer->fsb; case HPET_TN_ROUTE + 4: return timer->fsb >> 32; default: DPRINTF(\"qemu: invalid hpet_ram_readl\\n\"); break; } } else { switch (index) { case HPET_ID: return s->capability; case HPET_PERIOD: return s->capability >> 32; case HPET_CFG: return s->config; case HPET_CFG + 4: DPRINTF(\"qemu: invalid HPET_CFG + 4 hpet_ram_readl\\n\"); return 0; case HPET_COUNTER: if (hpet_enabled(s)) { cur_tick = hpet_get_ticks(s); } else { cur_tick = s->hpet_counter; } DPRINTF(\"qemu: reading counter = %\" PRIx64 \"\\n\", cur_tick); return cur_tick; case HPET_COUNTER + 4: if (hpet_enabled(s)) { cur_tick 
= hpet_get_ticks(s); } else { cur_tick = s->hpet_counter; } DPRINTF(\"qemu: reading counter + 4 = %\" PRIx64 \"\\n\", cur_tick); return cur_tick >> 32; case HPET_STATUS: return s->isr; default: DPRINTF(\"qemu: invalid hpet_ram_readl\\n\"); break; } } return 0; }"} {"target": 0, "idx": 18893, "func": "static void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev, uint16_t vendor, uint16_t device, uint16_t class_code, uint8_t pif) { uint8_t *config; uint32_t size; proxy->vdev = vdev; config = proxy->pci_dev.config; pci_config_set_vendor_id(config, vendor); pci_config_set_device_id(config, device); config[0x08] = VIRTIO_PCI_ABI_VERSION; config[0x09] = pif; pci_config_set_class(config, class_code); config[0x2c] = vendor & 0xFF; config[0x2d] = (vendor >> 8) & 0xFF; config[0x2e] = vdev->device_id & 0xFF; config[0x2f] = (vdev->device_id >> 8) & 0xFF; config[0x3d] = 1; if (vdev->nvectors && !msix_init(&proxy->pci_dev, vdev->nvectors, 1, 0)) { pci_register_bar(&proxy->pci_dev, 1, msix_bar_size(&proxy->pci_dev), PCI_BASE_ADDRESS_SPACE_MEMORY, msix_mmio_map); } else vdev->nvectors = 0; proxy->pci_dev.config_write = virtio_write_config; size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len; if (size & (size-1)) size = 1 << qemu_fls(size); pci_register_bar(&proxy->pci_dev, 0, size, PCI_BASE_ADDRESS_SPACE_IO, virtio_map); if (!kvm_has_many_ioeventfds()) { proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD; } virtio_bind_device(vdev, &virtio_pci_bindings, proxy); proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY; proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE; proxy->host_features = vdev->get_features(vdev, proxy->host_features); }"} {"target": 0, "idx": 18915, "func": "static int usb_device_add(const char *devname, int is_hotplug) { const char *p; USBDevice *dev; if (!free_usb_ports) return -1; if (strstart(devname, \"host:\", &p)) { dev = usb_host_device_open(p); } else if (!strcmp(devname, \"mouse\")) { dev = usb_mouse_init(); } else if (!strcmp(devname, \"tablet\")) { dev = usb_tablet_init(); } else if (!strcmp(devname, \"keyboard\")) { dev = usb_keyboard_init(); } else if (strstart(devname, \"disk:\", &p)) { BlockDriverState *bs; dev = usb_msd_init(p, &bs); if (!dev) return -1; if (bdrv_key_required(bs)) { autostart = 0; if (is_hotplug && monitor_read_bdrv_key(bs) < 0) { dev->handle_destroy(dev); return -1; } } } else if (!strcmp(devname, \"wacom-tablet\")) { dev = usb_wacom_init(); } else if (strstart(devname, \"serial:\", &p)) { dev = usb_serial_init(p); #ifdef CONFIG_BRLAPI } else if (!strcmp(devname, \"braille\")) { dev = usb_baum_init(); #endif } else if (strstart(devname, \"net:\", &p)) { int nic = nb_nics; if (net_client_init(\"nic\", p) < 0) return -1; nd_table[nic].model = \"usb\"; dev = usb_net_init(&nd_table[nic]); } else if (!strcmp(devname, \"bt\") || strstart(devname, \"bt:\", &p)) { dev = usb_bt_init(devname[2] ? 
hci_init(p) : bt_new_hci(qemu_find_bt_vlan(0))); } else { return -1; } if (!dev) return -1; return usb_device_add_dev(dev); }"} {"target": 0, "idx": 18931, "func": "void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb) { int cssid, ssid, schid, m; SubchDev *sch; SCHIB schib; uint64_t addr; int ret = -ENODEV; int cc; CPUS390XState *env = &cpu->env; uint8_t ar; addr = decode_basedisp_s(env, ipb, &ar); if (addr & 3) { program_interrupt(env, PGM_SPECIFICATION, 2); return; } if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) { return; } if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) || !ioinst_schib_valid(&schib)) { program_interrupt(env, PGM_OPERAND, 2); return; } trace_ioinst_sch_id(\"msch\", cssid, ssid, schid); sch = css_find_subch(m, cssid, ssid, schid); if (sch && css_subch_visible(sch)) { ret = css_do_msch(sch, &schib); } switch (ret) { case -ENODEV: cc = 3; break; case -EBUSY: cc = 2; break; case 0: cc = 0; break; default: cc = 1; break; } setcc(cpu, cc); }"} {"target": 1, "idx": 18942, "func": "int load_uimage(const char *filename, target_ulong *ep, target_ulong *loadaddr, int *is_linux) { int fd; int size; uboot_image_header_t h; uboot_image_header_t *hdr = &h; uint8_t *data = NULL; int ret = -1; fd = open(filename, O_RDONLY | O_BINARY); if (fd < 0) return -1; size = read(fd, hdr, sizeof(uboot_image_header_t)); if (size < 0) goto out; bswap_uboot_header(hdr); if (hdr->ih_magic != IH_MAGIC) goto out; /* TODO: Implement Multi-File images. */ if (hdr->ih_type == IH_TYPE_MULTI) { fprintf(stderr, \"Unable to load multi-file u-boot images\\n\"); goto out; } switch (hdr->ih_comp) { case IH_COMP_NONE: case IH_COMP_GZIP: break; default: fprintf(stderr, \"Unable to load u-boot images with compression type %d\\n\", hdr->ih_comp); goto out; } /* TODO: Check CPU type. 
*/ if (is_linux) { if (hdr->ih_type == IH_TYPE_KERNEL && hdr->ih_os == IH_OS_LINUX) *is_linux = 1; else *is_linux = 0; } *ep = hdr->ih_ep; data = qemu_malloc(hdr->ih_size); if (!data) goto out; if (read(fd, data, hdr->ih_size) != hdr->ih_size) { fprintf(stderr, \"Error reading file\\n\"); goto out; } if (hdr->ih_comp == IH_COMP_GZIP) { uint8_t *compressed_data; size_t max_bytes; ssize_t bytes; compressed_data = data; max_bytes = UBOOT_MAX_GUNZIP_BYTES; data = qemu_malloc(max_bytes); bytes = gunzip(data, max_bytes, compressed_data, hdr->ih_size); qemu_free(compressed_data); if (bytes < 0) { fprintf(stderr, \"Unable to decompress gzipped image!\\n\"); goto out; } hdr->ih_size = bytes; } cpu_physical_memory_write_rom(hdr->ih_load, data, hdr->ih_size); if (loadaddr) *loadaddr = hdr->ih_load; ret = hdr->ih_size; out: if (data) qemu_free(data); close(fd); return ret; }"} {"target": 1, "idx": 18963, "func": "static int mpc_read_packet(AVFormatContext *s, AVPacket *pkt) { MPCContext *c = s->priv_data; int ret, size, size2, curbits, cur = c->curframe; int64_t tmp, pos; if (c->curframe >= c->fcount) return -1; if(c->curframe != c->lastframe + 1){ url_fseek(s->pb, c->frames[c->curframe].pos, SEEK_SET); c->curbits = c->frames[c->curframe].skip; } c->lastframe = c->curframe; c->curframe++; curbits = c->curbits; pos = url_ftell(s->pb); tmp = get_le32(s->pb); if(curbits <= 12){ size2 = (tmp >> (12 - curbits)) & 0xFFFFF; }else{ tmp = (tmp << 32) | get_le32(s->pb); size2 = (tmp >> (44 - curbits)) & 0xFFFFF; } curbits += 20; url_fseek(s->pb, pos, SEEK_SET); size = ((size2 + curbits + 31) & ~31) >> 3; if(cur == c->frames_noted){ c->frames[cur].pos = pos; c->frames[cur].size = size; c->frames[cur].skip = curbits - 20; av_add_index_entry(s->streams[0], cur, cur, size, 0, AVINDEX_KEYFRAME); c->frames_noted++; } c->curbits = (curbits + size2) & 0x1F; if (av_new_packet(pkt, size) < 0) return AVERROR(EIO); pkt->data[0] = curbits; pkt->data[1] = (c->curframe > c->fcount); pkt->stream_index = 0; pkt->pts = cur; ret = get_buffer(s->pb, pkt->data + 4, size); if(c->curbits) url_fseek(s->pb, -4, SEEK_CUR); if(ret < size){ av_free_packet(pkt); return AVERROR(EIO); } pkt->size = ret + 4; return 0; }"} {"target": 1, "idx": 18969, "func": "static void hpet_ram_writel(void *opaque, target_phys_addr_t addr, uint32_t value) { int i; HPETState *s = (HPETState *)opaque; uint64_t old_val, new_val, val, index; DPRINTF(\"qemu: Enter hpet_ram_writel at %\" PRIx64 \" = %#x\\n\", addr, value); index = addr; old_val = hpet_ram_readl(opaque, addr); new_val = value; /*address range of all TN regs*/ if (index >= 0x100 && index <= 0x3ff) { uint8_t timer_id = (addr - 0x100) / 0x20; DPRINTF(\"qemu: hpet_ram_writel timer_id = %#x \\n\", timer_id); HPETTimer *timer = &s->timer[timer_id]; if (timer_id > HPET_NUM_TIMERS - 1) { DPRINTF(\"qemu: timer id out of range\\n\"); return; } switch ((addr - 0x100) % 0x20) { case HPET_TN_CFG: DPRINTF(\"qemu: hpet_ram_writel HPET_TN_CFG\\n\"); val = hpet_fixup_reg(new_val, old_val, HPET_TN_CFG_WRITE_MASK); timer->config = (timer->config & 0xffffffff00000000ULL) | val; if (new_val & HPET_TN_32BIT) { timer->cmp = (uint32_t)timer->cmp; timer->period = (uint32_t)timer->period; } if (new_val & HPET_TIMER_TYPE_LEVEL) { printf(\"qemu: level-triggered hpet not supported\\n\"); exit (-1); } break; case HPET_TN_CFG + 4: // Interrupt capabilities DPRINTF(\"qemu: invalid HPET_TN_CFG+4 write\\n\"); break; case HPET_TN_CMP: // comparator register DPRINTF(\"qemu: hpet_ram_writel HPET_TN_CMP \\n\"); if (timer->config & 
HPET_TN_32BIT) new_val = (uint32_t)new_val; if (!timer_is_periodic(timer) || (timer->config & HPET_TN_SETVAL)) timer->cmp = (timer->cmp & 0xffffffff00000000ULL) | new_val; if (timer_is_periodic(timer)) { /* * FIXME: Clamp period to reasonable min value? * Clamp period to reasonable max value */ new_val &= (timer->config & HPET_TN_32BIT ? ~0u : ~0ull) >> 1; timer->period = (timer->period & 0xffffffff00000000ULL) | new_val; } timer->config &= ~HPET_TN_SETVAL; if (hpet_enabled()) hpet_set_timer(timer); break; case HPET_TN_CMP + 4: // comparator register high order DPRINTF(\"qemu: hpet_ram_writel HPET_TN_CMP + 4\\n\"); if (!timer_is_periodic(timer) || (timer->config & HPET_TN_SETVAL)) timer->cmp = (timer->cmp & 0xffffffffULL) | new_val << 32; else { /* * FIXME: Clamp period to reasonable min value? * Clamp period to reasonable max value */ new_val &= (timer->config & HPET_TN_32BIT ? ~0u : ~0ull) >> 1; timer->period = (timer->period & 0xffffffffULL) | new_val << 32; } timer->config &= ~HPET_TN_SETVAL; if (hpet_enabled()) hpet_set_timer(timer); break; case HPET_TN_ROUTE + 4: DPRINTF(\"qemu: hpet_ram_writel HPET_TN_ROUTE + 4\\n\"); break; default: DPRINTF(\"qemu: invalid hpet_ram_writel\\n\"); break; } return; } else { switch (index) { case HPET_ID: return; case HPET_CFG: val = hpet_fixup_reg(new_val, old_val, HPET_CFG_WRITE_MASK); s->config = (s->config & 0xffffffff00000000ULL) | val; if (activating_bit(old_val, new_val, HPET_CFG_ENABLE)) { /* Enable main counter and interrupt generation. */ s->hpet_offset = ticks_to_ns(s->hpet_counter) - qemu_get_clock(vm_clock); for (i = 0; i < HPET_NUM_TIMERS; i++) if ((&s->timer[i])->cmp != ~0ULL) hpet_set_timer(&s->timer[i]); } else if (deactivating_bit(old_val, new_val, HPET_CFG_ENABLE)) { /* Halt main counter and disable interrupt generation. */ s->hpet_counter = hpet_get_ticks(); for (i = 0; i < HPET_NUM_TIMERS; i++) hpet_del_timer(&s->timer[i]); } /* i8254 and RTC are disabled when HPET is in legacy mode */ if (activating_bit(old_val, new_val, HPET_CFG_LEGACY)) { hpet_pit_disable(); } else if (deactivating_bit(old_val, new_val, HPET_CFG_LEGACY)) { hpet_pit_enable(); } break; case HPET_CFG + 4: DPRINTF(\"qemu: invalid HPET_CFG+4 write \\n\"); break; case HPET_STATUS: /* FIXME: need to handle level-triggered interrupts */ break; case HPET_COUNTER: if (hpet_enabled()) printf(\"qemu: Writing counter while HPET enabled!\\n\"); s->hpet_counter = (s->hpet_counter & 0xffffffff00000000ULL) | value; DPRINTF(\"qemu: HPET counter written. ctr = %#x -> %\" PRIx64 \"\\n\", value, s->hpet_counter); break; case HPET_COUNTER + 4: if (hpet_enabled()) printf(\"qemu: Writing counter while HPET enabled!\\n\"); s->hpet_counter = (s->hpet_counter & 0xffffffffULL) | (((uint64_t)value) << 32); DPRINTF(\"qemu: HPET counter + 4 written. ctr = %#x -> %\" PRIx64 \"\\n\", value, s->hpet_counter); break; default: DPRINTF(\"qemu: invalid hpet_ram_writel\\n\"); break; } } }"} {"target": 0, "idx": 18988, "func": "void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf, int flags) { int i; cpu_fprintf(f, \"PC=%08x\\n\", env->pc); for (i = 0; i < 16; ++i) { cpu_fprintf(f, \"A%02d=%08x%c\", i, env->regs[i], (i % 4) == 3 ? '\\n' : ' '); } }"} {"target": 0, "idx": 19003, "func": "int net_init_socket(const NetClientOptions *opts, const char *name, NetClientState *peer, Error **errp) { /* FIXME error_setg(errp, ...) 
on failure */ Error *err = NULL; const NetdevSocketOptions *sock; assert(opts->kind == NET_CLIENT_OPTIONS_KIND_SOCKET); sock = opts->socket; if (sock->has_fd + sock->has_listen + sock->has_connect + sock->has_mcast + sock->has_udp != 1) { error_report(\"exactly one of fd=, listen=, connect=, mcast= or udp=\" \" is required\"); return -1; } if (sock->has_localaddr && !sock->has_mcast && !sock->has_udp) { error_report(\"localaddr= is only valid with mcast= or udp=\"); return -1; } if (sock->has_fd) { int fd; fd = monitor_fd_param(cur_mon, sock->fd, &err); if (fd == -1) { error_report_err(err); return -1; } qemu_set_nonblock(fd); if (!net_socket_fd_init(peer, \"socket\", name, fd, 1)) { return -1; } return 0; } if (sock->has_listen) { if (net_socket_listen_init(peer, \"socket\", name, sock->listen) == -1) { return -1; } return 0; } if (sock->has_connect) { if (net_socket_connect_init(peer, \"socket\", name, sock->connect) == -1) { return -1; } return 0; } if (sock->has_mcast) { /* if sock->localaddr is missing, it has been initialized to \"all bits * zero\" */ if (net_socket_mcast_init(peer, \"socket\", name, sock->mcast, sock->localaddr) == -1) { return -1; } return 0; } assert(sock->has_udp); if (!sock->has_localaddr) { error_report(\"localaddr= is mandatory with udp=\"); return -1; } if (net_socket_udp_init(peer, \"socket\", name, sock->udp, sock->localaddr) == -1) { return -1; } return 0; }"} {"target": 1, "idx": 19005, "func": "void visit_type_bool(Visitor *v, bool *obj, const char *name, Error **errp) { if (!error_is_set(errp)) { v->type_bool(v, obj, name, errp); } }"} {"target": 0, "idx": 19011, "func": "static av_always_inline av_flatten void h264_loop_filter_chroma_intra_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta) { int d; for( d = 0; d < 8; d++ ) { const int p0 = pix[-1*xstride]; const int p1 = pix[-2*xstride]; const int q0 = pix[0]; const int q1 = pix[1*xstride]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { pix[-xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */ pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */ } pix += ystride; } }"} {"target": 1, "idx": 19023, "func": "static int protocol_version(VncState *vs, uint8_t *version, size_t len) { char local[13]; memcpy(local, version, 12); local[12] = 0; if (sscanf(local, \"RFB %03d.%03d\\n\", &vs->major, &vs->minor) != 2) { VNC_DEBUG(\"Malformed protocol version %s\\n\", local); vnc_client_error(vs); return 0; } VNC_DEBUG(\"Client request protocol version %d.%d\\n\", vs->major, vs->minor); if (vs->major != 3 || (vs->minor != 3 && vs->minor != 4 && vs->minor != 5 && vs->minor != 7 && vs->minor != 8)) { VNC_DEBUG(\"Unsupported client version\\n\"); vnc_write_u32(vs, VNC_AUTH_INVALID); vnc_flush(vs); vnc_client_error(vs); return 0; } /* Some broken clients report v3.4 or v3.5, which spec requires to be treated * as equivalent to v3.3 by servers */ if (vs->minor == 4 || vs->minor == 5) vs->minor = 3; if (vs->minor == 3) { if (vs->auth == VNC_AUTH_NONE) { VNC_DEBUG(\"Tell client auth none\\n\"); vnc_write_u32(vs, vs->auth); vnc_flush(vs); start_client_init(vs); } else if (vs->auth == VNC_AUTH_VNC) { VNC_DEBUG(\"Tell client VNC auth\\n\"); vnc_write_u32(vs, vs->auth); vnc_flush(vs); start_auth_vnc(vs); } else { VNC_DEBUG(\"Unsupported auth %d for protocol 3.3\\n\", vs->auth); vnc_write_u32(vs, VNC_AUTH_INVALID); vnc_flush(vs); vnc_client_error(vs); } } else { VNC_DEBUG(\"Telling client we support auth %d\\n\", vs->auth); vnc_write_u8(vs, 1); /* num auth */ vnc_write_u8(vs, 
vs->auth); vnc_read_when(vs, protocol_client_auth, 1); vnc_flush(vs); } return 0; }"} {"target": 1, "idx": 19027, "func": "qemu_irq *pxa2xx_pic_init(target_phys_addr_t base, CPUState *env) { struct pxa2xx_pic_state_s *s; int iomemtype; qemu_irq *qi; s = (struct pxa2xx_pic_state_s *) qemu_mallocz(sizeof(struct pxa2xx_pic_state_s)); if (!s) return NULL; s->cpu_env = env; s->base = base; s->int_pending[0] = 0; s->int_pending[1] = 0; s->int_enabled[0] = 0; s->int_enabled[1] = 0; s->is_fiq[0] = 0; s->is_fiq[1] = 0; qi = qemu_allocate_irqs(pxa2xx_pic_set_irq, s, PXA2XX_PIC_SRCS); /* Enable IC memory-mapped registers access. */ iomemtype = cpu_register_io_memory(0, pxa2xx_pic_readfn, pxa2xx_pic_writefn, s); cpu_register_physical_memory(base, 0x000fffff, iomemtype); /* Enable IC coprocessor access. */ cpu_arm_set_cp_io(env, 6, pxa2xx_pic_cp_read, pxa2xx_pic_cp_write, s); register_savevm(\"pxa2xx_pic\", 0, 0, pxa2xx_pic_save, pxa2xx_pic_load, s); return qi; }"} {"target": 1, "idx": 19041, "func": "static int mp_decode_frame(MPADecodeContext *s, OUT_INT **samples, const uint8_t *buf, int buf_size) { int i, nb_frames, ch, ret; OUT_INT *samples_ptr; init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8); /* skip error protection field */ if (s->error_protection) skip_bits(&s->gb, 16); switch(s->layer) { case 1: s->avctx->frame_size = 384; nb_frames = mp_decode_layer1(s); break; case 2: s->avctx->frame_size = 1152; nb_frames = mp_decode_layer2(s); break; case 3: s->avctx->frame_size = s->lsf ? 576 : 1152; default: nb_frames = mp_decode_layer3(s); if (nb_frames < 0) return nb_frames; s->last_buf_size=0; if (s->in_gb.buffer) { align_get_bits(&s->gb); i = get_bits_left(&s->gb)>>3; if (i >= 0 && i <= BACKSTEP_SIZE) { memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i); s->last_buf_size=i; } else av_log(s->avctx, AV_LOG_ERROR, \"invalid old backstep %d\\n\", i); s->gb = s->in_gb; s->in_gb.buffer = NULL; } align_get_bits(&s->gb); assert((get_bits_count(&s->gb) & 7) == 0); i = get_bits_left(&s->gb) >> 3; if (i < 0 || i > BACKSTEP_SIZE || nb_frames < 0) { if (i < 0) av_log(s->avctx, AV_LOG_ERROR, \"invalid new backstep %d\\n\", i); i = FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE); } assert(i <= buf_size - HEADER_SIZE && i >= 0); memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i); s->last_buf_size += i; } /* get output buffer */ if (!samples) { av_assert0(s->frame != NULL); s->frame->nb_samples = s->avctx->frame_size; if ((ret = ff_get_buffer(s->avctx, s->frame, 0)) < 0) { av_log(s->avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; } samples = (OUT_INT **)s->frame->extended_data; } /* apply the synthesis filter */ for (ch = 0; ch < s->nb_channels; ch++) { int sample_stride; if (s->avctx->sample_fmt == OUT_FMT_P) { samples_ptr = samples[ch]; sample_stride = 1; } else { samples_ptr = samples[0] + ch; sample_stride = s->nb_channels; } for (i = 0; i < nb_frames; i++) { RENAME(ff_mpa_synth_filter)(&s->mpadsp, s->synth_buf[ch], &(s->synth_buf_offset[ch]), RENAME(ff_mpa_synth_window), &s->dither_state, samples_ptr, sample_stride, s->sb_samples[ch][i]); samples_ptr += 32 * sample_stride; } } return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels; }"} {"target": 1, "idx": 19075, "func": "static InetSocketAddress *ssh_config(QDict *options, Error **errp) { InetSocketAddress *inet = NULL; QDict *addr = NULL; QObject *crumpled_addr = NULL; Visitor *iv = NULL; Error *local_error = NULL; qdict_extract_subqdict(options, &addr, \"server.\"); if 
(!qdict_size(addr)) { error_setg(errp, \"SSH server address missing\"); goto out; } crumpled_addr = qdict_crumple(addr, errp); if (!crumpled_addr) { goto out; } iv = qobject_input_visitor_new(crumpled_addr); visit_type_InetSocketAddress(iv, NULL, &inet, &local_error); if (local_error) { error_propagate(errp, local_error); goto out; } out: QDECREF(addr); qobject_decref(crumpled_addr); visit_free(iv); return inet; }"} {"target": 1, "idx": 19082, "func": "sdhci_buff_access_is_sequential(SDHCIState *s, unsigned byte_num) { if ((s->data_count & 0x3) != byte_num) { ERRPRINT(\"Non-sequential access to Buffer Data Port register\" \"is prohibited\\n\"); return false; } return true; }"} {"target": 0, "idx": 19090, "func": "static int nbd_negotiate_write(QIOChannel *ioc, const void *buffer, size_t size) { ssize_t ret; guint watch; assert(qemu_in_coroutine()); /* Negotiation are always in main loop. */ watch = qio_channel_add_watch(ioc, G_IO_OUT, nbd_negotiate_continue, qemu_coroutine_self(), NULL); ret = write_sync(ioc, buffer, size, NULL); g_source_remove(watch); return ret; }"} {"target": 0, "idx": 19102, "func": "static void aio_read_done(void *opaque, int ret) { struct aio_ctx *ctx = opaque; struct timeval t2; gettimeofday(&t2, NULL); if (ret < 0) { printf(\"readv failed: %s\\n\", strerror(-ret)); goto out; } if (ctx->Pflag) { void *cmp_buf = malloc(ctx->qiov.size); memset(cmp_buf, ctx->pattern, ctx->qiov.size); if (memcmp(ctx->buf, cmp_buf, ctx->qiov.size)) { printf(\"Pattern verification failed at offset %\" PRId64 \", %zd bytes\\n\", ctx->offset, ctx->qiov.size); } free(cmp_buf); } if (ctx->qflag) { goto out; } if (ctx->vflag) { dump_buffer(ctx->buf, ctx->offset, ctx->qiov.size); } /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, ctx->t1); print_report(\"read\", &t2, ctx->offset, ctx->qiov.size, ctx->qiov.size, 1, ctx->Cflag); out: qemu_io_free(ctx->buf); free(ctx); }"} {"target": 0, "idx": 19108, "func": "static void spr_read_tbu (DisasContext *ctx, int gprn, int sprn) { if (use_icount) { gen_io_start(); } gen_helper_load_tbu(cpu_gpr[gprn], cpu_env); if (use_icount) { gen_io_end(); gen_stop_exception(ctx); } }"} {"target": 0, "idx": 19115, "func": "static void nvdimm_build_common_dsm(Aml *dev) { Aml *method, *ifctx, *function, *dsm_mem, *unpatched, *result_size; uint8_t byte_list[1]; method = aml_method(NVDIMM_COMMON_DSM, 4, AML_SERIALIZED); function = aml_arg(2); dsm_mem = aml_name(NVDIMM_ACPI_MEM_ADDR); /* * do not support any method if DSM memory address has not been * patched. */ unpatched = aml_if(aml_equal(dsm_mem, aml_int(0x0))); /* * function 0 is called to inquire what functions are supported by * OSPM */ ifctx = aml_if(aml_equal(function, aml_int(0))); byte_list[0] = 0 /* No function Supported */; aml_append(ifctx, aml_return(aml_buffer(1, byte_list))); aml_append(unpatched, ifctx); /* No function is supported yet. */ byte_list[0] = 1 /* Not Supported */; aml_append(unpatched, aml_return(aml_buffer(1, byte_list))); aml_append(method, unpatched); /* * The HDLE indicates the DSM function is issued from which device, * it is not used at this time as no function is supported yet. * Currently we make it always be 0 for all the devices and will set * the appropriate value once real function is implemented. 
*/ aml_append(method, aml_store(aml_int(0x0), aml_name(\"HDLE\"))); aml_append(method, aml_store(aml_arg(1), aml_name(\"REVS\"))); aml_append(method, aml_store(aml_arg(2), aml_name(\"FUNC\"))); /* * tell QEMU about the real address of DSM memory, then QEMU * gets the control and fills the result in DSM memory. */ aml_append(method, aml_store(dsm_mem, aml_name(\"NTFI\"))); result_size = aml_local(1); aml_append(method, aml_store(aml_name(\"RLEN\"), result_size)); aml_append(method, aml_store(aml_shiftleft(result_size, aml_int(3)), result_size)); aml_append(method, aml_create_field(aml_name(\"ODAT\"), aml_int(0), result_size, \"OBUF\")); aml_append(method, aml_concatenate(aml_buffer(0, NULL), aml_name(\"OBUF\"), aml_arg(6))); aml_append(method, aml_return(aml_arg(6))); aml_append(dev, method); }"} {"target": 0, "idx": 19116, "func": "static gnutls_anon_server_credentials vnc_tls_initialize_anon_cred(void) { gnutls_anon_server_credentials anon_cred; int ret; if ((ret = gnutls_anon_allocate_server_credentials(&anon_cred)) < 0) { VNC_DEBUG(\"Cannot allocate credentials %s\\n\", gnutls_strerror(ret)); return NULL; } gnutls_anon_set_server_dh_params(anon_cred, dh_params); return anon_cred; }"} {"target": 0, "idx": 19119, "func": "static int zero_single_l2(BlockDriverState *bs, uint64_t offset, unsigned int nb_clusters) { BDRVQcowState *s = bs->opaque; uint64_t *l2_table; int l2_index; int ret; int i; ret = get_cluster_table(bs, offset, &l2_table, &l2_index); if (ret < 0) { return ret; } /* Limit nb_clusters to one L2 table */ nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); for (i = 0; i < nb_clusters; i++) { uint64_t old_offset; old_offset = be64_to_cpu(l2_table[l2_index + i]); /* Update L2 entries */ qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table); if (old_offset & QCOW_OFLAG_COMPRESSED) { l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST); } else { l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO); } } ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); if (ret < 0) { return ret; } return nb_clusters; }"} {"target": 0, "idx": 19126, "func": "static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn) { S390pciState *s = opaque; return &s->pbdev[PCI_SLOT(devfn)].as; }"} {"target": 0, "idx": 19128, "func": "static void test_info_commands(void) { char *resp, *info, *info_buf, *endp; info_buf = info = hmp(\"help info\"); while (*info) { /* Extract the info command, ignore parameters and description */ g_assert(strncmp(info, \"info \", 5) == 0); endp = strchr(&info[5], ' '); g_assert(endp != NULL); *endp = '\\0'; /* Now run the info command */ if (verbose) { fprintf(stderr, \"\\t%s\\n\", info); } resp = hmp(info); g_free(resp); /* And move forward to the next line */ info = strchr(endp + 1, '\\n'); if (!info) { break; } info += 1; } g_free(info_buf); }"} {"target": 0, "idx": 19129, "func": "static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { uint8_t *old_code_ptr = s->code_ptr; assert(ret != arg); #if TCG_TARGET_REG_BITS == 32 tcg_out_op_t(s, INDEX_op_mov_i32); #else tcg_out_op_t(s, INDEX_op_mov_i64); #endif tcg_out_r(s, ret); tcg_out_r(s, arg); old_code_ptr[1] = s->code_ptr - old_code_ptr; }"} {"target": 0, "idx": 19133, "func": "static int do_token_in(USBDevice *s, USBPacket *p) { int request, value, index; int ret = 0; assert(p->devep == 0); request = (s->setup_buf[0] << 8) | s->setup_buf[1]; value = (s->setup_buf[3] << 8) | s->setup_buf[2]; 
index = (s->setup_buf[5] << 8) | s->setup_buf[4]; switch(s->setup_state) { case SETUP_STATE_ACK: if (!(s->setup_buf[0] & USB_DIR_IN)) { ret = usb_device_handle_control(s, p, request, value, index, s->setup_len, s->data_buf); if (ret == USB_RET_ASYNC) { return USB_RET_ASYNC; } s->setup_state = SETUP_STATE_IDLE; if (ret > 0) return 0; return ret; } /* return 0 byte */ return 0; case SETUP_STATE_DATA: if (s->setup_buf[0] & USB_DIR_IN) { int len = s->setup_len - s->setup_index; if (len > p->iov.size) { len = p->iov.size; } usb_packet_copy(p, s->data_buf + s->setup_index, len); s->setup_index += len; if (s->setup_index >= s->setup_len) s->setup_state = SETUP_STATE_ACK; return len; } s->setup_state = SETUP_STATE_IDLE; return USB_RET_STALL; default: return USB_RET_STALL; } }"} {"target": 0, "idx": 19160, "func": "static int coroutine_fn backup_do_cow(BackupBlockJob *job, int64_t offset, uint64_t bytes, bool *error_is_read, bool is_write_notifier) { BlockBackend *blk = job->common.blk; CowRequest cow_request; struct iovec iov; QEMUIOVector bounce_qiov; void *bounce_buffer = NULL; int ret = 0; int64_t start, end; /* bytes */ int n; /* bytes */ qemu_co_rwlock_rdlock(&job->flush_rwlock); start = QEMU_ALIGN_DOWN(offset, job->cluster_size); end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size); trace_backup_do_cow_enter(job, start, offset, bytes); wait_for_overlapping_requests(job, start, end); cow_request_begin(&cow_request, job, start, end); for (; start < end; start += job->cluster_size) { if (test_bit(start / job->cluster_size, job->done_bitmap)) { trace_backup_do_cow_skip(job, start); continue; /* already copied */ } trace_backup_do_cow_process(job, start); n = MIN(job->cluster_size, job->common.len - start); if (!bounce_buffer) { bounce_buffer = blk_blockalign(blk, job->cluster_size); } iov.iov_base = bounce_buffer; iov.iov_len = n; qemu_iovec_init_external(&bounce_qiov, &iov, 1); ret = blk_co_preadv(blk, start, bounce_qiov.size, &bounce_qiov, is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0); if (ret < 0) { trace_backup_do_cow_read_fail(job, start, ret); if (error_is_read) { *error_is_read = true; } goto out; } if (buffer_is_zero(iov.iov_base, iov.iov_len)) { ret = blk_co_pwrite_zeroes(job->target, start, bounce_qiov.size, BDRV_REQ_MAY_UNMAP); } else { ret = blk_co_pwritev(job->target, start, bounce_qiov.size, &bounce_qiov, job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0); } if (ret < 0) { trace_backup_do_cow_write_fail(job, start, ret); if (error_is_read) { *error_is_read = false; } goto out; } set_bit(start / job->cluster_size, job->done_bitmap); /* Publish progress, guest I/O counts as progress too. Note that the * offset field is an opaque progress value, it is not a disk offset. 
*/ job->bytes_read += n; job->common.offset += n; } out: if (bounce_buffer) { qemu_vfree(bounce_buffer); } cow_request_end(&cow_request); trace_backup_do_cow_return(job, offset, bytes, ret); qemu_co_rwlock_unlock(&job->flush_rwlock); return ret; }"} {"target": 0, "idx": 19184, "func": "static int inject_error(BlockDriverState *bs, BlkdebugRule *rule) { BDRVBlkdebugState *s = bs->opaque; int error = rule->options.inject.error; bool immediately = rule->options.inject.immediately; if (rule->options.inject.once) { QSIMPLEQ_REMOVE(&s->active_rules, rule, BlkdebugRule, active_next); remove_rule(rule); } if (!immediately) { aio_co_schedule(qemu_get_current_aio_context(), qemu_coroutine_self()); qemu_coroutine_yield(); } return -error; }"} {"target": 0, "idx": 19198, "func": "static void pflash_write(pflash_t *pfl, hwaddr offset, uint32_t value, int width, int be) { uint8_t *p; uint8_t cmd; cmd = value; DPRINTF(\"%s: writing offset \" TARGET_FMT_plx \" value %08x width %d wcycle 0x%x\\n\", __func__, offset, value, width, pfl->wcycle); if (!pfl->wcycle) { /* Set the device in I/O access mode */ memory_region_rom_device_set_readable(&pfl->mem, false); } switch (pfl->wcycle) { case 0: /* read mode */ switch (cmd) { case 0x00: /* ??? */ goto reset_flash; case 0x10: /* Single Byte Program */ case 0x40: /* Single Byte Program */ DPRINTF(\"%s: Single Byte Program\\n\", __func__); break; case 0x20: /* Block erase */ p = pfl->storage; offset &= ~(pfl->sector_len - 1); DPRINTF(\"%s: block erase at \" TARGET_FMT_plx \" bytes %x\\n\", __func__, offset, (unsigned)pfl->sector_len); if (!pfl->ro) { memset(p + offset, 0xff, pfl->sector_len); pflash_update(pfl, offset, pfl->sector_len); } else { pfl->status |= 0x20; /* Block erase error */ } pfl->status |= 0x80; /* Ready! */ break; case 0x50: /* Clear status bits */ DPRINTF(\"%s: Clear status bits\\n\", __func__); pfl->status = 0x0; goto reset_flash; case 0x60: /* Block (un)lock */ DPRINTF(\"%s: Block unlock\\n\", __func__); break; case 0x70: /* Status Register */ DPRINTF(\"%s: Read status register\\n\", __func__); pfl->cmd = cmd; return; case 0x90: /* Read Device ID */ DPRINTF(\"%s: Read Device information\\n\", __func__); pfl->cmd = cmd; return; case 0x98: /* CFI query */ DPRINTF(\"%s: CFI query\\n\", __func__); break; case 0xe8: /* Write to buffer */ DPRINTF(\"%s: Write to buffer\\n\", __func__); pfl->status |= 0x80; /* Ready! */ break; case 0xf0: /* Probe for AMD flash */ DPRINTF(\"%s: Probe for AMD flash\\n\", __func__); goto reset_flash; case 0xff: /* Read array mode */ DPRINTF(\"%s: Read array mode\\n\", __func__); goto reset_flash; default: goto error_flash; } pfl->wcycle++; pfl->cmd = cmd; break; case 1: switch (pfl->cmd) { case 0x10: /* Single Byte Program */ case 0x40: /* Single Byte Program */ DPRINTF(\"%s: Single Byte Program\\n\", __func__); if (!pfl->ro) { pflash_data_write(pfl, offset, value, width, be); pflash_update(pfl, offset, width); } else { pfl->status |= 0x10; /* Programming error */ } pfl->status |= 0x80; /* Ready! 
*/ pfl->wcycle = 0; break; case 0x20: /* Block erase */ case 0x28: if (cmd == 0xd0) { /* confirm */ pfl->wcycle = 0; pfl->status |= 0x80; } else if (cmd == 0xff) { /* read array mode */ goto reset_flash; } else goto error_flash; break; case 0xe8: DPRINTF(\"%s: block write of %x bytes\\n\", __func__, value); pfl->counter = value; pfl->wcycle++; break; case 0x60: if (cmd == 0xd0) { pfl->wcycle = 0; pfl->status |= 0x80; } else if (cmd == 0x01) { pfl->wcycle = 0; pfl->status |= 0x80; } else if (cmd == 0xff) { goto reset_flash; } else { DPRINTF(\"%s: Unknown (un)locking command\\n\", __func__); goto reset_flash; } break; case 0x98: if (cmd == 0xff) { goto reset_flash; } else { DPRINTF(\"%s: leaving query mode\\n\", __func__); } break; default: goto error_flash; } break; case 2: switch (pfl->cmd) { case 0xe8: /* Block write */ if (!pfl->ro) { pflash_data_write(pfl, offset, value, width, be); } else { pfl->status |= 0x10; /* Programming error */ } pfl->status |= 0x80; if (!pfl->counter) { hwaddr mask = pfl->writeblock_size - 1; mask = ~mask; DPRINTF(\"%s: block write finished\\n\", __func__); pfl->wcycle++; if (!pfl->ro) { /* Flush the entire write buffer onto backing storage. */ pflash_update(pfl, offset & mask, pfl->writeblock_size); } else { pfl->status |= 0x10; /* Programming error */ } } pfl->counter--; break; default: goto error_flash; } break; case 3: /* Confirm mode */ switch (pfl->cmd) { case 0xe8: /* Block write */ if (cmd == 0xd0) { pfl->wcycle = 0; pfl->status |= 0x80; } else { DPRINTF(\"%s: unknown command for \\\"write block\\\"\\n\", __func__); PFLASH_BUG(\"Write block confirm\"); goto reset_flash; } break; default: goto error_flash; } break; default: /* Should never happen */ DPRINTF(\"%s: invalid write state\\n\", __func__); goto reset_flash; } return; error_flash: qemu_log_mask(LOG_UNIMP, \"%s: Unimplemented flash cmd sequence \" \"(offset \" TARGET_FMT_plx \", wcycle 0x%x cmd 0x%x value 0x%x)\" \"\\n\", __func__, offset, pfl->wcycle, pfl->cmd, value); reset_flash: memory_region_rom_device_set_readable(&pfl->mem, true); pfl->bypass = 0; pfl->wcycle = 0; pfl->cmd = 0; }"} {"target": 0, "idx": 19204, "func": "int kvm_init_vcpu(CPUState *cpu) { KVMState *s = kvm_state; long mmap_size; int ret; DPRINTF(\"kvm_init_vcpu\\n\"); ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu)); if (ret < 0) { DPRINTF(\"kvm_create_vcpu failed\\n\"); goto err; } cpu->kvm_fd = ret; cpu->kvm_state = s; cpu->kvm_vcpu_dirty = true; mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); if (mmap_size < 0) { ret = mmap_size; DPRINTF(\"KVM_GET_VCPU_MMAP_SIZE failed\\n\"); goto err; } cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, cpu->kvm_fd, 0); if (cpu->kvm_run == MAP_FAILED) { ret = -errno; DPRINTF(\"mmap'ing vcpu state failed\\n\"); goto err; } if (s->coalesced_mmio && !s->coalesced_mmio_ring) { s->coalesced_mmio_ring = (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE; } ret = kvm_arch_init_vcpu(cpu); err: return ret; }"} {"target": 1, "idx": 19216, "func": "void arm_cpu_do_interrupt(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; unsigned int new_el = env->exception.target_el; assert(!arm_feature(env, ARM_FEATURE_M)); arm_log_exception(cs->exception_index); qemu_log_mask(CPU_LOG_INT, \"...from EL%d to EL%d\\n\", arm_current_el(env), new_el); if (qemu_loglevel_mask(CPU_LOG_INT) && !excp_is_internal(cs->exception_index)) { qemu_log_mask(CPU_LOG_INT, \"...with ESR %x/0x%\" PRIx32 \"\\n\", env->exception.syndrome >> ARM_EL_EC_SHIFT, 
env->exception.syndrome); } if (arm_is_psci_call(cpu, cs->exception_index)) { arm_handle_psci_call(cpu); qemu_log_mask(CPU_LOG_INT, \"...handled as PSCI call\\n\"); return; } /* Semihosting semantics depend on the register width of the * code that caused the exception, not the target exception level, * so must be handled here. */ if (check_for_semihosting(cs)) { return; } assert(!excp_is_internal(cs->exception_index)); if (arm_el_is_aa64(env, new_el)) { arm_cpu_do_interrupt_aarch64(cs); } else { arm_cpu_do_interrupt_aarch32(cs); } arm_call_el_change_hook(cpu); if (!kvm_enabled()) { cs->interrupt_request |= CPU_INTERRUPT_EXITTB; } }"} {"target": 1, "idx": 19223, "func": "static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq) { VirtQueueElement elem; if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) return; if (n->async_tx.elem.out_num) { virtio_queue_set_notification(n->tx_vq, 0); return; } while (virtqueue_pop(vq, &elem)) { ssize_t ret, len = 0; unsigned int out_num = elem.out_num; struct iovec *out_sg = &elem.out_sg[0]; unsigned hdr_len; /* hdr_len refers to the header received from the guest */ hdr_len = n->mergeable_rx_bufs ? sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr); if (out_num < 1 || out_sg->iov_len != hdr_len) { fprintf(stderr, \"virtio-net header not in first element\\n\"); exit(1); } /* ignore the header if GSO is not supported */ if (!n->has_vnet_hdr) { out_num--; out_sg++; len += hdr_len; } else if (n->mergeable_rx_bufs) { /* tapfd expects a struct virtio_net_hdr */ hdr_len -= sizeof(struct virtio_net_hdr); out_sg->iov_len -= hdr_len; len += hdr_len; } ret = qemu_sendv_packet_async(&n->nic->nc, out_sg, out_num, virtio_net_tx_complete); if (ret == 0) { virtio_queue_set_notification(n->tx_vq, 0); n->async_tx.elem = elem; n->async_tx.len = len; return; } len += ret; virtqueue_push(vq, &elem, len); virtio_notify(&n->vdev, vq); } }"} {"target": 1, "idx": 19237, "func": "static void flush_queued_data(VirtIOSerialPort *port, bool discard) { assert(port || discard); do_flush_queued_data(port, port->ovq, &port->vser->vdev, discard); }"} {"target": 1, "idx": 19238, "func": "static int hdev_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVRawState *s = bs->opaque; Error *local_err = NULL; int ret; #if defined(__APPLE__) && defined(__MACH__) const char *filename = qdict_get_str(options, \"filename\"); char bsd_path[MAXPATHLEN] = \"\"; bool error_occurred = false; /* If using a real cdrom */ if (strcmp(filename, \"/dev/cdrom\") == 0) { char *mediaType = NULL; kern_return_t ret_val; io_iterator_t mediaIterator = 0; mediaType = FindEjectableOpticalMedia(&mediaIterator); if (mediaType == NULL) { error_setg(errp, \"Please make sure your CD/DVD is in the optical\" \" drive\"); error_occurred = true; goto hdev_open_Mac_error; } ret_val = GetBSDPath(mediaIterator, bsd_path, sizeof(bsd_path), flags); if (ret_val != KERN_SUCCESS) { error_setg(errp, \"Could not get BSD path for optical drive\"); error_occurred = true; goto hdev_open_Mac_error; } /* If a real optical drive was not found */ if (bsd_path[0] == '\\0') { error_setg(errp, \"Failed to obtain bsd path for optical drive\"); error_occurred = true; goto hdev_open_Mac_error; } /* If using a cdrom disc and finding a partition on the disc failed */ if (strncmp(mediaType, kIOCDMediaClass, 9) == 0 && setup_cdrom(bsd_path, errp) == false) { print_unmounting_directions(bsd_path); error_occurred = true; goto hdev_open_Mac_error; } qdict_put(options, \"filename\", qstring_from_str(bsd_path)); 
hdev_open_Mac_error: g_free(mediaType); if (mediaIterator) { IOObjectRelease(mediaIterator); } if (error_occurred) { return -ENOENT; } } #endif /* defined(__APPLE__) && defined(__MACH__) */ s->type = FTYPE_FILE; ret = raw_open_common(bs, options, flags, 0, &local_err); if (ret < 0) { error_propagate(errp, local_err); #if defined(__APPLE__) && defined(__MACH__) if (*bsd_path) { filename = bsd_path; } /* if a physical device experienced an error while being opened */ if (strncmp(filename, \"/dev/\", 5) == 0) { print_unmounting_directions(filename); } #endif /* defined(__APPLE__) && defined(__MACH__) */ return ret; } /* Since this does ioctl the device must be already opened */ bs->sg = hdev_is_sg(bs); if (flags & BDRV_O_RDWR) { ret = check_hdev_writable(s); if (ret < 0) { raw_close(bs); error_setg_errno(errp, -ret, \"The device is not writable\"); return ret; } } return ret; }"} {"target": 1, "idx": 19243, "func": "int scsi_bus_legacy_handle_cmdline(SCSIBus *bus) { Location loc; DriveInfo *dinfo; int res = 0, unit; loc_push_none(&loc); for (unit = 0; unit < bus->info->max_target; unit++) { dinfo = drive_get(IF_SCSI, bus->busnr, unit); if (dinfo == NULL) { continue; } qemu_opts_loc_restore(dinfo->opts); if (!scsi_bus_legacy_add_drive(bus, dinfo->bdrv, unit, false, -1)) { res = -1; break; } } loc_pop(&loc); return res; }"} {"target": 1, "idx": 19245, "func": "static int link_filter_inouts(AVFilterContext *filt_ctx, AVFilterInOut **curr_inputs, AVFilterInOut **open_inputs, AVClass *log_ctx) { int pad = filt_ctx->input_count, ret; while (pad--) { AVFilterInOut *p = *curr_inputs; if (!p) { av_log(log_ctx, AV_LOG_ERROR, \"Not enough inputs specified for the \\\"%s\\\" filter.\\n\", filt_ctx->filter->name); return AVERROR(EINVAL); } *curr_inputs = (*curr_inputs)->next; if (p->filter) { if ((ret = link_filter(p->filter, p->pad_idx, filt_ctx, pad, log_ctx)) < 0) return ret; av_free(p->name); av_free(p); } else { p->filter = filt_ctx; p->pad_idx = pad; insert_inout(open_inputs, p); } } if (*curr_inputs) { av_log(log_ctx, AV_LOG_ERROR, \"Too many inputs specified for the \\\"%s\\\" filter.\\n\", filt_ctx->filter->name); return AVERROR(EINVAL); } pad = filt_ctx->output_count; while (pad--) { AVFilterInOut *currlinkn = av_mallocz(sizeof(AVFilterInOut)); currlinkn->filter = filt_ctx; currlinkn->pad_idx = pad; insert_inout(curr_inputs, currlinkn); } return 0; }"} {"target": 0, "idx": 19253, "func": "static void x86_cpu_common_class_init(ObjectClass *oc, void *data) { X86CPUClass *xcc = X86_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); xcc->parent_realize = dc->realize; dc->realize = x86_cpu_realizefn; dc->bus_type = TYPE_ICC_BUS; dc->props = x86_cpu_properties; xcc->parent_reset = cc->reset; cc->reset = x86_cpu_reset; cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; cc->class_by_name = x86_cpu_class_by_name; cc->parse_features = x86_cpu_parse_featurestr; cc->has_work = x86_cpu_has_work; cc->do_interrupt = x86_cpu_do_interrupt; cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; cc->dump_state = x86_cpu_dump_state; cc->set_pc = x86_cpu_set_pc; cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; cc->gdb_read_register = x86_cpu_gdb_read_register; cc->gdb_write_register = x86_cpu_gdb_write_register; cc->get_arch_id = x86_cpu_get_arch_id; cc->get_paging_enabled = x86_cpu_get_paging_enabled; #ifdef CONFIG_USER_ONLY cc->handle_mmu_fault = x86_cpu_handle_mmu_fault; #else cc->get_memory_mapping = x86_cpu_get_memory_mapping; cc->get_phys_page_debug = 
x86_cpu_get_phys_page_debug; cc->write_elf64_note = x86_cpu_write_elf64_note; cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote; cc->write_elf32_note = x86_cpu_write_elf32_note; cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote; cc->vmsd = &vmstate_x86_cpu; #endif cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25; #ifndef CONFIG_USER_ONLY cc->debug_excp_handler = breakpoint_handler; #endif cc->cpu_exec_enter = x86_cpu_exec_enter; cc->cpu_exec_exit = x86_cpu_exec_exit; }"} {"target": 0, "idx": 19256, "func": "static int qpa_init_in (HWVoiceIn *hw, audsettings_t *as) { int error; static pa_sample_spec ss; audsettings_t obt_as = *as; PAVoiceIn *pa = (PAVoiceIn *) hw; ss.format = audfmt_to_pa (as->fmt, as->endianness); ss.channels = as->nchannels; ss.rate = as->freq; obt_as.fmt = pa_to_audfmt (ss.format, &obt_as.endianness); pa->s = pa_simple_new ( conf.server, \"qemu\", PA_STREAM_RECORD, conf.source, \"pcm.capture\", &ss, NULL, /* channel map */ NULL, /* buffering attributes */ &error ); if (!pa->s) { qpa_logerr (error, \"pa_simple_new for capture failed\\n\"); goto fail1; } audio_pcm_init_info (&hw->info, &obt_as); hw->samples = conf.samples; pa->pcm_buf = audio_calloc (AUDIO_FUNC, hw->samples, 1 << hw->info.shift); if (!pa->pcm_buf) { dolog (\"Could not allocate buffer (%d bytes)\\n\", hw->samples << hw->info.shift); goto fail2; } if (audio_pt_init (&pa->pt, qpa_thread_in, hw, AUDIO_CAP, AUDIO_FUNC)) { goto fail3; } return 0; fail3: free (pa->pcm_buf); pa->pcm_buf = NULL; fail2: pa_simple_free (pa->s); pa->s = NULL; fail1: return -1; }"} {"target": 0, "idx": 19274, "func": "uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf) { CPU_DoubleU farg; int isneg; int ret; farg.ll = arg; isneg = float64_is_neg(farg.d); if (unlikely(float64_is_nan(farg.d))) { if (float64_is_signaling_nan(farg.d)) { /* Signaling NaN: flags are undefined */ ret = 0x00; } else { /* Quiet NaN */ ret = 0x11; } } else if (unlikely(float64_is_infinity(farg.d))) { /* +/- infinity */ if (isneg) ret = 0x09; else ret = 0x05; } else { if (float64_is_zero(farg.d)) { /* +/- zero */ if (isneg) ret = 0x12; else ret = 0x02; } else { if (isden(farg.d)) { /* Denormalized numbers */ ret = 0x10; } else { /* Normalized numbers */ ret = 0x00; } if (isneg) { ret |= 0x08; } else { ret |= 0x04; } } } if (set_fprf) { /* We update FPSCR_FPRF */ env->fpscr &= ~(0x1F << FPSCR_FPRF); env->fpscr |= ret << FPSCR_FPRF; } /* We just need fpcc to update Rc1 */ return ret & 0xF; }"} {"target": 0, "idx": 19290, "func": "void vga_ioport_write(void *opaque, uint32_t addr, uint32_t val) { VGACommonState *s = opaque; int index; /* check port range access depending on color/monochrome mode */ if (vga_ioport_invalid(s, addr)) { return; } #ifdef DEBUG_VGA printf(\"VGA: write addr=0x%04x data=0x%02x\\n\", addr, val); #endif switch(addr) { case VGA_ATT_W: if (s->ar_flip_flop == 0) { val &= 0x3f; s->ar_index = val; } else { index = s->ar_index & 0x1f; switch(index) { case VGA_ATC_PALETTE0 ... 
VGA_ATC_PALETTEF: s->ar[index] = val & 0x3f; break; case VGA_ATC_MODE: s->ar[index] = val & ~0x10; break; case VGA_ATC_OVERSCAN: s->ar[index] = val; break; case VGA_ATC_PLANE_ENABLE: s->ar[index] = val & ~0xc0; break; case VGA_ATC_PEL: s->ar[index] = val & ~0xf0; break; case VGA_ATC_COLOR_PAGE: s->ar[index] = val & ~0xf0; break; default: break; } } s->ar_flip_flop ^= 1; break; case VGA_MIS_W: s->msr = val & ~0x10; s->update_retrace_info(s); break; case VGA_SEQ_I: s->sr_index = val & 7; break; case VGA_SEQ_D: #ifdef DEBUG_VGA_REG printf(\"vga: write SR%x = 0x%02x\\n\", s->sr_index, val); #endif s->sr[s->sr_index] = val & sr_mask[s->sr_index]; if (s->sr_index == VGA_SEQ_CLOCK_MODE) { s->update_retrace_info(s); } vga_update_memory_access(s); break; case VGA_PEL_IR: s->dac_read_index = val; s->dac_sub_index = 0; s->dac_state = 3; break; case VGA_PEL_IW: s->dac_write_index = val; s->dac_sub_index = 0; s->dac_state = 0; break; case VGA_PEL_D: s->dac_cache[s->dac_sub_index] = val; if (++s->dac_sub_index == 3) { memcpy(&s->palette[s->dac_write_index * 3], s->dac_cache, 3); s->dac_sub_index = 0; s->dac_write_index++; } break; case VGA_GFX_I: s->gr_index = val & 0x0f; break; case VGA_GFX_D: #ifdef DEBUG_VGA_REG printf(\"vga: write GR%x = 0x%02x\\n\", s->gr_index, val); #endif s->gr[s->gr_index] = val & gr_mask[s->gr_index]; vga_update_memory_access(s); break; case VGA_CRT_IM: case VGA_CRT_IC: s->cr_index = val; break; case VGA_CRT_DM: case VGA_CRT_DC: #ifdef DEBUG_VGA_REG printf(\"vga: write CR%x = 0x%02x\\n\", s->cr_index, val); #endif /* handle CR0-7 protection */ if (s->cr[VGA_CRTC_V_SYNC_END] & VGA_CR11_LOCK_CR0_CR7) { if (s->cr_index <= VGA_CRTC_OVERFLOW) { /* can always write bit 4 of CR7 */ if (s->cr_index == VGA_CRTC_OVERFLOW) { s->cr[VGA_CRTC_OVERFLOW] = (s->cr[VGA_CRTC_OVERFLOW] & ~0x10) | (val & 0x10); } return; } else if ((vga_cga_hacks & VGA_CGA_HACK_FONT_HEIGHT) && !(s->sr[VGA_SEQ_CLOCK_MODE] & VGA_SR01_CHAR_CLK_8DOTS)) { /* extra CGA compatibility hacks (not in standard VGA) */ if (s->cr_index == VGA_CRTC_MAX_SCAN && val == 7 && (s->cr[VGA_CRTC_MAX_SCAN] & 0xf) == 0xf) { return; } else if (s->cr_index == VGA_CRTC_CURSOR_START && val == 6 && (s->cr[VGA_CRTC_MAX_SCAN] & 0xf) == 0xf) { val = 0xd; } else if (s->cr_index == VGA_CRTC_CURSOR_END && val == 7 && (s->cr[VGA_CRTC_MAX_SCAN] & 0xf) == 0xf) { val = 0xe; } } } s->cr[s->cr_index] = val; switch(s->cr_index) { case VGA_CRTC_H_TOTAL: case VGA_CRTC_H_SYNC_START: case VGA_CRTC_H_SYNC_END: case VGA_CRTC_V_TOTAL: case VGA_CRTC_OVERFLOW: case VGA_CRTC_V_SYNC_END: case VGA_CRTC_MODE: s->update_retrace_info(s); break; } break; case VGA_IS1_RM: case VGA_IS1_RC: s->fcr = val & 0x10; break; } }"} {"target": 0, "idx": 19291, "func": "static int usbnet_can_receive(NetClientState *nc) { USBNetState *s = qemu_get_nic_opaque(nc); if (!s->dev.config) { return 0; } if (is_rndis(s) && s->rndis_state != RNDIS_DATA_INITIALIZED) { return 1; } return !s->in_len; }"} {"target": 0, "idx": 19301, "func": "static void dhcp_decode(const uint8_t *buf, int size, int *pmsg_type) { const uint8_t *p, *p_end; int len, tag; *pmsg_type = 0; p = buf; p_end = buf + size; if (size < 5) return; if (memcmp(p, rfc1533_cookie, 4) != 0) return; p += 4; while (p < p_end) { tag = p[0]; if (tag == RFC1533_PAD) { p++; } else if (tag == RFC1533_END) { break; } else { p++; if (p >= p_end) break; len = *p++; dprintf(\"dhcp: tag=0x%02x len=%d\\n\", tag, len); switch(tag) { case RFC2132_MSG_TYPE: if (len >= 1) *pmsg_type = p[0]; break; default: break; } p += len; } } }"} {"target": 0, 
"idx": 19307, "func": "static int xhci_fire_ctl_transfer(XHCIState *xhci, XHCITransfer *xfer) { XHCITRB *trb_setup, *trb_status; uint8_t bmRequestType; trb_setup = &xfer->trbs[0]; trb_status = &xfer->trbs[xfer->trb_count-1]; trace_usb_xhci_xfer_start(xfer, xfer->epctx->slotid, xfer->epctx->epid, xfer->streamid); /* at most one Event Data TRB allowed after STATUS */ if (TRB_TYPE(*trb_status) == TR_EVDATA && xfer->trb_count > 2) { trb_status--; } /* do some sanity checks */ if (TRB_TYPE(*trb_setup) != TR_SETUP) { DPRINTF(\"xhci: ep0 first TD not SETUP: %d\\n\", TRB_TYPE(*trb_setup)); return -1; } if (TRB_TYPE(*trb_status) != TR_STATUS) { DPRINTF(\"xhci: ep0 last TD not STATUS: %d\\n\", TRB_TYPE(*trb_status)); return -1; } if (!(trb_setup->control & TRB_TR_IDT)) { DPRINTF(\"xhci: Setup TRB doesn't have IDT set\\n\"); return -1; } if ((trb_setup->status & 0x1ffff) != 8) { DPRINTF(\"xhci: Setup TRB has bad length (%d)\\n\", (trb_setup->status & 0x1ffff)); return -1; } bmRequestType = trb_setup->parameter; xfer->in_xfer = bmRequestType & USB_DIR_IN; xfer->iso_xfer = false; xfer->timed_xfer = false; if (xhci_setup_packet(xfer) < 0) { return -1; } xfer->packet.parameter = trb_setup->parameter; usb_handle_packet(xfer->packet.ep->dev, &xfer->packet); xhci_try_complete_packet(xfer); if (!xfer->running_async && !xfer->running_retry) { xhci_kick_epctx(xfer->epctx, 0); } return 0; }"} {"target": 0, "idx": 19322, "func": "void qemu_free_timer(QEMUTimer *ts) { g_free(ts); }"} {"target": 1, "idx": 19353, "func": "static av_cold int dvdsub_close(AVCodecContext *avctx) { DVDSubContext *ctx = avctx->priv_data; av_freep(&ctx->buf); ctx->buf_size = 0; return 0; }"} {"target": 0, "idx": 19373, "func": "static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); int start_track, format, msf, toclen; uint64_t nb_sectors; msf = req->cmd.buf[1] & 2; format = req->cmd.buf[2] & 0xf; start_track = req->cmd.buf[6]; bdrv_get_geometry(s->qdev.conf.bs, &nb_sectors); DPRINTF(\"Read TOC (track %d format %d msf %d)\\n\", start_track, format, msf >> 1); nb_sectors /= s->qdev.blocksize / 512; switch (format) { case 0: toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); break; case 1: /* multi session : only a single session defined */ toclen = 12; memset(outbuf, 0, 12); outbuf[1] = 0x0a; outbuf[2] = 0x01; outbuf[3] = 0x01; break; case 2: toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); break; default: return -1; } return toclen; }"} {"target": 0, "idx": 19380, "func": "void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp) { Error *local_err = NULL; int ret; if (!bs->drv) { return; } if (!(bs->open_flags & BDRV_O_INACTIVE)) { return; } bs->open_flags &= ~BDRV_O_INACTIVE; if (bs->drv->bdrv_invalidate_cache) { bs->drv->bdrv_invalidate_cache(bs, &local_err); } else if (bs->file) { bdrv_invalidate_cache(bs->file->bs, &local_err); } if (local_err) { bs->open_flags |= BDRV_O_INACTIVE; error_propagate(errp, local_err); return; } ret = refresh_total_sectors(bs, bs->total_sectors); if (ret < 0) { bs->open_flags |= BDRV_O_INACTIVE; error_setg_errno(errp, -ret, \"Could not refresh total sector count\"); return; } }"} {"target": 0, "idx": 19389, "func": "static int usb_uhci_vt82c686b_initfn(PCIDevice *dev) { UHCIState *s = DO_UPCAST(UHCIState, dev, dev); uint8_t *pci_conf = s->dev.config; pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_VIA); pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_VIA_UHCI); /* USB misc control 1/2 */ 
pci_set_long(pci_conf + 0x40,0x00001000); /* PM capability */ pci_set_long(pci_conf + 0x80,0x00020001); /* USB legacy support */ pci_set_long(pci_conf + 0xc0,0x00002000); return usb_uhci_common_initfn(s); }"} {"target": 1, "idx": 19403, "func": "static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest) { int rd = (insn >> 0) & 0xf; TCGv tmp; if (insn & (1 << 8)) { if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) { return 1; } else { tmp = iwmmxt_load_creg(rd); } } else { tmp = new_tmp(); iwmmxt_load_reg(cpu_V0, rd); tcg_gen_trunc_i64_i32(tmp, cpu_V0); } tcg_gen_andi_i32(tmp, tmp, mask); tcg_gen_mov_i32(dest, tmp); dead_tmp(tmp); return 0; }"} {"target": 1, "idx": 19408, "func": "static void vfio_vga_probe_nvidia_3d0_quirk(VFIOPCIDevice *vdev) { VFIOQuirk *quirk; VFIONvidia3d0Quirk *data; if (!vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) || !vdev->bars[1].region.size) { return; } quirk = g_malloc0(sizeof(*quirk)); quirk->data = data = g_malloc0(sizeof(*data)); quirk->mem = g_malloc0(sizeof(MemoryRegion) * 2); quirk->nr_mem = 2; data->vdev = vdev; memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_nvidia_3d4_quirk, data, \"vfio-nvidia-3d4-quirk\", 2); memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, 0x14 /* 0x3c0 + 0x14 */, &quirk->mem[0]); memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_nvidia_3d0_quirk, data, \"vfio-nvidia-3d0-quirk\", 2); memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, 0x10 /* 0x3c0 + 0x10 */, &quirk->mem[1]); QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks, quirk, next); trace_vfio_quirk_nvidia_3d0_probe(vdev->vbasedev.name); }"} {"target": 0, "idx": 19411, "func": "static int cookie_string(AVDictionary *dict, char **cookies) { AVDictionaryEntry *e = NULL; int len = 1; // determine how much memory is needed for the cookies string while (e = av_dict_get(dict, \"\", e, AV_DICT_IGNORE_SUFFIX)) len += strlen(e->key) + strlen(e->value) + 1; // reallocate the cookies e = NULL; if (*cookies) av_free(*cookies); *cookies = av_malloc(len); if (!cookies) return AVERROR(ENOMEM); *cookies[0] = '\\0'; // write out the cookies while (e = av_dict_get(dict, \"\", e, AV_DICT_IGNORE_SUFFIX)) av_strlcatf(*cookies, len, \"%s%s\\n\", e->key, e->value); return 0; }"} {"target": 1, "idx": 19431, "func": "static int get_physical_address (CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong real_address, int rw, int access_type) { /* User mode can only access useg/xuseg */ int user_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM; int supervisor_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_SM; int kernel_mode = !user_mode && !supervisor_mode; #if defined(TARGET_MIPS64) int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; #endif int ret = TLBRET_MATCH; /* effective address (modified for KVM T&E kernel segments) */ target_ulong address = real_address; #define USEG_LIMIT 0x7FFFFFFFUL #define KSEG0_BASE 0x80000000UL #define KSEG1_BASE 0xA0000000UL #define KSEG2_BASE 0xC0000000UL #define KSEG3_BASE 0xE0000000UL #define KVM_KSEG0_BASE 0x40000000UL #define KVM_KSEG2_BASE 0x60000000UL if (kvm_enabled()) { /* KVM T&E adds guest kernel segments in useg */ if (real_address >= KVM_KSEG0_BASE) { if (real_address < KVM_KSEG2_BASE) { /* kseg0 */ address += KSEG0_BASE - KVM_KSEG0_BASE; } else if (real_address <= USEG_LIMIT) { /* kseg2/3 */ address += KSEG2_BASE - KVM_KSEG2_BASE; } } } if 
(address <= USEG_LIMIT) { /* useg */ if (env->CP0_Status & (1 << CP0St_ERL)) { *physical = address & 0xFFFFFFFF; *prot = PAGE_READ | PAGE_WRITE; } else { ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); } #if defined(TARGET_MIPS64) } else if (address < 0x4000000000000000ULL) { /* xuseg */ if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) { ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); } else { ret = TLBRET_BADADDR; } } else if (address < 0x8000000000000000ULL) { /* xsseg */ if ((supervisor_mode || kernel_mode) && SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) { ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); } else { ret = TLBRET_BADADDR; } } else if (address < 0xC000000000000000ULL) { /* xkphys */ if (kernel_mode && KX && (address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) { *physical = address & env->PAMask; *prot = PAGE_READ | PAGE_WRITE; } else { ret = TLBRET_BADADDR; } } else if (address < 0xFFFFFFFF80000000ULL) { /* xkseg */ if (kernel_mode && KX && address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) { ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); } else { ret = TLBRET_BADADDR; } #endif } else if (address < (int32_t)KSEG1_BASE) { /* kseg0 */ if (kernel_mode) { *physical = address - (int32_t)KSEG0_BASE; *prot = PAGE_READ | PAGE_WRITE; } else { ret = TLBRET_BADADDR; } } else if (address < (int32_t)KSEG2_BASE) { /* kseg1 */ if (kernel_mode) { *physical = address - (int32_t)KSEG1_BASE; *prot = PAGE_READ | PAGE_WRITE; } else { ret = TLBRET_BADADDR; } } else if (address < (int32_t)KSEG3_BASE) { /* sseg (kseg2) */ if (supervisor_mode || kernel_mode) { ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); } else { ret = TLBRET_BADADDR; } } else { /* kseg3 */ /* XXX: debug segment is not emulated */ if (kernel_mode) { ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); } else { ret = TLBRET_BADADDR; } } return ret; }"} {"target": 1, "idx": 19434, "func": "static int get_htab_fd(sPAPRMachineState *spapr) { if (spapr->htab_fd >= 0) { return spapr->htab_fd; } spapr->htab_fd = kvmppc_get_htab_fd(false); if (spapr->htab_fd < 0) { error_report(\"Unable to open fd for reading hash table from KVM: %s\", strerror(errno)); } return spapr->htab_fd; }"} {"target": 1, "idx": 19441, "func": "static void qobject_input_start_list(Visitor *v, const char *name, GenericList **list, size_t size, Error **errp) { QObjectInputVisitor *qiv = to_qiv(v); QObject *qobj = qobject_input_get_object(qiv, name, true, errp); const QListEntry *entry; if (list) { *list = NULL; } if (!qobj) { return; } if (qobject_type(qobj) != QTYPE_QLIST) { error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? 
name : \"null\", \"list\"); return; } entry = qobject_input_push(qiv, qobj, list); if (entry && list) { *list = g_malloc0(size); } }"} {"target": 0, "idx": 19469, "func": "static void test_qga_file_write_read(gconstpointer fix) { const TestFixture *fixture = fix; const unsigned char helloworld[] = \"Hello World!\\n\"; const char *b64; gchar *cmd, *enc; QDict *ret, *val; int64_t id, eof; gsize count; /* open */ ret = qmp_fd(fixture->fd, \"{'execute': 'guest-file-open',\" \" 'arguments': { 'path': 'foo', 'mode': 'w+' } }\"); g_assert_nonnull(ret); qmp_assert_no_error(ret); id = qdict_get_int(ret, \"return\"); QDECREF(ret); enc = g_base64_encode(helloworld, sizeof(helloworld)); /* write */ cmd = g_strdup_printf(\"{'execute': 'guest-file-write',\" \" 'arguments': { 'handle': %\" PRId64 \",\" \" 'buf-b64': '%s' } }\", id, enc); ret = qmp_fd(fixture->fd, cmd); g_assert_nonnull(ret); qmp_assert_no_error(ret); val = qdict_get_qdict(ret, \"return\"); count = qdict_get_int(val, \"count\"); eof = qdict_get_bool(val, \"eof\"); g_assert_cmpint(count, ==, sizeof(helloworld)); g_assert_cmpint(eof, ==, 0); QDECREF(ret); g_free(cmd); /* read (check implicit flush) */ cmd = g_strdup_printf(\"{'execute': 'guest-file-read',\" \" 'arguments': { 'handle': %\" PRId64 \"} }\", id); ret = qmp_fd(fixture->fd, cmd); val = qdict_get_qdict(ret, \"return\"); count = qdict_get_int(val, \"count\"); eof = qdict_get_bool(val, \"eof\"); b64 = qdict_get_str(val, \"buf-b64\"); g_assert_cmpint(count, ==, 0); g_assert(eof); g_assert_cmpstr(b64, ==, \"\"); QDECREF(ret); g_free(cmd); /* seek to 0 */ cmd = g_strdup_printf(\"{'execute': 'guest-file-seek',\" \" 'arguments': { 'handle': %\" PRId64 \", \" \" 'offset': %d, 'whence': %d } }\", id, 0, SEEK_SET); ret = qmp_fd(fixture->fd, cmd); qmp_assert_no_error(ret); val = qdict_get_qdict(ret, \"return\"); count = qdict_get_int(val, \"position\"); eof = qdict_get_bool(val, \"eof\"); g_assert_cmpint(count, ==, 0); g_assert(!eof); QDECREF(ret); g_free(cmd); /* read */ cmd = g_strdup_printf(\"{'execute': 'guest-file-read',\" \" 'arguments': { 'handle': %\" PRId64 \"} }\", id); ret = qmp_fd(fixture->fd, cmd); val = qdict_get_qdict(ret, \"return\"); count = qdict_get_int(val, \"count\"); eof = qdict_get_bool(val, \"eof\"); b64 = qdict_get_str(val, \"buf-b64\"); g_assert_cmpint(count, ==, sizeof(helloworld)); g_assert(eof); g_assert_cmpstr(b64, ==, enc); QDECREF(ret); g_free(cmd); g_free(enc); /* close */ cmd = g_strdup_printf(\"{'execute': 'guest-file-close',\" \" 'arguments': {'handle': %\" PRId64 \"} }\", id); ret = qmp_fd(fixture->fd, cmd); QDECREF(ret); g_free(cmd); }"} {"target": 0, "idx": 19473, "func": "static void test_visitor_out_native_list_uint16(TestOutputVisitorData *data, const void *unused) { test_native_list(data, unused, USER_DEF_NATIVE_LIST_UNION_KIND_U16); }"} {"target": 0, "idx": 19490, "func": "static int xen_9pfs_connect(struct XenDevice *xendev) { int i; Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); V9fsState *s = &xen_9pdev->state; QemuOpts *fsdev; if (xenstore_read_fe_int(&xen_9pdev->xendev, \"num-rings\", &xen_9pdev->num_rings) == -1 || xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) { return -1; } xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(Xen9pfsRing)); for (i = 0; i < xen_9pdev->num_rings; i++) { char *str; int ring_order; xen_9pdev->rings[i].priv = xen_9pdev; xen_9pdev->rings[i].evtchn = -1; xen_9pdev->rings[i].local_port = -1; str = g_strdup_printf(\"ring-ref%u\", i); if 
(xenstore_read_fe_int(&xen_9pdev->xendev, str, &xen_9pdev->rings[i].ref) == -1) { g_free(str); goto out; } g_free(str); str = g_strdup_printf(\"event-channel-%u\", i); if (xenstore_read_fe_int(&xen_9pdev->xendev, str, &xen_9pdev->rings[i].evtchn) == -1) { g_free(str); goto out; } g_free(str); xen_9pdev->rings[i].intf = xengnttab_map_grant_ref( xen_9pdev->xendev.gnttabdev, xen_9pdev->xendev.dom, xen_9pdev->rings[i].ref, PROT_READ | PROT_WRITE); if (!xen_9pdev->rings[i].intf) { goto out; } ring_order = xen_9pdev->rings[i].intf->ring_order; if (ring_order > MAX_RING_ORDER) { goto out; } xen_9pdev->rings[i].ring_order = ring_order; xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs( xen_9pdev->xendev.gnttabdev, (1 << ring_order), xen_9pdev->xendev.dom, xen_9pdev->rings[i].intf->ref, PROT_READ | PROT_WRITE); if (!xen_9pdev->rings[i].data) { goto out; } xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data; xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data + XEN_FLEX_RING_SIZE(ring_order); xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]); xen_9pdev->rings[i].out_cons = 0; xen_9pdev->rings[i].out_size = 0; xen_9pdev->rings[i].inprogress = false; xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0); if (xen_9pdev->rings[i].evtchndev == NULL) { goto out; } fcntl(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), F_SETFD, FD_CLOEXEC); xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain (xen_9pdev->rings[i].evtchndev, xendev->dom, xen_9pdev->rings[i].evtchn); if (xen_9pdev->rings[i].local_port == -1) { xen_pv_printf(xendev, 0, \"xenevtchn_bind_interdomain failed port=%d\\n\", xen_9pdev->rings[i].evtchn); goto out; } xen_pv_printf(xendev, 2, \"bind evtchn port %d\\n\", xendev->local_port); qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]); } xen_9pdev->security_model = xenstore_read_be_str(xendev, \"security_model\"); xen_9pdev->path = xenstore_read_be_str(xendev, \"path\"); xen_9pdev->id = s->fsconf.fsdev_id = g_strdup_printf(\"xen9p%d\", xendev->dev); xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, \"tag\"); v9fs_register_transport(s, &xen_9p_transport); fsdev = qemu_opts_create(qemu_find_opts(\"fsdev\"), s->fsconf.tag, 1, NULL); qemu_opt_set(fsdev, \"fsdriver\", \"local\", NULL); qemu_opt_set(fsdev, \"path\", xen_9pdev->path, NULL); qemu_opt_set(fsdev, \"security_model\", xen_9pdev->security_model, NULL); qemu_opts_set_id(fsdev, s->fsconf.fsdev_id); qemu_fsdev_add(fsdev); v9fs_device_realize_common(s, NULL); return 0; out: xen_9pfs_free(xendev); return -1; }"} {"target": 0, "idx": 19492, "func": "static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg *args, TCGArg dst, TCGArg src) { if (temps_are_copies(dst, src)) { tcg_op_remove(s, op); return; } if (temp_is_const(src)) { tcg_opt_gen_movi(s, op, args, dst, temps[src].val); return; } TCGOpcode new_op = op_to_mov(op->opc); tcg_target_ulong mask; op->opc = new_op; reset_temp(dst); mask = temps[src].mask; if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) { /* High bits of the destination are now garbage. 
*/ mask |= ~0xffffffffull; } temps[dst].mask = mask; assert(!temp_is_const(src)); if (s->temps[src].type == s->temps[dst].type) { temps[dst].next_copy = temps[src].next_copy; temps[dst].prev_copy = src; temps[temps[dst].next_copy].prev_copy = dst; temps[src].next_copy = dst; temps[dst].is_const = false; } args[0] = dst; args[1] = src; }"} {"target": 0, "idx": 19494, "func": "av_cold void ff_dsputil_init_vis(DSPContext *c, AVCodecContext *avctx) { /* VIS-specific optimizations */ int accel = vis_level (); const int high_bit_depth = avctx->bits_per_raw_sample > 8; if (accel & ACCEL_SPARC_VIS) { if (avctx->bits_per_raw_sample <= 8 && avctx->idct_algo == FF_IDCT_SIMPLEVIS) { c->idct_put = ff_simple_idct_put_vis; c->idct_add = ff_simple_idct_add_vis; c->idct = ff_simple_idct_vis; c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM; } if (!high_bit_depth) { c->put_pixels_tab[0][0] = MC_put_o_16_vis; c->put_pixels_tab[0][1] = MC_put_x_16_vis; c->put_pixels_tab[0][2] = MC_put_y_16_vis; c->put_pixels_tab[0][3] = MC_put_xy_16_vis; c->put_pixels_tab[1][0] = MC_put_o_8_vis; c->put_pixels_tab[1][1] = MC_put_x_8_vis; c->put_pixels_tab[1][2] = MC_put_y_8_vis; c->put_pixels_tab[1][3] = MC_put_xy_8_vis; c->avg_pixels_tab[0][0] = MC_avg_o_16_vis; c->avg_pixels_tab[0][1] = MC_avg_x_16_vis; c->avg_pixels_tab[0][2] = MC_avg_y_16_vis; c->avg_pixels_tab[0][3] = MC_avg_xy_16_vis; c->avg_pixels_tab[1][0] = MC_avg_o_8_vis; c->avg_pixels_tab[1][1] = MC_avg_x_8_vis; c->avg_pixels_tab[1][2] = MC_avg_y_8_vis; c->avg_pixels_tab[1][3] = MC_avg_xy_8_vis; c->put_no_rnd_pixels_tab[0][0] = MC_put_no_round_o_16_vis; c->put_no_rnd_pixels_tab[0][1] = MC_put_no_round_x_16_vis; c->put_no_rnd_pixels_tab[0][2] = MC_put_no_round_y_16_vis; c->put_no_rnd_pixels_tab[0][3] = MC_put_no_round_xy_16_vis; c->put_no_rnd_pixels_tab[1][0] = MC_put_no_round_o_8_vis; c->put_no_rnd_pixels_tab[1][1] = MC_put_no_round_x_8_vis; c->put_no_rnd_pixels_tab[1][2] = MC_put_no_round_y_8_vis; c->put_no_rnd_pixels_tab[1][3] = MC_put_no_round_xy_8_vis; c->avg_no_rnd_pixels_tab[0] = MC_avg_no_round_o_16_vis; c->avg_no_rnd_pixels_tab[1] = MC_avg_no_round_x_16_vis; c->avg_no_rnd_pixels_tab[2] = MC_avg_no_round_y_16_vis; c->avg_no_rnd_pixels_tab[3] = MC_avg_no_round_xy_16_vis; } } }"} {"target": 0, "idx": 19538, "func": "void vp8_mc(VP8Context *s, int luma, uint8_t *dst, uint8_t *src, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, int linesize, vp8_mc_func mc_func[3][3]) { if (AV_RN32A(mv)) { static const uint8_t idx[3][8] = { { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels, // also function pointer index { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. 
of right extra pixels }; int mx = (mv->x << luma)&7, mx_idx = idx[0][mx]; int my = (mv->y << luma)&7, my_idx = idx[0][my]; x_off += mv->x >> (3 - luma); y_off += mv->y >> (3 - luma); // edge emulation src += y_off * linesize + x_off; if (x_off < mx_idx || x_off >= width - block_w - idx[2][mx] || y_off < my_idx || y_off >= height - block_h - idx[2][my]) { s->dsp.emulated_edge_mc(s->edge_emu_buffer, src - my_idx * linesize - mx_idx, linesize, block_w + idx[1][mx], block_h + idx[1][my], x_off - mx_idx, y_off - my_idx, width, height); src = s->edge_emu_buffer + mx_idx + linesize * my_idx; } mc_func[my_idx][mx_idx](dst, linesize, src, linesize, block_h, mx, my); } else mc_func[0][0](dst, linesize, src + y_off * linesize + x_off, linesize, block_h, 0, 0); }"} {"target": 1, "idx": 19554, "func": "static int parse_uint16(DeviceState *dev, Property *prop, const char *str) { uint16_t *ptr = qdev_get_prop_ptr(dev, prop); const char *fmt; /* accept both hex and decimal */ fmt = strncasecmp(str, \"0x\",2) == 0 ? \"%\" PRIx16 : \"%\" PRIu16; if (sscanf(str, fmt, ptr) != 1) return -EINVAL; return 0; }"} {"target": 1, "idx": 19557, "func": "DECLARE_LOOP_FILTER(mmxext) DECLARE_LOOP_FILTER(sse2) DECLARE_LOOP_FILTER(ssse3) DECLARE_LOOP_FILTER(sse4) #endif /* HAVE_YASM */ #define VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) \\ c->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel ## SIZE ## _h6_ ## OPT; \\ c->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel ## SIZE ## _v6_ ## OPT; \\ c->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel ## SIZE ## _h6v6_ ## OPT #define VP8_MC_FUNC(IDX, SIZE, OPT) \\ c->put_vp8_epel_pixels_tab[IDX][0][1] = ff_put_vp8_epel ## SIZE ## _h4_ ## OPT; \\ c->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel ## SIZE ## _v4_ ## OPT; \\ c->put_vp8_epel_pixels_tab[IDX][1][1] = ff_put_vp8_epel ## SIZE ## _h4v4_ ## OPT; \\ c->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel ## SIZE ## _h6v4_ ## OPT; \\ c->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel ## SIZE ## _h4v6_ ## OPT; \\ VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) #define VP8_BILINEAR_MC_FUNC(IDX, SIZE, OPT) \\ c->put_vp8_bilinear_pixels_tab[IDX][0][1] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][0][2] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c) { #if HAVE_YASM int cpu_flags = av_get_cpu_flags(); if (EXTERNAL_MMX(cpu_flags)) { c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx; c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx; #if ARCH_X86_32 c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx; c->vp8_idct_add = ff_vp8_idct_add_mmx; c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx; c->put_vp8_epel_pixels_tab[0][0][0] = c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx; #endif c->put_vp8_epel_pixels_tab[1][0][0] = c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx; #if ARCH_X86_32 c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx; c->vp8_h_loop_filter_simple = 
ff_vp8_h_loop_filter_simple_mmx; c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx; c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx; c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx; c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmx; c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx; c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx; c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx; #endif } /* note that 4-tap width=16 functions are missing because w=16 * is only used for luma, and luma is always a copy or sixtap. */ if (EXTERNAL_MMXEXT(cpu_flags)) { VP8_MC_FUNC(2, 4, mmxext); VP8_BILINEAR_MC_FUNC(2, 4, mmxext); #if ARCH_X86_32 VP8_LUMA_MC_FUNC(0, 16, mmxext); VP8_MC_FUNC(1, 8, mmxext); VP8_BILINEAR_MC_FUNC(0, 16, mmxext); VP8_BILINEAR_MC_FUNC(1, 8, mmxext); c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext; c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext; c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext; c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext; c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext; c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext; c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext; c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext; c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext; #endif } if (EXTERNAL_SSE(cpu_flags)) { c->vp8_idct_add = ff_vp8_idct_add_sse; c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse; c->put_vp8_epel_pixels_tab[0][0][0] = c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse; } if (EXTERNAL_SSE2(cpu_flags) && (cpu_flags & AV_CPU_FLAG_SSE2SLOW)) { VP8_LUMA_MC_FUNC(0, 16, sse2); VP8_MC_FUNC(1, 8, sse2); VP8_BILINEAR_MC_FUNC(0, 16, sse2); VP8_BILINEAR_MC_FUNC(1, 8, sse2); c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2; c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2; c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2; c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_sse2; c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2; } if (EXTERNAL_SSE2(cpu_flags)) { c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2; c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2; c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2; c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse2; c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2; } if (EXTERNAL_SSSE3(cpu_flags)) { VP8_LUMA_MC_FUNC(0, 16, ssse3); VP8_MC_FUNC(1, 8, ssse3); VP8_MC_FUNC(2, 4, ssse3); VP8_BILINEAR_MC_FUNC(0, 16, ssse3); VP8_BILINEAR_MC_FUNC(1, 8, ssse3); VP8_BILINEAR_MC_FUNC(2, 4, ssse3); c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3; c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3; c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3; c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3; c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3; c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_ssse3; c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_ssse3; c->vp8_v_loop_filter8uv = 
ff_vp8_v_loop_filter8uv_mbedge_ssse3; c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3; } if (EXTERNAL_SSE4(cpu_flags)) { c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4; c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4; c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse4; c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse4; } #endif /* HAVE_YASM */ }"} {"target": 0, "idx": 19567, "func": "static int get_bat (CPUState *env, mmu_ctx_t *ctx, target_ulong virtual, int rw, int type) { target_ulong *BATlt, *BATut, *BATu, *BATl; target_ulong base, BEPIl, BEPIu, bl; int i; int ret = -1; #if defined (DEBUG_BATS) if (loglevel != 0) { fprintf(logfile, \"%s: %cBAT v 0x\" ADDRX \"\\n\", __func__, type == ACCESS_CODE ? 'I' : 'D', virtual); } #endif switch (type) { case ACCESS_CODE: BATlt = env->IBAT[1]; BATut = env->IBAT[0]; break; default: BATlt = env->DBAT[1]; BATut = env->DBAT[0]; break; } #if defined (DEBUG_BATS) if (loglevel != 0) { fprintf(logfile, \"%s...: %cBAT v 0x\" ADDRX \"\\n\", __func__, type == ACCESS_CODE ? 'I' : 'D', virtual); } #endif base = virtual & 0xFFFC0000; for (i = 0; i < 4; i++) { BATu = &BATut[i]; BATl = &BATlt[i]; BEPIu = *BATu & 0xF0000000; BEPIl = *BATu & 0x0FFE0000; bl = (*BATu & 0x00001FFC) << 15; #if defined (DEBUG_BATS) if (loglevel != 0) { fprintf(logfile, \"%s: %cBAT%d v 0x\" ADDRX \" BATu 0x\" ADDRX \" BATl 0x\" ADDRX \"\\n\", __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl); } #endif if ((virtual & 0xF0000000) == BEPIu && ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { /* BAT matches */ if ((msr_pr == 0 && (*BATu & 0x00000002)) || (msr_pr == 1 && (*BATu & 0x00000001))) { /* Get physical address */ ctx->raddr = (*BATl & 0xF0000000) | ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | (virtual & 0x0001F000); if (*BATl & 0x00000001) ctx->prot = PAGE_READ; if (*BATl & 0x00000002) ctx->prot = PAGE_WRITE | PAGE_READ; #if defined (DEBUG_BATS) if (loglevel != 0) { fprintf(logfile, \"BAT %d match: r 0x\" PADDRX \" prot=%c%c\\n\", i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', ctx->prot & PAGE_WRITE ? 'W' : '-'); } #endif ret = 0; break; } } } if (ret < 0) { #if defined (DEBUG_BATS) if (loglevel != 0) { fprintf(logfile, \"no BAT match for 0x\" ADDRX \":\\n\", virtual); for (i = 0; i < 4; i++) { BATu = &BATut[i]; BATl = &BATlt[i]; BEPIu = *BATu & 0xF0000000; BEPIl = *BATu & 0x0FFE0000; bl = (*BATu & 0x00001FFC) << 15; fprintf(logfile, \"%s: %cBAT%d v 0x\" ADDRX \" BATu 0x\" ADDRX \" BATl 0x\" ADDRX \" \\n\\t\" \"0x\" ADDRX \" 0x\" ADDRX \" 0x\" ADDRX \"\\n\", __func__, type == ACCESS_CODE ? 
'I' : 'D', i, virtual, *BATu, *BATl, BEPIu, BEPIl, bl); } } #endif } /* No hit */ return ret; }"} {"target": 0, "idx": 19574, "func": "static void *migration_thread(void *opaque) { MigrationState *s = opaque; /* Used by the bandwidth calcs, updated later */ int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); int64_t initial_bytes = 0; int64_t max_size = 0; int64_t start_time = initial_time; int64_t end_time; bool old_vm_running = false; bool entered_postcopy = false; /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */ enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE; rcu_register_thread(); qemu_savevm_state_header(s->to_dst_file); if (migrate_postcopy_ram()) { /* Now tell the dest that it should open its end so it can reply */ qemu_savevm_send_open_return_path(s->to_dst_file); /* And do a ping that will make stuff easier to debug */ qemu_savevm_send_ping(s->to_dst_file, 1); /* * Tell the destination that we *might* want to do postcopy later; * if the other end can't do postcopy it should fail now, nice and * early. */ qemu_savevm_send_postcopy_advise(s->to_dst_file); } qemu_savevm_state_begin(s->to_dst_file, &s->params); s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; current_active_state = MIGRATION_STATUS_ACTIVE; migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE); trace_migration_thread_setup_complete(); while (s->state == MIGRATION_STATUS_ACTIVE || s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { int64_t current_time; uint64_t pending_size; if (!qemu_file_rate_limit(s->to_dst_file)) { uint64_t pend_post, pend_nonpost; qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost, &pend_post); pending_size = pend_nonpost + pend_post; trace_migrate_pending(pending_size, max_size, pend_post, pend_nonpost); if (pending_size && pending_size >= max_size) { /* Still a significant amount to transfer */ if (migrate_postcopy_ram() && s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE && pend_nonpost <= max_size && atomic_read(&s->start_postcopy)) { if (!postcopy_start(s, &old_vm_running)) { current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE; entered_postcopy = true; } continue; } /* Just another iteration step */ qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy); } else { trace_migration_thread_low_pending(pending_size); migration_completion(s, current_active_state, &old_vm_running, &start_time); break; } } if (qemu_file_get_error(s->to_dst_file)) { migrate_set_state(&s->state, current_active_state, MIGRATION_STATUS_FAILED); trace_migration_thread_file_err(); break; } current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); if (current_time >= initial_time + BUFFER_DELAY) { uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) - initial_bytes; uint64_t time_spent = current_time - initial_time; double bandwidth = (double)transferred_bytes / time_spent; max_size = bandwidth * migrate_max_downtime() / 1000000; s->mbps = (((double) transferred_bytes * 8.0) / ((double) time_spent / 1000.0)) / 1000.0 / 1000.0; trace_migrate_transferred(transferred_bytes, time_spent, bandwidth, max_size); /* if we haven't sent anything, we don't want to recalculate 10000 is a small enough number for our purposes */ if (s->dirty_bytes_rate && transferred_bytes > 10000) { s->expected_downtime = s->dirty_bytes_rate / bandwidth; } qemu_file_reset_rate_limit(s->to_dst_file); initial_time = current_time; initial_bytes = qemu_ftell(s->to_dst_file); } if 
(qemu_file_rate_limit(s->to_dst_file)) { /* usleep expects microseconds */ g_usleep((initial_time + BUFFER_DELAY - current_time)*1000); } } trace_migration_thread_after_loop(); /* If we enabled cpu throttling for auto-converge, turn it off. */ cpu_throttle_stop(); end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); qemu_mutex_lock_iothread(); qemu_savevm_state_cleanup(); if (s->state == MIGRATION_STATUS_COMPLETED) { uint64_t transferred_bytes = qemu_ftell(s->to_dst_file); s->total_time = end_time - s->total_time; if (!entered_postcopy) { s->downtime = end_time - start_time; } if (s->total_time) { s->mbps = (((double) transferred_bytes * 8.0) / ((double) s->total_time)) / 1000; } runstate_set(RUN_STATE_POSTMIGRATE); } else { if (old_vm_running && !entered_postcopy) { vm_start(); } else { if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { runstate_set(RUN_STATE_POSTMIGRATE); } } } qemu_bh_schedule(s->cleanup_bh); qemu_mutex_unlock_iothread(); rcu_unregister_thread(); return NULL; }"} {"target": 0, "idx": 19576, "func": "void replay_input_event(QemuConsole *src, InputEvent *evt) { if (replay_mode == REPLAY_MODE_PLAY) { /* Nothing */ } else if (replay_mode == REPLAY_MODE_RECORD) { replay_add_input_event(qapi_clone_InputEvent(evt)); } else { qemu_input_event_send_impl(src, evt); } }"} {"target": 1, "idx": 19593, "func": "static inline unsigned int get_uint(ShortenContext *s, int k) { if (s->version != 0) k = get_ur_golomb_shorten(&s->gb, ULONGSIZE); return get_ur_golomb_shorten(&s->gb, k); }"} {"target": 1, "idx": 19595, "func": "static void scale_coefs ( int32_t *dst, const int32_t *src, int dynrng, int len) { int i, shift, round; int16_t mul; int temp, temp1, temp2, temp3, temp4, temp5, temp6, temp7; mul = (dynrng & 0x1f) + 0x20; shift = 4 - ((dynrng << 23) >> 28); if (shift > 0 ) { round = 1 << (shift-1); for (i=0; i> shift; temp3 = src[i+3] * mul; temp2 = temp2 + round; dst[i+1] = temp1 >> shift; temp4 = src[i + 4] * mul; temp3 = temp3 + round; dst[i+2] = temp2 >> shift; temp5 = src[i+5] * mul; temp4 = temp4 + round; dst[i+3] = temp3 >> shift; temp6 = src[i+6] * mul; dst[i+4] = temp4 >> shift; temp5 = temp5 + round; temp7 = src[i+7] * mul; temp6 = temp6 + round; dst[i+5] = temp5 >> shift; temp7 = temp7 + round; dst[i+6] = temp6 >> shift; dst[i+7] = temp7 >> shift; } } else { shift = -shift; for (i=0; ipath = g_strdup(path); return 0; }"} {"target": 1, "idx": 19633, "func": "void vnc_sasl_client_cleanup(VncState *vs) { if (vs->sasl.conn) { vs->sasl.runSSF = vs->sasl.waitWriteSSF = vs->sasl.wantSSF = 0; vs->sasl.encodedLength = vs->sasl.encodedOffset = 0; vs->sasl.encoded = NULL; g_free(vs->sasl.username); free(vs->sasl.mechlist); vs->sasl.username = vs->sasl.mechlist = NULL; sasl_dispose(&vs->sasl.conn); vs->sasl.conn = NULL; } }"} {"target": 1, "idx": 19643, "func": "static int integratorcm_init(SysBusDevice *dev) { IntegratorCMState *s = INTEGRATOR_CM(dev); s->cm_osc = 0x01000048; /* ??? What should the high bits of this value be? 
*/ s->cm_auxosc = 0x0007feff; s->cm_sdram = 0x00011122; if (s->memsz >= 256) { integrator_spd[31] = 64; s->cm_sdram |= 0x10; } else if (s->memsz >= 128) { integrator_spd[31] = 32; s->cm_sdram |= 0x0c; } else if (s->memsz >= 64) { integrator_spd[31] = 16; s->cm_sdram |= 0x08; } else if (s->memsz >= 32) { integrator_spd[31] = 4; s->cm_sdram |= 0x04; } else { integrator_spd[31] = 2; } memcpy(integrator_spd + 73, \"QEMU-MEMORY\", 11); s->cm_init = 0x00000112; s->cm_refcnt_offset = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 24, 1000); memory_region_init_ram(&s->flash, OBJECT(s), \"integrator.flash\", 0x100000, &error_abort); vmstate_register_ram_global(&s->flash); memory_region_init_io(&s->iomem, OBJECT(s), &integratorcm_ops, s, \"integratorcm\", 0x00800000); sysbus_init_mmio(dev, &s->iomem); integratorcm_do_remap(s); /* ??? Save/restore. */ return 0; }"} {"target": 1, "idx": 19650, "func": "static void unterminated_array_comma(void) { QObject *obj = qobject_from_json(\"[32,\", NULL); g_assert(obj == NULL); }"} {"target": 0, "idx": 19654, "func": "static int svq1_decode_frame_header(GetBitContext *bitbuf, MpegEncContext *s) { int frame_size_code; skip_bits(bitbuf, 8); /* temporal_reference */ /* frame type */ s->pict_type = get_bits(bitbuf, 2) + 1; if (s->pict_type == 4) return AVERROR_INVALIDDATA; if (s->pict_type == AV_PICTURE_TYPE_I) { /* unknown fields */ if (s->f_code == 0x50 || s->f_code == 0x60) { int csum = get_bits(bitbuf, 16); csum = ff_svq1_packet_checksum(bitbuf->buffer, bitbuf->size_in_bits >> 3, csum); av_dlog(s->avctx, \"%s checksum (%02x) for packet data\\n\", (csum == 0) ? \"correct\" : \"incorrect\", csum); } if ((s->f_code ^ 0x10) >= 0x50) { uint8_t msg[256]; svq1_parse_string(bitbuf, msg); av_log(s->avctx, AV_LOG_ERROR, \"embedded message: \\\"%s\\\"\\n\", (char *)msg); } skip_bits(bitbuf, 2); skip_bits(bitbuf, 2); skip_bits1(bitbuf); /* load frame size */ frame_size_code = get_bits(bitbuf, 3); if (frame_size_code == 7) { /* load width, height (12 bits each) */ s->width = get_bits(bitbuf, 12); s->height = get_bits(bitbuf, 12); if (!s->width || !s->height) return AVERROR_INVALIDDATA; } else { /* get width, height from table */ s->width = ff_svq1_frame_size_table[frame_size_code].width; s->height = ff_svq1_frame_size_table[frame_size_code].height; } } /* unknown fields */ if (get_bits1(bitbuf) == 1) { skip_bits1(bitbuf); /* use packet checksum if (1) */ skip_bits1(bitbuf); /* component checksums after image data if (1) */ if (get_bits(bitbuf, 2) != 0) return AVERROR_INVALIDDATA; } if (get_bits1(bitbuf) == 1) { skip_bits1(bitbuf); skip_bits(bitbuf, 4); skip_bits1(bitbuf); skip_bits(bitbuf, 2); while (get_bits1(bitbuf) == 1) skip_bits(bitbuf, 8); } return 0; }"} {"target": 1, "idx": 19665, "func": "void prepare_grab(void) { fprintf(stderr, \"Must supply at least one input file\\n\"); exit(1); }"} {"target": 1, "idx": 19667, "func": "static int compare_codec_desc(const void *a, const void *b) { const AVCodecDescriptor * const *da = a; const AVCodecDescriptor * const *db = b; return (*da)->type != (*db)->type ? 
(*da)->type - (*db)->type : strcmp((*da)->name, (*db)->name); }"} {"target": 1, "idx": 19684, "func": "static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, struct iovec *iov, int niov, int create, enum AIOCBState aiocb_type) { int nr_copies = s->inode.nr_copies; SheepdogObjReq hdr; unsigned int wlen; int ret; uint64_t oid = aio_req->oid; unsigned int datalen = aio_req->data_len; uint64_t offset = aio_req->offset; uint8_t flags = aio_req->flags; uint64_t old_oid = aio_req->base_oid; if (!nr_copies) { error_report(\"bug\"); } memset(&hdr, 0, sizeof(hdr)); if (aiocb_type == AIOCB_READ_UDATA) { wlen = 0; hdr.opcode = SD_OP_READ_OBJ; hdr.flags = flags; } else if (create) { wlen = datalen; hdr.opcode = SD_OP_CREATE_AND_WRITE_OBJ; hdr.flags = SD_FLAG_CMD_WRITE | flags; } else { wlen = datalen; hdr.opcode = SD_OP_WRITE_OBJ; hdr.flags = SD_FLAG_CMD_WRITE | flags; } hdr.oid = oid; hdr.cow_oid = old_oid; hdr.copies = s->inode.nr_copies; hdr.data_length = datalen; hdr.offset = offset; hdr.id = aio_req->id; qemu_co_mutex_lock(&s->lock); s->co_send = qemu_coroutine_self(); qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, aio_flush_request, NULL, s); set_cork(s->fd, 1); /* send a header */ ret = do_write(s->fd, &hdr, sizeof(hdr)); if (ret) { error_report(\"failed to send a req, %s\", strerror(errno)); return -EIO; } if (wlen) { ret = do_writev(s->fd, iov, wlen, aio_req->iov_offset); if (ret) { error_report(\"failed to send a data, %s\", strerror(errno)); return -EIO; } } set_cork(s->fd, 0); qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, aio_flush_request, NULL, s); return 0; }"} {"target": 1, "idx": 19685, "func": "static void ehci_writeback_async_complete_packet(EHCIPacket *p) { EHCIQueue *q = p->queue; int state; state = ehci_get_state(q->ehci, q->async); ehci_state_executing(q); ehci_state_writeback(q); /* Frees the packet! 
*/ if (!(q->qh.token & QTD_TOKEN_HALT)) { ehci_state_advqueue(q); ehci_set_state(q->ehci, q->async, state);"} {"target": 1, "idx": 19709, "func": "static void gen_ldst_pair (DisasContext *ctx, uint32_t opc, int rd, int base, int16_t offset) { const char *opn = \"ldst_pair\"; TCGv t0, t1; if (ctx->hflags & MIPS_HFLAG_BMASK || rd == 31 || rd == base) { generate_exception(ctx, EXCP_RI); return; } t0 = tcg_temp_new(); t1 = tcg_temp_new(); gen_base_offset_addr(ctx, t0, base, offset); switch (opc) { case LWP: save_cpu_state(ctx, 0); op_ld_lw(t1, t0, ctx); gen_store_gpr(t1, rd); tcg_gen_movi_tl(t1, 4); gen_op_addr_add(ctx, t0, t0, t1); op_ld_lw(t1, t0, ctx); gen_store_gpr(t1, rd+1); opn = \"lwp\"; break; case SWP: save_cpu_state(ctx, 0); gen_load_gpr(t1, rd); op_st_sw(t1, t0, ctx); tcg_gen_movi_tl(t1, 4); gen_op_addr_add(ctx, t0, t0, t1); gen_load_gpr(t1, rd+1); op_st_sw(t1, t0, ctx); opn = \"swp\"; break; #ifdef TARGET_MIPS64 case LDP: save_cpu_state(ctx, 0); op_ld_ld(t1, t0, ctx); gen_store_gpr(t1, rd); tcg_gen_movi_tl(t1, 8); gen_op_addr_add(ctx, t0, t0, t1); op_ld_ld(t1, t0, ctx); gen_store_gpr(t1, rd+1); opn = \"ldp\"; break; case SDP: save_cpu_state(ctx, 0); gen_load_gpr(t1, rd); op_st_sd(t1, t0, ctx); tcg_gen_movi_tl(t1, 8); gen_op_addr_add(ctx, t0, t0, t1); gen_load_gpr(t1, rd+1); op_st_sd(t1, t0, ctx); opn = \"sdp\"; break; #endif } (void)opn; /* avoid a compiler warning */ MIPS_DEBUG(\"%s, %s, %d(%s)\", opn, regnames[rd], offset, regnames[base]); tcg_temp_free(t0); tcg_temp_free(t1); }"} {"target": 1, "idx": 19716, "func": "static void wdt_diag288_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); DIAG288Class *diag288 = DIAG288_CLASS(klass); dc->realize = wdt_diag288_realize; dc->unrealize = wdt_diag288_unrealize; dc->reset = wdt_diag288_reset; set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->vmsd = &vmstate_diag288; diag288->handle_timer = wdt_diag288_handle_timer; }"} {"target": 0, "idx": 19721, "func": "int ff_estimate_motion_b(MpegEncContext * s, int mb_x, int mb_y, int16_t (*mv_table)[2], uint8_t *ref_picture, int f_code) { int mx, my, range, dmin; int xmin, ymin, xmax, ymax; int rel_xmin, rel_ymin, rel_xmax, rel_ymax; int pred_x=0, pred_y=0; int P[6][2]; const int shift= 1+s->quarter_sample; const int mot_stride = s->mb_width + 2; const int mot_xy = (mb_y + 1)*mot_stride + mb_x + 1; get_limits(s, &range, &xmin, &ymin, &xmax, &ymax, f_code); switch(s->me_method) { case ME_ZERO: default: no_motion_search(s, &mx, &my); dmin = 0; break; case ME_FULL: dmin = full_motion_search(s, &mx, &my, range, xmin, ymin, xmax, ymax, ref_picture); break; case ME_LOG: dmin = log_motion_search(s, &mx, &my, range / 2, xmin, ymin, xmax, ymax, ref_picture); break; case ME_PHODS: dmin = phods_motion_search(s, &mx, &my, range / 2, xmin, ymin, xmax, ymax, ref_picture); break; case ME_X1: case ME_EPZS: { rel_xmin= xmin - mb_x*16; rel_xmax= xmax - mb_x*16; rel_ymin= ymin - mb_y*16; rel_ymax= ymax - mb_y*16; P[0][0] = mv_table[mot_xy ][0]; P[0][1] = mv_table[mot_xy ][1]; P[1][0] = mv_table[mot_xy - 1][0]; P[1][1] = mv_table[mot_xy - 1][1]; if(P[1][0] > (rel_xmax<first_slice_line || s->first_gob_line)) { P[4][0] = P[1][0]; P[4][1] = P[1][1]; } else { P[2][0] = mv_table[mot_xy - mot_stride ][0]; P[2][1] = mv_table[mot_xy - mot_stride ][1]; P[3][0] = mv_table[mot_xy - mot_stride + 1 ][0]; P[3][1] = mv_table[mot_xy - mot_stride + 1 ][1]; if(P[2][1] > (rel_ymax< (rel_ymax<new_picture[0] + (yy * s->linesize) + xx; /* At this point (mx,my) are full-pell and the absolute 
displacement */ // ppix = ref_picture + (my * s->linesize) + mx; dmin= halfpel_motion_search(s, &mx, &my, dmin, xmin, ymin, xmax, ymax, pred_x, pred_y, ref_picture); // s->mb_type[mb_y*s->mb_width + mb_x]= mb_type; mv_table[mot_xy][0]= mx; mv_table[mot_xy][1]= my; return dmin; }"} {"target": 0, "idx": 19724, "func": "static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){ MpegEncContext * const s = &h->s; AVCodecContext * const avctx= s->avctx; H264Context *hx; ///< thread context int buf_index; int context_count; int next_avc; int pass = !(avctx->active_thread_type & FF_THREAD_FRAME); int nals_needed=0; ///< number of NALs that need decoding before the next frame thread starts int nal_index; h->nal_unit_type= 0; h->max_contexts = (HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_SLICE)) ? avctx->thread_count : 1; if(!(s->flags2 & CODEC_FLAG2_CHUNKS)){ h->current_slice = 0; if (!s->first_field) s->current_picture_ptr= NULL; ff_h264_reset_sei(h); } for(;pass <= 1;pass++){ buf_index = 0; context_count = 0; next_avc = h->is_avc ? 0 : buf_size; nal_index = 0; for(;;){ int consumed; int dst_length; int bit_length; uint8_t *ptr; int i, nalsize = 0; int err; if(buf_index >= next_avc) { if (buf_index >= buf_size - h->nal_length_size) break; nalsize = 0; for(i = 0; i < h->nal_length_size; i++) nalsize = (nalsize << 8) | buf[buf_index++]; if(nalsize <= 0 || nalsize > buf_size - buf_index){ av_log(h->s.avctx, AV_LOG_ERROR, \"AVC: nal size %d\\n\", nalsize); break; } next_avc= buf_index + nalsize; } else { // start code prefix search for(; buf_index + 3 < next_avc; buf_index++){ // This should always succeed in the first iteration. if(buf[buf_index] == 0 && buf[buf_index+1] == 0 && buf[buf_index+2] == 1) break; } if(buf_index+3 >= buf_size) break; buf_index+=3; if(buf_index >= next_avc) continue; } hx = h->thread_context[context_count]; ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, next_avc - buf_index); if (ptr==NULL || dst_length < 0){ return -1; } i= buf_index + consumed; if((s->workaround_bugs & FF_BUG_AUTODETECT) && i+3workaround_bugs |= FF_BUG_TRUNCATED; if(!(s->workaround_bugs & FF_BUG_TRUNCATED)){ while(dst_length > 0 && ptr[dst_length - 1] == 0) dst_length--; } bit_length= !dst_length ? 0 : (8*dst_length - ff_h264_decode_rbsp_trailing(h, ptr + dst_length - 1)); if(s->avctx->debug&FF_DEBUG_STARTCODE){ av_log(h->s.avctx, AV_LOG_DEBUG, \"NAL %d/%d at %d/%d length %d pass %d\\n\", hx->nal_unit_type, hx->nal_ref_idc, buf_index, buf_size, dst_length, pass); } if (h->is_avc && (nalsize != consumed) && nalsize){ av_log(h->s.avctx, AV_LOG_DEBUG, \"AVC: Consumed only %d bytes instead of %d\\n\", consumed, nalsize); } buf_index += consumed; nal_index++; if(pass == 0) { // packets can sometimes contain multiple PPS/SPS // e.g. 
two PAFF field pictures in one packet, or a demuxer which splits NALs strangely // if so, when frame threading we can't start the next thread until we've read all of them switch (hx->nal_unit_type) { case NAL_SPS: case NAL_PPS: nals_needed = nal_index; break; case NAL_IDR_SLICE: case NAL_SLICE: init_get_bits(&hx->s.gb, ptr, bit_length); if (!get_ue_golomb(&hx->s.gb)) nals_needed = nal_index; } continue; } //FIXME do not discard SEI id if(avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0) continue; again: err = 0; switch(hx->nal_unit_type){ case NAL_IDR_SLICE: if (h->nal_unit_type != NAL_IDR_SLICE) { av_log(h->s.avctx, AV_LOG_ERROR, \"Invalid mix of idr and non-idr slices\"); return -1; } idr(h); // FIXME ensure we don't lose some frames if there is reordering case NAL_SLICE: init_get_bits(&hx->s.gb, ptr, bit_length); hx->intra_gb_ptr= hx->inter_gb_ptr= &hx->s.gb; hx->s.data_partitioning = 0; if((err = decode_slice_header(hx, h))) break; if ( h->sei_recovery_frame_cnt >= 0 && ((h->recovery_frame - h->frame_num) & ((1 << h->sps.log2_max_frame_num)-1)) > h->sei_recovery_frame_cnt) { h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) % (1 << h->sps.log2_max_frame_num); } s->current_picture_ptr->f.key_frame |= (hx->nal_unit_type == NAL_IDR_SLICE); if (h->recovery_frame == h->frame_num) { h->sync |= 1; h->recovery_frame = -1; } h->sync |= !!s->current_picture_ptr->f.key_frame; h->sync |= 3*!!(s->flags2 & CODEC_FLAG2_SHOW_ALL); s->current_picture_ptr->sync = h->sync; if (h->current_slice == 1) { if(!(s->flags2 & CODEC_FLAG2_CHUNKS)) { decode_postinit(h, nal_index >= nals_needed); } if (s->avctx->hwaccel && s->avctx->hwaccel->start_frame(s->avctx, NULL, 0) < 0) return -1; if(CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) ff_vdpau_h264_picture_start(s); } if(hx->redundant_pic_count==0 && (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B) && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==AV_PICTURE_TYPE_I) && avctx->skip_frame < AVDISCARD_ALL){ if(avctx->hwaccel) { if (avctx->hwaccel->decode_slice(avctx, &buf[buf_index - consumed], consumed) < 0) return -1; }else if(CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU){ static const uint8_t start_code[] = {0x00, 0x00, 0x01}; ff_vdpau_add_data_chunk(s, start_code, sizeof(start_code)); ff_vdpau_add_data_chunk(s, &buf[buf_index - consumed], consumed ); }else context_count++; } break; case NAL_DPA: init_get_bits(&hx->s.gb, ptr, bit_length); hx->intra_gb_ptr= hx->inter_gb_ptr= NULL; if ((err = decode_slice_header(hx, h)) < 0) break; hx->s.data_partitioning = 1; break; case NAL_DPB: init_get_bits(&hx->intra_gb, ptr, bit_length); hx->intra_gb_ptr= &hx->intra_gb; break; case NAL_DPC: init_get_bits(&hx->inter_gb, ptr, bit_length); hx->inter_gb_ptr= &hx->inter_gb; if(hx->redundant_pic_count==0 && hx->intra_gb_ptr && hx->s.data_partitioning && s->context_initialized && (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B) && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==AV_PICTURE_TYPE_I) && avctx->skip_frame < AVDISCARD_ALL) context_count++; break; case NAL_SEI: init_get_bits(&s->gb, ptr, bit_length); ff_h264_decode_sei(h); break; case NAL_SPS: init_get_bits(&s->gb, ptr, bit_length); if(ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? 
(nalsize != consumed) && nalsize : 1)){ av_log(h->s.avctx, AV_LOG_DEBUG, \"SPS decoding failure, trying alternative mode\\n\"); if(h->is_avc) av_assert0(next_avc - buf_index + consumed == nalsize); init_get_bits(&s->gb, &buf[buf_index + 1 - consumed], 8*(next_avc - buf_index + consumed)); ff_h264_decode_seq_parameter_set(h); } if (s->flags& CODEC_FLAG_LOW_DELAY || (h->sps.bitstream_restriction_flag && !h->sps.num_reorder_frames)) s->low_delay=1; if(avctx->has_b_frames < 2) avctx->has_b_frames= !s->low_delay; break; case NAL_PPS: init_get_bits(&s->gb, ptr, bit_length); ff_h264_decode_picture_parameter_set(h, bit_length); break; case NAL_AUD: case NAL_END_SEQUENCE: case NAL_END_STREAM: case NAL_FILLER_DATA: case NAL_SPS_EXT: case NAL_AUXILIARY_SLICE: break; default: av_log(avctx, AV_LOG_DEBUG, \"Unknown NAL code: %d (%d bits)\\n\", hx->nal_unit_type, bit_length); } if(context_count == h->max_contexts) { execute_decode_slices(h, context_count); context_count = 0; } if (err < 0) av_log(h->s.avctx, AV_LOG_ERROR, \"decode_slice_header error\\n\"); else if(err == 1) { /* Slice could not be decoded in parallel mode, copy down * NAL unit stuff to context 0 and restart. Note that * rbsp_buffer is not transferred, but since we no longer * run in parallel mode this should not be an issue. */ h->nal_unit_type = hx->nal_unit_type; h->nal_ref_idc = hx->nal_ref_idc; hx = h; goto again; } } } if(context_count) execute_decode_slices(h, context_count); return buf_index; }"} {"target": 1, "idx": 19728, "func": "static int dnxhd_find_frame_end(DNXHDParserContext *dctx, const uint8_t *buf, int buf_size) { ParseContext *pc = &dctx->pc; uint64_t state = pc->state64; int pic_found = pc->frame_start_found; int i = 0; if (!pic_found) { for (i = 0; i < buf_size; i++) { state = (state << 8) | buf[i]; if (ff_dnxhd_check_header_prefix(state & 0xffffffffff00LL) != 0) { i++; pic_found = 1; dctx->cur_byte = 0; dctx->remaining = 0; break; } } } if (pic_found && !dctx->remaining) { if (!buf_size) /* EOF considered as end of frame */ return 0; for (; i < buf_size; i++) { dctx->cur_byte++; state = (state << 8) | buf[i]; if (dctx->cur_byte == 24) { dctx->h = (state >> 32) & 0xFFFF; } else if (dctx->cur_byte == 26) { dctx->w = (state >> 32) & 0xFFFF; } else if (dctx->cur_byte == 42) { int cid = (state >> 32) & 0xFFFFFFFF; if (cid <= 0) continue; dctx->remaining = avpriv_dnxhd_get_frame_size(cid); if (dctx->remaining <= 0) { dctx->remaining = ff_dnxhd_get_hr_frame_size(cid, dctx->w, dctx->h); if (dctx->remaining <= 0) return dctx->remaining; } if (buf_size - i + 47 >= dctx->remaining) { int remaining = dctx->remaining; pc->frame_start_found = 0; pc->state64 = -1; dctx->cur_byte = 0; dctx->remaining = 0; return remaining; } else { dctx->remaining -= buf_size; } } } } else if (pic_found) { if (dctx->remaining > buf_size) { dctx->remaining -= buf_size; } else { int remaining = dctx->remaining; pc->frame_start_found = 0; pc->state64 = -1; dctx->cur_byte = 0; dctx->remaining = 0; return remaining; } } pc->frame_start_found = pic_found; pc->state64 = state; return END_NOT_FOUND; }"} {"target": 0, "idx": 19759, "func": "static av_cold int vaapi_encode_h264_init_constant_bitrate(AVCodecContext *avctx) { VAAPIEncodeContext *ctx = avctx->priv_data; VAAPIEncodeH264Context *priv = ctx->priv_data; int hrd_buffer_size; int hrd_initial_buffer_fullness; if (avctx->bit_rate > INT32_MAX) { av_log(avctx, AV_LOG_ERROR, \"Target bitrate of 2^31 bps or \" \"higher is not supported.\\n\"); return AVERROR(EINVAL); } if (avctx->rc_buffer_size) 
hrd_buffer_size = avctx->rc_buffer_size; else hrd_buffer_size = avctx->bit_rate; if (avctx->rc_initial_buffer_occupancy) hrd_initial_buffer_fullness = avctx->rc_initial_buffer_occupancy; else hrd_initial_buffer_fullness = hrd_buffer_size * 3 / 4; priv->rc_params.misc.type = VAEncMiscParameterTypeRateControl; priv->rc_params.rc = (VAEncMiscParameterRateControl) { .bits_per_second = avctx->bit_rate, .target_percentage = 66, .window_size = 1000, .initial_qp = (avctx->qmax >= 0 ? avctx->qmax : 40), .min_qp = (avctx->qmin >= 0 ? avctx->qmin : 18), .basic_unit_size = 0, }; ctx->global_params[ctx->nb_global_params] = &priv->rc_params.misc; ctx->global_params_size[ctx->nb_global_params++] = sizeof(priv->rc_params); priv->hrd_params.misc.type = VAEncMiscParameterTypeHRD; priv->hrd_params.hrd = (VAEncMiscParameterHRD) { .initial_buffer_fullness = hrd_initial_buffer_fullness, .buffer_size = hrd_buffer_size, }; ctx->global_params[ctx->nb_global_params] = &priv->hrd_params.misc; ctx->global_params_size[ctx->nb_global_params++] = sizeof(priv->hrd_params); // These still need to be set for pic_init_qp/slice_qp_delta. priv->fixed_qp_idr = 26; priv->fixed_qp_p = 26; priv->fixed_qp_b = 26; av_log(avctx, AV_LOG_DEBUG, \"Using constant-bitrate = %\"PRId64\" bps.\\n\", avctx->bit_rate); return 0; }"} {"target": 1, "idx": 19782, "func": "static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWidth, long srcHeight, long srcStride, long dstStride) { long x,y; dst[0]= src[0]; // first line for(x=0; x>2; dst[2*x+2]= ( src[x] + 3*src[x+1])>>2; } dst[2*srcWidth-1]= src[srcWidth-1]; dst+= dstStride; for(y=1; y>2; dst[dstStride]= ( src[0] + 3*src[srcStride])>>2; for(x=mmxSize-1; x>2; dst[2*x+dstStride+2]= ( src[x+0] + 3*src[x+srcStride+1])>>2; dst[2*x+dstStride+1]= ( src[x+1] + 3*src[x+srcStride ])>>2; dst[2*x +2]= (3*src[x+1] + src[x+srcStride ])>>2; } dst[srcWidth*2 -1 ]= (3*src[srcWidth-1] + src[srcWidth-1 + srcStride])>>2; dst[srcWidth*2 -1 + dstStride]= ( src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2; dst+=dstStride*2; src+=srcStride; } // last line #if 1 dst[0]= src[0]; for(x=0; x>2; dst[2*x+2]= ( src[x] + 3*src[x+1])>>2; } dst[2*srcWidth-1]= src[srcWidth-1]; #else for(x=0; xpr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } if (ctx->opcode & 0x00010000) { /* Special form that does not need any synchronisation */ TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE)); tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); tcg_gen_or_tl(cpu_msr, cpu_msr, t0); tcg_temp_free(t0); } else { TCGv msr = tcg_temp_new(); /* XXX: we need to update nip before the store * if we enter power saving mode, we will exit the loop * directly from ppc_store_msr */ gen_update_nip(ctx, ctx->nip); #if defined(TARGET_PPC64) tcg_gen_deposit_tl(msr, cpu_msr, cpu_gpr[rS(ctx->opcode)], 0, 32); #else tcg_gen_mov_tl(msr, cpu_gpr[rS(ctx->opcode)]); #endif gen_helper_store_msr(cpu_env, msr); tcg_temp_free(msr); /* Must stop the translation as machine state (may have) changed */ /* Note that mtmsr is not always defined as context-synchronizing */ gen_stop_exception(ctx); } #endif }"} {"target": 1, "idx": 19809, "func": "static void flatview_ref(FlatView *view) { atomic_inc(&view->ref); }"} {"target": 1, "idx": 19827, "func": "static int huff_build10(VLC *vlc, uint8_t *len) { HuffEntry he[1024]; uint32_t codes[1024]; uint8_t bits[1024]; uint16_t syms[1024]; uint32_t code; int i; for (i = 0; i < 1024; i++) { he[i].sym = 1023 - i; 
he[i].len = len[i]; } AV_QSORT(he, 1024, HuffEntry, huff_cmp_len10); code = 1; for (i = 1023; i >= 0; i--) { codes[i] = code >> (32 - he[i].len); bits[i] = he[i].len; syms[i] = he[i].sym; code += 0x80000000u >> (he[i].len - 1); } ff_free_vlc(vlc); return ff_init_vlc_sparse(vlc, FFMIN(he[1023].len, 12), 1024, bits, sizeof(*bits), sizeof(*bits), codes, sizeof(*codes), sizeof(*codes), syms, sizeof(*syms), sizeof(*syms), 0); }"} {"target": 1, "idx": 19834, "func": "static void test_task_complete(void) { QIOTask *task; Object *obj = object_new(TYPE_DUMMY); Object *src; struct TestTaskData data = { NULL, NULL, false }; task = qio_task_new(obj, task_callback, &data, NULL); src = qio_task_get_source(task); qio_task_complete(task); g_assert(obj == src); object_unref(obj); object_unref(src); g_assert(data.source == obj); g_assert(data.err == NULL); g_assert(data.freed == false); }"} {"target": 0, "idx": 19886, "func": "static int nut_read_header(AVFormatContext *s, AVFormatParameters *ap) { NUTContext *nut = s->priv_data; ByteIOContext *bc = &s->pb; int64_t pos; int inited_stream_count; nut->avf= s; av_set_pts_info(s, 60, 1, AV_TIME_BASE); /* main header */ pos=0; for(;;){ if (find_startcode(bc, MAIN_STARTCODE, pos)<0){ av_log(s, AV_LOG_ERROR, \"no main startcode found\\n\"); return -1; } pos= url_ftell(bc); if(decode_main_header(nut) >= 0) break; } s->bit_rate = 0; nut->stream = av_malloc(sizeof(StreamContext)*nut->stream_count); /* stream headers */ pos=0; for(inited_stream_count=0; inited_stream_count < nut->stream_count;){ if (find_startcode(bc, STREAM_STARTCODE, pos)<0){ av_log(s, AV_LOG_ERROR, \"not all stream headers found\\n\"); return -1; } pos= url_ftell(bc); if(decode_stream_header(nut) >= 0) inited_stream_count++; } /* info headers */ pos=0; for(;;){ uint64_t startcode= find_any_startcode(bc, pos); pos= url_ftell(bc); if(startcode==0){ av_log(s, AV_LOG_ERROR, \"EOF before video frames\\n\"); return -1; }else if(startcode == KEYFRAME_STARTCODE){ url_fseek(bc, -8, SEEK_CUR); //FIXME break; }else if(startcode != INFO_STARTCODE){ continue; } decode_info_header(nut); } return 0; }"} {"target": 1, "idx": 19889, "func": "int ff_v4l2_m2m_codec_reinit(V4L2m2mContext* s) { int ret; av_log(s->avctx, AV_LOG_DEBUG, \"reinit context\\n\"); /* 1. streamoff */ ret = ff_v4l2_context_set_status(&s->capture, VIDIOC_STREAMOFF); if (ret) av_log(s->avctx, AV_LOG_ERROR, \"capture VIDIOC_STREAMOFF\\n\"); /* 2. unmap the capture buffers (v4l2 and ffmpeg): * we must wait for all references to be released before being allowed * to queue new buffers. */ av_log(s->avctx, AV_LOG_DEBUG, \"waiting for user to release AVBufferRefs\\n\"); if (atomic_load(&s->refcount)) while(sem_wait(&s->refsync) == -1 && errno == EINTR); ff_v4l2_context_release(&s->capture); /* 3. get the new capture format */ ret = ff_v4l2_context_get_format(&s->capture); if (ret) { av_log(s->avctx, AV_LOG_ERROR, \"query the new capture format\\n\"); return ret; } /* 4. set the capture format */ ret = ff_v4l2_context_set_format(&s->capture); if (ret) { av_log(s->avctx, AV_LOG_ERROR, \"setting capture format\\n\"); return ret; } /* 5. 
complete reinit */ sem_destroy(&s->refsync); sem_init(&s->refsync, 0, 0); s->draining = 0; s->reinit = 0; return 0; }"} {"target": 1, "idx": 19892, "func": "static int wsvqa_read_header(AVFormatContext *s, AVFormatParameters *ap) { WsVqaDemuxContext *wsvqa = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; unsigned char *header; unsigned char scratch[VQA_PREAMBLE_SIZE]; unsigned int chunk_tag; unsigned int chunk_size; /* initialize the video decoder stream */ st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); av_set_pts_info(st, 33, 1, VQA_FRAMERATE); wsvqa->video_stream_index = st->index; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_WS_VQA; st->codec->codec_tag = 0; /* no fourcc */ /* skip to the start of the VQA header */ avio_seek(pb, 20, SEEK_SET); /* the VQA header needs to go to the decoder */ st->codec->extradata_size = VQA_HEADER_SIZE; st->codec->extradata = av_mallocz(VQA_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE); header = (unsigned char *)st->codec->extradata; if (avio_read(pb, st->codec->extradata, VQA_HEADER_SIZE) != VQA_HEADER_SIZE) { av_free(st->codec->extradata); return AVERROR(EIO); } st->codec->width = AV_RL16(&header[6]); st->codec->height = AV_RL16(&header[8]); /* initialize the audio decoder stream for VQA v1 or nonzero samplerate */ if (AV_RL16(&header[24]) || (AV_RL16(&header[0]) == 1 && AV_RL16(&header[2]) == 1)) { st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); av_set_pts_info(st, 33, 1, VQA_FRAMERATE); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; if (AV_RL16(&header[0]) == 1) st->codec->codec_id = CODEC_ID_WESTWOOD_SND1; else st->codec->codec_id = CODEC_ID_ADPCM_IMA_WS; st->codec->codec_tag = 0; /* no tag */ st->codec->sample_rate = AV_RL16(&header[24]); if (!st->codec->sample_rate) st->codec->sample_rate = 22050; st->codec->channels = header[26]; if (!st->codec->channels) st->codec->channels = 1; st->codec->bits_per_coded_sample = 16; st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample / 4; st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample; wsvqa->audio_stream_index = st->index; wsvqa->audio_samplerate = st->codec->sample_rate; wsvqa->audio_channels = st->codec->channels; wsvqa->audio_frame_counter = 0; } /* there are 0 or more chunks before the FINF chunk; iterate until * FINF has been skipped and the file will be ready to be demuxed */ do { if (avio_read(pb, scratch, VQA_PREAMBLE_SIZE) != VQA_PREAMBLE_SIZE) { av_free(st->codec->extradata); return AVERROR(EIO); } chunk_tag = AV_RB32(&scratch[0]); chunk_size = AV_RB32(&scratch[4]); /* catch any unknown header tags, for curiousity */ switch (chunk_tag) { case CINF_TAG: case CINH_TAG: case CIND_TAG: case PINF_TAG: case PINH_TAG: case PIND_TAG: case FINF_TAG: case CMDS_TAG: break; default: av_log (s, AV_LOG_ERROR, \" note: unknown chunk seen (%c%c%c%c)\\n\", scratch[0], scratch[1], scratch[2], scratch[3]); break; } avio_skip(pb, chunk_size); } while (chunk_tag != FINF_TAG); return 0; }"} {"target": 1, "idx": 19908, "func": "int ff_wma_init(AVCodecContext *avctx, int flags2) { WMACodecContext *s = avctx->priv_data; int i; float bps1, high_freq; volatile float bps; int sample_rate1; int coef_vlc_table; if ( avctx->sample_rate <= 0 || avctx->sample_rate > 50000 || avctx->channels <= 0 || avctx->channels > 2 || avctx->bit_rate <= 0) return -1; ff_fmt_convert_init(&s->fmt_conv, avctx); avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT); if (avctx->codec->id == 
AV_CODEC_ID_WMAV1) { s->version = 1; } else { s->version = 2; /* compute MDCT block size */ s->frame_len_bits = ff_wma_get_frame_len_bits(avctx->sample_rate, s->version, 0); s->next_block_len_bits = s->frame_len_bits; s->prev_block_len_bits = s->frame_len_bits; s->block_len_bits = s->frame_len_bits; s->frame_len = 1 << s->frame_len_bits; if (s->use_variable_block_len) { int nb_max, nb; nb = ((flags2 >> 3) & 3) + 1; if ((avctx->bit_rate / avctx->channels) >= 32000) nb += 2; nb_max = s->frame_len_bits - BLOCK_MIN_BITS; if (nb > nb_max) nb = nb_max; s->nb_block_sizes = nb + 1; } else { s->nb_block_sizes = 1; /* init rate dependent parameters */ s->use_noise_coding = 1; high_freq = avctx->sample_rate * 0.5; /* if version 2, then the rates are normalized */ sample_rate1 = avctx->sample_rate; if (s->version == 2) { if (sample_rate1 >= 44100) { sample_rate1 = 44100; } else if (sample_rate1 >= 22050) { sample_rate1 = 22050; } else if (sample_rate1 >= 16000) { sample_rate1 = 16000; } else if (sample_rate1 >= 11025) { sample_rate1 = 11025; } else if (sample_rate1 >= 8000) { sample_rate1 = 8000; bps = (float)avctx->bit_rate / (float)(avctx->channels * avctx->sample_rate); s->byte_offset_bits = av_log2((int)(bps * s->frame_len / 8.0 + 0.5)) + 2; /* compute high frequency value and choose if noise coding should be activated */ bps1 = bps; if (avctx->channels == 2) bps1 = bps * 1.6; if (sample_rate1 == 44100) { if (bps1 >= 0.61) { s->use_noise_coding = 0; } else { high_freq = high_freq * 0.4; } else if (sample_rate1 == 22050) { if (bps1 >= 1.16) { s->use_noise_coding = 0; } else if (bps1 >= 0.72) { high_freq = high_freq * 0.7; } else { high_freq = high_freq * 0.6; } else if (sample_rate1 == 16000) { if (bps > 0.5) { high_freq = high_freq * 0.5; } else { high_freq = high_freq * 0.3; } else if (sample_rate1 == 11025) { high_freq = high_freq * 0.7; } else if (sample_rate1 == 8000) { if (bps <= 0.625) { high_freq = high_freq * 0.5; } else if (bps > 0.75) { s->use_noise_coding = 0; } else { high_freq = high_freq * 0.65; } else { if (bps >= 0.8) { high_freq = high_freq * 0.75; } else if (bps >= 0.6) { high_freq = high_freq * 0.6; } else { high_freq = high_freq * 0.5; av_dlog(s->avctx, \"flags2=0x%x\\n\", flags2); av_dlog(s->avctx, \"version=%d channels=%d sample_rate=%d bitrate=%d block_align=%d\\n\", s->version, avctx->channels, avctx->sample_rate, avctx->bit_rate, avctx->block_align); av_dlog(s->avctx, \"bps=%f bps1=%f high_freq=%f bitoffset=%d\\n\", bps, bps1, high_freq, s->byte_offset_bits); av_dlog(s->avctx, \"use_noise_coding=%d use_exp_vlc=%d nb_block_sizes=%d\\n\", s->use_noise_coding, s->use_exp_vlc, s->nb_block_sizes); /* compute the scale factor band sizes for each MDCT block size */ { int a, b, pos, lpos, k, block_len, i, j, n; const uint8_t *table; if (s->version == 1) { s->coefs_start = 3; } else { s->coefs_start = 0; for (k = 0; k < s->nb_block_sizes; k++) { block_len = s->frame_len >> k; if (s->version == 1) { lpos = 0; for (i = 0; i < 25; i++) { a = ff_wma_critical_freqs[i]; b = avctx->sample_rate; pos = ((block_len * 2 * a) + (b >> 1)) / b; if (pos > block_len) pos = block_len; s->exponent_bands[0][i] = pos - lpos; if (pos >= block_len) { i++; break; lpos = pos; s->exponent_sizes[0] = i; } else { /* hardcoded tables */ table = NULL; a = s->frame_len_bits - BLOCK_MIN_BITS - k; if (a < 3) { if (avctx->sample_rate >= 44100) { table = exponent_band_44100[a]; } else if (avctx->sample_rate >= 32000) { table = exponent_band_32000[a]; } else if (avctx->sample_rate >= 22050) { table = 
exponent_band_22050[a]; if (table) { n = *table++; for (i = 0; i < n; i++) s->exponent_bands[k][i] = table[i]; s->exponent_sizes[k] = n; } else { j = 0; lpos = 0; for (i = 0; i < 25; i++) { a = ff_wma_critical_freqs[i]; b = avctx->sample_rate; pos = ((block_len * 2 * a) + (b << 1)) / (4 * b); pos <<= 2; if (pos > block_len) pos = block_len; if (pos > lpos) s->exponent_bands[k][j++] = pos - lpos; if (pos >= block_len) break; lpos = pos; s->exponent_sizes[k] = j; /* max number of coefs */ s->coefs_end[k] = (s->frame_len - ((s->frame_len * 9) / 100)) >> k; /* high freq computation */ s->high_band_start[k] = (int)((block_len * 2 * high_freq) / avctx->sample_rate + 0.5); n = s->exponent_sizes[k]; j = 0; pos = 0; for (i = 0; i < n; i++) { int start, end; start = pos; pos += s->exponent_bands[k][i]; end = pos; if (start < s->high_band_start[k]) start = s->high_band_start[k]; if (end > s->coefs_end[k]) end = s->coefs_end[k]; if (end > start) s->exponent_high_bands[k][j++] = end - start; s->exponent_high_sizes[k] = j; #if 0 tprintf(s->avctx, \"%5d: coefs_end=%d high_band_start=%d nb_high_bands=%d: \", s->frame_len >> k, s->coefs_end[k], s->high_band_start[k], s->exponent_high_sizes[k]); for (j = 0; j < s->exponent_high_sizes[k]; j++) tprintf(s->avctx, \" %d\", s->exponent_high_bands[k][j]); tprintf(s->avctx, \"\\n\"); #endif #ifdef TRACE { int i, j; for (i = 0; i < s->nb_block_sizes; i++) { tprintf(s->avctx, \"%5d: n=%2d:\", s->frame_len >> i, s->exponent_sizes[i]); for (j = 0; j < s->exponent_sizes[i]; j++) tprintf(s->avctx, \" %d\", s->exponent_bands[i][j]); tprintf(s->avctx, \"\\n\"); #endif /* init MDCT windows : simple sinus window */ for (i = 0; i < s->nb_block_sizes; i++) { ff_init_ff_sine_windows(s->frame_len_bits - i); s->windows[i] = ff_sine_windows[s->frame_len_bits - i]; s->reset_block_lengths = 1; if (s->use_noise_coding) { /* init the noise generator */ if (s->use_exp_vlc) { s->noise_mult = 0.02; } else { s->noise_mult = 0.04; #ifdef TRACE for (i = 0; i < NOISE_TAB_SIZE; i++) s->noise_table[i] = 1.0 * s->noise_mult; #else { unsigned int seed; float norm; seed = 1; norm = (1.0 / (float)(1LL << 31)) * sqrt(3) * s->noise_mult; for (i = 0; i < NOISE_TAB_SIZE; i++) { seed = seed * 314159 + 1; s->noise_table[i] = (float)((int)seed) * norm; #endif /* choose the VLC tables for the coefficients */ coef_vlc_table = 2; if (avctx->sample_rate >= 32000) { if (bps1 < 0.72) { coef_vlc_table = 0; } else if (bps1 < 1.16) { coef_vlc_table = 1; s->coef_vlcs[0]= &coef_vlcs[coef_vlc_table * 2 ]; s->coef_vlcs[1]= &coef_vlcs[coef_vlc_table * 2 + 1]; init_coef_vlc(&s->coef_vlc[0], &s->run_table[0], &s->level_table[0], &s->int_table[0], s->coef_vlcs[0]); init_coef_vlc(&s->coef_vlc[1], &s->run_table[1], &s->level_table[1], &s->int_table[1], s->coef_vlcs[1]); return 0;"} {"target": 1, "idx": 19912, "func": "av_cold int ff_ivi_decode_close(AVCodecContext *avctx) { IVI45DecContext *ctx = avctx->priv_data; ivi_free_buffers(&ctx->planes[0]); if (ctx->mb_vlc.cust_tab.table) ff_free_vlc(&ctx->mb_vlc.cust_tab); av_frame_free(&ctx->p_frame); return 0; }"} {"target": 1, "idx": 19916, "func": "int qemu_check_nic_model_list(NICInfo *nd, const char * const *models, const char *default_model) { int i, exit_status = 0; if (!nd->model) nd->model = strdup(default_model); if (strcmp(nd->model, \"?\") != 0) { for (i = 0 ; models[i]; i++) if (strcmp(nd->model, models[i]) == 0) return i; fprintf(stderr, \"qemu: Unsupported NIC model: %s\\n\", nd->model); exit_status = 1; } fprintf(stderr, \"qemu: Supported NIC models: \"); for (i 
= 0 ; models[i]; i++) fprintf(stderr, \"%s%c\", models[i], models[i+1] ? ',' : '\\n'); exit(exit_status); }"} {"target": 0, "idx": 19927, "func": "static void virtio_s390_notify(void *opaque, uint16_t vector) { VirtIOS390Device *dev = (VirtIOS390Device*)opaque; uint64_t token = s390_virtio_device_vq_token(dev, vector); /* XXX kvm dependency! */ kvm_s390_virtio_irq(s390_cpu_addr2state(0), 0, token); }"} {"target": 0, "idx": 19962, "func": "static bool check_throttle_config(ThrottleConfig *cfg, Error **errp) { if (throttle_conflicting(cfg, errp)) { return false; } if (!throttle_is_valid(cfg, errp)) { return false; } if (throttle_max_is_missing_limit(cfg, errp)) { return false; } return true; }"} {"target": 1, "idx": 19965, "func": "static void decode_block(BinkAudioContext *s, short *out, int use_dct) { int ch, i, j, k; float q, quant[25]; int width, coeff; GetBitContext *gb = &s->gb; if (use_dct) skip_bits(gb, 2); for (ch = 0; ch < s->channels; ch++) { FFTSample *coeffs = s->coeffs_ptr[ch]; if (s->version_b) { coeffs[0] = av_int2flt(get_bits(gb, 32)) * s->root; coeffs[1] = av_int2flt(get_bits(gb, 32)) * s->root; } else { coeffs[0] = get_float(gb) * s->root; coeffs[1] = get_float(gb) * s->root; } for (i = 0; i < s->num_bands; i++) { /* constant is result of 0.066399999/log10(M_E) */ int value = get_bits(gb, 8); quant[i] = expf(FFMIN(value, 95) * 0.15289164787221953823f) * s->root; } k = 0; q = quant[0]; // parse coefficients i = 2; while (i < s->frame_len) { if (s->version_b) { j = i + 16; } else if (get_bits1(gb)) { j = i + rle_length_tab[get_bits(gb, 4)] * 8; } else { j = i + 8; } j = FFMIN(j, s->frame_len); width = get_bits(gb, 4); if (width == 0) { memset(coeffs + i, 0, (j - i) * sizeof(*coeffs)); i = j; while (s->bands[k] < i) q = quant[k++]; } else { while (i < j) { if (s->bands[k] == i) q = quant[k++]; coeff = get_bits(gb, width); if (coeff) { if (get_bits1(gb)) coeffs[i] = -q * coeff; else coeffs[i] = q * coeff; } else { coeffs[i] = 0.0f; } i++; } } } if (CONFIG_BINKAUDIO_DCT_DECODER && use_dct) { coeffs[0] /= 0.5; s->trans.dct.dct_calc(&s->trans.dct, coeffs); s->dsp.vector_fmul_scalar(coeffs, coeffs, s->frame_len / 2, s->frame_len); } else if (CONFIG_BINKAUDIO_RDFT_DECODER) s->trans.rdft.rdft_calc(&s->trans.rdft, coeffs); } s->fmt_conv.float_to_int16_interleave(out, (const float **)s->coeffs_ptr, s->frame_len, s->channels); if (!s->first) { int count = s->overlap_len * s->channels; int shift = av_log2(count); for (i = 0; i < count; i++) { out[i] = (s->previous[i] * (count - i) + out[i] * i) >> shift; } } memcpy(s->previous, out + s->block_size, s->overlap_len * s->channels * sizeof(*out)); s->first = 0; }"} {"target": 1, "idx": 19966, "func": "void qemu_main_loop_start(void) { }"} {"target": 1, "idx": 19976, "func": "static int mpeg_mux_write_packet(AVFormatContext *ctx, int stream_index, const uint8_t *buf, int size, int64_t pts) { MpegMuxContext *s = ctx->priv_data; AVStream *st = ctx->streams[stream_index]; StreamInfo *stream = st->priv_data; int64_t dts; int len; /* XXX: system clock should be computed precisely, especially for CBR case. 
The current mode gives at least something coherent */ if (stream_index == s->scr_stream_index) s->last_scr = pts; #if 0 printf(\"%d: pts=%0.3f scr=%0.3f\\n\", stream_index, pts / 90000.0, s->last_scr / 90000.0); #endif /* XXX: currently no way to pass dts, will change soon */ dts = AV_NOPTS_VALUE; /* we assume here that pts != AV_NOPTS_VALUE */ if (stream->start_pts == AV_NOPTS_VALUE) { stream->start_pts = pts; stream->start_dts = dts; } while (size > 0) { len = s->packet_data_max_size - stream->buffer_ptr; if (len > size) len = size; memcpy(stream->buffer + stream->buffer_ptr, buf, len); stream->buffer_ptr += len; buf += len; size -= len; while (stream->buffer_ptr >= s->packet_data_max_size) { /* output the packet */ flush_packet(ctx, stream_index, stream->start_pts, stream->start_dts, s->last_scr); /* Make sure only the FIRST pes packet for this frame has a timestamp */ stream->start_pts = AV_NOPTS_VALUE; stream->start_dts = AV_NOPTS_VALUE; } } return 0; }"} {"target": 1, "idx": 19987, "func": "static void cpu_devinit(const char *cpu_model, unsigned int id, uint64_t prom_addr, qemu_irq **cpu_irqs) { CPUState *cs; SPARCCPU *cpu; CPUSPARCState *env; cpu = SPARC_CPU(cpu_generic_init(TYPE_SPARC_CPU, cpu_model)); if (cpu == NULL) { fprintf(stderr, \"qemu: Unable to find Sparc CPU definition\\n\"); exit(1); } env = &cpu->env; cpu_sparc_set_id(env, id); if (id == 0) { qemu_register_reset(main_cpu_reset, cpu); } else { qemu_register_reset(secondary_cpu_reset, cpu); cs = CPU(cpu); cs->halted = 1; } *cpu_irqs = qemu_allocate_irqs(cpu_set_irq, cpu, MAX_PILS); env->prom_addr = prom_addr; }"} {"target": 0, "idx": 19998, "func": "av_cold void avcodec_register(AVCodec *codec) { AVCodec **p; avcodec_init(); p = &first_avcodec; while (*p != NULL) p = &(*p)->next; *p = codec; codec->next = NULL; if (codec->init_static_data) codec->init_static_data(codec); }"} {"target": 1, "idx": 19999, "func": "static int read_naa_id(const uint8_t *p, uint64_t *p_wwn) { int i; if ((p[1] & 0xF) == 3) { /* NAA designator type */ if (p[3] != 8) { return -EINVAL; } *p_wwn = ldq_be_p(p + 4); return 0; } if ((p[1] & 0xF) == 8) { /* SCSI name string designator type */ if (p[3] < 20 || memcmp(&p[4], \"naa.\", 4)) { return -EINVAL; } if (p[3] > 20 && p[24] != ',') { return -EINVAL; } *p_wwn = 0; for (i = 8; i < 24; i++) { char c = toupper(p[i]); c -= (c >= '0' && c <= '9' ? 
'0' : 'A' - 10); *p_wwn = (*p_wwn << 4) | c; } return 0; } return -EINVAL; }"} {"target": 1, "idx": 20000, "func": "static int decode_i_picture_header(VC9Context *v) { GetBitContext *gb = &v->s.gb; int pqindex, status = 0; /* Prolog common to all frametypes should be done in caller */ //BF = Buffer Fullness if (v->profile <= PROFILE_MAIN && get_bits(gb, 7)) { av_log(v->s.avctx, AV_LOG_DEBUG, \"I BufferFullness not 0\\n\"); } /* Quantizer stuff */ pqindex = get_bits(gb, 5); if (v->quantizer_mode == QUANT_FRAME_IMPLICIT) v->pq = pquant_table[0][pqindex]; else { v->pq = pquant_table[v->quantizer_mode-1][pqindex]; } if (pqindex < 9) v->halfpq = get_bits(gb, 1); if (v->quantizer_mode == QUANT_FRAME_EXPLICIT) v->pquantizer = get_bits(gb, 1); av_log(v->s.avctx, AV_LOG_DEBUG, \"I frame: QP=%i (+%i/2)\\n\", v->pq, v->halfpq); #if HAS_ADVANCED_PROFILE if (v->profile <= PROFILE_MAIN) #endif { if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3); if (v->multires) v->respic = get_bits(gb, 2); } #if HAS_ADVANCED_PROFILE else { v->s.ac_pred = get_bits(gb, 1); if (v->postprocflag) v->postproc = get_bits(gb, 1); /* 7.1.1.34 + 8.5.2 */ if (v->overlap && v->pq<9) { v->condover = get_bits(gb, 1); if (v->condover) { v->condover = 2+get_bits(gb, 1); if (v->condover == 3) { status = bitplane_decoding(&v->over_flags_plane, v); if (status < 0) return -1; #if TRACE av_log(v->s.avctx, AV_LOG_DEBUG, \"Overflags plane encoding: \" \"Imode: %i, Invert: %i\\n\", status>>1, status&1); #endif } } } } #endif /* Epilog (AC/DC syntax) should be done in caller */ return status; }"} {"target": 0, "idx": 20005, "func": "static void show_stream(AVFormatContext *fmt_ctx, int stream_idx) { AVStream *stream = fmt_ctx->streams[stream_idx]; AVCodecContext *dec_ctx; AVCodec *dec; char val_str[128]; AVDictionaryEntry *tag = NULL; AVRational display_aspect_ratio; printf(\"[STREAM]\\n\"); printf(\"index=%d\\n\", stream->index); if ((dec_ctx = stream->codec)) { if ((dec = dec_ctx->codec)) { printf(\"codec_name=%s\\n\", dec->name); printf(\"codec_long_name=%s\\n\", dec->long_name); } else { printf(\"codec_name=unknown\\n\"); } printf(\"codec_type=%s\\n\", media_type_string(dec_ctx->codec_type)); printf(\"codec_time_base=%d/%d\\n\", dec_ctx->time_base.num, dec_ctx->time_base.den); /* print AVI/FourCC tag */ av_get_codec_tag_string(val_str, sizeof(val_str), dec_ctx->codec_tag); printf(\"codec_tag_string=%s\\n\", val_str); printf(\"codec_tag=0x%04x\\n\", dec_ctx->codec_tag); switch (dec_ctx->codec_type) { case AVMEDIA_TYPE_VIDEO: printf(\"width=%d\\n\", dec_ctx->width); printf(\"height=%d\\n\", dec_ctx->height); printf(\"has_b_frames=%d\\n\", dec_ctx->has_b_frames); if (dec_ctx->sample_aspect_ratio.num) { printf(\"sample_aspect_ratio=%d:%d\\n\", dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den); av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, dec_ctx->width * dec_ctx->sample_aspect_ratio.num, dec_ctx->height * dec_ctx->sample_aspect_ratio.den, 1024*1024); printf(\"display_aspect_ratio=%d:%d\\n\", display_aspect_ratio.num, display_aspect_ratio.den); } printf(\"pix_fmt=%s\\n\", dec_ctx->pix_fmt != PIX_FMT_NONE ? 
av_pix_fmt_descriptors[dec_ctx->pix_fmt].name : \"unknown\"); printf(\"level=%d\\n\", dec_ctx->level); break; case AVMEDIA_TYPE_AUDIO: printf(\"sample_rate=%s\\n\", value_string(val_str, sizeof(val_str), dec_ctx->sample_rate, unit_hertz_str)); printf(\"channels=%d\\n\", dec_ctx->channels); printf(\"bits_per_sample=%d\\n\", av_get_bits_per_sample(dec_ctx->codec_id)); break; } } else { printf(\"codec_type=unknown\\n\"); } if (fmt_ctx->iformat->flags & AVFMT_SHOW_IDS) printf(\"id=0x%x\\n\", stream->id); printf(\"r_frame_rate=%d/%d\\n\", stream->r_frame_rate.num, stream->r_frame_rate.den); printf(\"avg_frame_rate=%d/%d\\n\", stream->avg_frame_rate.num, stream->avg_frame_rate.den); printf(\"time_base=%d/%d\\n\", stream->time_base.num, stream->time_base.den); printf(\"start_time=%s\\n\", time_value_string(val_str, sizeof(val_str), stream->start_time, &stream->time_base)); printf(\"duration=%s\\n\", time_value_string(val_str, sizeof(val_str), stream->duration, &stream->time_base)); if (stream->nb_frames) printf(\"nb_frames=%\"PRId64\"\\n\", stream->nb_frames); while ((tag = av_dict_get(stream->metadata, \"\", tag, AV_DICT_IGNORE_SUFFIX))) printf(\"TAG:%s=%s\\n\", tag->key, tag->value); printf(\"[/STREAM]\\n\"); }"} {"target": 0, "idx": 20019, "func": "static void encode_block(MpegEncContext *s, int16_t *block, int n) { int i, j, table_id; int component, dc, last_index, val, run; MJpegContext *m = s->mjpeg_ctx; /* DC coef */ component = (n <= 3 ? 0 : (n&1) + 1); table_id = (n <= 3 ? 0 : 1); dc = block[0]; /* overflow is impossible */ val = dc - s->last_dc[component]; ff_mjpeg_encode_coef(m, table_id, val, 0); s->last_dc[component] = dc; /* AC coefs */ run = 0; last_index = s->block_last_index[n]; table_id |= 2; for(i=1;i<=last_index;i++) { j = s->intra_scantable.permutated[i]; val = block[j]; if (val == 0) { run++; } else { while (run >= 16) { ff_mjpeg_encode_code(m, table_id, 0xf0); run -= 16; } ff_mjpeg_encode_coef(m, table_id, val, run); run = 0; } } /* output EOB only if not already 64 values */ if (last_index < 63 || run != 0) ff_mjpeg_encode_code(m, table_id, 0); }"} {"target": 0, "idx": 20022, "func": "static void RENAME(swScale)(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], int srcSliceY, int srcSliceH, uint8_t* dstParam[], int dstStride[]){ /* load a few things into local vars to make the code more readable? 
and faster */ const int srcW= c->srcW; const int dstW= c->dstW; const int dstH= c->dstH; const int chrDstW= c->chrDstW; const int lumXInc= c->lumXInc; const int chrXInc= c->chrXInc; const int dstFormat= c->dstFormat; const int flags= c->flags; const int canMMX2BeUsed= c->canMMX2BeUsed; int16_t *vLumFilterPos= c->vLumFilterPos; int16_t *vChrFilterPos= c->vChrFilterPos; int16_t *hLumFilterPos= c->hLumFilterPos; int16_t *hChrFilterPos= c->hChrFilterPos; int16_t *vLumFilter= c->vLumFilter; int16_t *vChrFilter= c->vChrFilter; int16_t *hLumFilter= c->hLumFilter; int16_t *hChrFilter= c->hChrFilter; int16_t *lumMmxFilter= c->lumMmxFilter; int16_t *chrMmxFilter= c->chrMmxFilter; const int vLumFilterSize= c->vLumFilterSize; const int vChrFilterSize= c->vChrFilterSize; const int hLumFilterSize= c->hLumFilterSize; const int hChrFilterSize= c->hChrFilterSize; int16_t **lumPixBuf= c->lumPixBuf; int16_t **chrPixBuf= c->chrPixBuf; const int vLumBufSize= c->vLumBufSize; const int vChrBufSize= c->vChrBufSize; uint8_t *funnyYCode= c->funnyYCode; uint8_t *funnyUVCode= c->funnyUVCode; uint8_t *formatConvBuffer= c->formatConvBuffer; /* vars which will change and which we need to store back in the context */ int dstY= c->dstY; int lumBufIndex= c->lumBufIndex; int chrBufIndex= c->chrBufIndex; int lastInLumBuf= c->lastInLumBuf; int lastInChrBuf= c->lastInChrBuf; int srcStride[3]; uint8_t *src[3]; uint8_t *dst[3]; if((c->srcFormat == IMGFMT_IYUV) || (c->srcFormat == IMGFMT_I420)){ src[0]= srcParam[0]; src[1]= srcParam[2]; src[2]= srcParam[1]; srcStride[0]= srcStrideParam[0]; srcStride[1]= srcStrideParam[2]; srcStride[2]= srcStrideParam[1]; } else if(c->srcFormat==IMGFMT_YV12){ src[0]= srcParam[0]; src[1]= srcParam[1]; src[2]= srcParam[2]; srcStride[0]= srcStrideParam[0]; srcStride[1]= srcStrideParam[1]; srcStride[2]= srcStrideParam[2]; } else if(isPacked(c->srcFormat)){ src[0]= src[1]= src[2]= srcParam[0]; srcStride[0]= srcStrideParam[0]; srcStride[1]= srcStride[2]= srcStrideParam[0]<<1; } else if(c->srcFormat==IMGFMT_Y8){ src[0]= srcParam[0]; src[1]= src[2]= NULL; srcStride[0]= srcStrideParam[0]; srcStride[1]= srcStride[2]= 0; } if((c->dstFormat == IMGFMT_IYUV) || (c->dstFormat == IMGFMT_I420)){ dst[0]= dstParam[0]; dst[1]= dstParam[2]; dst[2]= dstParam[1]; }else{ dst[0]= dstParam[0]; dst[1]= dstParam[1]; dst[2]= dstParam[2]; } if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0) { static int firstTime=1; //FIXME move this into the context perhaps if(flags & SWS_PRINT_INFO && firstTime) { fprintf(stderr, \"SwScaler: Warning: dstStride is not aligned!\\n\" \"SwScaler: ->cannot do aligned memory accesses anymore\\n\"); firstTime=0; } } /* Note the user might start scaling the picture in the middle so this will not get executed; this is not really intended but works currently, so ppl might do it */ if(srcSliceY ==0){ lumBufIndex=0; chrBufIndex=0; dstY=0; lastInLumBuf= -1; lastInChrBuf= -1; } for(;dstY < dstH; dstY++){ unsigned char *dest =dst[0]+dstStride[0]*dstY; unsigned char *uDest=dst[1]+dstStride[1]*(dstY>>1); unsigned char *vDest=dst[2]+dstStride[2]*(dstY>>1); const int chrDstY= isHalfChrV(dstFormat) ?
(dstY>>1) : dstY; const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input //handle holes (FAST_BILINEAR & weird filters) if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1; if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1; //printf(\"%d %d %d\\n\", firstChrSrcY, lastInChrBuf, vChrBufSize); ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1) ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1) // Do we have enough lines in this slice to output the dstY line if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < ((srcSliceY + srcSliceH)>>1)) { //Do horizontal scaling while(lastInLumBuf < lastLumSrcY) { uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; lumBufIndex++; // printf(\"%d %d %d %d\\n\", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY); ASSERT(lumBufIndex < 2*vLumBufSize) ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) // printf(\"%d %d\\n\", lumBufIndex, vLumBufSize); RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, funnyYCode, c->srcFormat, formatConvBuffer); lastInLumBuf++; } while(lastInChrBuf < lastChrSrcY) { uint8_t *src1= src[1]+(lastInChrBuf + 1 - (srcSliceY>>1))*srcStride[1]; uint8_t *src2= src[2]+(lastInChrBuf + 1 - (srcSliceY>>1))*srcStride[2]; chrBufIndex++; ASSERT(chrBufIndex < 2*vChrBufSize) ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) < (srcSliceH>>1)) ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) >= 0) //FIXME replace parameters through context struct (some at least) RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, (srcW+1)>>1, chrXInc, flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize, funnyUVCode, c->srcFormat, formatConvBuffer); lastInChrBuf++; } //wrap buf index around to stay inside the ring buffer if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; } else // not enough lines left in this slice -> load the rest in the buffer { /* printf(\"%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\\n\", firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY, lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize, vChrBufSize, vLumBufSize); */ //Do horizontal scaling while(lastInLumBuf+1 < srcSliceY + srcSliceH) { uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; lumBufIndex++; ASSERT(lumBufIndex < 2*vLumBufSize) ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, funnyYCode, c->srcFormat, formatConvBuffer); lastInLumBuf++; } while(lastInChrBuf+1 < ((srcSliceY + srcSliceH)>>1)) { uint8_t *src1= src[1]+(lastInChrBuf + 1 - (srcSliceY>>1))*srcStride[1]; uint8_t *src2= src[2]+(lastInChrBuf + 1 - (srcSliceY>>1))*srcStride[2]; chrBufIndex++; ASSERT(chrBufIndex < 2*vChrBufSize) ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) < (srcSliceH>>1)) ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) >= 0) RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, (srcW+1)>>1, chrXInc, flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize, 
funnyUVCode, c->srcFormat, formatConvBuffer); lastInChrBuf++; } //wrap buf index around to stay inside the ring buffer if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; break; //we cant output a dstY line so lets try with the next slice } #ifdef HAVE_MMX b5Dither= dither8[dstY&1]; g6Dither= dither4[dstY&1]; g5Dither= dither8[dstY&1]; r5Dither= dither8[(dstY+1)&1]; #endif if(dstY < dstH-2) { if(isPlanarYUV(dstFormat)) //YV12 like { if(dstY&1) uDest=vDest= NULL; //FIXME split functions in lumi / chromi if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12 { int16_t *lumBuf = lumPixBuf[0]; int16_t *chrBuf= chrPixBuf[0]; RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW); } else //General YV12 { int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; RENAME(yuv2yuvX)( vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+(dstY>>1)*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, uDest, vDest, dstW, lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+(dstY>>1)*vChrFilterSize*4); } } else { int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB { int chrAlpha= vChrFilter[2*dstY+1]; RENAME(yuv2rgb1)(*lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1), dest, dstW, chrAlpha, dstFormat, flags); } else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB { int lumAlpha= vLumFilter[2*dstY+1]; int chrAlpha= vChrFilter[2*dstY+1]; RENAME(yuv2rgb2)(*lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1), dest, dstW, lumAlpha, chrAlpha, dstFormat, flags); } else //General RGB { RENAME(yuv2rgbX)( vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, dstW, dstFormat, lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+dstY*vChrFilterSize*4); } } } else // hmm looks like we cant use MMX here without overwriting this arrays tail { int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; if(isPlanarYUV(dstFormat)) //YV12 { if(dstY&1) uDest=vDest= NULL; //FIXME split functions in lumi / chromi yuv2yuvXinC( vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+(dstY>>1)*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, uDest, vDest, dstW); } else { ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); yuv2rgbXinC( vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, dstW, dstFormat); } } } #ifdef HAVE_MMX __asm __volatile(SFENCE:::\"memory\"); __asm __volatile(EMMS:::\"memory\"); #endif /* store changed local vars back in the context */ c->dstY= dstY; c->lumBufIndex= lumBufIndex; c->chrBufIndex= chrBufIndex; c->lastInLumBuf= lastInLumBuf; c->lastInChrBuf= lastInChrBuf; }"} {"target": 1, "idx": 20026, "func": "static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req) { union srp_iu *srp = &req->iu.srp; SCSIDevice *sdev; 
int n, id, lun; vscsi_decode_id_lun(be64_to_cpu(srp->cmd.lun), &id, &lun); /* Qemu vs. linux issue with LUNs to be sorted out ... */ sdev = (id < 8 && lun < 16) ? s->bus.devs[id] : NULL; if (!sdev) { dprintf(\"VSCSI: Command for id %d with no drive\\n\", id); if (srp->cmd.cdb[0] == INQUIRY) { vscsi_inquiry_no_target(s, req); } else { vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0x00); vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0); } return 1; } req->sdev = sdev; req->lun = lun; n = sdev->info->send_command(sdev, req->qtag, srp->cmd.cdb, lun); dprintf(\"VSCSI: Queued command tag 0x%x CMD 0x%x ID %d LUN %d ret: %d\\n\", req->qtag, srp->cmd.cdb[0], id, lun, n); if (n) { /* Transfer direction must be set before preprocessing the * descriptors */ req->writing = (n < 1); /* Preprocess RDMA descriptors */ vscsi_preprocess_desc(req); } /* Get transfer direction and initiate transfer */ if (n > 0) { req->data_len = n; sdev->info->read_data(sdev, req->qtag); } else if (n < 0) { req->data_len = -n; sdev->info->write_data(sdev, req->qtag); } /* Don't touch req here, it may have been recycled already */ return 0; }"} {"target": 1, "idx": 20032, "func": "static int do_sendv_recvv(int sockfd, struct iovec *iov, int len, int offset, int do_sendv) { int ret, diff, iovlen; struct iovec *last_iov; /* last_iov is inclusive, so count from one. */ iovlen = 1; last_iov = iov; len += offset; while (last_iov->iov_len < len) { len -= last_iov->iov_len; last_iov++; iovlen++; } diff = last_iov->iov_len - len; last_iov->iov_len -= diff; while (iov->iov_len <= offset) { offset -= iov->iov_len; iov++; iovlen--; } iov->iov_base = (char *) iov->iov_base + offset; iov->iov_len -= offset; { #if defined CONFIG_IOVEC && defined CONFIG_POSIX struct msghdr msg; memset(&msg, 0, sizeof(msg)); msg.msg_iov = iov; msg.msg_iovlen = iovlen; do { if (do_sendv) { ret = sendmsg(sockfd, &msg, 0); } else { ret = recvmsg(sockfd, &msg, 0); } } while (ret == -1 && errno == EINTR); #else struct iovec *p = iov; ret = 0; while (iovlen > 0) { int rc; if (do_sendv) { rc = send(sockfd, p->iov_base, p->iov_len, 0); } else { rc = qemu_recv(sockfd, p->iov_base, p->iov_len, 0); } if (rc == -1) { if (errno == EINTR) { continue; } if (ret == 0) { ret = -1; } break; } if (rc == 0) { break; } ret += rc; iovlen--, p++; } #endif } /* Undo the changes above */ iov->iov_base = (char *) iov->iov_base - offset; iov->iov_len += offset; last_iov->iov_len += diff; return ret; }"} {"target": 1, "idx": 20035, "func": "static int vp9_decode_frame(AVCodecContext *ctx, void *frame, int *got_frame, AVPacket *pkt) { const uint8_t *data = pkt->data; int size = pkt->size; VP9Context *s = ctx->priv_data; int res, tile_row, tile_col, i, ref, row, col; int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map && (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map); ptrdiff_t yoff, uvoff, ls_y, ls_uv; AVFrame *f; int bytesperpixel; if ((res = decode_frame_header(ctx, data, size, &ref)) < 0) { return res; } else if (res == 0) { if (!s->s.refs[ref].f->buf[0]) { av_log(ctx, AV_LOG_ERROR, \"Requested reference %d not available\\n\", ref); return AVERROR_INVALIDDATA; } if ((res = av_frame_ref(frame, s->s.refs[ref].f)) < 0) return res; ((AVFrame *)frame)->pts = pkt->pts; #if FF_API_PKT_PTS FF_DISABLE_DEPRECATION_WARNINGS ((AVFrame *)frame)->pkt_pts = pkt->pts; FF_ENABLE_DEPRECATION_WARNINGS #endif ((AVFrame *)frame)->pkt_dts = pkt->dts; for (i = 0; i < 8; i++) { if (s->next_refs[i].f->buf[0]) ff_thread_release_buffer(ctx, &s->next_refs[i]); if 
(s->s.refs[i].f->buf[0] && (res = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0) return res; } *got_frame = 1; return pkt->size; } data += res; size -= res; if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) { if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0]) vp9_unref_frame(ctx, &s->s.frames[REF_FRAME_SEGMAP]); if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] && (res = vp9_ref_frame(ctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0) return res; } if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0]) vp9_unref_frame(ctx, &s->s.frames[REF_FRAME_MVPAIR]); if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] && (res = vp9_ref_frame(ctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0) return res; if (s->s.frames[CUR_FRAME].tf.f->buf[0]) vp9_unref_frame(ctx, &s->s.frames[CUR_FRAME]); if ((res = vp9_alloc_frame(ctx, &s->s.frames[CUR_FRAME])) < 0) return res; f = s->s.frames[CUR_FRAME].tf.f; f->key_frame = s->s.h.keyframe; f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; ls_y = f->linesize[0]; ls_uv =f->linesize[1]; if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] && (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width || s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) { vp9_unref_frame(ctx, &s->s.frames[REF_FRAME_SEGMAP]); } // ref frame setup for (i = 0; i < 8; i++) { if (s->next_refs[i].f->buf[0]) ff_thread_release_buffer(ctx, &s->next_refs[i]); if (s->s.h.refreshrefmask & (1 << i)) { res = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf); } else if (s->s.refs[i].f->buf[0]) { res = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]); } if (res < 0) return res; } if (ctx->hwaccel) { res = ctx->hwaccel->start_frame(ctx, NULL, 0); if (res < 0) return res; res = ctx->hwaccel->decode_slice(ctx, pkt->data, pkt->size); if (res < 0) return res; res = ctx->hwaccel->end_frame(ctx); if (res < 0) return res; goto finish; } // main tile decode loop bytesperpixel = s->bytesperpixel; memset(s->above_partition_ctx, 0, s->cols); memset(s->above_skip_ctx, 0, s->cols); if (s->s.h.keyframe || s->s.h.intraonly) { memset(s->above_mode_ctx, DC_PRED, s->cols * 2); } else { memset(s->above_mode_ctx, NEARESTMV, s->cols); } memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16); memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h); memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h); memset(s->above_segpred_ctx, 0, s->cols); s->pass = s->s.frames[CUR_FRAME].uses_2pass = ctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode; if ((res = update_block_buffers(ctx)) < 0) { av_log(ctx, AV_LOG_ERROR, \"Failed to allocate block buffers\\n\"); return res; } if (s->s.h.refreshctx && s->s.h.parallelmode) { int j, k, l, m; for (i = 0; i < 4; i++) { for (j = 0; j < 2; j++) for (k = 0; k < 2; k++) for (l = 0; l < 6; l++) for (m = 0; m < 6; m++) memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m], s->prob.coef[i][j][k][l][m], 3); if (s->s.h.txfmmode == i) break; } s->prob_ctx[s->s.h.framectxid].p = s->prob.p; ff_thread_finish_setup(ctx); } else if (!s->s.h.refreshctx) { ff_thread_finish_setup(ctx); } do { yoff = uvoff = 0; s->b = s->b_base; s->block = s->block_base; s->uvblock[0] = s->uvblock_base[0]; s->uvblock[1] = s->uvblock_base[1]; s->eob = s->eob_base; s->uveob[0] = s->uveob_base[0]; s->uveob[1] = 
s->uveob_base[1]; for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) { set_tile_offset(&s->tile_row_start, &s->tile_row_end, tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows); if (s->pass != 2) { for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) { int64_t tile_size; if (tile_col == s->s.h.tiling.tile_cols - 1 && tile_row == s->s.h.tiling.tile_rows - 1) { tile_size = size; } else { tile_size = AV_RB32(data); data += 4; size -= 4; } if (tile_size > size) { ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0); return AVERROR_INVALIDDATA; } ff_vp56_init_range_decoder(&s->c_b[tile_col], data, tile_size); if (vp56_rac_get_prob_branchy(&s->c_b[tile_col], 128)) { // marker bit ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0); return AVERROR_INVALIDDATA; } data += tile_size; size -= tile_size; } } for (row = s->tile_row_start; row < s->tile_row_end; row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) { struct VP9Filter *lflvl_ptr = s->lflvl; ptrdiff_t yoff2 = yoff, uvoff2 = uvoff; for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) { set_tile_offset(&s->tile_col_start, &s->tile_col_end, tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols); if (s->pass != 2) { memset(s->left_partition_ctx, 0, 8); memset(s->left_skip_ctx, 0, 8); if (s->s.h.keyframe || s->s.h.intraonly) { memset(s->left_mode_ctx, DC_PRED, 16); } else { memset(s->left_mode_ctx, NEARESTMV, 8); } memset(s->left_y_nnz_ctx, 0, 16); memset(s->left_uv_nnz_ctx, 0, 32); memset(s->left_segpred_ctx, 0, 8); memcpy(&s->c, &s->c_b[tile_col], sizeof(s->c)); } for (col = s->tile_col_start; col < s->tile_col_end; col += 8, yoff2 += 64 * bytesperpixel, uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) { // FIXME integrate with lf code (i.e. zero after each // use, similar to invtxfm coefficients, or similar) if (s->pass != 1) { memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask)); } if (s->pass == 2) { decode_sb_mem(ctx, row, col, lflvl_ptr, yoff2, uvoff2, BL_64X64); } else { decode_sb(ctx, row, col, lflvl_ptr, yoff2, uvoff2, BL_64X64); } } if (s->pass != 2) { memcpy(&s->c_b[tile_col], &s->c, sizeof(s->c)); } } if (s->pass == 1) { continue; } // backup pre-loopfilter reconstruction data for intra // prediction of next row of sb64s if (row + 8 < s->rows) { memcpy(s->intra_pred_data[0], f->data[0] + yoff + 63 * ls_y, 8 * s->cols * bytesperpixel); memcpy(s->intra_pred_data[1], f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv, 8 * s->cols * bytesperpixel >> s->ss_h); memcpy(s->intra_pred_data[2], f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv, 8 * s->cols * bytesperpixel >> s->ss_h); } // loopfilter one row if (s->s.h.filter.level) { yoff2 = yoff; uvoff2 = uvoff; lflvl_ptr = s->lflvl; for (col = 0; col < s->cols; col += 8, yoff2 += 64 * bytesperpixel, uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) { loopfilter_sb(ctx, lflvl_ptr, row, col, yoff2, uvoff2); } } // FIXME maybe we can make this more finegrained by running the // loopfilter per-block instead of after each sbrow // In fact that would also make intra pred left preparation easier? 
ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0); } } if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) { adapt_probs(s); ff_thread_finish_setup(ctx); } } while (s->pass++ == 1); ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0); finish: // ref frame setup for (i = 0; i < 8; i++) { if (s->s.refs[i].f->buf[0]) ff_thread_release_buffer(ctx, &s->s.refs[i]); if (s->next_refs[i].f->buf[0] && (res = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0) return res; } if (!s->s.h.invisible) { if ((res = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0) return res; *got_frame = 1; } return pkt->size; }"} {"target": 0, "idx": 20054, "func": "static uint64_t gt64120_readl (void *opaque, target_phys_addr_t addr, unsigned size) { GT64120State *s = opaque; PCIHostState *phb = PCI_HOST_BRIDGE(s); uint32_t val; uint32_t saddr; saddr = (addr & 0xfff) >> 2; switch (saddr) { /* CPU Configuration */ case GT_MULTI: /* Only one GT64xxx is present on the CPU bus, return the initial value */ val = s->regs[saddr]; break; /* CPU Error Report */ case GT_CPUERR_ADDRLO: case GT_CPUERR_ADDRHI: case GT_CPUERR_DATALO: case GT_CPUERR_DATAHI: case GT_CPUERR_PARITY: /* Emulated memory has no error, always return the initial values */ val = s->regs[saddr]; break; /* CPU Sync Barrier */ case GT_PCI0SYNC: case GT_PCI1SYNC: /* Reading those register should empty all FIFO on the PCI bus, which are not emulated. The return value should be a random value that should be ignored. */ val = 0xc000ffee; break; /* ECC */ case GT_ECC_ERRDATALO: case GT_ECC_ERRDATAHI: case GT_ECC_MEM: case GT_ECC_CALC: case GT_ECC_ERRADDR: /* Emulated memory has no error, always return the initial values */ val = s->regs[saddr]; break; case GT_CPU: case GT_SCS10LD: case GT_SCS10HD: case GT_SCS32LD: case GT_SCS32HD: case GT_CS20LD: case GT_CS20HD: case GT_CS3BOOTLD: case GT_CS3BOOTHD: case GT_SCS10AR: case GT_SCS32AR: case GT_CS20R: case GT_CS3BOOTR: case GT_PCI0IOLD: case GT_PCI0M0LD: case GT_PCI0M1LD: case GT_PCI1IOLD: case GT_PCI1M0LD: case GT_PCI1M1LD: case GT_PCI0IOHD: case GT_PCI0M0HD: case GT_PCI0M1HD: case GT_PCI1IOHD: case GT_PCI1M0HD: case GT_PCI1M1HD: case GT_PCI0IOREMAP: case GT_PCI0M0REMAP: case GT_PCI0M1REMAP: case GT_PCI1IOREMAP: case GT_PCI1M0REMAP: case GT_PCI1M1REMAP: case GT_ISD: val = s->regs[saddr]; break; case GT_PCI0_IACK: /* Read the IRQ number */ val = pic_read_irq(isa_pic); break; /* SDRAM and Device Address Decode */ case GT_SCS0LD: case GT_SCS0HD: case GT_SCS1LD: case GT_SCS1HD: case GT_SCS2LD: case GT_SCS2HD: case GT_SCS3LD: case GT_SCS3HD: case GT_CS0LD: case GT_CS0HD: case GT_CS1LD: case GT_CS1HD: case GT_CS2LD: case GT_CS2HD: case GT_CS3LD: case GT_CS3HD: case GT_BOOTLD: case GT_BOOTHD: case GT_ADERR: val = s->regs[saddr]; break; /* SDRAM Configuration */ case GT_SDRAM_CFG: case GT_SDRAM_OPMODE: case GT_SDRAM_BM: case GT_SDRAM_ADDRDECODE: val = s->regs[saddr]; break; /* SDRAM Parameters */ case GT_SDRAM_B0: case GT_SDRAM_B1: case GT_SDRAM_B2: case GT_SDRAM_B3: /* We don't simulate electrical parameters of the SDRAM. Just return the last written value. 
*/ val = s->regs[saddr]; break; /* Device Parameters */ case GT_DEV_B0: case GT_DEV_B1: case GT_DEV_B2: case GT_DEV_B3: case GT_DEV_BOOT: val = s->regs[saddr]; break; /* DMA Record */ case GT_DMA0_CNT: case GT_DMA1_CNT: case GT_DMA2_CNT: case GT_DMA3_CNT: case GT_DMA0_SA: case GT_DMA1_SA: case GT_DMA2_SA: case GT_DMA3_SA: case GT_DMA0_DA: case GT_DMA1_DA: case GT_DMA2_DA: case GT_DMA3_DA: case GT_DMA0_NEXT: case GT_DMA1_NEXT: case GT_DMA2_NEXT: case GT_DMA3_NEXT: case GT_DMA0_CUR: case GT_DMA1_CUR: case GT_DMA2_CUR: case GT_DMA3_CUR: val = s->regs[saddr]; break; /* DMA Channel Control */ case GT_DMA0_CTRL: case GT_DMA1_CTRL: case GT_DMA2_CTRL: case GT_DMA3_CTRL: val = s->regs[saddr]; break; /* DMA Arbiter */ case GT_DMA_ARB: val = s->regs[saddr]; break; /* Timer/Counter */ case GT_TC0: case GT_TC1: case GT_TC2: case GT_TC3: case GT_TC_CONTROL: val = s->regs[saddr]; break; /* PCI Internal */ case GT_PCI0_CFGADDR: val = phb->config_reg; break; case GT_PCI0_CFGDATA: if (!(phb->config_reg & (1 << 31))) { val = 0xffffffff; } else { val = pci_data_read(phb->bus, phb->config_reg, 4); } if (!(s->regs[GT_PCI0_CMD] & 1) && (phb->config_reg & 0x00fff800)) { val = bswap32(val); } break; case GT_PCI0_CMD: case GT_PCI0_TOR: case GT_PCI0_BS_SCS10: case GT_PCI0_BS_SCS32: case GT_PCI0_BS_CS20: case GT_PCI0_BS_CS3BT: case GT_PCI1_IACK: case GT_PCI0_BARE: case GT_PCI0_PREFMBR: case GT_PCI0_SCS10_BAR: case GT_PCI0_SCS32_BAR: case GT_PCI0_CS20_BAR: case GT_PCI0_CS3BT_BAR: case GT_PCI0_SSCS10_BAR: case GT_PCI0_SSCS32_BAR: case GT_PCI0_SCS3BT_BAR: case GT_PCI1_CMD: case GT_PCI1_TOR: case GT_PCI1_BS_SCS10: case GT_PCI1_BS_SCS32: case GT_PCI1_BS_CS20: case GT_PCI1_BS_CS3BT: case GT_PCI1_BARE: case GT_PCI1_PREFMBR: case GT_PCI1_SCS10_BAR: case GT_PCI1_SCS32_BAR: case GT_PCI1_CS20_BAR: case GT_PCI1_CS3BT_BAR: case GT_PCI1_SSCS10_BAR: case GT_PCI1_SSCS32_BAR: case GT_PCI1_SCS3BT_BAR: case GT_PCI1_CFGADDR: case GT_PCI1_CFGDATA: val = s->regs[saddr]; break; /* Interrupts */ case GT_INTRCAUSE: val = s->regs[saddr]; DPRINTF(\"INTRCAUSE %x\\n\", val); break; case GT_INTRMASK: val = s->regs[saddr]; DPRINTF(\"INTRMASK %x\\n\", val); break; case GT_PCI0_ICMASK: val = s->regs[saddr]; DPRINTF(\"ICMASK %x\\n\", val); break; case GT_PCI0_SERR0MASK: val = s->regs[saddr]; DPRINTF(\"SERR0MASK %x\\n\", val); break; /* Reserved when only PCI_0 is configured. 
*/ case GT_HINTRCAUSE: case GT_CPU_INTSEL: case GT_PCI0_INTSEL: case GT_HINTRMASK: case GT_PCI0_HICMASK: case GT_PCI1_SERR1MASK: val = s->regs[saddr]; break; default: val = s->regs[saddr]; DPRINTF (\"Bad register offset 0x%x\\n\", (int)addr); break; } if (!(s->regs[GT_CPU] & 0x00001000)) val = bswap32(val); return val; }"} {"target": 0, "idx": 20070, "func": "int qemu_opt_set(QemuOpts *opts, const char *name, const char *value) { QemuOpt *opt; opt = qemu_opt_find(opts, name); if (!opt) { QemuOptDesc *desc = opts->list->desc; int i; for (i = 0; desc[i].name != NULL; i++) { if (strcmp(desc[i].name, name) == 0) { break; } } if (desc[i].name == NULL) { if (i == 0) { /* empty list -> allow any */; } else { fprintf(stderr, \"option \\\"%s\\\" is not valid for %s\\n\", name, opts->list->name); return -1; } } opt = qemu_mallocz(sizeof(*opt)); opt->name = qemu_strdup(name); opt->opts = opts; TAILQ_INSERT_TAIL(&opts->head, opt, next); if (desc[i].name != NULL) { opt->desc = desc+i; } } qemu_free((/* !const */ char*)opt->str); opt->str = NULL; if (value) { opt->str = qemu_strdup(value); } if (qemu_opt_parse(opt) < 0) { fprintf(stderr, \"Failed to parse \\\"%s\\\" for \\\"%s.%s\\\"\\n\", opt->str, opts->list->name, opt->name); qemu_opt_del(opt); return -1; } return 0; }"} {"target": 0, "idx": 20086, "func": "static int scsi_initfn(SCSIDevice *dev) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); Error *err = NULL; if (!s->qdev.conf.bs) { error_report(\"drive property not set\"); return -1; } if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && !bdrv_is_inserted(s->qdev.conf.bs)) { error_report(\"Device needs media, but drive is empty\"); return -1; } blkconf_serial(&s->qdev.conf, &s->serial); if (dev->type == TYPE_DISK) { blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, &err); if (err) { error_report(\"%s\", error_get_pretty(err)); error_free(err); return -1; } } if (s->qdev.conf.discard_granularity == -1) { s->qdev.conf.discard_granularity = MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); } if (!s->version) { s->version = g_strdup(qemu_get_version()); } if (!s->vendor) { s->vendor = g_strdup(\"QEMU\"); } if (bdrv_is_sg(s->qdev.conf.bs)) { error_report(\"unwanted /dev/sg*\"); return -1; } if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { bdrv_set_dev_ops(s->qdev.conf.bs, &scsi_disk_removable_block_ops, s); } else { bdrv_set_dev_ops(s->qdev.conf.bs, &scsi_disk_block_ops, s); } bdrv_set_guest_block_size(s->qdev.conf.bs, s->qdev.blocksize); bdrv_iostatus_enable(s->qdev.conf.bs); add_boot_device_path(s->qdev.conf.bootindex, &dev->qdev, NULL); return 0; }"} {"target": 0, "idx": 20095, "func": "static void ehci_update_frindex(EHCIState *ehci, int frames) { int i; if (!ehci_enabled(ehci)) { return; } for (i = 0; i < frames; i++) { ehci->frindex += 8; if (ehci->frindex == 0x00002000) { ehci_raise_irq(ehci, USBSTS_FLR); } if (ehci->frindex == 0x00004000) { ehci_raise_irq(ehci, USBSTS_FLR); ehci->frindex = 0; if (ehci->usbsts_frindex >= 0x00004000) { ehci->usbsts_frindex -= 0x00004000; } else { ehci->usbsts_frindex = 0; } } } }"} {"target": 0, "idx": 20102, "func": "static const MXFCodecUL *mxf_get_codec_ul(const MXFCodecUL *uls, UID *uid) { while (uls->id != CODEC_ID_NONE) { if(mxf_match_uid(uls->uid, *uid, 16)) break; uls++; } return uls; }"} {"target": 0, "idx": 20105, "func": "static void pty_chr_state(CharDriverState *chr, int connected) { PtyCharDriver *s = chr->opaque; if (!connected) { if (s->fd_tag) { 
g_source_remove(s->fd_tag); s->fd_tag = 0; } s->connected = 0; s->polling = 0; /* (re-)connect poll interval for idle guests: once per second. * We check more frequently in case the guests sends data to * the virtual device linked to our pty. */ pty_chr_rearm_timer(chr, 1000); } else { if (!s->connected) qemu_chr_be_generic_open(chr); s->connected = 1; } }"} {"target": 1, "idx": 20109, "func": "int h263_decode_picture_header(MpegEncContext *s) { int format, width, height; /* picture header */ if (get_bits(&s->gb, 22) != 0x20) return -1; skip_bits(&s->gb, 8); /* picture timestamp */ if (get_bits1(&s->gb) != 1) return -1; /* marker */ if (get_bits1(&s->gb) != 0) return -1; /* h263 id */ skip_bits1(&s->gb); /* split screen off */ skip_bits1(&s->gb); /* camera off */ skip_bits1(&s->gb); /* freeze picture release off */ format = get_bits(&s->gb, 3); if (format != 7) { s->h263_plus = 0; /* H.263v1 */ width = h263_format[format][0]; height = h263_format[format][1]; if (!width) return -1; s->pict_type = I_TYPE + get_bits1(&s->gb); s->unrestricted_mv = get_bits1(&s->gb); s->h263_long_vectors = s->unrestricted_mv; if (get_bits1(&s->gb) != 0) return -1; /* SAC: off */ if (get_bits1(&s->gb) != 0) return -1; /* advanced prediction mode: off */ if (get_bits1(&s->gb) != 0) return -1; /* not PB frame */ s->qscale = get_bits(&s->gb, 5); skip_bits1(&s->gb); /* Continuous Presence Multipoint mode: off */ } else { s->h263_plus = 1; /* H.263v2 */ /* OPPTYPE */ if (get_bits(&s->gb, 3) != 1) /* Update Full Extended PTYPE */ return -1; format = get_bits(&s->gb, 3); skip_bits(&s->gb,1); /* Custom PCF */ umvplus_dec = get_bits(&s->gb, 1); /* Unrestricted Motion Vector */ skip_bits(&s->gb, 10); skip_bits(&s->gb, 3); /* Reserved */ /* MPPTYPE */ s->pict_type = get_bits(&s->gb, 3) + 1; if (s->pict_type != I_TYPE && s->pict_type != P_TYPE) return -1; skip_bits(&s->gb, 7); /* Get the picture dimensions */ if (format == 6) { /* Custom Picture Format (CPFMT) */ skip_bits(&s->gb, 4); /* aspect ratio */ width = (get_bits(&s->gb, 9) + 1) * 4; skip_bits1(&s->gb); height = get_bits(&s->gb, 9) * 4; #ifdef DEBUG fprintf(stderr,\"\\nH.263+ Custom picture: %dx%d\\n\",width,height); #endif } else { width = h263_format[format][0]; height = h263_format[format][1]; } if ((width == 0) || (height == 0)) return -1; if (umvplus_dec) { skip_bits1(&s->gb); /* Unlimited Unrestricted Motion Vectors Indicator (UUI) */ } s->qscale = get_bits(&s->gb, 5); } /* PEI */ while (get_bits1(&s->gb) != 0) { skip_bits(&s->gb, 8); } s->f_code = 1; s->width = width; s->height = height; return 0; }"} {"target": 1, "idx": 20110, "func": "static void stellaris_init(const char *kernel_filename, const char *cpu_model, DisplayState *ds, stellaris_board_info *board) { static const int uart_irq[] = {5, 6, 33, 34}; static const int timer_irq[] = {19, 21, 23, 35}; static const uint32_t gpio_addr[7] = { 0x40004000, 0x40005000, 0x40006000, 0x40007000, 0x40024000, 0x40025000, 0x40026000}; static const int gpio_irq[7] = {0, 1, 2, 3, 4, 30, 31}; qemu_irq *pic; qemu_irq *gpio_in[5]; qemu_irq *gpio_out[5]; qemu_irq adc; int sram_size; int flash_size; i2c_bus *i2c; int i; flash_size = ((board->dc0 & 0xffff) + 1) << 1; sram_size = (board->dc0 >> 18) + 1; pic = armv7m_init(flash_size, sram_size, kernel_filename, cpu_model); if (board->dc1 & (1 << 16)) { adc = stellaris_adc_init(0x40038000, pic[14]); } else { adc = NULL; } for (i = 0; i < 4; i++) { if (board->dc2 & (0x10000 << i)) { stellaris_gptm_init(0x40030000 + i * 0x1000, pic[timer_irq[i]], adc); } } 
stellaris_sys_init(0x400fe000, pic[28], board, nd_table[0].macaddr); for (i = 0; i < 7; i++) { if (board->dc4 & (1 << i)) { gpio_in[i] = pl061_init(gpio_addr[i], pic[gpio_irq[i]], &gpio_out[i]); } } if (board->dc2 & (1 << 12)) { i2c = i2c_init_bus(); stellaris_i2c_init(0x40020000, pic[8], i2c); if (board->peripherals & BP_OLED_I2C) { ssd0303_init(ds, i2c, 0x3d); } } for (i = 0; i < 4; i++) { if (board->dc2 & (1 << i)) { pl011_init(0x4000c000 + i * 0x1000, pic[uart_irq[i]], serial_hds[i], PL011_LUMINARY); } } if (board->dc2 & (1 << 4)) { if (board->peripherals & BP_OLED_SSI) { void * oled; void * sd; void *ssi_bus; int index; oled = ssd0323_init(ds, &gpio_out[GPIO_C][7]); index = drive_get_index(IF_SD, 0, 0); sd = ssi_sd_init(drives_table[index].bdrv); ssi_bus = stellaris_ssi_bus_init(&gpio_out[GPIO_D][0], ssi_sd_xfer, sd, ssd0323_xfer_ssi, oled); pl022_init(0x40008000, pic[7], stellaris_ssi_bus_xfer, ssi_bus); /* Make sure the select pin is high. */ qemu_irq_raise(gpio_out[GPIO_D][0]); } else { pl022_init(0x40008000, pic[7], NULL, NULL); } } if (board->dc4 & (1 << 28)) { /* FIXME: Obey network model. */ stellaris_enet_init(&nd_table[0], 0x40048000, pic[42]); } if (board->peripherals & BP_GAMEPAD) { qemu_irq gpad_irq[5]; static const int gpad_keycode[5] = { 0xc8, 0xd0, 0xcb, 0xcd, 0x1d }; gpad_irq[0] = qemu_irq_invert(gpio_in[GPIO_E][0]); /* up */ gpad_irq[1] = qemu_irq_invert(gpio_in[GPIO_E][1]); /* down */ gpad_irq[2] = qemu_irq_invert(gpio_in[GPIO_E][2]); /* left */ gpad_irq[3] = qemu_irq_invert(gpio_in[GPIO_E][3]); /* right */ gpad_irq[4] = qemu_irq_invert(gpio_in[GPIO_F][1]); /* select */ stellaris_gamepad_init(5, gpad_irq, gpad_keycode); } }"} {"target": 1, "idx": 20129, "func": "static void do_video_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVFrame *in_picture, int *frame_size, float quality) { int nb_frames, i, ret, av_unused resample_changed; AVFrame *final_picture, *formatted_picture; AVCodecContext *enc, *dec; double sync_ipts; enc = ost->st->codec; dec = ist->st->codec; sync_ipts = get_sync_ipts(ost) / av_q2d(enc->time_base); /* by default, we output a single frame */ nb_frames = 1; *frame_size = 0; if(video_sync_method){ double vdelta = sync_ipts - ost->sync_opts; //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c if (vdelta < -1.1) nb_frames = 0; else if (video_sync_method == 2 || (video_sync_method<0 && (s->oformat->flags & AVFMT_VARIABLE_FPS))){ if(vdelta<=-0.6){ nb_frames=0; }else if(vdelta>0.6) ost->sync_opts= lrintf(sync_ipts); }else if (vdelta > 1.1) nb_frames = lrintf(vdelta); //fprintf(stderr, \"vdelta:%f, ost->sync_opts:%\"PRId64\", ost->sync_ipts:%f nb_frames:%d\\n\", vdelta, ost->sync_opts, get_sync_ipts(ost), nb_frames); if (nb_frames == 0){ ++nb_frames_drop; if (verbose>2) fprintf(stderr, \"*** drop!\\n\"); }else if (nb_frames > 1) { nb_frames_dup += nb_frames - 1; if (verbose>2) fprintf(stderr, \"*** %d dup!\\n\", nb_frames-1); } }else ost->sync_opts= lrintf(sync_ipts); nb_frames= FFMIN(nb_frames, max_frames[AVMEDIA_TYPE_VIDEO] - ost->frame_number); if (nb_frames <= 0) return; formatted_picture = in_picture; final_picture = formatted_picture; #if !CONFIG_AVFILTER resample_changed = ost->resample_width != dec->width || ost->resample_height != dec->height || ost->resample_pix_fmt != dec->pix_fmt; if (resample_changed) { av_log(NULL, AV_LOG_INFO, \"Input stream #%d.%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\\n\", ist->file_index, ist->st->index, ost->resample_width, ost->resample_height, 
av_get_pix_fmt_name(ost->resample_pix_fmt), dec->width , dec->height , av_get_pix_fmt_name(dec->pix_fmt)); ost->resample_width = dec->width; ost->resample_height = dec->height; ost->resample_pix_fmt = dec->pix_fmt; } ost->video_resample = dec->width != enc->width || dec->height != enc->height || dec->pix_fmt != enc->pix_fmt; if (ost->video_resample) { final_picture = &ost->resample_frame; if (!ost->img_resample_ctx || resample_changed) { /* initialize the destination picture */ if (!ost->resample_frame.data[0]) { avcodec_get_frame_defaults(&ost->resample_frame); if (avpicture_alloc((AVPicture *)&ost->resample_frame, enc->pix_fmt, enc->width, enc->height)) { fprintf(stderr, \"Cannot allocate temp picture, check pix fmt\\n\"); exit_program(1); } } /* initialize a new scaler context */ sws_freeContext(ost->img_resample_ctx); ost->img_resample_ctx = sws_getContext(dec->width, dec->height, dec->pix_fmt, enc->width, enc->height, enc->pix_fmt, ost->sws_flags, NULL, NULL, NULL); if (ost->img_resample_ctx == NULL) { fprintf(stderr, \"Cannot get resampling context\\n\"); exit_program(1); } } sws_scale(ost->img_resample_ctx, formatted_picture->data, formatted_picture->linesize, 0, ost->resample_height, final_picture->data, final_picture->linesize); } #else if (resample_changed) { avfilter_graph_free(&ost->graph); if (configure_video_filters(ist, ost)) { fprintf(stderr, \"Error reinitialising filters!\\n\"); exit_program(1); } } #endif if (resample_changed) { ost->resample_width = dec->width; ost->resample_height = dec->height; ost->resample_pix_fmt = dec->pix_fmt; } /* duplicates frame if needed */ for(i=0;i<nb_frames;i++) { AVPacket pkt; av_init_packet(&pkt); pkt.stream_index= ost->index; if (s->oformat->flags & AVFMT_RAWPICTURE) { /* raw pictures are written as AVPicture structure to avoid any copies. We support temporarily the older method. */ AVFrame* old_frame = enc->coded_frame; enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack pkt.data= (uint8_t *)final_picture; pkt.size= sizeof(AVPicture); pkt.pts= av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base); pkt.flags |= AV_PKT_FLAG_KEY; write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters); enc->coded_frame = old_frame; } else { AVFrame big_picture; big_picture= *final_picture; /* better than nothing: use input picture interlaced settings */ big_picture.interlaced_frame = in_picture->interlaced_frame; if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) { if(top_field_first == -1) big_picture.top_field_first = in_picture->top_field_first; else big_picture.top_field_first = top_field_first; } /* handles sameq here.
This is not correct because it may not be a global option */ big_picture.quality = quality; if(!me_threshold) big_picture.pict_type = 0; // big_picture.pts = AV_NOPTS_VALUE; big_picture.pts= ost->sync_opts; // big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den); //av_log(NULL, AV_LOG_DEBUG, \"%\"PRId64\" -> encoder\\n\", ost->sync_opts); if (ost->forced_kf_index < ost->forced_kf_count && big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) { big_picture.pict_type = AV_PICTURE_TYPE_I; ost->forced_kf_index++; } ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, &big_picture); if (ret < 0) { fprintf(stderr, \"Video encoding failed\\n\"); exit_program(1); } if(ret>0){ pkt.data= bit_buffer; pkt.size= ret; if(enc->coded_frame->pts != AV_NOPTS_VALUE) pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); /*av_log(NULL, AV_LOG_DEBUG, \"encoder -> %\"PRId64\"/%\"PRId64\"\\n\", pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1, pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/ if(enc->coded_frame->key_frame) pkt.flags |= AV_PKT_FLAG_KEY; write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters); *frame_size = ret; video_size += ret; //fprintf(stderr,\"\\nFrame: %3d size: %5d type: %d\", // enc->frame_number-1, ret, enc->pict_type); /* if two pass, output log */ if (ost->logfile && enc->stats_out) { fprintf(ost->logfile, \"%s\", enc->stats_out); } } } ost->sync_opts++; ost->frame_number++; } }"} {"target": 1, "idx": 20131, "func": "static void continue_after_map_failure(void *opaque) { DMAAIOCB *dbs = (DMAAIOCB *)opaque; dbs->bh = qemu_bh_new(reschedule_dma, dbs); qemu_bh_schedule(dbs->bh); }"} {"target": 0, "idx": 20148, "func": "static void ehci_mem_writel(void *ptr, target_phys_addr_t addr, uint32_t val) { EHCIState *s = ptr; uint32_t *mmio = (uint32_t *)(&s->mmio[addr]); uint32_t old = *mmio; int i; trace_usb_ehci_mmio_writel(addr, addr2str(addr), val); /* Only aligned reads are allowed on OHCI */ if (addr & 3) { fprintf(stderr, \"usb-ehci: Mis-aligned write to addr 0x\" TARGET_FMT_plx \"\\n\", addr); return; } if (addr >= PORTSC && addr < PORTSC + 4 * NB_PORTS) { handle_port_status_write(s, (addr-PORTSC)/4, val); trace_usb_ehci_mmio_change(addr, addr2str(addr), *mmio, old); return; } if (addr < OPREGBASE) { fprintf(stderr, \"usb-ehci: write attempt to read-only register\" TARGET_FMT_plx \"\\n\", addr); return; } /* Do any register specific pre-write processing here. 
*/ switch(addr) { case USBCMD: if (val & USBCMD_HCRESET) { ehci_reset(s); val = s->usbcmd; break; } if (((USBCMD_RUNSTOP | USBCMD_PSE | USBCMD_ASE) & val) != ((USBCMD_RUNSTOP | USBCMD_PSE | USBCMD_ASE) & s->usbcmd)) { if (!ehci_enabled(s)) { qemu_mod_timer(s->frame_timer, qemu_get_clock_ns(vm_clock)); SET_LAST_RUN_CLOCK(s); } ehci_update_halt(s); } /* not supporting dynamic frame list size at the moment */ if ((val & USBCMD_FLS) && !(s->usbcmd & USBCMD_FLS)) { fprintf(stderr, \"attempt to set frame list size -- value %d\\n\", val & USBCMD_FLS); val &= ~USBCMD_FLS; } break; case USBSTS: val &= USBSTS_RO_MASK; // bits 6 through 31 are RO ehci_clear_usbsts(s, val); // bits 0 through 5 are R/WC val = s->usbsts; ehci_set_interrupt(s, 0); break; case USBINTR: val &= USBINTR_MASK; break; case FRINDEX: val &= 0x00003ff8; /* frindex is 14bits and always a multiple of 8 */ break; case CONFIGFLAG: val &= 0x1; if (val) { for(i = 0; i < NB_PORTS; i++) handle_port_owner_write(s, i, 0); } break; case PERIODICLISTBASE: if (ehci_periodic_enabled(s)) { fprintf(stderr, \"ehci: PERIODIC list base register set while periodic schedule\\n\" \" is enabled and HC is enabled\\n\"); } break; case ASYNCLISTADDR: if (ehci_async_enabled(s)) { fprintf(stderr, \"ehci: ASYNC list address register set while async schedule\\n\" \" is enabled and HC is enabled\\n\"); } break; } *mmio = val; trace_usb_ehci_mmio_change(addr, addr2str(addr), *mmio, old); }"} {"target": 0, "idx": 20151, "func": "sPAPRDRConnector *spapr_drc_by_index(uint32_t index) { Object *obj; char name[256]; snprintf(name, sizeof(name), \"%s/%x\", DRC_CONTAINER_PATH, index); obj = object_resolve_path(name, NULL); return !obj ? NULL : SPAPR_DR_CONNECTOR(obj); }"} {"target": 0, "idx": 20158, "func": "uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3) { CPU_DoubleU farg1, farg2, farg3; farg1.ll = arg1; farg2.ll = arg2; farg3.ll = arg3; if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d) || float64_is_signaling_nan(farg3.d))) { /* sNaN operation */ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { /* Multiplication of zero by infinity */ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ); } else { #if USE_PRECISE_EMULATION #ifdef FLOAT128 /* This is the way the PowerPC specification defines it */ float128 ft0_128, ft1_128; ft0_128 = float64_to_float128(farg1.d, &env->fp_status); ft1_128 = float64_to_float128(farg2.d, &env->fp_status); ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) && float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) { /* Magnitude subtraction of infinities */ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI); } else { ft1_128 = float64_to_float128(farg3.d, &env->fp_status); ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status); farg1.d = float128_to_float64(ft0_128, &env->fp_status); } #else /* This is OK on x86 hosts */ farg1.d = (farg1.d * farg2.d) + farg3.d; #endif #else farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status); farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status); #endif if (likely(!float64_is_nan(farg1.d))) farg1.d = float64_chs(farg1.d); } return farg1.ll; }"} {"target": 0, "idx": 20189, "func": "static int cow_probe(const uint8_t *buf, int buf_size, const char *filename) { const struct cow_header_v2 *cow_header = 
(const void *)buf; if (buf_size >= sizeof(struct cow_header_v2) && be32_to_cpu(cow_header->magic) == COW_MAGIC && be32_to_cpu(cow_header->version) == COW_VERSION) return 100; else return 0; }"} {"target": 0, "idx": 20197, "func": "uint64_t ram_bytes_remaining(void) { return ram_save_remaining() * TARGET_PAGE_SIZE; }"} {"target": 0, "idx": 20199, "func": "static int decode_ics(AACContext * ac, SingleChannelElement * sce, GetBitContext * gb, int common_window, int scale_flag) { Pulse pulse; TemporalNoiseShaping * tns = &sce->tns; IndividualChannelStream * ics = &sce->ics; float * out = sce->coeffs; int global_gain, pulse_present = 0; /* This assignment is to silence a GCC warning about the variable being used * uninitialized when in fact it always is. */ pulse.num_pulse = 0; global_gain = get_bits(gb, 8); if (!common_window && !scale_flag) { if (decode_ics_info(ac, ics, gb, 0) < 0) return -1; } if (decode_band_types(ac, sce->band_type, sce->band_type_run_end, gb, ics) < 0) return -1; if (decode_scalefactors(ac, sce->sf, gb, global_gain, ics, sce->band_type, sce->band_type_run_end) < 0) return -1; pulse_present = 0; if (!scale_flag) { if ((pulse_present = get_bits1(gb))) { if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) { av_log(ac->avccontext, AV_LOG_ERROR, \"Pulse tool not allowed in eight short sequence.\\n\"); return -1; } decode_pulses(&pulse, gb, ics->swb_offset); } if ((tns->present = get_bits1(gb)) && decode_tns(ac, tns, gb, ics)) return -1; if (get_bits1(gb)) { av_log_missing_feature(ac->avccontext, \"SSR\", 1); return -1; } } if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present, &pulse, ics, sce->band_type) < 0) return -1; return 0; }"} {"target": 0, "idx": 20200, "func": "static ssize_t buffered_flush(QEMUFileBuffered *s) { size_t offset = 0; ssize_t ret = 0; DPRINTF(\"flushing %zu byte(s) of data\\n\", s->buffer_size); while (s->bytes_xfer < s->xfer_limit && offset < s->buffer_size) { size_t to_send = MIN(s->buffer_size - offset, s->xfer_limit - s->bytes_xfer); ret = migrate_fd_put_buffer(s->migration_state, s->buffer + offset, to_send); if (ret <= 0) { DPRINTF(\"error flushing data, %zd\\n\", ret); break; } else { DPRINTF(\"flushed %zd byte(s)\\n\", ret); offset += ret; s->bytes_xfer += ret; } } DPRINTF(\"flushed %zu of %zu byte(s)\\n\", offset, s->buffer_size); memmove(s->buffer, s->buffer + offset, s->buffer_size - offset); s->buffer_size -= offset; if (ret < 0) { return ret; } return offset; }"} {"target": 0, "idx": 20213, "func": "void pci_qdev_register(PCIDeviceInfo *info) { info->qdev.init = pci_qdev_init; info->qdev.bus_type = BUS_TYPE_PCI; qdev_register(&info->qdev); }"} {"target": 0, "idx": 20233, "func": "static void vhost_net_stop_one(struct vhost_net *net, VirtIODevice *dev) { struct vhost_vring_file file = { .fd = -1 }; if (!net->dev.started) { return; } if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP) { for (file.index = 0; file.index < net->dev.nvqs; ++file.index) { const VhostOps *vhost_ops = net->dev.vhost_ops; int r = vhost_ops->vhost_call(&net->dev, VHOST_NET_SET_BACKEND, &file); assert(r >= 0); } } if (net->nc->info->poll) { net->nc->info->poll(net->nc, true); } vhost_dev_stop(&net->dev, dev); vhost_dev_disable_notifiers(&net->dev, dev); }"} {"target": 0, "idx": 20237, "func": "static int rtsp_read_header(AVFormatContext *s, AVFormatParameters *ap) { RTSPState *rt = s->priv_data; char host[1024], path[1024], tcpname[1024], cmd[2048]; URLContext *rtsp_hd; int port, i, ret, err; RTSPHeader reply1, *reply = &reply1; unsigned char 
*content = NULL; AVStream *st; RTSPStream *rtsp_st; int protocol_mask; rtsp_abort_req = 0; /* extract hostname and port */ url_split(NULL, 0, host, sizeof(host), &port, path, sizeof(path), s->filename); if (port < 0) port = RTSP_DEFAULT_PORT; /* open the tcp connexion */ snprintf(tcpname, sizeof(tcpname), \"tcp://%s:%d\", host, port); if (url_open(&rtsp_hd, tcpname, URL_RDWR) < 0) return AVERROR_IO; rt->rtsp_hd = rtsp_hd; rt->seq = 0; /* describe the stream */ snprintf(cmd, sizeof(cmd), \"DESCRIBE %s RTSP/1.0\\r\\n\" \"Accept: application/sdp\\r\\n\", s->filename); rtsp_send_cmd(s, cmd, reply, &content); if (!content) { err = AVERROR_INVALIDDATA; goto fail; } if (reply->status_code != RTSP_STATUS_OK) { err = AVERROR_INVALIDDATA; goto fail; } /* now we got the SDP description, we parse it */ ret = sdp_parse(s, (const char *)content); av_freep(&content); if (ret < 0) { err = AVERROR_INVALIDDATA; goto fail; } protocol_mask = rtsp_default_protocols; /* for each stream, make the setup request */ /* XXX: we assume the same server is used for the control of each RTSP stream */ for(i=0;inb_streams;i++) { char transport[2048]; AVInputFormat *fmt; st = s->streams[i]; rtsp_st = st->priv_data; /* compute available transports */ transport[0] = '\\0'; /* RTP/UDP */ if (protocol_mask & (1 << RTSP_PROTOCOL_RTP_UDP)) { char buf[256]; int j; /* first try in specified port range */ if (rtsp_rtp_port_min != 0) { for(j=rtsp_rtp_port_min;j<=rtsp_rtp_port_max;j++) { snprintf(buf, sizeof(buf), \"rtp://?localport=%d\", j); if (!av_open_input_file(&rtsp_st->ic, buf, &rtp_demux, 0, NULL)) goto rtp_opened; } } /* then try on any port */ if (av_open_input_file(&rtsp_st->ic, \"rtp://\", &rtp_demux, 0, NULL) < 0) { err = AVERROR_INVALIDDATA; goto fail; } rtp_opened: port = rtp_get_local_port(url_fileno(&rtsp_st->ic->pb)); if (transport[0] != '\\0') pstrcat(transport, sizeof(transport), \",\"); snprintf(transport + strlen(transport), sizeof(transport) - strlen(transport) - 1, \"RTP/AVP/UDP;unicast;client_port=%d-%d\", port, port + 1); } /* RTP/TCP */ if (protocol_mask & (1 << RTSP_PROTOCOL_RTP_TCP)) { if (transport[0] != '\\0') pstrcat(transport, sizeof(transport), \",\"); snprintf(transport + strlen(transport), sizeof(transport) - strlen(transport) - 1, \"RTP/AVP/TCP\"); } if (protocol_mask & (1 << RTSP_PROTOCOL_RTP_UDP_MULTICAST)) { if (transport[0] != '\\0') pstrcat(transport, sizeof(transport), \",\"); snprintf(transport + strlen(transport), sizeof(transport) - strlen(transport) - 1, \"RTP/AVP/UDP;multicast\"); } snprintf(cmd, sizeof(cmd), \"SETUP %s RTSP/1.0\\r\\n\" \"Transport: %s\\r\\n\", rtsp_st->control_url, transport); rtsp_send_cmd(s, cmd, reply, NULL); if (reply->status_code != RTSP_STATUS_OK || reply->nb_transports != 1) { err = AVERROR_INVALIDDATA; goto fail; } /* XXX: same protocol for all streams is required */ if (i > 0) { if (reply->transports[0].protocol != rt->protocol) { err = AVERROR_INVALIDDATA; goto fail; } } else { rt->protocol = reply->transports[0].protocol; } /* close RTP connection if not choosen */ if (reply->transports[0].protocol != RTSP_PROTOCOL_RTP_UDP && (protocol_mask & (1 << RTSP_PROTOCOL_RTP_UDP))) { av_close_input_file(rtsp_st->ic); rtsp_st->ic = NULL; } switch(reply->transports[0].protocol) { case RTSP_PROTOCOL_RTP_TCP: fmt = &rtp_demux; if (av_open_input_file(&rtsp_st->ic, \"null\", fmt, 0, NULL) < 0) { err = AVERROR_INVALIDDATA; goto fail; } rtsp_st->interleaved_min = reply->transports[0].interleaved_min; rtsp_st->interleaved_max = reply->transports[0].interleaved_max; break; 
case RTSP_PROTOCOL_RTP_UDP: { char url[1024]; /* XXX: also use address if specified */ snprintf(url, sizeof(url), \"rtp://%s:%d\", host, reply->transports[0].server_port_min); if (rtp_set_remote_url(url_fileno(&rtsp_st->ic->pb), url) < 0) { err = AVERROR_INVALIDDATA; goto fail; } } break; case RTSP_PROTOCOL_RTP_UDP_MULTICAST: { char url[1024]; int ttl; fmt = &rtp_demux; ttl = reply->transports[0].ttl; if (!ttl) ttl = 16; snprintf(url, sizeof(url), \"rtp://%s:%d?multicast=1&ttl=%d\", host, reply->transports[0].server_port_min, ttl); if (av_open_input_file(&rtsp_st->ic, url, fmt, 0, NULL) < 0) { err = AVERROR_INVALIDDATA; goto fail; } } break; } } /* use callback if available to extend setup */ if (ff_rtsp_callback) { if (ff_rtsp_callback(RTSP_ACTION_CLIENT_SETUP, rt->session_id, NULL, 0, rt->last_reply) < 0) { err = AVERROR_INVALIDDATA; goto fail; } } /* start playing */ snprintf(cmd, sizeof(cmd), \"PLAY %s RTSP/1.0\\r\\n\" \"Range: npt=0-\\r\\n\", s->filename); rtsp_send_cmd(s, cmd, reply, NULL); if (reply->status_code != RTSP_STATUS_OK) { err = AVERROR_INVALIDDATA; goto fail; } #if 0 /* open TCP with bufferized input */ if (rt->protocol == RTSP_PROTOCOL_RTP_TCP) { if (url_fdopen(&rt->rtsp_gb, rt->rtsp_hd) < 0) { err = AVERROR_NOMEM; goto fail; } } #endif return 0; fail: for(i=0;inb_streams;i++) { st = s->streams[i]; rtsp_st = st->priv_data; if (rtsp_st) { if (rtsp_st->ic) av_close_input_file(rtsp_st->ic); } av_free(rtsp_st); } av_freep(&content); url_close(rt->rtsp_hd); return err; }"} {"target": 0, "idx": 20238, "func": "int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong address, int rw, int mmu_idx, int is_softmmu) { target_ulong phys; int prot; /* XXX: implement mmu */ phys = address; prot = PAGE_READ | PAGE_WRITE; return tlb_set_page(env, address & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK, prot, mmu_idx, is_softmmu); }"} {"target": 0, "idx": 20248, "func": "static int inc_refcounts(BlockDriverState *bs, BdrvCheckResult *res, void **refcount_table, int64_t *refcount_table_size, int64_t offset, int64_t size) { BDRVQcow2State *s = bs->opaque; uint64_t start, last, cluster_offset, k, refcount; int ret; if (size <= 0) { return 0; } start = start_of_cluster(s, offset); last = start_of_cluster(s, offset + size - 1); for(cluster_offset = start; cluster_offset <= last; cluster_offset += s->cluster_size) { k = cluster_offset >> s->cluster_bits; if (k >= *refcount_table_size) { ret = realloc_refcount_array(s, refcount_table, refcount_table_size, k + 1); if (ret < 0) { res->check_errors++; return ret; } } refcount = s->get_refcount(*refcount_table, k); if (refcount == s->refcount_max) { fprintf(stderr, \"ERROR: overflow cluster offset=0x%\" PRIx64 \"\\n\", cluster_offset); fprintf(stderr, \"Use qemu-img amend to increase the refcount entry \" \"width or qemu-img convert to create a clean copy if the \" \"image cannot be opened for writing\\n\"); res->corruptions++; continue; } s->set_refcount(*refcount_table, k, refcount + 1); } return 0; }"} {"target": 0, "idx": 20257, "func": "static void gic_do_cpu_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { GICState **backref = (GICState **)opaque; GICState *s = *backref; int id = (backref - s->backref); gic_cpu_write(s, id, addr, value); }"} {"target": 0, "idx": 20282, "func": "void stq_be_phys(target_phys_addr_t addr, uint64_t val) { val = cpu_to_be64(val); cpu_physical_memory_write(addr, &val, 8); }"} {"target": 0, "idx": 20283, "func": "static int fb_initialise(struct XenDevice *xendev) { struct XenFB *fb = 
container_of(xendev, struct XenFB, c.xendev); struct xenfb_page *fb_page; int videoram; int rc; if (xenstore_read_fe_int(xendev, \"videoram\", &videoram) == -1) videoram = 0; rc = common_bind(&fb->c); if (rc != 0) return rc; fb_page = fb->c.page; rc = xenfb_configure_fb(fb, videoram * 1024 * 1024U, fb_page->width, fb_page->height, fb_page->depth, fb_page->mem_length, 0, fb_page->line_length); if (rc != 0) return rc; rc = xenfb_map_fb(fb); if (rc != 0) return rc; #if 0 /* handled in xen_init_display() for now */ if (!fb->have_console) { fb->c.ds = graphic_console_init(xenfb_update, xenfb_invalidate, NULL, NULL, fb); fb->have_console = 1; } #endif if (xenstore_read_fe_int(xendev, \"feature-update\", &fb->feature_update) == -1) fb->feature_update = 0; if (fb->feature_update) xenstore_write_be_int(xendev, \"request-update\", 1); xen_pv_printf(xendev, 1, \"feature-update=%d, videoram=%d\\n\", fb->feature_update, videoram); return 0; }"} {"target": 0, "idx": 20299, "func": "static void gen_exts(int ot, TCGv reg) { switch(ot) { case OT_BYTE: tcg_gen_ext8s_tl(reg, reg); break; case OT_WORD: tcg_gen_ext16s_tl(reg, reg); break; case OT_LONG: tcg_gen_ext32s_tl(reg, reg); break; default: break; } }"} {"target": 0, "idx": 20301, "func": "bool is_valid_option_list(const char *param) { size_t buflen = strlen(param) + 1; char *buf = g_malloc(buflen); const char *p = param; bool result = true; while (*p) { p = get_opt_value(buf, buflen, p); if (*p && !*++p) { result = false; goto out; } if (!*buf || *buf == ',') { result = false; goto out; } } out: free(buf); return result; }"} {"target": 1, "idx": 20305, "func": "int ff_h264_alloc_tables(H264Context *h){ MpegEncContext * const s = &h->s; const int big_mb_num= s->mb_stride * (s->mb_height+1); const int row_mb_num= 2*s->mb_stride*s->avctx->thread_count; int x,y; FF_ALLOCZ_OR_GOTO(h->s.avctx, h->intra4x4_pred_mode, row_mb_num * 8 * sizeof(uint8_t), fail) FF_ALLOCZ_OR_GOTO(h->s.avctx, h->non_zero_count , big_mb_num * 48 * sizeof(uint8_t), fail) FF_ALLOCZ_OR_GOTO(h->s.avctx, h->slice_table_base , (big_mb_num+s->mb_stride) * sizeof(*h->slice_table_base), fail) FF_ALLOCZ_OR_GOTO(h->s.avctx, h->cbp_table, big_mb_num * sizeof(uint16_t), fail) FF_ALLOCZ_OR_GOTO(h->s.avctx, h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t), fail) FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mvd_table[0], 16*row_mb_num * sizeof(uint8_t), fail); FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mvd_table[1], 16*row_mb_num * sizeof(uint8_t), fail); FF_ALLOCZ_OR_GOTO(h->s.avctx, h->direct_table, 4*big_mb_num * sizeof(uint8_t) , fail); FF_ALLOCZ_OR_GOTO(h->s.avctx, h->list_counts, big_mb_num * sizeof(uint8_t), fail) memset(h->slice_table_base, -1, (big_mb_num+s->mb_stride) * sizeof(*h->slice_table_base)); h->slice_table= h->slice_table_base + s->mb_stride*2 + 1; FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mb2b_xy , big_mb_num * sizeof(uint32_t), fail); FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mb2br_xy , big_mb_num * sizeof(uint32_t), fail); for(y=0; ymb_height; y++){ for(x=0; xmb_width; x++){ const int mb_xy= x + y*s->mb_stride; const int b_xy = 4*x + 4*y*h->b_stride; h->mb2b_xy [mb_xy]= b_xy; h->mb2br_xy[mb_xy]= 8*(FMO ? 
mb_xy : (mb_xy % (2*s->mb_stride))); } } s->obmc_scratchpad = NULL; if(!h->dequant4_coeff[0]) init_dequant_tables(h); return 0; fail: free_tables(h, 1); return -1; }"} {"target": 0, "idx": 20320, "func": "static inline CopyRet copy_frame(AVCodecContext *avctx, BC_DTS_PROC_OUT *output, void *data, int *data_size, uint8_t second_field) { BC_STATUS ret; BC_DTS_STATUS decoder_status; uint8_t is_paff; uint8_t next_frame_same; uint8_t interlaced; CHDContext *priv = avctx->priv_data; uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) == VDEC_FLAG_BOTTOMFIELD; uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST); int width = output->PicInfo.width; int height = output->PicInfo.height; int bwidth; uint8_t *src = output->Ybuff; int sStride; uint8_t *dst; int dStride; ret = DtsGetDriverStatus(priv->dev, &decoder_status); if (ret != BC_STS_SUCCESS) { av_log(avctx, AV_LOG_ERROR, \"CrystalHD: GetDriverStatus failed: %u\\n\", ret); return RET_ERROR; } is_paff = ASSUME_PAFF_OVER_MBAFF || !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC); next_frame_same = output->PicInfo.picture_number == (decoder_status.picNumFlags & ~0x40000000); interlaced = ((output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) && is_paff) || next_frame_same || bottom_field || second_field; av_log(avctx, AV_LOG_VERBOSE, \"CrystalHD: next_frame_same: %u | %u | %u\\n\", next_frame_same, output->PicInfo.picture_number, decoder_status.picNumFlags & ~0x40000000); if (priv->pic.data[0] && !priv->need_second_field) avctx->release_buffer(avctx, &priv->pic); priv->need_second_field = interlaced && !priv->need_second_field; priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (!priv->pic.data[0]) { if (avctx->get_buffer(avctx, &priv->pic) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return RET_ERROR; } } bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0); if (priv->is_70012) { int pStride; if (width <= 720) pStride = 720; else if (width <= 1280) pStride = 1280; else if (width <= 1080) pStride = 1080; sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0); } else { sStride = bwidth; } dStride = priv->pic.linesize[0]; dst = priv->pic.data[0]; av_log(priv->avctx, AV_LOG_VERBOSE, \"CrystalHD: Copying out frame\\n\"); if (interlaced) { int dY = 0; int sY = 0; height /= 2; if (bottom_field) { av_log(priv->avctx, AV_LOG_VERBOSE, \"Interlaced: bottom field\\n\"); dY = 1; } else { av_log(priv->avctx, AV_LOG_VERBOSE, \"Interlaced: top field\\n\"); dY = 0; } for (sY = 0; sY < height; dY++, sY++) { memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth); if (interlaced) dY++; } } else { av_image_copy_plane(dst, dStride, src, sStride, bwidth, height); } priv->pic.interlaced_frame = interlaced; if (interlaced) priv->pic.top_field_first = !bottom_first; if (output->PicInfo.timeStamp != 0) { priv->pic.pkt_pts = opaque_list_pop(priv, output->PicInfo.timeStamp); av_log(avctx, AV_LOG_VERBOSE, \"output \\\"pts\\\": %\"PRIu64\"\\n\", priv->pic.pkt_pts); } if (!priv->need_second_field) { *data_size = sizeof(AVFrame); *(AVFrame *)data = priv->pic; } if (ASSUME_TWO_INPUTS_ONE_OUTPUT && output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) { av_log(priv->avctx, AV_LOG_VERBOSE, \"Fieldpair from two packets.\\n\"); return RET_SKIP_NEXT_COPY; } return RET_OK; }"} {"target": 1, "idx": 20327, "func": "static void net_l2tpv3_cleanup(NetClientState *nc) { NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc); qemu_purge_queued_packets(nc); l2tpv3_read_poll(s, 
false); l2tpv3_write_poll(s, false); if (s->fd > 0) { close(s->fd); } destroy_vector(s->msgvec, MAX_L2TPV3_MSGCNT, IOVSIZE); g_free(s->vec); g_free(s->header_buf); g_free(s->dgram_dst); }"} {"target": 1, "idx": 20328, "func": "static AioHandler *find_aio_handler(int fd) { AioHandler *node; LIST_FOREACH(node, &aio_handlers, node) { if (node->fd == fd) return node; } return NULL; }"} {"target": 0, "idx": 20338, "func": "static inline void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrSrc, const int16_t *alpSrc, uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW) { int i; #if COMPILE_TEMPLATE_MMX if(!(c->flags & SWS_BITEXACT)) { long p= 4; const int16_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW}; uint8_t *dst[4]= {aDest, dest, uDest, vDest}; x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW}; if (c->flags & SWS_ACCURATE_RND) { while(p--) { if (dst[p]) { __asm__ volatile( YSCALEYUV2YV121_ACCURATE :: \"r\" (src[p]), \"r\" (dst[p] + counter[p]), \"g\" (-counter[p]) : \"%\"REG_a ); } } } else { while(p--) { if (dst[p]) { __asm__ volatile( YSCALEYUV2YV121 :: \"r\" (src[p]), \"r\" (dst[p] + counter[p]), \"g\" (-counter[p]) : \"%\"REG_a ); } } } return; } #endif for (i=0; i>7; if (val&256) { if (val<0) val=0; else val=255; } dest[i]= val; } if (uDest) for (i=0; i>7; int v=(chrSrc[i + VOFW]+64)>>7; if ((u|v)&256) { if (u<0) u=0; else if (u>255) u=255; if (v<0) v=0; else if (v>255) v=255; } uDest[i]= u; vDest[i]= v; } if (CONFIG_SWSCALE_ALPHA && aDest) for (i=0; i>7; aDest[i]= av_clip_uint8(val); } }"} {"target": 1, "idx": 20345, "func": "int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; uint32_t tmp; if (n < CPU_NB_REGS) { if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) { env->regs[gpr_map[n]] = ldtul_p(mem_buf); return sizeof(target_ulong); } else if (n < CPU_NB_REGS32) { n = gpr_map32[n]; env->regs[n] &= ~0xffffffffUL; env->regs[n] |= (uint32_t)ldl_p(mem_buf); return 4; } } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) { #ifdef USE_X86LDOUBLE /* FIXME: byteswap float values - after fixing fpregs layout. 
*/ memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10); #endif return 10; } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) { n -= IDX_XMM_REGS; if (n < CPU_NB_REGS32 || (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) { env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf); env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8); return 16; } } else { switch (n) { case IDX_IP_REG: if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) { env->eip = ldq_p(mem_buf); return 8; } else { env->eip &= ~0xffffffffUL; env->eip |= (uint32_t)ldl_p(mem_buf); return 4; } case IDX_FLAGS_REG: env->eflags = ldl_p(mem_buf); return 4; case IDX_SEG_REGS: return x86_cpu_gdb_load_seg(cpu, R_CS, mem_buf); case IDX_SEG_REGS + 1: return x86_cpu_gdb_load_seg(cpu, R_SS, mem_buf); case IDX_SEG_REGS + 2: return x86_cpu_gdb_load_seg(cpu, R_DS, mem_buf); case IDX_SEG_REGS + 3: return x86_cpu_gdb_load_seg(cpu, R_ES, mem_buf); case IDX_SEG_REGS + 4: return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf); case IDX_SEG_REGS + 5: return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf); case IDX_FP_REGS + 8: env->fpuc = ldl_p(mem_buf); return 4; case IDX_FP_REGS + 9: tmp = ldl_p(mem_buf); env->fpstt = (tmp >> 11) & 7; env->fpus = tmp & ~0x3800; return 4; case IDX_FP_REGS + 10: /* ftag */ return 4; case IDX_FP_REGS + 11: /* fiseg */ return 4; case IDX_FP_REGS + 12: /* fioff */ return 4; case IDX_FP_REGS + 13: /* foseg */ return 4; case IDX_FP_REGS + 14: /* fooff */ return 4; case IDX_FP_REGS + 15: /* fop */ return 4; case IDX_MXCSR_REG: env->mxcsr = ldl_p(mem_buf); return 4; } } /* Unrecognised register. */ return 0; }"} {"target": 0, "idx": 20352, "func": "int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps) { AVBufferRef *sps_buf; int profile_idc, level_idc, constraint_set_flags = 0; unsigned int sps_id; int i, log2_max_frame_num_minus4; SPS *sps; profile_idc = get_bits(gb, 8); constraint_set_flags |= get_bits1(gb) << 0; // constraint_set0_flag constraint_set_flags |= get_bits1(gb) << 1; // constraint_set1_flag constraint_set_flags |= get_bits1(gb) << 2; // constraint_set2_flag constraint_set_flags |= get_bits1(gb) << 3; // constraint_set3_flag constraint_set_flags |= get_bits1(gb) << 4; // constraint_set4_flag constraint_set_flags |= get_bits1(gb) << 5; // constraint_set5_flag skip_bits(gb, 2); // reserved_zero_2bits level_idc = get_bits(gb, 8); sps_id = get_ue_golomb_31(gb); if (sps_id >= MAX_SPS_COUNT) { av_log(avctx, AV_LOG_ERROR, \"sps_id %u out of range\\n\", sps_id); return AVERROR_INVALIDDATA; } sps_buf = av_buffer_allocz(sizeof(*sps)); if (!sps_buf) return AVERROR(ENOMEM); sps = (SPS*)sps_buf->data; sps->sps_id = sps_id; sps->time_offset_length = 24; sps->profile_idc = profile_idc; sps->constraint_set_flags = constraint_set_flags; sps->level_idc = level_idc; memset(sps->scaling_matrix4, 16, sizeof(sps->scaling_matrix4)); memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8)); sps->scaling_matrix_present = 0; if (sps->profile_idc == 100 || // High profile sps->profile_idc == 110 || // High10 profile sps->profile_idc == 122 || // High422 profile sps->profile_idc == 244 || // High444 Predictive profile sps->profile_idc == 44 || // Cavlc444 profile sps->profile_idc == 83 || // Scalable Constrained High profile (SVC) sps->profile_idc == 86 || // Scalable High Intra profile (SVC) sps->profile_idc == 118 || // Stereo High profile (MVC) sps->profile_idc == 128 || // Multiview High profile (MVC) sps->profile_idc == 138 || // Multiview Depth High profile (MVCD) sps->profile_idc == 
144) { // old High444 profile sps->chroma_format_idc = get_ue_golomb_31(gb); if (sps->chroma_format_idc > 3) { avpriv_request_sample(avctx, \"chroma_format_idc %u\", sps->chroma_format_idc); goto fail; } else if (sps->chroma_format_idc == 3) { sps->residual_color_transform_flag = get_bits1(gb); } sps->bit_depth_luma = get_ue_golomb(gb) + 8; sps->bit_depth_chroma = get_ue_golomb(gb) + 8; if (sps->bit_depth_chroma != sps->bit_depth_luma) { avpriv_request_sample(avctx, \"Different chroma and luma bit depth\"); goto fail; } sps->transform_bypass = get_bits1(gb); decode_scaling_matrices(gb, sps, NULL, 1, sps->scaling_matrix4, sps->scaling_matrix8); } else { sps->chroma_format_idc = 1; sps->bit_depth_luma = 8; sps->bit_depth_chroma = 8; } log2_max_frame_num_minus4 = get_ue_golomb(gb); if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 || log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) { av_log(avctx, AV_LOG_ERROR, \"log2_max_frame_num_minus4 out of range (0-12): %d\\n\", log2_max_frame_num_minus4); goto fail; } sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4; sps->poc_type = get_ue_golomb_31(gb); if (sps->poc_type == 0) { // FIXME #define sps->log2_max_poc_lsb = get_ue_golomb(gb) + 4; } else if (sps->poc_type == 1) { // FIXME #define sps->delta_pic_order_always_zero_flag = get_bits1(gb); sps->offset_for_non_ref_pic = get_se_golomb(gb); sps->offset_for_top_to_bottom_field = get_se_golomb(gb); sps->poc_cycle_length = get_ue_golomb(gb); if ((unsigned)sps->poc_cycle_length >= FF_ARRAY_ELEMS(sps->offset_for_ref_frame)) { av_log(avctx, AV_LOG_ERROR, \"poc_cycle_length overflow %d\\n\", sps->poc_cycle_length); goto fail; } for (i = 0; i < sps->poc_cycle_length; i++) sps->offset_for_ref_frame[i] = get_se_golomb(gb); } else if (sps->poc_type != 2) { av_log(avctx, AV_LOG_ERROR, \"illegal POC type %d\\n\", sps->poc_type); goto fail; } sps->ref_frame_count = get_ue_golomb_31(gb); if (sps->ref_frame_count > H264_MAX_PICTURE_COUNT - 2 || sps->ref_frame_count >= 32U) { av_log(avctx, AV_LOG_ERROR, \"too many reference frames %d\\n\", sps->ref_frame_count); goto fail; } sps->gaps_in_frame_num_allowed_flag = get_bits1(gb); sps->mb_width = get_ue_golomb(gb) + 1; sps->mb_height = get_ue_golomb(gb) + 1; if ((unsigned)sps->mb_width >= INT_MAX / 16 || (unsigned)sps->mb_height >= INT_MAX / 16 || av_image_check_size(16 * sps->mb_width, 16 * sps->mb_height, 0, avctx)) { av_log(avctx, AV_LOG_ERROR, \"mb_width/height overflow\\n\"); goto fail; } sps->frame_mbs_only_flag = get_bits1(gb); if (!sps->frame_mbs_only_flag) sps->mb_aff = get_bits1(gb); else sps->mb_aff = 0; sps->direct_8x8_inference_flag = get_bits1(gb); if (!sps->frame_mbs_only_flag && !sps->direct_8x8_inference_flag) { av_log(avctx, AV_LOG_ERROR, \"This stream was generated by a broken encoder, invalid 8x8 inference\\n\"); goto fail; } #ifndef ALLOW_INTERLACE if (sps->mb_aff) av_log(avctx, AV_LOG_ERROR, \"MBAFF support not included; enable it at compile-time.\\n\"); #endif sps->crop = get_bits1(gb); if (sps->crop) { unsigned int crop_left = get_ue_golomb(gb); unsigned int crop_right = get_ue_golomb(gb); unsigned int crop_top = get_ue_golomb(gb); unsigned int crop_bottom = get_ue_golomb(gb); if (avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) { av_log(avctx, AV_LOG_DEBUG, \"discarding sps cropping, original \" \"values are l:%d r:%d t:%d b:%d\\n\", crop_left, crop_right, crop_top, crop_bottom); sps->crop_left = sps->crop_right = sps->crop_top = sps->crop_bottom = 0; } else { int vsub = (sps->chroma_format_idc == 1) ? 
1 : 0; int hsub = (sps->chroma_format_idc == 1 || sps->chroma_format_idc == 2) ? 1 : 0; int step_x = 1 << hsub; int step_y = (2 - sps->frame_mbs_only_flag) << vsub; if (crop_left & (0x1F >> (sps->bit_depth_luma > 8)) && !(avctx->flags & AV_CODEC_FLAG_UNALIGNED)) { crop_left &= ~(0x1F >> (sps->bit_depth_luma > 8)); av_log(avctx, AV_LOG_WARNING, \"Reducing left cropping to %d \" \"chroma samples to preserve alignment.\\n\", crop_left); } if (INT_MAX / step_x <= crop_left || INT_MAX / step_x - crop_left <= crop_right || 16 * sps->mb_width <= step_x * (crop_left + crop_right) || INT_MAX / step_y <= crop_top || INT_MAX / step_y - crop_top <= crop_bottom || 16 * sps->mb_height <= step_y * (crop_top + crop_bottom)) { av_log(avctx, AV_LOG_WARNING, \"Invalid crop parameters\\n\"); if (avctx->err_recognition & AV_EF_EXPLODE) goto fail; crop_left = crop_right = crop_top = crop_bottom = 0; } sps->crop_left = crop_left * step_x; sps->crop_right = crop_right * step_x; sps->crop_top = crop_top * step_y; sps->crop_bottom = crop_bottom * step_y; } } else { sps->crop_left = sps->crop_right = sps->crop_top = sps->crop_bottom = sps->crop = 0; } sps->vui_parameters_present_flag = get_bits1(gb); if (sps->vui_parameters_present_flag) { int ret = decode_vui_parameters(gb, avctx, sps); if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) goto fail; } /* if the maximum delay is not stored in the SPS, derive it based on the * level */ if (!sps->bitstream_restriction_flag && (sps->ref_frame_count || avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT)) { sps->num_reorder_frames = MAX_DELAYED_PIC_COUNT - 1; for (i = 0; i < FF_ARRAY_ELEMS(level_max_dpb_mbs); i++) { if (level_max_dpb_mbs[i][0] == sps->level_idc) { sps->num_reorder_frames = FFMIN(level_max_dpb_mbs[i][1] / (sps->mb_width * sps->mb_height), sps->num_reorder_frames); break; } } } if (!sps->sar.den) sps->sar.den = 1; if (avctx->debug & FF_DEBUG_PICT_INFO) { static const char csp[4][5] = { \"Gray\", \"420\", \"422\", \"444\" }; av_log(avctx, AV_LOG_DEBUG, \"sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%u/%u/%u/%u %s %s %\"PRId32\"/%\"PRId32\"\\n\", sps_id, sps->profile_idc, sps->level_idc, sps->poc_type, sps->ref_frame_count, sps->mb_width, sps->mb_height, sps->frame_mbs_only_flag ? \"FRM\" : (sps->mb_aff ? \"MB-AFF\" : \"PIC-AFF\"), sps->direct_8x8_inference_flag ? \"8B8\" : \"\", sps->crop_left, sps->crop_right, sps->crop_top, sps->crop_bottom, sps->vui_parameters_present_flag ? \"VUI\" : \"\", csp[sps->chroma_format_idc], sps->timing_info_present_flag ? sps->num_units_in_tick : 0, sps->timing_info_present_flag ? sps->time_scale : 0); } /* check if this is a repeat of an already parsed SPS, then keep the * original one. * otherwise drop all PPSes that depend on it */ if (ps->sps_list[sps_id] && !memcmp(ps->sps_list[sps_id]->data, sps_buf->data, sps_buf->size)) { av_buffer_unref(&sps_buf); } else { remove_sps(ps, sps_id); ps->sps_list[sps_id] = sps_buf; } return 0; fail: av_buffer_unref(&sps_buf); return AVERROR_INVALIDDATA; }"} {"target": 0, "idx": 20365, "func": "static void cchip_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { TyphoonState *s = opaque; uint64_t oldval, newval; switch (addr) { case 0x0000: /* CSC: Cchip System Configuration Register. */ /* All sorts of data here; nothing relevant RW. */ break; case 0x0040: /* MTR: Memory Timing Register. */ /* All sorts of stuff related to real DRAM. */ break; case 0x0080: /* MISC: Miscellaneous Register. 
*/ newval = oldval = s->cchip.misc; newval &= ~(val & 0x10000ff0); /* W1C fields */ if (val & 0x100000) { newval &= ~0xff0000ull; /* ACL clears ABT and ABW */ } else { newval |= val & 0x00f00000; /* ABT field is W1S */ if ((newval & 0xf0000) == 0) { newval |= val & 0xf0000; /* ABW field is W1S iff zero */ } } newval |= (val & 0xf000) >> 4; /* IPREQ field sets IPINTR. */ newval &= ~0xf0000000000ull; /* WO and RW fields */ newval |= val & 0xf0000000000ull; s->cchip.misc = newval; /* Pass on changes to IPI and ITI state. */ if ((newval ^ oldval) & 0xff0) { int i; for (i = 0; i < 4; ++i) { AlphaCPU *cpu = s->cchip.cpu[i]; if (cpu != NULL) { CPUState *cs = CPU(cpu); /* IPI can be either cleared or set by the write. */ if (newval & (1 << (i + 8))) { cpu_interrupt(cs, CPU_INTERRUPT_SMP); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_SMP); } /* ITI can only be cleared by the write. */ if ((newval & (1 << (i + 4))) == 0) { cpu_reset_interrupt(cs, CPU_INTERRUPT_TIMER); } } } } break; case 0x00c0: /* MPD: Memory Presence Detect Register. */ break; case 0x0100: /* AAR0 */ case 0x0140: /* AAR1 */ case 0x0180: /* AAR2 */ case 0x01c0: /* AAR3 */ /* AAR: Array Address Register. */ /* All sorts of information about DRAM. */ break; case 0x0200: /* DIM0 */ /* DIM: Device Interrupt Mask Register, CPU0. */ s->cchip.dim[0] = val; cpu_irq_change(s->cchip.cpu[0], val & s->cchip.drir); break; case 0x0240: /* DIM1 */ /* DIM: Device Interrupt Mask Register, CPU1. */ s->cchip.dim[0] = val; cpu_irq_change(s->cchip.cpu[1], val & s->cchip.drir); break; case 0x0280: /* DIR0 (RO) */ case 0x02c0: /* DIR1 (RO) */ case 0x0300: /* DRIR (RO) */ break; case 0x0340: /* PRBEN: Probe Enable Register. */ break; case 0x0380: /* IIC0 */ s->cchip.iic[0] = val & 0xffffff; break; case 0x03c0: /* IIC1 */ s->cchip.iic[1] = val & 0xffffff; break; case 0x0400: /* MPR0 */ case 0x0440: /* MPR1 */ case 0x0480: /* MPR2 */ case 0x04c0: /* MPR3 */ /* MPR: Memory Programming Register. */ break; case 0x0580: /* TTR: TIGbus Timing Register. */ /* All sorts of stuff related to interrupt delivery timings. */ break; case 0x05c0: /* TDR: TIGbug Device Timing Register. */ break; case 0x0600: /* DIM2: Device Interrupt Mask Register, CPU2. */ s->cchip.dim[2] = val; cpu_irq_change(s->cchip.cpu[2], val & s->cchip.drir); break; case 0x0640: /* DIM3: Device Interrupt Mask Register, CPU3. */ s->cchip.dim[3] = val; cpu_irq_change(s->cchip.cpu[3], val & s->cchip.drir); break; case 0x0680: /* DIR2 (RO) */ case 0x06c0: /* DIR3 (RO) */ break; case 0x0700: /* IIC2 */ s->cchip.iic[2] = val & 0xffffff; break; case 0x0740: /* IIC3 */ s->cchip.iic[3] = val & 0xffffff; break; case 0x0780: /* PWR: Power Management Control. */ break; case 0x0c00: /* CMONCTLA */ case 0x0c40: /* CMONCTLB */ case 0x0c80: /* CMONCNT01 */ case 0x0cc0: /* CMONCNT23 */ break; default: cpu_unassigned_access(current_cpu, addr, true, false, 0, size); return; } }"} {"target": 0, "idx": 20368, "func": "static void exynos4210_combiner_write(void *opaque, target_phys_addr_t offset, uint64_t val, unsigned size) { struct Exynos4210CombinerState *s = (struct Exynos4210CombinerState *)opaque; uint32_t req_quad_base_n; /* Base of registers quad. 
Multiply it by 4 and get a start of corresponding group quad */ uint32_t grp_quad_base_n; /* Base of group quad */ uint32_t reg_n; /* Register number inside the quad */ req_quad_base_n = offset >> 4; grp_quad_base_n = req_quad_base_n << 2; reg_n = (offset - (req_quad_base_n << 4)) >> 2; if (req_quad_base_n >= IIC_NGRP) { hw_error(\"exynos4210.combiner: unallowed write access at offset 0x\" TARGET_FMT_plx \"\\n\", offset); return; } if (reg_n > 1) { hw_error(\"exynos4210.combiner: unallowed write access at offset 0x\" TARGET_FMT_plx \"\\n\", offset); return; } if (offset >> 2 >= IIC_REGSET_SIZE) { hw_error(\"exynos4210.combiner: overflow of reg_set by 0x\" TARGET_FMT_plx \"offset\\n\", offset); } s->reg_set[offset >> 2] = val; switch (reg_n) { /* IIESR */ case 0: /* FIXME: what if irq is pending, allowed by mask, and we allow it * again. Interrupt will rise again! */ DPRINTF(\"%s enable IRQ for groups %d, %d, %d, %d\\n\", s->external ? \"EXT\" : \"INT\", grp_quad_base_n, grp_quad_base_n + 1, grp_quad_base_n + 2, grp_quad_base_n + 3); /* Enable interrupt sources */ s->group[grp_quad_base_n].src_mask |= val & 0xFF; s->group[grp_quad_base_n + 1].src_mask |= (val & 0xFF00) >> 8; s->group[grp_quad_base_n + 2].src_mask |= (val & 0xFF0000) >> 16; s->group[grp_quad_base_n + 3].src_mask |= (val & 0xFF000000) >> 24; exynos4210_combiner_update(s, grp_quad_base_n); exynos4210_combiner_update(s, grp_quad_base_n + 1); exynos4210_combiner_update(s, grp_quad_base_n + 2); exynos4210_combiner_update(s, grp_quad_base_n + 3); break; /* IIECR */ case 1: DPRINTF(\"%s disable IRQ for groups %d, %d, %d, %d\\n\", s->external ? \"EXT\" : \"INT\", grp_quad_base_n, grp_quad_base_n + 1, grp_quad_base_n + 2, grp_quad_base_n + 3); /* Disable interrupt sources */ s->group[grp_quad_base_n].src_mask &= ~(val & 0xFF); s->group[grp_quad_base_n + 1].src_mask &= ~((val & 0xFF00) >> 8); s->group[grp_quad_base_n + 2].src_mask &= ~((val & 0xFF0000) >> 16); s->group[grp_quad_base_n + 3].src_mask &= ~((val & 0xFF000000) >> 24); exynos4210_combiner_update(s, grp_quad_base_n); exynos4210_combiner_update(s, grp_quad_base_n + 1); exynos4210_combiner_update(s, grp_quad_base_n + 2); exynos4210_combiner_update(s, grp_quad_base_n + 3); break; default: hw_error(\"exynos4210.combiner: unallowed write access at offset 0x\" TARGET_FMT_plx \"\\n\", offset); break; } }"} {"target": 0, "idx": 20370, "func": "static void bt_submit_acl(struct HCIInfo *info, const uint8_t *data, int length) { struct bt_hci_s *hci = hci_from_info(info); uint16_t handle; int datalen, flags; struct bt_link_s *link; if (length < HCI_ACL_HDR_SIZE) { fprintf(stderr, \"%s: ACL packet too short (%iB)\\n\", __FUNCTION__, length); return; } handle = acl_handle((data[1] << 8) | data[0]); flags = acl_flags((data[1] << 8) | data[0]); datalen = (data[3] << 8) | data[2]; data += HCI_ACL_HDR_SIZE; length -= HCI_ACL_HDR_SIZE; if (bt_hci_handle_bad(hci, handle)) { fprintf(stderr, \"%s: invalid ACL handle %03x\\n\", __FUNCTION__, handle); /* TODO: signal an error */ return; } handle &= ~HCI_HANDLE_OFFSET; if (datalen > length) { fprintf(stderr, \"%s: ACL packet too short (%iB < %iB)\\n\", __FUNCTION__, length, datalen); return; } link = hci->lm.handle[handle].link; if ((flags & ~3) == ACL_ACTIVE_BCAST) { if (!hci->asb_handle) hci->asb_handle = handle; else if (handle != hci->asb_handle) { fprintf(stderr, \"%s: Bad handle %03x in Active Slave Broadcast\\n\", __FUNCTION__, handle); /* TODO: signal an error */ return; } /* TODO */ } if ((flags & ~3) == ACL_PICO_BCAST) { if 
(!hci->psb_handle) hci->psb_handle = handle; else if (handle != hci->psb_handle) { fprintf(stderr, \"%s: Bad handle %03x in Parked Slave Broadcast\\n\", __FUNCTION__, handle); /* TODO: signal an error */ return; } /* TODO */ } /* TODO: increase counter and send EVT_NUM_COMP_PKTS */ bt_hci_event_num_comp_pkts(hci, handle | HCI_HANDLE_OFFSET, 1); /* Do this last as it can trigger further events even in this HCI */ hci->lm.handle[handle].lmp_acl_data(link, data, (flags & 3) == ACL_START, length); }"} {"target": 0, "idx": 20398, "func": "static int nbd_co_flush(BlockDriverState *bs) { BDRVNBDState *s = bs->opaque; return nbd_client_session_co_flush(&s->client); }"} {"target": 0, "idx": 20410, "func": "static int qiov_is_aligned(QEMUIOVector *qiov) { int i; for (i = 0; i < qiov->niov; i++) { if ((uintptr_t) qiov->iov[i].iov_base % BDRV_SECTOR_SIZE) { return 0; } } return 1; }"} {"target": 1, "idx": 20422, "func": "static int seqvideo_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; SeqVideoContext *seq = avctx->priv_data; seq->frame.reference = 1; seq->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &seq->frame)) { av_log(seq->avctx, AV_LOG_ERROR, \"tiertexseqvideo: reget_buffer() failed\\n\"); return -1; } seqvideo_decode(seq, buf, buf_size); *data_size = sizeof(AVFrame); *(AVFrame *)data = seq->frame; return buf_size; }"} {"target": 1, "idx": 20425, "func": "static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf, size_t len) { IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc)); IMXFECBufDesc bd; uint32_t flags = 0; uint32_t addr; uint32_t crc; uint32_t buf_addr; uint8_t *crc_ptr; unsigned int buf_len; size_t size = len; FEC_PRINTF(\"len %d\\n\", (int)size); if (!s->regs[ENET_RDAR]) { qemu_log_mask(LOG_GUEST_ERROR, \"[%s]%s: Unexpected packet\\n\", TYPE_IMX_FEC, __func__); return 0; } /* 4 bytes for the CRC. */ size += 4; crc = cpu_to_be32(crc32(~0, buf, size)); crc_ptr = (uint8_t *) &crc; /* Huge frames are truncated. */ if (size > ENET_MAX_FRAME_SIZE) { size = ENET_MAX_FRAME_SIZE; flags |= ENET_BD_TR | ENET_BD_LG; } /* Frames larger than the user limit just set error flags. */ if (size > (s->regs[ENET_RCR] >> 16)) { flags |= ENET_BD_LG; } addr = s->rx_descriptor; while (size > 0) { imx_fec_read_bd(&bd, addr); if ((bd.flags & ENET_BD_E) == 0) { /* No descriptors available. Bail out. */ /* * FIXME: This is wrong. We should probably either * save the remainder for when more RX buffers are * available, or flag an error. */ qemu_log_mask(LOG_GUEST_ERROR, \"[%s]%s: Lost end of frame\\n\", TYPE_IMX_FEC, __func__); break; } buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR]; bd.length = buf_len; size -= buf_len; FEC_PRINTF(\"rx_bd 0x%x length %d\\n\", addr, bd.length); /* The last 4 bytes are the CRC. */ if (size < 4) { buf_len += size - 4; } buf_addr = bd.data; dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); buf += buf_len; if (size < 4) { dma_memory_write(&address_space_memory, buf_addr + buf_len, crc_ptr, 4 - size); crc_ptr += 4 - size; } bd.flags &= ~ENET_BD_E; if (size == 0) { /* Last buffer in frame. */ bd.flags |= flags | ENET_BD_L; FEC_PRINTF(\"rx frame flags %04x\\n\", bd.flags); s->regs[ENET_EIR] |= ENET_INT_RXF; } else { s->regs[ENET_EIR] |= ENET_INT_RXB; } imx_fec_write_bd(&bd, addr); /* Advance to the next descriptor. 
*/ if ((bd.flags & ENET_BD_W) != 0) { addr = s->regs[ENET_RDSR]; } else { addr += sizeof(bd); } } s->rx_descriptor = addr; imx_eth_enable_rx(s); imx_eth_update(s); return len; }"} {"target": 0, "idx": 20433, "func": "static void breakpoint_handler(CPUState *env) { CPUBreakpoint *bp; if (env->watchpoint_hit) { if (env->watchpoint_hit->flags & BP_CPU) { env->watchpoint_hit = NULL; if (check_hw_breakpoints(env, 0)) raise_exception(EXCP01_DB); else cpu_resume_from_signal(env, NULL); } } else { TAILQ_FOREACH(bp, &env->breakpoints, entry) if (bp->pc == env->eip) { if (bp->flags & BP_CPU) { check_hw_breakpoints(env, 1); raise_exception(EXCP01_DB); } break; } } if (prev_debug_excp_handler) prev_debug_excp_handler(env); }"} {"target": 0, "idx": 20434, "func": "static gboolean gd_button_event(GtkWidget *widget, GdkEventButton *button, void *opaque) { VirtualConsole *vc = opaque; GtkDisplayState *s = vc->s; InputButton btn; /* implicitly grab the input at the first click in the relative mode */ if (button->button == 1 && button->type == GDK_BUTTON_PRESS && !qemu_input_is_absolute() && !gd_is_grab_active(s)) { gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->grab_item), TRUE); return TRUE; } if (button->button == 1) { btn = INPUT_BUTTON_LEFT; } else if (button->button == 2) { btn = INPUT_BUTTON_MIDDLE; } else if (button->button == 3) { btn = INPUT_BUTTON_RIGHT; } else { return TRUE; } qemu_input_queue_btn(vc->gfx.dcl.con, btn, button->type == GDK_BUTTON_PRESS); qemu_input_event_sync(); return TRUE; }"} {"target": 1, "idx": 20457, "func": "static void * attribute_align_arg worker(void *v){ AVCodecContext *avctx = v; ThreadContext *c = avctx->internal->frame_thread_encoder; AVPacket *pkt = NULL; while(!c->exit){ int got_packet, ret; AVFrame *frame; Task task; if(!pkt) pkt= av_mallocz(sizeof(*pkt)); if(!pkt) continue; av_init_packet(pkt); pthread_mutex_lock(&c->task_fifo_mutex); while (av_fifo_size(c->task_fifo) <= 0 || c->exit) { if(c->exit){ pthread_mutex_unlock(&c->task_fifo_mutex); goto end; } pthread_cond_wait(&c->task_fifo_cond, &c->task_fifo_mutex); } av_fifo_generic_read(c->task_fifo, &task, sizeof(task), NULL); pthread_mutex_unlock(&c->task_fifo_mutex); frame = task.indata; ret = avcodec_encode_video2(avctx, pkt, frame, &got_packet); pthread_mutex_lock(&c->buffer_mutex); av_frame_unref(frame); pthread_mutex_unlock(&c->buffer_mutex); av_frame_free(&frame); if(got_packet) { int ret2 = av_dup_packet(pkt); if (ret >= 0 && ret2 < 0) ret = ret2; } else { pkt->data = NULL; pkt->size = 0; } pthread_mutex_lock(&c->finished_task_mutex); c->finished_tasks[task.index].outdata = pkt; pkt = NULL; c->finished_tasks[task.index].return_code = ret; pthread_cond_signal(&c->finished_task_cond); pthread_mutex_unlock(&c->finished_task_mutex); } end: av_free(pkt); pthread_mutex_lock(&c->buffer_mutex); avcodec_close(avctx); pthread_mutex_unlock(&c->buffer_mutex); av_freep(&avctx); return NULL; }"} {"target": 1, "idx": 20467, "func": "static void _decode_opc(DisasContext * ctx) { /* This code tries to make movcal emulation sufficiently accurate for Linux purposes. This instruction writes memory, and prior to that, always allocates a cache line. It is used in two contexts: - in memcpy, where data is copied in blocks, the first write of to a block uses movca.l for performance. - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used to flush the cache. Here, the data written by movcal.l is never written to memory, and the data written is just bogus. 
To simulate this, we simulate movcal.l, we store the value to memory, but we also remember the previous content. If we see ocbi, we check if movcal.l for that address was done previously. If so, the write should not have hit the memory, so we restore the previous content. When we see an instruction that is neither movca.l nor ocbi, the previous content is discarded. To optimize, we only try to flush stores when we're at the start of TB, or if we already saw movca.l in this TB and did not flush stores yet. */ if (ctx->has_movcal) { int opcode = ctx->opcode & 0xf0ff; if (opcode != 0x0093 /* ocbi */ && opcode != 0x00c3 /* movca.l */) { gen_helper_discard_movcal_backup (); ctx->has_movcal = 0; } } #if 0 fprintf(stderr, \"Translating opcode 0x%04x\\n\", ctx->opcode); #endif switch (ctx->opcode) { case 0x0019: /* div0u */ tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T)); return; case 0x000b: /* rts */ CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0028: /* clrmac */ tcg_gen_movi_i32(cpu_mach, 0); tcg_gen_movi_i32(cpu_macl, 0); return; case 0x0048: /* clrs */ tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S); return; case 0x0008: /* clrt */ gen_clr_t(); return; case 0x0038: /* ldtlb */ CHECK_PRIVILEGED gen_helper_ldtlb(); return; case 0x002b: /* rte */ CHECK_PRIVILEGED CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_sr, cpu_ssr); tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0058: /* sets */ tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S); return; case 0x0018: /* sett */ gen_set_t(); return; case 0xfbfd: /* frchg */ tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR); ctx->bstate = BS_STOP; return; case 0xf3fd: /* fschg */ tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ); ctx->bstate = BS_STOP; return; case 0x0009: /* nop */ return; case 0x001b: /* sleep */ CHECK_PRIVILEGED gen_helper_sleep(tcg_const_i32(ctx->pc + 2)); return; } switch (ctx->opcode & 0xf000) { case 0x1000: /* mov.l Rm,@(disp,Rn) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4); tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x5000: /* mov.l @(disp,Rm),Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4); tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xe000: /* mov #imm,Rn */ tcg_gen_movi_i32(REG(B11_8), B7_0s); return; case 0x9000: /* mov.w @(disp,PC),Rn */ { TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2); tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xd000: /* mov.l @(disp,PC),Rn */ { TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3); tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x7000: /* add #imm,Rn */ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s); return; case 0xa000: /* bra disp */ CHECK_NOT_DELAY_SLOT ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2; tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc); ctx->flags |= DELAY_SLOT; return; case 0xb000: /* bsr disp */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->pc + 4); ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2; tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc); ctx->flags |= DELAY_SLOT; return; } switch (ctx->opcode & 0xf00f) { case 0x6003: /* mov Rm,Rn */ tcg_gen_mov_i32(REG(B11_8), REG(B7_4)); return; case 0x2000: /* mov.b Rm,@Rn */ tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx); return; case 0x2001: /* 
mov.w Rm,@Rn */ tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx); return; case 0x2002: /* mov.l Rm,@Rn */ tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx); return; case 0x6000: /* mov.b @Rm,Rn */ tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx); return; case 0x6001: /* mov.w @Rm,Rn */ tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx); return; case 0x6002: /* mov.l @Rm,Rn */ tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx); return; case 0x2004: /* mov.b Rm,@-Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 1); tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */ tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */ tcg_temp_free(addr); } return; case 0x2005: /* mov.w Rm,@-Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 2); tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx); tcg_gen_mov_i32(REG(B11_8), addr); tcg_temp_free(addr); } return; case 0x2006: /* mov.l Rm,@-Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx); tcg_gen_mov_i32(REG(B11_8), addr); } return; case 0x6004: /* mov.b @Rm+,Rn */ tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx); if ( B11_8 != B7_4 ) tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1); return; case 0x6005: /* mov.w @Rm+,Rn */ tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx); if ( B11_8 != B7_4 ) tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2); return; case 0x6006: /* mov.l @Rm+,Rn */ tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx); if ( B11_8 != B7_4 ) tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); return; case 0x0004: /* mov.b Rm,@(R0,Rn) */ { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x0005: /* mov.w Rm,@(R0,Rn) */ { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x0006: /* mov.l Rm,@(R0,Rn) */ { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x000c: /* mov.b @(R0,Rm),Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x000d: /* mov.w @(R0,Rm),Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x000e: /* mov.l @(R0,Rm),Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x6008: /* swap.b Rm,Rn */ { TCGv high, low; high = tcg_temp_new(); tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000); low = tcg_temp_new(); tcg_gen_ext16u_i32(low, REG(B7_4)); tcg_gen_bswap16_i32(low, low); tcg_gen_or_i32(REG(B11_8), high, low); tcg_temp_free(low); tcg_temp_free(high); } return; case 0x6009: /* swap.w Rm,Rn */ { TCGv high, low; high = tcg_temp_new(); tcg_gen_shli_i32(high, REG(B7_4), 16); low = tcg_temp_new(); tcg_gen_shri_i32(low, REG(B7_4), 16); tcg_gen_ext16u_i32(low, low); tcg_gen_or_i32(REG(B11_8), high, low); tcg_temp_free(low); tcg_temp_free(high); } return; case 0x200d: /* xtrct Rm,Rn */ { TCGv high, low; high = tcg_temp_new(); tcg_gen_shli_i32(high, REG(B7_4), 16); low = tcg_temp_new(); tcg_gen_shri_i32(low, REG(B11_8), 16); 
tcg_gen_ext16u_i32(low, low); tcg_gen_or_i32(REG(B11_8), high, low); tcg_temp_free(low); tcg_temp_free(high); } return; case 0x300c: /* add Rm,Rn */ tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x300e: /* addc Rm,Rn */ gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x300f: /* addv Rm,Rn */ gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x2009: /* and Rm,Rn */ tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x3000: /* cmp/eq Rm,Rn */ gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8)); return; case 0x3003: /* cmp/ge Rm,Rn */ gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8)); return; case 0x3007: /* cmp/gt Rm,Rn */ gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8)); return; case 0x3006: /* cmp/hi Rm,Rn */ gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8)); return; case 0x3002: /* cmp/hs Rm,Rn */ gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8)); return; case 0x200c: /* cmp/str Rm,Rn */ { int label1 = gen_new_label(); int label2 = gen_new_label(); TCGv cmp1 = tcg_temp_local_new(); TCGv cmp2 = tcg_temp_local_new(); tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8)); tcg_gen_andi_i32(cmp2, cmp1, 0xff000000); tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1); tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000); tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1); tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00); tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1); tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff); tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1); tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T); tcg_gen_br(label2); gen_set_label(label1); tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T); gen_set_label(label2); tcg_temp_free(cmp2); tcg_temp_free(cmp1); } return; case 0x2007: /* div0s Rm,Rn */ { gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */ gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */ TCGv val = tcg_temp_new(); tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8)); gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */ tcg_temp_free(val); } return; case 0x3004: /* div1 Rm,Rn */ gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x300d: /* dmuls.l Rm,Rn */ { TCGv_i64 tmp1 = tcg_temp_new_i64(); TCGv_i64 tmp2 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(tmp1, REG(B7_4)); tcg_gen_ext_i32_i64(tmp2, REG(B11_8)); tcg_gen_mul_i64(tmp1, tmp1, tmp2); tcg_gen_trunc_i64_i32(cpu_macl, tmp1); tcg_gen_shri_i64(tmp1, tmp1, 32); tcg_gen_trunc_i64_i32(cpu_mach, tmp1); tcg_temp_free_i64(tmp2); tcg_temp_free_i64(tmp1); } return; case 0x3005: /* dmulu.l Rm,Rn */ { TCGv_i64 tmp1 = tcg_temp_new_i64(); TCGv_i64 tmp2 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(tmp1, REG(B7_4)); tcg_gen_extu_i32_i64(tmp2, REG(B11_8)); tcg_gen_mul_i64(tmp1, tmp1, tmp2); tcg_gen_trunc_i64_i32(cpu_macl, tmp1); tcg_gen_shri_i64(tmp1, tmp1, 32); tcg_gen_trunc_i64_i32(cpu_mach, tmp1); tcg_temp_free_i64(tmp2); tcg_temp_free_i64(tmp1); } return; case 0x600e: /* exts.b Rm,Rn */ tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4)); return; case 0x600f: /* exts.w Rm,Rn */ tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4)); return; case 0x600c: /* extu.b Rm,Rn */ tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4)); return; case 0x600d: /* extu.w Rm,Rn */ tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4)); return; case 0x000f: /* mac.l @Rm+,@Rn+ */ { TCGv arg0, arg1; arg0 = tcg_temp_new(); tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx); arg1 = tcg_temp_new(); tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx); gen_helper_macl(arg0, arg1); tcg_temp_free(arg1); tcg_temp_free(arg0); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); } return; 
case 0x400f: /* mac.w @Rm+,@Rn+ */ { TCGv arg0, arg1; arg0 = tcg_temp_new(); tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx); arg1 = tcg_temp_new(); tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx); gen_helper_macw(arg0, arg1); tcg_temp_free(arg1); tcg_temp_free(arg0); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2); } return; case 0x0007: /* mul.l Rm,Rn */ tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8)); return; case 0x200f: /* muls.w Rm,Rn */ { TCGv arg0, arg1; arg0 = tcg_temp_new(); tcg_gen_ext16s_i32(arg0, REG(B7_4)); arg1 = tcg_temp_new(); tcg_gen_ext16s_i32(arg1, REG(B11_8)); tcg_gen_mul_i32(cpu_macl, arg0, arg1); tcg_temp_free(arg1); tcg_temp_free(arg0); } return; case 0x200e: /* mulu.w Rm,Rn */ { TCGv arg0, arg1; arg0 = tcg_temp_new(); tcg_gen_ext16u_i32(arg0, REG(B7_4)); arg1 = tcg_temp_new(); tcg_gen_ext16u_i32(arg1, REG(B11_8)); tcg_gen_mul_i32(cpu_macl, arg0, arg1); tcg_temp_free(arg1); tcg_temp_free(arg0); } return; case 0x600b: /* neg Rm,Rn */ tcg_gen_neg_i32(REG(B11_8), REG(B7_4)); return; case 0x600a: /* negc Rm,Rn */ gen_helper_negc(REG(B11_8), REG(B7_4)); return; case 0x6007: /* not Rm,Rn */ tcg_gen_not_i32(REG(B11_8), REG(B7_4)); return; case 0x200b: /* or Rm,Rn */ tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x400c: /* shad Rm,Rn */ { int label1 = gen_new_label(); int label2 = gen_new_label(); int label3 = gen_new_label(); int label4 = gen_new_label(); TCGv shift; tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1); /* Rm positive, shift to the left */ shift = tcg_temp_new(); tcg_gen_andi_i32(shift, REG(B7_4), 0x1f); tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift); tcg_temp_free(shift); tcg_gen_br(label4); /* Rm negative, shift to the right */ gen_set_label(label1); shift = tcg_temp_new(); tcg_gen_andi_i32(shift, REG(B7_4), 0x1f); tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2); tcg_gen_not_i32(shift, REG(B7_4)); tcg_gen_andi_i32(shift, shift, 0x1f); tcg_gen_addi_i32(shift, shift, 1); tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift); tcg_temp_free(shift); tcg_gen_br(label4); /* Rm = -32 */ gen_set_label(label2); tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3); tcg_gen_movi_i32(REG(B11_8), 0); tcg_gen_br(label4); gen_set_label(label3); tcg_gen_movi_i32(REG(B11_8), 0xffffffff); gen_set_label(label4); } return; case 0x400d: /* shld Rm,Rn */ { int label1 = gen_new_label(); int label2 = gen_new_label(); int label3 = gen_new_label(); TCGv shift; tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1); /* Rm positive, shift to the left */ shift = tcg_temp_new(); tcg_gen_andi_i32(shift, REG(B7_4), 0x1f); tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift); tcg_temp_free(shift); tcg_gen_br(label3); /* Rm negative, shift to the right */ gen_set_label(label1); shift = tcg_temp_new(); tcg_gen_andi_i32(shift, REG(B7_4), 0x1f); tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2); tcg_gen_not_i32(shift, REG(B7_4)); tcg_gen_andi_i32(shift, shift, 0x1f); tcg_gen_addi_i32(shift, shift, 1); tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift); tcg_temp_free(shift); tcg_gen_br(label3); /* Rm = -32 */ gen_set_label(label2); tcg_gen_movi_i32(REG(B11_8), 0); gen_set_label(label3); } return; case 0x3008: /* sub Rm,Rn */ tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x300a: /* subc Rm,Rn */ gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x300b: /* subv Rm,Rn */ gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x2008: /* tst Rm,Rn */ { TCGv val = tcg_temp_new(); tcg_gen_and_i32(val, 
REG(B7_4), REG(B11_8)); gen_cmp_imm(TCG_COND_EQ, val, 0); tcg_temp_free(val); } return; case 0x200a: /* xor Rm,Rn */ tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(fp, XREG(B7_4)); gen_store_fpr64(fp, XREG(B11_8)); tcg_temp_free_i64(fp); } else { tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); } return; case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_SZ) { TCGv addr_hi = tcg_temp_new(); int fr = XREG(B7_4); tcg_gen_addi_i32(addr_hi, REG(B11_8), 4); tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx); tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx); tcg_temp_free(addr_hi); } else { tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx); } return; case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_SZ) { TCGv addr_hi = tcg_temp_new(); int fr = XREG(B11_8); tcg_gen_addi_i32(addr_hi, REG(B7_4), 4); tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx); tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx); tcg_temp_free(addr_hi); } else { tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx); } return; case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_SZ) { TCGv addr_hi = tcg_temp_new(); int fr = XREG(B11_8); tcg_gen_addi_i32(addr_hi, REG(B7_4), 4); tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx); tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8); tcg_temp_free(addr_hi); } else { tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); } return; case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_SZ) { TCGv addr = tcg_temp_new_i32(); int fr = XREG(B7_4); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx); tcg_gen_subi_i32(addr, addr, 4); tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx); tcg_gen_mov_i32(REG(B11_8), addr); tcg_temp_free(addr); } else { TCGv addr; addr = tcg_temp_new_i32(); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx); tcg_gen_mov_i32(REG(B11_8), addr); tcg_temp_free(addr); } return; case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */ CHECK_FPU_ENABLED { TCGv addr = tcg_temp_new_i32(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); if (ctx->fpscr & FPSCR_SZ) { int fr = XREG(B11_8); tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx); tcg_gen_addi_i32(addr, addr, 4); tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx); } else { tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx); } tcg_temp_free(addr); } return; case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */ CHECK_FPU_ENABLED { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); if (ctx->fpscr & FPSCR_SZ) { int fr = XREG(B7_4); tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx); tcg_gen_addi_i32(addr, addr, 4); tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx); } else { tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx); } tcg_temp_free(addr); } return; case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */ case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */ case 0xf002: /* fmul Rm,Rn - FPSCR: 
R[PR,Enable.O/U/I]/W[Cause,Flag] */ case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */ case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */ case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */ { CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_PR) { TCGv_i64 fp0, fp1; if (ctx->opcode & 0x0110) break; /* illegal instruction */ fp0 = tcg_temp_new_i64(); fp1 = tcg_temp_new_i64(); gen_load_fpr64(fp0, DREG(B11_8)); gen_load_fpr64(fp1, DREG(B7_4)); switch (ctx->opcode & 0xf00f) { case 0xf000: /* fadd Rm,Rn */ gen_helper_fadd_DT(fp0, fp0, fp1); break; case 0xf001: /* fsub Rm,Rn */ gen_helper_fsub_DT(fp0, fp0, fp1); break; case 0xf002: /* fmul Rm,Rn */ gen_helper_fmul_DT(fp0, fp0, fp1); break; case 0xf003: /* fdiv Rm,Rn */ gen_helper_fdiv_DT(fp0, fp0, fp1); break; case 0xf004: /* fcmp/eq Rm,Rn */ gen_helper_fcmp_eq_DT(fp0, fp1); return; case 0xf005: /* fcmp/gt Rm,Rn */ gen_helper_fcmp_gt_DT(fp0, fp1); return; } gen_store_fpr64(fp0, DREG(B11_8)); tcg_temp_free_i64(fp0); tcg_temp_free_i64(fp1); } else { switch (ctx->opcode & 0xf00f) { case 0xf000: /* fadd Rm,Rn */ gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); break; case 0xf001: /* fsub Rm,Rn */ gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); break; case 0xf002: /* fmul Rm,Rn */ gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); break; case 0xf003: /* fdiv Rm,Rn */ gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); break; case 0xf004: /* fcmp/eq Rm,Rn */ gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); return; case 0xf005: /* fcmp/gt Rm,Rn */ gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); return; } } } return; case 0xf00e: /* fmac FR0,RM,Rn */ { CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_PR) { break; /* illegal instruction */ } else { gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]); return; } } } switch (ctx->opcode & 0xff00) { case 0xc900: /* and #imm,R0 */ tcg_gen_andi_i32(REG(0), REG(0), B7_0); return; case 0xcd00: /* and.b #imm,@(R0,GBR) */ { TCGv addr, val; addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(0), cpu_gbr); val = tcg_temp_new(); tcg_gen_qemu_ld8u(val, addr, ctx->memidx); tcg_gen_andi_i32(val, val, B7_0); tcg_gen_qemu_st8(val, addr, ctx->memidx); tcg_temp_free(val); tcg_temp_free(addr); } return; case 0x8b00: /* bf label */ CHECK_NOT_DELAY_SLOT gen_conditional_jump(ctx, ctx->pc + 2, ctx->pc + 4 + B7_0s * 2); ctx->bstate = BS_BRANCH; return; case 0x8f00: /* bf/s label */ CHECK_NOT_DELAY_SLOT gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0); ctx->flags |= DELAY_SLOT_CONDITIONAL; return; case 0x8900: /* bt label */ CHECK_NOT_DELAY_SLOT gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, ctx->pc + 2); ctx->bstate = BS_BRANCH; return; case 0x8d00: /* bt/s label */ CHECK_NOT_DELAY_SLOT gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1); ctx->flags |= DELAY_SLOT_CONDITIONAL; return; case 0x8800: /* cmp/eq #imm,R0 */ gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s); return; case 0xc400: /* mov.b @(disp,GBR),R0 */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0); tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc500: /* mov.w @(disp,GBR),R0 */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2); tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx); 
tcg_temp_free(addr); } return; case 0xc600: /* mov.l @(disp,GBR),R0 */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4); tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc000: /* mov.b R0,@(disp,GBR) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0); tcg_gen_qemu_st8(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc100: /* mov.w R0,@(disp,GBR) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2); tcg_gen_qemu_st16(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc200: /* mov.l R0,@(disp,GBR) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4); tcg_gen_qemu_st32(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x8000: /* mov.b R0,@(disp,Rn) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0); tcg_gen_qemu_st8(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x8100: /* mov.w R0,@(disp,Rn) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2); tcg_gen_qemu_st16(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x8400: /* mov.b @(disp,Rn),R0 */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0); tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x8500: /* mov.w @(disp,Rn),R0 */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2); tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc700: /* mova @(disp,PC),R0 */ tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3); return; case 0xcb00: /* or #imm,R0 */ tcg_gen_ori_i32(REG(0), REG(0), B7_0); return; case 0xcf00: /* or.b #imm,@(R0,GBR) */ { TCGv addr, val; addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(0), cpu_gbr); val = tcg_temp_new(); tcg_gen_qemu_ld8u(val, addr, ctx->memidx); tcg_gen_ori_i32(val, val, B7_0); tcg_gen_qemu_st8(val, addr, ctx->memidx); tcg_temp_free(val); tcg_temp_free(addr); } return; case 0xc300: /* trapa #imm */ { TCGv imm; CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pc, ctx->pc); imm = tcg_const_i32(B7_0); gen_helper_trapa(imm); tcg_temp_free(imm); ctx->bstate = BS_BRANCH; } return; case 0xc800: /* tst #imm,R0 */ { TCGv val = tcg_temp_new(); tcg_gen_andi_i32(val, REG(0), B7_0); gen_cmp_imm(TCG_COND_EQ, val, 0); tcg_temp_free(val); } return; case 0xcc00: /* tst.b #imm,@(R0,GBR) */ { TCGv val = tcg_temp_new(); tcg_gen_add_i32(val, REG(0), cpu_gbr); tcg_gen_qemu_ld8u(val, val, ctx->memidx); tcg_gen_andi_i32(val, val, B7_0); gen_cmp_imm(TCG_COND_EQ, val, 0); tcg_temp_free(val); } return; case 0xca00: /* xor #imm,R0 */ tcg_gen_xori_i32(REG(0), REG(0), B7_0); return; case 0xce00: /* xor.b #imm,@(R0,GBR) */ { TCGv addr, val; addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(0), cpu_gbr); val = tcg_temp_new(); tcg_gen_qemu_ld8u(val, addr, ctx->memidx); tcg_gen_xori_i32(val, val, B7_0); tcg_gen_qemu_st8(val, addr, ctx->memidx); tcg_temp_free(val); tcg_temp_free(addr); } return; } switch (ctx->opcode & 0xf08f) { case 0x408e: /* ldc Rm,Rn_BANK */ CHECK_PRIVILEGED tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8)); return; case 0x4087: /* ldc.l @Rm+,Rn_BANK */ CHECK_PRIVILEGED tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); return; case 0x0082: /* stc Rm_BANK,Rn */ CHECK_PRIVILEGED tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4)); return; case 0x4083: /* stc.l Rm_BANK,@-Rn */ CHECK_PRIVILEGED { TCGv addr = tcg_temp_new(); 
tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx); tcg_gen_mov_i32(REG(B11_8), addr); tcg_temp_free(addr); } return; } switch (ctx->opcode & 0xf0ff) { case 0x0023: /* braf Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0003: /* bsrf Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->pc + 4); tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x4015: /* cmp/pl Rn */ gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0); return; case 0x4011: /* cmp/pz Rn */ gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0); return; case 0x4010: /* dt Rn */ tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1); gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0); return; case 0x402b: /* jmp @Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8)); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x400b: /* jsr @Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->pc + 4); tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8)); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x400e: /* ldc Rm,SR */ CHECK_PRIVILEGED tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3); ctx->bstate = BS_STOP; return; case 0x4007: /* ldc.l @Rm+,SR */ CHECK_PRIVILEGED { TCGv val = tcg_temp_new(); tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx); tcg_gen_andi_i32(cpu_sr, val, 0x700083f3); tcg_temp_free(val); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); ctx->bstate = BS_STOP; } return; case 0x0002: /* stc SR,Rn */ CHECK_PRIVILEGED tcg_gen_mov_i32(REG(B11_8), cpu_sr); return; case 0x4003: /* stc SR,@-Rn */ CHECK_PRIVILEGED { TCGv addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx); tcg_gen_mov_i32(REG(B11_8), addr); tcg_temp_free(addr); } return; #define LD(reg,ldnum,ldpnum,prechk) \\ case ldnum: \\ prechk \\ tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \\ return; \\ case ldpnum: \\ prechk \\ tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \\ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \\ return; #define ST(reg,stnum,stpnum,prechk) \\ case stnum: \\ prechk \\ tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \\ return; \\ case stpnum: \\ prechk \\ { \\ TCGv addr = tcg_temp_new(); \\ tcg_gen_subi_i32(addr, REG(B11_8), 4); \\ tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \\ tcg_gen_mov_i32(REG(B11_8), addr); \\ tcg_temp_free(addr); \\ } \\ return; #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \\ LD(reg,ldnum,ldpnum,prechk) \\ ST(reg,stnum,stpnum,prechk) LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {}) LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED) LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED) LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED) ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED) LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;) LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED) LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {}) LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {}) LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {}) LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED}) case 0x406a: /* lds Rm,FPSCR */ CHECK_FPU_ENABLED gen_helper_ld_fpscr(REG(B11_8)); ctx->bstate = BS_STOP; return; case 0x4066: /* lds.l @Rm+,FPSCR */ CHECK_FPU_ENABLED { TCGv addr = tcg_temp_new(); tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx); tcg_gen_addi_i32(REG(B11_8), 
REG(B11_8), 4); gen_helper_ld_fpscr(addr); tcg_temp_free(addr); ctx->bstate = BS_STOP; } return; case 0x006a: /* sts FPSCR,Rn */ CHECK_FPU_ENABLED tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff); return; case 0x4062: /* sts FPSCR,@-Rn */ CHECK_FPU_ENABLED { TCGv addr, val; val = tcg_temp_new(); tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff); addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(val, addr, ctx->memidx); tcg_gen_mov_i32(REG(B11_8), addr); tcg_temp_free(addr); tcg_temp_free(val); } return; case 0x00c3: /* movca.l R0,@Rm */ { TCGv val = tcg_temp_new(); tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx); gen_helper_movcal (REG(B11_8), val); tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx); } ctx->has_movcal = 1; return; case 0x40a9: /* MOVUA.L @Rm,R0 (Rm) -> R0 Load non-boundary-aligned data */ tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx); return; case 0x40e9: /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm Load non-boundary-aligned data */ tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); return; case 0x0029: /* movt Rn */ tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T); return; case 0x0073: /* MOVCO.L LDST -> T If (T == 1) R0 -> (Rn) 0 -> LDST */ if (ctx->features & SH_FEATURE_SH4A) { int label = gen_new_label(); gen_clr_t(); tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst); tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label); tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx); gen_set_label(label); tcg_gen_movi_i32(cpu_ldst, 0); return; } else break; case 0x0063: /* MOVLI.L @Rm,R0 1 -> LDST (Rm) -> R0 When interrupt/exception occurred 0 -> LDST */ if (ctx->features & SH_FEATURE_SH4A) { tcg_gen_movi_i32(cpu_ldst, 0); tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx); tcg_gen_movi_i32(cpu_ldst, 1); return; } else break; case 0x0093: /* ocbi @Rn */ { gen_helper_ocbi (REG(B11_8)); } return; case 0x00a3: /* ocbp @Rn */ { TCGv dummy = tcg_temp_new(); tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx); tcg_temp_free(dummy); } return; case 0x00b3: /* ocbwb @Rn */ { TCGv dummy = tcg_temp_new(); tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx); tcg_temp_free(dummy); } return; case 0x0083: /* pref @Rn */ return; case 0x00d3: /* prefi @Rn */ if (ctx->features & SH_FEATURE_SH4A) return; else break; case 0x00e3: /* icbi @Rn */ if (ctx->features & SH_FEATURE_SH4A) return; else break; case 0x00ab: /* synco */ if (ctx->features & SH_FEATURE_SH4A) return; else break; case 0x4024: /* rotcl Rn */ { TCGv tmp = tcg_temp_new(); tcg_gen_mov_i32(tmp, cpu_sr); gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31); tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1); gen_copy_bit_i32(REG(B11_8), 0, tmp, 0); tcg_temp_free(tmp); } return; case 0x4025: /* rotcr Rn */ { TCGv tmp = tcg_temp_new(); tcg_gen_mov_i32(tmp, cpu_sr); gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0); tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1); gen_copy_bit_i32(REG(B11_8), 31, tmp, 0); tcg_temp_free(tmp); } return; case 0x4004: /* rotl Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31); tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1); gen_copy_bit_i32(REG(B11_8), 0, cpu_sr, 0); return; case 0x4005: /* rotr Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0); tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1); gen_copy_bit_i32(REG(B11_8), 31, cpu_sr, 0); return; case 0x4000: /* shll Rn */ case 0x4020: /* shal Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31); tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1); return; case 0x4021: /* shar Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0); 
tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1); return; case 0x4001: /* shlr Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0); tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1); return; case 0x4008: /* shll2 Rn */ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2); return; case 0x4018: /* shll8 Rn */ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8); return; case 0x4028: /* shll16 Rn */ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16); return; case 0x4009: /* shlr2 Rn */ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2); return; case 0x4019: /* shlr8 Rn */ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8); return; case 0x4029: /* shlr16 Rn */ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16); return; case 0x401b: /* tas.b @Rn */ { TCGv addr, val; addr = tcg_temp_local_new(); tcg_gen_mov_i32(addr, REG(B11_8)); val = tcg_temp_local_new(); tcg_gen_qemu_ld8u(val, addr, ctx->memidx); gen_cmp_imm(TCG_COND_EQ, val, 0); tcg_gen_ori_i32(val, val, 0x80); tcg_gen_qemu_st8(val, addr, ctx->memidx); tcg_temp_free(val); tcg_temp_free(addr); } return; case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */ CHECK_FPU_ENABLED tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul); return; case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */ CHECK_FPU_ENABLED tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]); return; case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_PR) { TCGv_i64 fp; if (ctx->opcode & 0x0100) break; /* illegal instruction */ fp = tcg_temp_new_i64(); gen_helper_float_DT(fp, cpu_fpul); gen_store_fpr64(fp, DREG(B11_8)); tcg_temp_free_i64(fp); } else { gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul); } return; case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_PR) { TCGv_i64 fp; if (ctx->opcode & 0x0100) break; /* illegal instruction */ fp = tcg_temp_new_i64(); gen_load_fpr64(fp, DREG(B11_8)); gen_helper_ftrc_DT(cpu_fpul, fp); tcg_temp_free_i64(fp); } else { gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]); } return; case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */ CHECK_FPU_ENABLED { gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]); } return; case 0xf05d: /* fabs FRn/DRn */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_PR) { if (ctx->opcode & 0x0100) break; /* illegal instruction */ TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(fp, DREG(B11_8)); gen_helper_fabs_DT(fp, fp); gen_store_fpr64(fp, DREG(B11_8)); tcg_temp_free_i64(fp); } else { gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]); } return; case 0xf06d: /* fsqrt FRn */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_PR) { if (ctx->opcode & 0x0100) break; /* illegal instruction */ TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(fp, DREG(B11_8)); gen_helper_fsqrt_DT(fp, fp); gen_store_fpr64(fp, DREG(B11_8)); tcg_temp_free_i64(fp); } else { gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]); } return; case 0xf07d: /* fsrra FRn */ CHECK_FPU_ENABLED break; case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */ CHECK_FPU_ENABLED if (!(ctx->fpscr & FPSCR_PR)) { tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0); } return; case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */ CHECK_FPU_ENABLED if (!(ctx->fpscr & FPSCR_PR)) { tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000); } return; case 0xf0ad: /* fcnvsd FPUL,DRn */ CHECK_FPU_ENABLED { TCGv_i64 fp = tcg_temp_new_i64(); gen_helper_fcnvsd_FT_DT(fp, cpu_fpul); gen_store_fpr64(fp, DREG(B11_8)); tcg_temp_free_i64(fp); } return; case 0xf0bd: /* fcnvds DRn,FPUL */ CHECK_FPU_ENABLED { TCGv_i64 fp = 
tcg_temp_new_i64(); gen_load_fpr64(fp, DREG(B11_8)); gen_helper_fcnvds_DT_FT(cpu_fpul, fp); tcg_temp_free_i64(fp); } return; } #if 0 fprintf(stderr, \"unknown instruction 0x%04x at pc 0x%08x\\n\", ctx->opcode, ctx->pc); fflush(stderr); #endif gen_helper_raise_illegal_instruction(); ctx->bstate = BS_EXCP; }"} {"target": 1, "idx": 20471, "func": "void wm8750_set_bclk_in(void *opaque, int hz) { struct wm8750_s *s = (struct wm8750_s *) opaque; s->ext_adc_hz = hz; s->ext_dac_hz = hz; wm8750_clk_update(s, 1); }"} {"target": 1, "idx": 20480, "func": "static void pmac_dma_write(BlockBackend *blk, int64_t sector_num, int nb_sectors, void (*cb)(void *opaque, int ret), void *opaque) { DBDMA_io *io = opaque; MACIOIDEState *m = io->opaque; IDEState *s = idebus_active_if(&m->bus); dma_addr_t dma_addr, dma_len; void *mem; int nsector, remainder; int extra = 0; qemu_iovec_destroy(&io->iov); qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1); if (io->remainder_len > 0) { /* Return remainder of request */ int transfer = MIN(io->remainder_len, io->len); MACIO_DPRINTF(\"--- processing write remainder %x\\n\", transfer); cpu_physical_memory_read(io->addr, &io->remainder + (0x200 - transfer), transfer); io->remainder_len -= transfer; io->len -= transfer; io->addr += transfer; s->io_buffer_index += transfer; s->io_buffer_size -= transfer; if (io->remainder_len != 0) { /* Still waiting for remainder */ return; } MACIO_DPRINTF(\"--> prepending bounce buffer with size 0x200\\n\"); /* Sector transfer complete - prepend to request */ qemu_iovec_add(&io->iov, &io->remainder, 0x200); extra = 1; } if (s->drive_kind == IDE_CD) { sector_num = (int64_t)(s->lba << 2) + (s->io_buffer_index >> 9); } else { sector_num = ide_get_sector(s) + (s->io_buffer_index >> 9); } nsector = (io->len >> 9); remainder = io->len - (nsector << 9); MACIO_DPRINTF(\"--- DMA write transfer - addr: %\" HWADDR_PRIx \" len: %x\\n\", io->addr, io->len); MACIO_DPRINTF(\"xxx remainder: %x\\n\", remainder); MACIO_DPRINTF(\"xxx sector_num: %\"PRIx64\" nsector: %x\\n\", sector_num, nsector); dma_addr = io->addr; dma_len = io->len; mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len, DMA_DIRECTION_TO_DEVICE); if (!remainder) { MACIO_DPRINTF(\"--- DMA write aligned - addr: %\" HWADDR_PRIx \" len: %x\\n\", io->addr, io->len); qemu_iovec_add(&io->iov, mem, io->len); } else { /* Write up to last complete sector */ MACIO_DPRINTF(\"--- DMA write unaligned - addr: %\" HWADDR_PRIx \" len: %x\\n\", io->addr, (nsector << 9)); qemu_iovec_add(&io->iov, mem, (nsector << 9)); MACIO_DPRINTF(\"--- DMA write read - bounce addr: %p \" \"remainder_len: %x\\n\", &io->remainder, remainder); cpu_physical_memory_read(io->addr + (nsector << 9), &io->remainder, remainder); io->remainder_len = 0x200 - remainder; MACIO_DPRINTF(\"xxx remainder_len: %x\\n\", io->remainder_len); } s->io_buffer_size -= ((nsector + extra) << 9); s->io_buffer_index += ((nsector + extra) << 9); io->len = 0; MACIO_DPRINTF(\"--- Block write transfer - sector_num: %\"PRIx64\" \" \"nsector: %x\\n\", sector_num, nsector + extra); m->aiocb = blk_aio_writev(blk, sector_num, &io->iov, nsector + extra, cb, io); }"} {"target": 0, "idx": 20485, "func": "static void bdrv_delete(BlockDriverState *bs) { assert(!bs->job); assert(bdrv_op_blocker_is_empty(bs)); assert(!bs->refcnt); assert(QLIST_EMPTY(&bs->dirty_bitmaps)); bdrv_close(bs); /* remove from list, if necessary */ bdrv_make_anon(bs); g_free(bs); }"} {"target": 0, "idx": 20495, "func": "static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, 
IOMMUNotifier *n) { VTDAddressSpace *vtd_as = container_of(iommu_mr, VTDAddressSpace, iommu); IntelIOMMUState *s = vtd_as->iommu_state; uint8_t bus_n = pci_bus_num(vtd_as->bus); VTDContextEntry ce; /* * The replay can be triggered by either a invalidation or a newly * created entry. No matter what, we release existing mappings * (it means flushing caches for UNMAP-only registers). */ vtd_address_space_unmap(vtd_as, n); if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) { trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn), PCI_FUNC(vtd_as->devfn), VTD_CONTEXT_ENTRY_DID(ce.hi), ce.hi, ce.lo); vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false); } else { trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), PCI_FUNC(vtd_as->devfn)); } return; }"} {"target": 0, "idx": 20507, "func": "static inline int sd_wp_addr(SDState *sd, uint32_t addr) { return sd->wp_groups[addr >> (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT)]; }"} {"target": 0, "idx": 20518, "func": "static void console_handle_escape(QemuConsole *s) { int i; for (i=0; i<s->nb_esc_params; i++) { switch (s->esc_params[i]) { case 0: /* reset all console attributes to default */ s->t_attrib = s->t_attrib_default; break; case 1: s->t_attrib.bold = 1; break; case 4: s->t_attrib.uline = 1; break; case 5: s->t_attrib.blink = 1; break; case 7: s->t_attrib.invers = 1; break; case 8: s->t_attrib.unvisible = 1; break; case 22: s->t_attrib.bold = 0; break; case 24: s->t_attrib.uline = 0; break; case 25: s->t_attrib.blink = 0; break; case 27: s->t_attrib.invers = 0; break; case 28: s->t_attrib.unvisible = 0; break; /* set foreground color */ case 30: s->t_attrib.fgcol=COLOR_BLACK; break; case 31: s->t_attrib.fgcol=COLOR_RED; break; case 32: s->t_attrib.fgcol=COLOR_GREEN; break; case 33: s->t_attrib.fgcol=COLOR_YELLOW; break; case 34: s->t_attrib.fgcol=COLOR_BLUE; break; case 35: s->t_attrib.fgcol=COLOR_MAGENTA; break; case 36: s->t_attrib.fgcol=COLOR_CYAN; break; case 37: s->t_attrib.fgcol=COLOR_WHITE; break; /* set background color */ case 40: s->t_attrib.bgcol=COLOR_BLACK; break; case 41: s->t_attrib.bgcol=COLOR_RED; break; case 42: s->t_attrib.bgcol=COLOR_GREEN; break; case 43: s->t_attrib.bgcol=COLOR_YELLOW; break; case 44: s->t_attrib.bgcol=COLOR_BLUE; break; case 45: s->t_attrib.bgcol=COLOR_MAGENTA; break; case 46: s->t_attrib.bgcol=COLOR_CYAN; break; case 47: s->t_attrib.bgcol=COLOR_WHITE; break; } } }"} {"target": 0, "idx": 20520, "func": "static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, int element, TCGMemOp memop) { int vect_off = vec_reg_offset(destidx, element, memop & MO_SIZE); switch (memop) { case MO_8: tcg_gen_st8_i64(tcg_src, cpu_env, vect_off); break; case MO_16: tcg_gen_st16_i64(tcg_src, cpu_env, vect_off); break; case MO_32: tcg_gen_st32_i64(tcg_src, cpu_env, vect_off); break; case MO_64: tcg_gen_st_i64(tcg_src, cpu_env, vect_off); break; default: g_assert_not_reached(); } }"} {"target": 0, "idx": 20531, "func": "static always_inline void gen_ext_h(void (*tcg_gen_ext_i64)(TCGv t0, TCGv t1), int ra, int rb, int rc, int islit, uint8_t lit) { if (unlikely(rc == 31)) return; if (ra != 31) { if (islit) { if (lit != 0) tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], 64 - ((lit & 7) * 8)); else tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[ra]); } else { TCGv tmp1, tmp2; tmp1 = tcg_temp_new(TCG_TYPE_I64); tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7); tcg_gen_shli_i64(tmp1, tmp1, 3); tmp2 = tcg_const_i64(64); tcg_gen_sub_i64(tmp1, tmp2, tmp1); tcg_temp_free(tmp2); tcg_gen_shl_i64(cpu_ir[rc],
cpu_ir[ra], tmp1); tcg_temp_free(tmp1); } if (tcg_gen_ext_i64) tcg_gen_ext_i64(cpu_ir[rc], cpu_ir[rc]); } else tcg_gen_movi_i64(cpu_ir[rc], 0); }"} {"target": 0, "idx": 20534, "func": "void ff_h264_init_dequant_tables(H264Context *h) { int i, x; init_dequant4_coeff_table(h); if (h->pps.transform_8x8_mode) init_dequant8_coeff_table(h); if (h->sps.transform_bypass) { for (i = 0; i < 6; i++) for (x = 0; x < 16; x++) h->dequant4_coeff[i][0][x] = 1 << 6; if (h->pps.transform_8x8_mode) for (i = 0; i < 6; i++) for (x = 0; x < 64; x++) h->dequant8_coeff[i][0][x] = 1 << 6; } }"} {"target": 0, "idx": 20541, "func": "static av_cold int g722_encode_init(AVCodecContext * avctx) { G722Context *c = avctx->priv_data; int ret; if (avctx->channels != 1) { av_log(avctx, AV_LOG_ERROR, \"Only mono tracks are allowed.\\n\"); return AVERROR_INVALIDDATA; } c->band[0].scale_factor = 8; c->band[1].scale_factor = 2; c->prev_samples_pos = 22; if (avctx->trellis) { int frontier = 1 << avctx->trellis; int max_paths = frontier * FREEZE_INTERVAL; int i; for (i = 0; i < 2; i++) { c->paths[i] = av_mallocz(max_paths * sizeof(**c->paths)); c->node_buf[i] = av_mallocz(2 * frontier * sizeof(**c->node_buf)); c->nodep_buf[i] = av_mallocz(2 * frontier * sizeof(**c->nodep_buf)); if (!c->paths[i] || !c->node_buf[i] || !c->nodep_buf[i]) { ret = AVERROR(ENOMEM); goto error; } } } if (avctx->frame_size) { /* validate frame size */ if (avctx->frame_size & 1 || avctx->frame_size > MAX_FRAME_SIZE) { int new_frame_size; if (avctx->frame_size == 1) new_frame_size = 2; else if (avctx->frame_size > MAX_FRAME_SIZE) new_frame_size = MAX_FRAME_SIZE; else new_frame_size = avctx->frame_size - 1; av_log(avctx, AV_LOG_WARNING, \"Requested frame size is not \" \"allowed. Using %d instead of %d\\n\", new_frame_size, avctx->frame_size); avctx->frame_size = new_frame_size; } } else { /* This is arbitrary. We use 320 because it's 20ms @ 16kHz, which is a common packet size for VoIP applications */ avctx->frame_size = 320; } avctx->delay = 22; if (avctx->trellis) { /* validate trellis */ if (avctx->trellis < MIN_TRELLIS || avctx->trellis > MAX_TRELLIS) { int new_trellis = av_clip(avctx->trellis, MIN_TRELLIS, MAX_TRELLIS); av_log(avctx, AV_LOG_WARNING, \"Requested trellis value is not \" \"allowed. 
Using %d instead of %d\\n\", new_trellis, avctx->trellis); avctx->trellis = new_trellis; } } return 0; error: g722_encode_close(avctx); return ret; }"} {"target": 0, "idx": 20548, "func": "static uint64_t lsi_mmio_read(void *opaque, target_phys_addr_t addr, unsigned size) { LSIState *s = opaque; return lsi_reg_readb(s, addr & 0xff); }"} {"target": 0, "idx": 20549, "func": "static int cryptodev_builtin_create_cipher_session( CryptoDevBackendBuiltin *builtin, CryptoDevBackendSymSessionInfo *sess_info, Error **errp) { int algo; int mode; QCryptoCipher *cipher; int index; CryptoDevBackendBuiltinSession *sess; if (sess_info->op_type != VIRTIO_CRYPTO_SYM_OP_CIPHER) { error_setg(errp, \"Unsupported optype :%u\", sess_info->op_type); return -1; } index = cryptodev_builtin_get_unused_session_index(builtin); if (index < 0) { error_setg(errp, \"Total number of sessions created exceeds %u\", MAX_NUM_SESSIONS); return -1; } switch (sess_info->cipher_alg) { case VIRTIO_CRYPTO_CIPHER_AES_ECB: algo = cryptodev_builtin_get_aes_algo(sess_info->key_len, errp); if (algo < 0) { return -1; } mode = QCRYPTO_CIPHER_MODE_ECB; break; case VIRTIO_CRYPTO_CIPHER_AES_CBC: algo = cryptodev_builtin_get_aes_algo(sess_info->key_len, errp); if (algo < 0) { return -1; } mode = QCRYPTO_CIPHER_MODE_CBC; break; case VIRTIO_CRYPTO_CIPHER_AES_CTR: algo = cryptodev_builtin_get_aes_algo(sess_info->key_len, errp); if (algo < 0) { return -1; } mode = QCRYPTO_CIPHER_MODE_CTR; break; case VIRTIO_CRYPTO_CIPHER_DES_ECB: algo = QCRYPTO_CIPHER_ALG_DES_RFB; mode = QCRYPTO_CIPHER_MODE_ECB; break; default: error_setg(errp, \"Unsupported cipher alg :%u\", sess_info->cipher_alg); return -1; } cipher = qcrypto_cipher_new(algo, mode, sess_info->cipher_key, sess_info->key_len, errp); if (!cipher) { return -1; } sess = g_new0(CryptoDevBackendBuiltinSession, 1); sess->cipher = cipher; sess->direction = sess_info->direction; sess->type = sess_info->op_type; builtin->sessions[index] = sess; return index; }"} {"target": 0, "idx": 20553, "func": "envlist_free(envlist_t *envlist) { struct envlist_entry *entry; assert(envlist != NULL); while (envlist->el_entries.lh_first != NULL) { entry = envlist->el_entries.lh_first; LIST_REMOVE(entry, ev_link); free((char *)entry->ev_var); free(entry); } free(envlist); }"} {"target": 0, "idx": 20562, "func": "static void test_hba_enable(void) { AHCIQState *ahci; ahci = ahci_boot(); ahci_pci_enable(ahci); ahci_hba_enable(ahci); ahci_shutdown(ahci); }"} {"target": 1, "idx": 20576, "func": "static int writev_f(int argc, char **argv) { struct timeval t1, t2; int Cflag = 0, qflag = 0; int c, cnt; char *buf; int64_t offset; /* Some compilers get confused and warn if this is not initialized. 
*/ int total = 0; int nr_iov; int pattern = 0xcd; QEMUIOVector qiov; while ((c = getopt(argc, argv, \"CqP:\")) != EOF) { switch (c) { case 'C': Cflag = 1; break; case 'q': qflag = 1; break; case 'P': pattern = parse_pattern(optarg); if (pattern < 0) { return 0; } break; default: return command_usage(&writev_cmd); } } if (optind > argc - 2) { return command_usage(&writev_cmd); } offset = cvtnum(argv[optind]); if (offset < 0) { printf(\"non-numeric length argument -- %s\\n\", argv[optind]); return 0; } optind++; if (offset & 0x1ff) { printf(\"offset %\" PRId64 \" is not sector aligned\\n\", offset); return 0; } nr_iov = argc - optind; buf = create_iovec(&qiov, &argv[optind], nr_iov, pattern); if (buf == NULL) { return 0; } gettimeofday(&t1, NULL); cnt = do_aio_writev(&qiov, offset, &total); gettimeofday(&t2, NULL); if (cnt < 0) { printf(\"writev failed: %s\\n\", strerror(-cnt)); goto out; } if (qflag) { goto out; } /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, t1); print_report(\"wrote\", &t2, offset, qiov.size, total, cnt, Cflag); out: qemu_io_free(buf); return 0; }"} {"target": 0, "idx": 20581, "func": "static void RENAME(chrRangeToJpeg)(int16_t *dst, int width) { int i; for (i = 0; i < width; i++) { dst[i ] = (FFMIN(dst[i ],30775)*4663 - 9289992)>>12; //-264 dst[i+VOFW] = (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264 } }"} {"target": 1, "idx": 20600, "func": "static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac, enum ChannelPosition new_che_pos[4][MAX_ELEM_ID], GetBitContext *gb) { int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc, sampling_index; int comment_len; skip_bits(gb, 2); // object_type sampling_index = get_bits(gb, 4); if (m4ac->sampling_index != sampling_index) av_log(avctx, AV_LOG_WARNING, \"Sample rate index in program config element does not match the sample rate index configured by the container.\\n\"); num_front = get_bits(gb, 4); num_side = get_bits(gb, 4); num_back = get_bits(gb, 4); num_lfe = get_bits(gb, 2); num_assoc_data = get_bits(gb, 3); num_cc = get_bits(gb, 4); if (get_bits1(gb)) skip_bits(gb, 4); // mono_mixdown_tag if (get_bits1(gb)) skip_bits(gb, 4); // stereo_mixdown_tag if (get_bits1(gb)) skip_bits(gb, 3); // mixdown_coeff_index and pseudo_surround decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_FRONT, gb, num_front); decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_SIDE, gb, num_side ); decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_BACK, gb, num_back ); decode_channel_map(NULL, new_che_pos[TYPE_LFE], AAC_CHANNEL_LFE, gb, num_lfe ); skip_bits_long(gb, 4 * num_assoc_data); decode_channel_map(new_che_pos[TYPE_CCE], new_che_pos[TYPE_CCE], AAC_CHANNEL_CC, gb, num_cc ); align_get_bits(gb); /* comment field, first byte is length */ comment_len = get_bits(gb, 8) * 8; if (get_bits_left(gb) < comment_len) { skip_bits_long(gb, comment_len); return 0;"} {"target": 1, "idx": 20606, "func": "static int ws_snd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { WSSndContext *s = avctx->priv_data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; int in_size, out_size, ret; int sample = 128; uint8_t *samples; uint8_t *samples_end; if (!buf_size) return 0; if (buf_size < 4) { av_log(avctx, AV_LOG_ERROR, \"packet is too small\\n\"); return AVERROR(EINVAL); } out_size = AV_RL16(&buf[0]); in_size = AV_RL16(&buf[2]); buf += 4; if (in_size > buf_size) { av_log(avctx, AV_LOG_ERROR, 
\"Frame data is larger than input buffer\\n\"); return -1; } /* get output buffer */ s->frame.nb_samples = out_size; if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; } samples = s->frame.data[0]; samples_end = samples + out_size; if (in_size == out_size) { memcpy(samples, buf, out_size); *got_frame_ptr = 1; *(AVFrame *)data = s->frame; return buf_size; } while (samples < samples_end && buf - avpkt->data < buf_size) { int code, smp, size; uint8_t count; code = *buf >> 6; count = *buf & 0x3F; buf++; /* make sure we don't write past the output buffer */ switch (code) { case 0: smp = 4; break; case 1: smp = 2; break; case 2: smp = (count & 0x20) ? 1 : count + 1; break; default: smp = count + 1; break; } if (samples_end - samples < smp) break; /* make sure we don't read past the input buffer */ size = ((code == 2 && (count & 0x20)) || code == 3) ? 0 : count + 1; if ((buf - avpkt->data) + size > buf_size) break; switch (code) { case 0: /* ADPCM 2-bit */ for (count++; count > 0; count--) { code = *buf++; sample += ( code & 0x3) - 2; sample = av_clip_uint8(sample); *samples++ = sample; sample += ((code >> 2) & 0x3) - 2; sample = av_clip_uint8(sample); *samples++ = sample; sample += ((code >> 4) & 0x3) - 2; sample = av_clip_uint8(sample); *samples++ = sample; sample += (code >> 6) - 2; sample = av_clip_uint8(sample); *samples++ = sample; } break; case 1: /* ADPCM 4-bit */ for (count++; count > 0; count--) { code = *buf++; sample += ws_adpcm_4bit[code & 0xF]; sample = av_clip_uint8(sample); *samples++ = sample; sample += ws_adpcm_4bit[code >> 4]; sample = av_clip_uint8(sample); *samples++ = sample; } break; case 2: /* no compression */ if (count & 0x20) { /* big delta */ int8_t t; t = count; t <<= 3; sample += t >> 3; sample = av_clip_uint8(sample); *samples++ = sample; } else { /* copy */ memcpy(samples, buf, smp); samples += smp; buf += smp; sample = buf[-1]; } break; default: /* run */ memset(samples, sample, smp); samples += smp; } } s->frame.nb_samples = samples - s->frame.data[0]; *got_frame_ptr = 1; *(AVFrame *)data = s->frame; return buf_size; }"} {"target": 1, "idx": 20613, "func": "static av_always_inline void hyscale(SwsContext *c, int16_t *dst, int dstWidth, const uint8_t *src_in[4], int srcW, int xInc, const int16_t *hLumFilter, const int16_t *hLumFilterPos, int hLumFilterSize, uint8_t *formatConvBuffer, uint32_t *pal, int isAlpha) { void (*toYV12)(uint8_t *, const uint8_t *, int, uint32_t *) = isAlpha ? c->alpToYV12 : c->lumToYV12; void (*convertRange)(int16_t *, int) = isAlpha ? NULL : c->lumConvertRange; const uint8_t *src = src_in[isAlpha ? 
3 : 0]; if (toYV12) { toYV12(formatConvBuffer, src, srcW, pal); src= formatConvBuffer; } else if (c->readLumPlanar && !isAlpha) { c->readLumPlanar(formatConvBuffer, src_in, srcW); src = formatConvBuffer; } if (!c->hyscale_fast) { c->hyScale(c, dst, dstWidth, src, hLumFilter, hLumFilterPos, hLumFilterSize); } else { // fast bilinear upscale / crap downscale c->hyscale_fast(c, dst, dstWidth, src, srcW, xInc); } if (convertRange) convertRange(dst, dstWidth); }"} {"target": 0, "idx": 20617, "func": "static av_always_inline av_flatten void h264_loop_filter_chroma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0) { int i, d; for( i = 0; i < 4; i++ ) { const int tc = tc0[i]; if( tc <= 0 ) { pix += 2*ystride; continue; } for( d = 0; d < 2; d++ ) { const int p0 = pix[-1*xstride]; const int p1 = pix[-2*xstride]; const int q0 = pix[0]; const int q1 = pix[1*xstride]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { int delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); pix[-xstride] = av_clip_uint8( p0 + delta ); /* p0' */ pix[0] = av_clip_uint8( q0 - delta ); /* q0' */ } pix += ystride; } } }"} {"target": 0, "idx": 20618, "func": "int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb) { int startcode, v; /* search next start code */ align_get_bits(gb); startcode = 0xff; for(;;) { v = get_bits(gb, 8); startcode = ((startcode << 8) | v) & 0xffffffff; if(get_bits_count(gb) >= gb->size*8){ if(gb->size==1 && s->divx_version){ printf(\"frame skip %d\\n\", gb->size); return FRAME_SKIPED; //divx bug }else return -1; //end of stream } if((startcode&0xFFFFFF00) != 0x100) continue; //no startcode if(s->avctx->debug&FF_DEBUG_STARTCODE){ printf(\"startcode: %3X \", startcode); if (startcode<=0x11F) printf(\"Video Object Start\"); else if(startcode<=0x12F) printf(\"Video Object Layer Start\"); else if(startcode<=0x13F) printf(\"Reserved\"); else if(startcode<=0x15F) printf(\"FGS bp start\"); else if(startcode<=0x1AF) printf(\"Reserved\"); else if(startcode==0x1B0) printf(\"Visual Object Seq Start\"); else if(startcode==0x1B1) printf(\"Visual Object Seq End\"); else if(startcode==0x1B2) printf(\"User Data\"); else if(startcode==0x1B3) printf(\"Group of VOP start\"); else if(startcode==0x1B4) printf(\"Video Session Error\"); else if(startcode==0x1B5) printf(\"Visual Object Start\"); else if(startcode==0x1B6) printf(\"Video Object Plane start\"); else if(startcode==0x1B7) printf(\"slice start\"); else if(startcode==0x1B8) printf(\"extension start\"); else if(startcode==0x1B9) printf(\"fgs start\"); else if(startcode==0x1BA) printf(\"FBA Object start\"); else if(startcode==0x1BB) printf(\"FBA Object Plane start\"); else if(startcode==0x1BC) printf(\"Mesh Object start\"); else if(startcode==0x1BD) printf(\"Mesh Object Plane start\"); else if(startcode==0x1BE) printf(\"Still Textutre Object start\"); else if(startcode==0x1BF) printf(\"Textutre Spatial Layer start\"); else if(startcode==0x1C0) printf(\"Textutre SNR Layer start\"); else if(startcode==0x1C1) printf(\"Textutre Tile start\"); else if(startcode==0x1C2) printf(\"Textutre Shape Layer start\"); else if(startcode==0x1C3) printf(\"stuffing start\"); else if(startcode<=0x1C5) printf(\"reserved\"); else if(startcode<=0x1FF) printf(\"System start\"); printf(\" at %d\\n\", get_bits_count(gb)); } switch(startcode){ case 0x120: decode_vol_header(s, gb); break; case USER_DATA_STARTCODE: decode_user_data(s, gb); break; case GOP_STARTCODE: mpeg4_decode_gop_header(s, gb); 
break; case VOP_STARTCODE: return decode_vop_header(s, gb); default: break; } align_get_bits(gb); startcode = 0xff; } }"} {"target": 0, "idx": 20620, "func": "static void mpegvideo_extract_headers(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t *buf, int buf_size) { ParseContext1 *pc = s->priv_data; const uint8_t *buf_end; const uint8_t *buf_start= buf; uint32_t start_code; int frame_rate_index, ext_type, bytes_left; int frame_rate_ext_n, frame_rate_ext_d; int picture_structure, top_field_first, repeat_first_field, progressive_frame; int horiz_size_ext, vert_size_ext, bit_rate_ext; //FIXME replace the crap with get_bits() s->repeat_pict = 0; buf_end = buf + buf_size; while (buf < buf_end) { start_code= -1; buf= ff_find_start_code(buf, buf_end, &start_code); bytes_left = buf_end - buf; switch(start_code) { case PICTURE_START_CODE: ff_fetch_timestamp(s, buf-buf_start-4, 1); if (bytes_left >= 2) { s->pict_type = (buf[1] >> 3) & 7; } break; case SEQ_START_CODE: if (bytes_left >= 7) { pc->width = (buf[0] << 4) | (buf[1] >> 4); pc->height = ((buf[1] & 0x0f) << 8) | buf[2]; avcodec_set_dimensions(avctx, pc->width, pc->height); frame_rate_index = buf[3] & 0xf; pc->frame_rate.den = avctx->time_base.den = ff_frame_rate_tab[frame_rate_index].num; pc->frame_rate.num = avctx->time_base.num = ff_frame_rate_tab[frame_rate_index].den; avctx->bit_rate = ((buf[4]<<10) | (buf[5]<<2) | (buf[6]>>6))*400; avctx->codec_id = CODEC_ID_MPEG1VIDEO; avctx->sub_id = 1; } break; case EXT_START_CODE: if (bytes_left >= 1) { ext_type = (buf[0] >> 4); switch(ext_type) { case 0x1: /* sequence extension */ if (bytes_left >= 6) { horiz_size_ext = ((buf[1] & 1) << 1) | (buf[2] >> 7); vert_size_ext = (buf[2] >> 5) & 3; bit_rate_ext = ((buf[2] & 0x1F)<<7) | (buf[3]>>1); frame_rate_ext_n = (buf[5] >> 5) & 3; frame_rate_ext_d = (buf[5] & 0x1f); pc->progressive_sequence = buf[1] & (1 << 3); avctx->has_b_frames= !(buf[5] >> 7); pc->width |=(horiz_size_ext << 12); pc->height |=( vert_size_ext << 12); avctx->bit_rate += (bit_rate_ext << 18) * 400; avcodec_set_dimensions(avctx, pc->width, pc->height); avctx->time_base.den = pc->frame_rate.den * (frame_rate_ext_n + 1) * 2; avctx->time_base.num = pc->frame_rate.num * (frame_rate_ext_d + 1); avctx->codec_id = CODEC_ID_MPEG2VIDEO; avctx->sub_id = 2; /* forces MPEG2 */ } break; case 0x8: /* picture coding extension */ if (bytes_left >= 5) { picture_structure = buf[2]&3; top_field_first = buf[3] & (1 << 7); repeat_first_field = buf[3] & (1 << 1); progressive_frame = buf[4] & (1 << 7); /* check if we must repeat the frame */ s->repeat_pict = 1; if (repeat_first_field) { if (pc->progressive_sequence) { if (top_field_first) s->repeat_pict = 5; else s->repeat_pict = 3; } else if (progressive_frame) { s->repeat_pict = 2; } } } break; } } break; case -1: goto the_end; default: /* we stop parsing when we encounter a slice. 
It ensures that this function takes a negligible amount of time */ if (start_code >= SLICE_MIN_START_CODE && start_code <= SLICE_MAX_START_CODE) goto the_end; break; } } the_end: ; }"} {"target": 1, "idx": 20623, "func": "static int set_chroma_format(AVCodecContext *avctx) { int num_formats = sizeof(schro_pixel_format_map) / sizeof(schro_pixel_format_map[0]); int idx; SchroEncoderParams *p_schro_params = avctx->priv_data; for (idx = 0; idx < num_formats; ++idx) { if (schro_pixel_format_map[idx].ff_pix_fmt == avctx->pix_fmt) { p_schro_params->format->chroma_format = schro_pixel_format_map[idx].schro_pix_fmt; return 0; } } av_log(avctx, AV_LOG_ERROR, \"This codec currently only supports planar YUV 4:2:0, 4:2:2\" \" and 4:4:4 formats.\\n\"); return -1; }"} {"target": 1, "idx": 20631, "func": "IEEE_ARITH2(sqrtt) IEEE_ARITH2(cvtst) IEEE_ARITH2(cvtts) static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11) { TCGv vb, vc; /* No need to set flushzero, since we have an integer output. */ vb = gen_ieee_input(ctx, rb, fn11, 0); vc = dest_fpr(ctx, rc); /* Almost all integer conversions use cropped rounding, and most also do not have integer overflow enabled. Special case that. */ switch (fn11) { case QUAL_RM_C: gen_helper_cvttq_c(vc, cpu_env, vb); break; case QUAL_V | QUAL_RM_C: case QUAL_S | QUAL_V | QUAL_RM_C: case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C: gen_helper_cvttq_svic(vc, cpu_env, vb); break; default: gen_qual_roundmode(ctx, fn11); gen_helper_cvttq(vc, cpu_env, vb); break; } gen_fp_exc_raise(rc, fn11); }"} {"target": 1, "idx": 20643, "func": "static int virtio_serial_device_exit(DeviceState *dev) { VirtIOSerial *vser = VIRTIO_SERIAL(dev); VirtIODevice *vdev = VIRTIO_DEVICE(dev); unregister_savevm(dev, \"virtio-console\", vser); g_free(vser->ivqs); g_free(vser->ovqs); g_free(vser->ports_map); if (vser->post_load) { g_free(vser->post_load->connected); timer_del(vser->post_load->timer); timer_free(vser->post_load->timer); g_free(vser->post_load); } virtio_cleanup(vdev); return 0; }"} {"target": 1, "idx": 20644, "func": "int kvm_arch_init_vcpu(CPUState *cenv) { int ret = 0; struct kvm_sregs sregs; sregs.pvr = cenv->spr[SPR_PVR]; ret = kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs); idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_env, cenv); return ret; }"} {"target": 1, "idx": 20647, "func": "static void virtio_queue_notify_vq(VirtQueue *vq) { if (vq->vring.desc && vq->handle_output) { VirtIODevice *vdev = vq->vdev; trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); vq->handle_output(vdev, vq);"} {"target": 1, "idx": 20659, "func": "static av_cold int mp_decode_init(AVCodecContext *avctx) { MotionPixelsContext *mp = avctx->priv_data; int w4 = (avctx->width + 3) & ~3; int h4 = (avctx->height + 3) & ~3; if(avctx->extradata_size < 2){ av_log(avctx, AV_LOG_ERROR, \"extradata too small\\n\"); return AVERROR_INVALIDDATA; } motionpixels_tableinit(); mp->avctx = avctx; ff_dsputil_init(&mp->dsp, avctx); mp->changes_map = av_mallocz(avctx->width * h4); mp->offset_bits_len = av_log2(avctx->width * avctx->height) + 1; mp->vpt = av_mallocz(avctx->height * sizeof(YuvPixel)); mp->hpt = av_mallocz(h4 * w4 / 16 * sizeof(YuvPixel)); avctx->pix_fmt = AV_PIX_FMT_RGB555; avcodec_get_frame_defaults(&mp->frame); return 0; }"} {"target": 0, "idx": 20662, "func": "static void put_payload_header( AVFormatContext *s, ASFStream *stream, int presentation_time, int m_obj_size, int m_obj_offset, int payload_len ) { ASFContext *asf = s->priv_data; ByteIOContext *pb = &asf->pb; int val; val = stream->num; if 
(s->streams[val - 1]->codec.coded_frame->key_frame) val |= ASF_PL_FLAG_KEY_FRAME; put_byte(pb, val); put_byte(pb, stream->seq); //Media object number put_le32(pb, m_obj_offset); //Offset Into Media Object // Replicated Data shall be at least 8 bytes long. // The first 4 bytes of data shall contain the // Size of the Media Object that the payload belongs to. // The next 4 bytes of data shall contain the // Presentation Time for the media object that the payload belongs to. put_byte(pb, ASF_PAYLOAD_REPLICATED_DATA_LENGTH); put_le32(pb, m_obj_size); //Replicated Data - Media Object Size put_le32(pb, presentation_time);//Replicated Data - Presentation Time if (asf->multi_payloads_present){ put_le16(pb, payload_len); //payload length } }"} {"target": 1, "idx": 20697, "func": "static int pcm_encode_frame(AVCodecContext *avctx, unsigned char *frame, int buf_size, void *data) { int n, sample_size, v; short *samples; unsigned char *dst; switch(avctx->codec->id) { case CODEC_ID_PCM_S16LE: case CODEC_ID_PCM_S16BE: case CODEC_ID_PCM_U16LE: case CODEC_ID_PCM_U16BE: sample_size = 2; break; default: sample_size = 1; break; } n = buf_size / sample_size; samples = data; dst = frame; switch(avctx->codec->id) { case CODEC_ID_PCM_S16LE: for(;n>0;n--) { v = *samples++; dst[0] = v & 0xff; dst[1] = v >> 8; dst += 2; } break; case CODEC_ID_PCM_S16BE: for(;n>0;n--) { v = *samples++; dst[0] = v >> 8; dst[1] = v; dst += 2; } break; case CODEC_ID_PCM_U16LE: for(;n>0;n--) { v = *samples++; v += 0x8000; dst[0] = v & 0xff; dst[1] = v >> 8; dst += 2; } break; case CODEC_ID_PCM_U16BE: for(;n>0;n--) { v = *samples++; v += 0x8000; dst[0] = v >> 8; dst[1] = v; dst += 2; } break; case CODEC_ID_PCM_S8: for(;n>0;n--) { v = *samples++; dst[0] = (v + 128) >> 8; dst++; } break; case CODEC_ID_PCM_U8: for(;n>0;n--) { v = *samples++; dst[0] = ((v + 128) >> 8) + 128; dst++; } break; case CODEC_ID_PCM_ALAW: for(;n>0;n--) { v = *samples++; dst[0] = linear_to_alaw[(v + 32768) >> 2]; dst++; } break; case CODEC_ID_PCM_MULAW: for(;n>0;n--) { v = *samples++; dst[0] = linear_to_ulaw[(v + 32768) >> 2]; dst++; } break; default: return -1; } avctx->key_frame = 1; //avctx->frame_size = (dst - frame) / (sample_size * avctx->channels); return dst - frame; }"} {"target": 1, "idx": 20698, "func": "static inline void hScale_altivec_real(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, int16_t *filter, int16_t *filterPos, int filterSize) { register int i; int __attribute__ ((aligned (16))) tempo[4]; if (filterSize % 4) { for(i=0; i>7, 0, (1<<15)-1); } } else switch (filterSize) { case 4: { for(i=0; i 12) { src_v1 = vec_ld(srcPos + 16, src); } src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src)); src_v = // vec_unpackh sign-extends... 
(vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); // now put our elements in the even slots src_v = vec_mergeh(src_v, (vector signed short)vzero); filter_v = vec_ld(i << 3, filter); // the 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2) // the neat trick : we only care for half the elements, // high or low depending on (i<<3)%16 (it's 0 or 8 here), // and we're going to use vec_mule, so we chose // carefully how to \"unpack\" the elements into the even slots if ((i << 3) % 16) filter_v = vec_mergel(filter_v,(vector signed short)vzero); else filter_v = vec_mergeh(filter_v,(vector signed short)vzero); val_vEven = vec_mule(src_v, filter_v); val_s = vec_sums(val_vEven, vzero); vec_st(val_s, 0, tempo); dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1); } } break; case 8: { for(i=0; i 8) { src_v1 = vec_ld(srcPos + 16, src); } src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src)); src_v = // vec_unpackh sign-extends... (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); filter_v = vec_ld(i << 4, filter); // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2) val_v = vec_msums(src_v, filter_v, (vector signed int)vzero); val_s = vec_sums(val_v, vzero); vec_st(val_s, 0, tempo); dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1); } } break; case 16: { for(i=0; i>7, 0, (1<<15)-1); } } break; default: { for(i=0; i 8) { src_v1 = vec_ld(srcPos + j + 16, src); } src_vF = vec_perm(src_v0, src_v1, permS); src_v = // vec_unpackh sign-extends... (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); // loading filter_v0R is useless, it's already done above //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter); filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter); filter_v = vec_perm(filter_v0R, filter_v1R, permF); val_v = vec_msums(src_v, filter_v, val_v); } val_s = vec_sums(val_v, vzero); vec_st(val_s, 0, tempo); dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1); } } } }"} {"target": 1, "idx": 20716, "func": "void ppc_store_xer (CPUPPCState *env, uint32_t value) { xer_so = (value >> XER_SO) & 0x01; xer_ov = (value >> XER_OV) & 0x01; xer_ca = (value >> XER_CA) & 0x01; xer_cmp = (value >> XER_CMP) & 0xFF; xer_bc = (value >> XER_BC) & 0x3F; }"} {"target": 1, "idx": 20722, "func": "static void property_get_enum(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { EnumProperty *prop = opaque; int value; value = prop->get(obj, errp); visit_type_enum(v, &value, prop->strings, NULL, name, errp); }"} {"target": 0, "idx": 20728, "func": "static int path_has_protocol(const char *path) { #ifdef _WIN32 if (is_windows_drive(path) || is_windows_drive_prefix(path)) { return 0; } #endif return strchr(path, ':') != NULL; }"} {"target": 0, "idx": 20738, "func": "static int mch_init(PCIDevice *d) { int i; MCHPCIState *mch = MCH_PCI_DEVICE(d); /* setup pci memory regions */ memory_region_init_alias(&mch->pci_hole, OBJECT(mch), \"pci-hole\", mch->pci_address_space, mch->below_4g_mem_size, 0x100000000ULL - mch->below_4g_mem_size); memory_region_add_subregion(mch->system_memory, mch->below_4g_mem_size, &mch->pci_hole); pc_init_pci64_hole(&mch->pci_info, 0x100000000ULL + mch->above_4g_mem_size, mch->pci_hole64_size); memory_region_init_alias(&mch->pci_hole_64bit, OBJECT(mch), \"pci-hole64\", mch->pci_address_space, mch->pci_info.w64.begin, mch->pci_hole64_size); if (mch->pci_hole64_size) { memory_region_add_subregion(mch->system_memory, mch->pci_info.w64.begin, &mch->pci_hole_64bit); } /* smram */ 
cpu_smm_register(&mch_set_smm, mch); memory_region_init_alias(&mch->smram_region, OBJECT(mch), \"smram-region\", mch->pci_address_space, 0xa0000, 0x20000); memory_region_add_subregion_overlap(mch->system_memory, 0xa0000, &mch->smram_region, 1); memory_region_set_enabled(&mch->smram_region, false); init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory, mch->pci_address_space, &mch->pam_regions[0], PAM_BIOS_BASE, PAM_BIOS_SIZE); for (i = 0; i < 12; ++i) { init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory, mch->pci_address_space, &mch->pam_regions[i+1], PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE); } return 0; }"} {"target": 0, "idx": 20741, "func": "static int h261_find_frame_end(ParseContext *pc, AVCodecContext* avctx, const uint8_t *buf, int buf_size){ int vop_found, i, j, bits_left, last_bits; uint32_t state; H261Context *h = avctx->priv_data; if(h){ bits_left = h->bits_left; last_bits = h->last_bits; } else{ bits_left = 0; last_bits = 0; } vop_found= pc->frame_start_found; state= pc->state; if(bits_left!=0 && !vop_found) state = state << (8-bits_left) | last_bits; i=0; if(!vop_found){ for(i=0; i<buf_size; i++){ state= (state<<8) | buf[i]; for(j=0; j<8; j++){ if(( ( (state<<j) | (buf[i]>>(8-j)) )>>(32-20) == 0x10 )&&(((state >> (17-j)) & 0x4000) == 0x0)){ i++; vop_found=1; break; } } if(vop_found) break; } } if(vop_found){ for(; i<buf_size; i++){ if(avctx->flags & CODEC_FLAG_TRUNCATED)//XXX ffplay workaround, someone a better solution? state= (state<<8) | buf[i]; for(j=0; j<8; j++){ if(( ( (state<<j) | (buf[i]>>(8-j)) )>>(32-20) == 0x10 )&&(((state >> (17-j)) & 0x4000) == 0x0)){ pc->frame_start_found=0; pc->state=-1; return i-3; } } } } pc->frame_start_found= vop_found; pc->state= state; return END_NOT_FOUND; }"} {"target": 0, "idx": 20743, "func": "void address_space_write(AddressSpace *as, target_phys_addr_t addr, const uint8_t *buf, int len) { address_space_rw(as, addr, (uint8_t *)buf, len, true); }"} {"target": 0, "idx": 20751, "func": "static void audio_run_capture (AudioState *s) { CaptureVoiceOut *cap; for (cap = s->cap_head.lh_first; cap; cap = cap->entries.le_next) { int live, rpos, captured; HWVoiceOut *hw = &cap->hw; SWVoiceOut *sw; captured = live = audio_pcm_hw_get_live_out (hw); rpos = hw->rpos; while (live) { int left = hw->samples - rpos; int to_capture = audio_MIN (live, left); st_sample_t *src; struct capture_callback *cb; src = hw->mix_buf + rpos; hw->clip (cap->buf, src, to_capture); mixeng_clear (src, to_capture); for (cb = cap->cb_head.lh_first; cb; cb = cb->entries.le_next) { cb->ops.capture (cb->opaque, cap->buf, to_capture << hw->info.shift); } rpos = (rpos + to_capture) % hw->samples; live -= to_capture; } hw->rpos = rpos; for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) { if (!sw->active && sw->empty) { continue; } if (audio_bug (AUDIO_FUNC, captured > sw->total_hw_samples_mixed)) { dolog (\"captured=%d sw->total_hw_samples_mixed=%d\\n\", captured, sw->total_hw_samples_mixed); captured = sw->total_hw_samples_mixed; } sw->total_hw_samples_mixed -= captured; sw->empty = sw->total_hw_samples_mixed == 0; } } }"} {"target": 1, "idx": 20761, "func": "CharDriverState *qemu_chr_alloc(void) { CharDriverState *chr = g_malloc0(sizeof(CharDriverState)); return chr; }"} {"target": 1, "idx": 20766, "func": "static void ecc_init(target_phys_addr_t base, qemu_irq irq, uint32_t version) { DeviceState *dev; SysBusDevice *s; dev = qdev_create(NULL, \"eccmemctl\"); qdev_prop_set_uint32(dev, \"version\", version); qdev_init(dev); s = sysbus_from_qdev(dev); sysbus_connect_irq(s, 0, irq); sysbus_mmio_map(s, 0, base); if (version == 0) { // SS-600MP only sysbus_mmio_map(s, 1, base +
0x1000); } }"} {"target": 0, "idx": 20797, "func": "int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, uint8_t ar) { CPUS390XState *env = &cpu->env; S390PCIBusDevice *pbdev; MemoryRegion *mr; int i; uint32_t fh; uint8_t pcias; uint8_t len; uint8_t buffer[128]; if (env->psw.mask & PSW_MASK_PSTATE) { program_interrupt(env, PGM_PRIVILEGED, 6); return 0; } fh = env->regs[r1] >> 32; pcias = (env->regs[r1] >> 16) & 0xf; len = env->regs[r1] & 0xff; if (pcias > 5) { DPRINTF(\"pcistb invalid space\\n\"); setcc(cpu, ZPCI_PCI_LS_ERR); s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS); return 0; } switch (len) { case 16: case 32: case 64: case 128: break; default: program_interrupt(env, PGM_SPECIFICATION, 6); return 0; } pbdev = s390_pci_find_dev_by_fh(fh); if (!pbdev || !(pbdev->fh & FH_MASK_ENABLE)) { DPRINTF(\"pcistb no pci dev fh 0x%x\\n\", fh); setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); return 0; } if (pbdev->lgstg_blocked) { setcc(cpu, ZPCI_PCI_LS_ERR); s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED); return 0; } mr = pbdev->pdev->io_regions[pcias].memory; if (!memory_region_access_valid(mr, env->regs[r3], len, true)) { program_interrupt(env, PGM_ADDRESSING, 6); return 0; } if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) { return 0; } for (i = 0; i < len / 8; i++) { memory_region_dispatch_write(mr, env->regs[r3] + i * 8, ldq_p(buffer + i * 8), 8, MEMTXATTRS_UNSPECIFIED); } setcc(cpu, ZPCI_PCI_LS_OK); return 0; }"} {"target": 0, "idx": 20802, "func": "static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond, void *opaque) { monitor_flush(opaque); return FALSE; }"} {"target": 0, "idx": 20803, "func": "static always_inline void gen_store_spr(int reg, TCGv t) { tcg_gen_st_tl(t, cpu_env, offsetof(CPUState, spr[reg])); }"} {"target": 0, "idx": 20811, "func": "int bdrv_is_read_only(BlockDriverState *bs) { return bs->read_only; }"} {"target": 0, "idx": 20812, "func": "static void bw_conf1_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { PCIBus *b = opaque; pci_data_write(b, addr, val, size); }"} {"target": 0, "idx": 20831, "func": "void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr) { TranslationBlock *tb; int ret; unsigned long pc; CPUX86State *saved_env; /* XXX: hack to restore env in all cases, even if not called from generated code */ saved_env = env; env = cpu_single_env; ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1); if (ret) { if (retaddr) { /* now we have a real cpu fault */ pc = (unsigned long)retaddr; tb = tb_find_pc(pc); if (tb) { /* the PC is inside the translated code. It means that we have a virtual CPU fault */ cpu_restore_state(tb, env, pc, NULL); } } if (retaddr) raise_exception_err(EXCP0E_PAGE, env->error_code); else raise_exception_err_norestore(EXCP0E_PAGE, env->error_code); } env = saved_env; }"} {"target": 1, "idx": 20835, "func": "static void test_tco1_status_bits(void) { TestData d; uint16_t ticks = 8; uint16_t val; int ret; d.args = NULL; d.noreboot = true; test_init(&d); stop_tco(&d); clear_tco_status(&d); reset_on_second_timeout(false); set_tco_timeout(&d, ticks); load_tco(&d); start_tco(&d); clock_step(ticks * TCO_TICK_NSEC); qpci_io_writeb(d.dev, d.tco_io_base + TCO_DAT_IN, 0); qpci_io_writeb(d.dev, d.tco_io_base + TCO_DAT_OUT, 0); val = qpci_io_readw(d.dev, d.tco_io_base + TCO1_STS); ret = val & (TCO_TIMEOUT | SW_TCO_SMI | TCO_INT_STS) ? 
1 : 0; g_assert(ret == 1); qpci_io_writew(d.dev, d.tco_io_base + TCO1_STS, val); g_assert_cmpint(qpci_io_readw(d.dev, d.tco_io_base + TCO1_STS), ==, 0); qtest_end(); }"} {"target": 1, "idx": 20840, "func": "block_crypto_create_opts_init(QCryptoBlockFormat format, QemuOpts *opts, Error **errp) { OptsVisitor *ov; QCryptoBlockCreateOptions *ret = NULL; Error *local_err = NULL; ret = g_new0(QCryptoBlockCreateOptions, 1); ret->format = format; ov = opts_visitor_new(opts); visit_start_struct(opts_get_visitor(ov), NULL, NULL, 0, &local_err); if (local_err) { goto out; } switch (format) { case Q_CRYPTO_BLOCK_FORMAT_LUKS: visit_type_QCryptoBlockCreateOptionsLUKS_members( opts_get_visitor(ov), &ret->u.luks, &local_err); break; default: error_setg(&local_err, \"Unsupported block format %d\", format); break; } error_propagate(errp, local_err); local_err = NULL; visit_end_struct(opts_get_visitor(ov), &local_err); out: if (local_err) { error_propagate(errp, local_err); qapi_free_QCryptoBlockCreateOptions(ret); ret = NULL; } opts_visitor_cleanup(ov); return ret; }"} {"target": 0, "idx": 20856, "func": "static void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, int dstW, int chrDstW) { if (uDest) { x86_reg uv_off = c->uv_off; YSCALEYUV2YV12X(CHR_MMX_FILTER_OFFSET, uDest, chrDstW, 0) YSCALEYUV2YV12X(CHR_MMX_FILTER_OFFSET, vDest - uv_off, chrDstW + uv_off, uv_off) } if (CONFIG_SWSCALE_ALPHA && aDest) { YSCALEYUV2YV12X(ALP_MMX_FILTER_OFFSET, aDest, dstW, 0) } YSCALEYUV2YV12X(LUM_MMX_FILTER_OFFSET, dest, dstW, 0) }"} {"target": 0, "idx": 20902, "func": "static void add_cpreg_to_list(gpointer key, gpointer opaque) { ARMCPU *cpu = opaque; uint64_t regidx; const ARMCPRegInfo *ri; regidx = *(uint32_t *)key; ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); if (!(ri->type & ARM_CP_NO_MIGRATE)) { cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); /* The value array need not be initialized at this point */ cpu->cpreg_array_len++; } }"} {"target": 0, "idx": 20903, "func": "static int xan_huffman_decode(unsigned char *dest, unsigned char *src) { unsigned char byte = *src++; unsigned char ival = byte + 0x16; unsigned char * ptr = src + byte*2; unsigned char val = ival; int counter = 0; unsigned char bits = *ptr++; while ( val != 0x16 ) { if ( (1 << counter) & bits ) val = src[byte + val - 0x17]; else val = src[val - 0x17]; if ( val < 0x16 ) { *dest++ = val; val = ival; } if (counter++ == 7) { counter = 0; bits = *ptr++; } } return 0; }"} {"target": 0, "idx": 20908, "func": "static inline int alarm_has_dynticks(struct qemu_alarm_timer *t) { return t && t->rearm; }"} {"target": 1, "idx": 20917, "func": "static int iff_read_header(AVFormatContext *s) { IffDemuxContext *iff = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; uint8_t *buf; uint32_t chunk_id, data_size; uint32_t screenmode = 0, num, den; unsigned transparency = 0; unsigned masking = 0; // no mask uint8_t fmt[16]; int fmt_size; st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->codec->channels = 1; st->codec->channel_layout = AV_CH_LAYOUT_MONO; avio_skip(pb, 8); // codec_tag used by ByteRun1 decoder to distinguish progressive (PBM) and interlaced (ILBM) content st->codec->codec_tag = avio_rl32(pb); iff->bitmap_compression = -1; iff->svx8_compression = -1; iff->maud_bits = -1; 
iff->maud_compression = -1; while(!url_feof(pb)) { uint64_t orig_pos; int res; const char *metadata_tag = NULL; chunk_id = avio_rl32(pb); data_size = avio_rb32(pb); orig_pos = avio_tell(pb); switch(chunk_id) { case ID_VHDR: st->codec->codec_type = AVMEDIA_TYPE_AUDIO; if (data_size < 14) return AVERROR_INVALIDDATA; avio_skip(pb, 12); st->codec->sample_rate = avio_rb16(pb); if (data_size >= 16) { avio_skip(pb, 1); iff->svx8_compression = avio_r8(pb); } break; case ID_MHDR: st->codec->codec_type = AVMEDIA_TYPE_AUDIO; if (data_size < 32) return AVERROR_INVALIDDATA; avio_skip(pb, 4); iff->maud_bits = avio_rb16(pb); avio_skip(pb, 2); num = avio_rb32(pb); den = avio_rb16(pb); if (!den) return AVERROR_INVALIDDATA; avio_skip(pb, 2); st->codec->sample_rate = num / den; st->codec->channels = avio_rb16(pb); iff->maud_compression = avio_rb16(pb); if (st->codec->channels == 1) st->codec->channel_layout = AV_CH_LAYOUT_MONO; else if (st->codec->channels == 2) st->codec->channel_layout = AV_CH_LAYOUT_STEREO; break; case ID_ABIT: case ID_BODY: case ID_DBOD: case ID_MDAT: iff->body_pos = avio_tell(pb); iff->body_end = iff->body_pos + data_size; iff->body_size = data_size; break; case ID_CHAN: if (data_size < 4) return AVERROR_INVALIDDATA; if (avio_rb32(pb) < 6) { st->codec->channels = 1; st->codec->channel_layout = AV_CH_LAYOUT_MONO; } else { st->codec->channels = 2; st->codec->channel_layout = AV_CH_LAYOUT_STEREO; } break; case ID_CAMG: if (data_size < 4) return AVERROR_INVALIDDATA; screenmode = avio_rb32(pb); break; case ID_CMAP: if (data_size > INT_MAX - IFF_EXTRA_VIDEO_SIZE - FF_INPUT_BUFFER_PADDING_SIZE) return AVERROR_INVALIDDATA; st->codec->extradata_size = data_size + IFF_EXTRA_VIDEO_SIZE; st->codec->extradata = av_malloc(data_size + IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); if (avio_read(pb, st->codec->extradata + IFF_EXTRA_VIDEO_SIZE, data_size) < 0) return AVERROR(EIO); break; case ID_BMHD: st->codec->codec_type = AVMEDIA_TYPE_VIDEO; if (data_size <= 8) return AVERROR_INVALIDDATA; st->codec->width = avio_rb16(pb); st->codec->height = avio_rb16(pb); avio_skip(pb, 4); // x, y offset st->codec->bits_per_coded_sample = avio_r8(pb); if (data_size >= 10) masking = avio_r8(pb); if (data_size >= 11) iff->bitmap_compression = avio_r8(pb); if (data_size >= 14) { avio_skip(pb, 1); // padding transparency = avio_rb16(pb); } if (data_size >= 16) { st->sample_aspect_ratio.num = avio_r8(pb); st->sample_aspect_ratio.den = avio_r8(pb); } break; case ID_DPEL: if (data_size < 4 || (data_size & 3)) return AVERROR_INVALIDDATA; if ((fmt_size = avio_read(pb, fmt, sizeof(fmt))) < 0) return fmt_size; if (fmt_size == sizeof(deep_rgb24) && !memcmp(fmt, deep_rgb24, sizeof(deep_rgb24))) st->codec->pix_fmt = AV_PIX_FMT_RGB24; else if (fmt_size == sizeof(deep_rgba) && !memcmp(fmt, deep_rgba, sizeof(deep_rgba))) st->codec->pix_fmt = AV_PIX_FMT_RGBA; else if (fmt_size == sizeof(deep_bgra) && !memcmp(fmt, deep_bgra, sizeof(deep_bgra))) st->codec->pix_fmt = AV_PIX_FMT_BGRA; else if (fmt_size == sizeof(deep_argb) && !memcmp(fmt, deep_argb, sizeof(deep_argb))) st->codec->pix_fmt = AV_PIX_FMT_ARGB; else if (fmt_size == sizeof(deep_abgr) && !memcmp(fmt, deep_abgr, sizeof(deep_abgr))) st->codec->pix_fmt = AV_PIX_FMT_ABGR; else { av_log_ask_for_sample(s, \"unsupported color format\\n\"); return AVERROR_PATCHWELCOME; } break; case ID_DGBL: st->codec->codec_type = AVMEDIA_TYPE_VIDEO; if (data_size < 8) return AVERROR_INVALIDDATA; st->codec->width = avio_rb16(pb); 
st->codec->height = avio_rb16(pb); iff->bitmap_compression = avio_rb16(pb); st->sample_aspect_ratio.num = avio_r8(pb); st->sample_aspect_ratio.den = avio_r8(pb); st->codec->bits_per_coded_sample = 24; break; case ID_DLOC: if (data_size < 4) return AVERROR_INVALIDDATA; st->codec->width = avio_rb16(pb); st->codec->height = avio_rb16(pb); break; case ID_TVDC: if (data_size < sizeof(iff->tvdc)) return AVERROR_INVALIDDATA; res = avio_read(pb, iff->tvdc, sizeof(iff->tvdc)); if (res < 0) return res; break; case ID_ANNO: case ID_TEXT: metadata_tag = \"comment\"; break; case ID_AUTH: metadata_tag = \"artist\"; break; case ID_COPYRIGHT: metadata_tag = \"copyright\"; break; case ID_NAME: metadata_tag = \"title\"; break; } if (metadata_tag) { if ((res = get_metadata(s, metadata_tag, data_size)) < 0) { av_log(s, AV_LOG_ERROR, \"cannot allocate metadata tag %s!\\n\", metadata_tag); return res; } } avio_skip(pb, data_size - (avio_tell(pb) - orig_pos) + (data_size & 1)); } avio_seek(pb, iff->body_pos, SEEK_SET); switch(st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate); if (st->codec->codec_tag == ID_16SV) st->codec->codec_id = AV_CODEC_ID_PCM_S16BE_PLANAR; else if (st->codec->codec_tag == ID_MAUD) { if (iff->maud_bits == 8 && !iff->maud_compression) { st->codec->codec_id = AV_CODEC_ID_PCM_U8; } else if (iff->maud_bits == 16 && !iff->maud_compression) { st->codec->codec_id = AV_CODEC_ID_PCM_S16BE; } else if (iff->maud_bits == 8 && iff->maud_compression == 2) { st->codec->codec_id = AV_CODEC_ID_PCM_ALAW; } else if (iff->maud_bits == 8 && iff->maud_compression == 3) { st->codec->codec_id = AV_CODEC_ID_PCM_MULAW; } else { av_log_ask_for_sample(s, \"unsupported compression %d and bit depth %d\\n\", iff->maud_compression, iff->maud_bits); return AVERROR_PATCHWELCOME; } st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id); st->codec->block_align = st->codec->bits_per_coded_sample * st->codec->channels / 8; } else { switch (iff->svx8_compression) { case COMP_NONE: st->codec->codec_id = AV_CODEC_ID_PCM_S8_PLANAR; break; case COMP_FIB: st->codec->codec_id = AV_CODEC_ID_8SVX_FIB; break; case COMP_EXP: st->codec->codec_id = AV_CODEC_ID_8SVX_EXP; break; default: av_log(s, AV_LOG_ERROR, \"Unknown SVX8 compression method '%d'\\n\", iff->svx8_compression); return -1; } } st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id); st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample; st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample; break; case AVMEDIA_TYPE_VIDEO: iff->bpp = st->codec->bits_per_coded_sample; if ((screenmode & 0x800 /* Hold And Modify */) && iff->bpp <= 8) { iff->ham = iff->bpp > 6 ? 
6 : 4; st->codec->bits_per_coded_sample = 24; } iff->flags = (screenmode & 0x80 /* Extra HalfBrite */) && iff->bpp <= 8; iff->masking = masking; iff->transparency = transparency; if (!st->codec->extradata) { st->codec->extradata_size = IFF_EXTRA_VIDEO_SIZE; st->codec->extradata = av_malloc(IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); } buf = st->codec->extradata; bytestream_put_be16(&buf, IFF_EXTRA_VIDEO_SIZE); bytestream_put_byte(&buf, iff->bitmap_compression); bytestream_put_byte(&buf, iff->bpp); bytestream_put_byte(&buf, iff->ham); bytestream_put_byte(&buf, iff->flags); bytestream_put_be16(&buf, iff->transparency); bytestream_put_byte(&buf, iff->masking); bytestream_put_buffer(&buf, iff->tvdc, sizeof(iff->tvdc)); st->codec->codec_id = AV_CODEC_ID_IFF_ILBM; break; default: return -1; } return 0; }"} {"target": 1, "idx": 20918, "func": "static int ftp_status(FTPContext *s, char **line, const int response_codes[]) { int err, i, dash = 0, result = 0, code_found = 0; char buf[CONTROL_BUFFER_SIZE]; AVBPrint line_buffer; if (line) av_bprint_init(&line_buffer, 0, AV_BPRINT_SIZE_AUTOMATIC); while (!code_found || dash) { if ((err = ftp_get_line(s, buf, sizeof(buf))) < 0) { av_bprint_finalize(&line_buffer, NULL); return err; } av_log(s, AV_LOG_DEBUG, \"%s\\n\", buf); if (strlen(buf) < 4) continue; err = 0; for (i = 0; i < 3; ++i) { if (buf[i] < '0' || buf[i] > '9') continue; err *= 10; err += buf[i] - '0'; } dash = !!(buf[3] == '-'); for (i = 0; response_codes[i]; ++i) { if (err == response_codes[i]) { if (line) av_bprintf(&line_buffer, \"%s\", buf); code_found = 1; result = err; break; } } } if (line) av_bprint_finalize(&line_buffer, line); return result; }"} {"target": 0, "idx": 20938, "func": "static uint32_t arm_sysctl_read(void *opaque, target_phys_addr_t offset) { arm_sysctl_state *s = (arm_sysctl_state *)opaque; switch (offset) { case 0x00: /* ID */ return s->sys_id; case 0x04: /* SW */ /* General purpose hardware switches. We don't have a useful way of exposing these to the user. */ return 0; case 0x08: /* LED */ return s->leds; case 0x20: /* LOCK */ return s->lockval; case 0x0c: /* OSC0 */ case 0x10: /* OSC1 */ case 0x14: /* OSC2 */ case 0x18: /* OSC3 */ case 0x1c: /* OSC4 */ case 0x24: /* 100HZ */ /* ??? Implement these. */ return 0; case 0x28: /* CFGDATA1 */ return s->cfgdata1; case 0x2c: /* CFGDATA2 */ return s->cfgdata2; case 0x30: /* FLAGS */ return s->flags; case 0x38: /* NVFLAGS */ return s->nvflags; case 0x40: /* RESETCTL */ return s->resetlevel; case 0x44: /* PCICTL */ return 1; case 0x48: /* MCI */ return 0; case 0x4c: /* FLASH */ return 0; case 0x50: /* CLCD */ return 0x1000; case 0x54: /* CLCDSER */ return 0; case 0x58: /* BOOTCS */ return 0; case 0x5c: /* 24MHz */ return muldiv64(qemu_get_clock(vm_clock), 24000000, get_ticks_per_sec()); case 0x60: /* MISC */ return 0; case 0x84: /* PROCID0 */ /* ??? Don't know what the proper value for the core tile ID is. 
*/ return 0x02000000; case 0x88: /* PROCID1 */ return 0xff000000; case 0x64: /* DMAPSR0 */ case 0x68: /* DMAPSR1 */ case 0x6c: /* DMAPSR2 */ case 0x70: /* IOSEL */ case 0x74: /* PLDCTL */ case 0x80: /* BUSID */ case 0x8c: /* OSCRESET0 */ case 0x90: /* OSCRESET1 */ case 0x94: /* OSCRESET2 */ case 0x98: /* OSCRESET3 */ case 0x9c: /* OSCRESET4 */ case 0xc0: /* SYS_TEST_OSC0 */ case 0xc4: /* SYS_TEST_OSC1 */ case 0xc8: /* SYS_TEST_OSC2 */ case 0xcc: /* SYS_TEST_OSC3 */ case 0xd0: /* SYS_TEST_OSC4 */ return 0; default: printf (\"arm_sysctl_read: Bad register offset 0x%x\\n\", (int)offset); return 0; } }"} {"target": 0, "idx": 20945, "func": "void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data) { struct qemu_work_item wi; if (qemu_cpu_is_self(cpu)) { func(data); return; } wi.func = func; wi.data = data; wi.free = false; if (cpu->queued_work_first == NULL) { cpu->queued_work_first = &wi; } else { cpu->queued_work_last->next = &wi; } cpu->queued_work_last = &wi; wi.next = NULL; wi.done = false; qemu_cpu_kick(cpu); while (!wi.done) { CPUState *self_cpu = current_cpu; qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex); current_cpu = self_cpu; } }"} {"target": 0, "idx": 20958, "func": "double avpriv_strtod(char *restrict nptr, char **restrict endptr) { char *end; double res; /* Skip leading spaces */ while (isspace(*nptr)) nptr++; if (!av_strncasecmp(nptr, \"infinity\", 8)) { end = nptr + 8; res = INFINITY; } else if (!av_strncasecmp(nptr, \"inf\", 3)) { end = nptr + 3; res = INFINITY; } else if (!av_strncasecmp(nptr, \"+infinity\", 9)) { end = nptr + 9; res = INFINITY; } else if (!av_strncasecmp(nptr, \"+inf\", 4)) { end = nptr + 4; res = INFINITY; } else if (!av_strncasecmp(nptr, \"-infinity\", 9)) { end = nptr + 9; res = -INFINITY; } else if (!av_strncasecmp(nptr, \"-inf\", 4)) { end = nptr + 4; res = -INFINITY; } else if (!av_strncasecmp(nptr, \"nan\", 3)) { end = check_nan_suffix(nptr + 3); res = NAN; } else if (!av_strncasecmp(nptr, \"+nan\", 4) || !av_strncasecmp(nptr, \"-nan\", 4)) { end = check_nan_suffix(nptr + 4); res = NAN; } else if (!av_strncasecmp(nptr, \"0x\", 2) || !av_strncasecmp(nptr, \"-0x\", 3) || !av_strncasecmp(nptr, \"+0x\", 3)) { /* FIXME this doesn't handle exponents, non-integers (float/double) * and numbers too large for long long */ res = strtoll(nptr, &end, 16); } else { res = strtod(nptr, &end); } if (endptr) *endptr = end; return res; }"} {"target": 0, "idx": 20961, "func": "static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) { double time = av_gettime() / 1000000.0; /* update current video pts */ is->video_current_pts = pts; is->video_current_pts_drift = is->video_current_pts - time; is->video_current_pos = pos; is->frame_last_pts = pts; check_external_clock_sync(is, is->video_current_pts); }"} {"target": 0, "idx": 20970, "func": "open_f(int argc, char **argv) { int flags = 0; int readonly = 0; int growable = 0; int c; while ((c = getopt(argc, argv, \"snrg\")) != EOF) { switch (c) { case 's': flags |= BDRV_O_SNAPSHOT; break; case 'n': flags |= BDRV_O_NOCACHE; break; case 'r': readonly = 1; break; case 'g': growable = 1; break; default: return command_usage(&open_cmd); } } if (!readonly) { flags |= BDRV_O_RDWR; } if (optind != argc - 1) return command_usage(&open_cmd); return openfile(argv[optind], flags, growable); }"} {"target": 0, "idx": 20975, "func": "static unsigned int event_status_media(IDEState *s, uint8_t *buf) { uint8_t event_code, media_status; media_status = 0; if (s->tray_open) { media_status = MS_TRAY_OPEN; 
} else if (bdrv_is_inserted(s->bs)) { media_status = MS_MEDIA_PRESENT; } /* Event notification descriptor */ event_code = MEC_NO_CHANGE; if (media_status != MS_TRAY_OPEN) { if (s->events.new_media) { event_code = MEC_NEW_MEDIA; s->events.new_media = false; } else if (s->events.eject_request) { event_code = MEC_EJECT_REQUESTED; s->events.eject_request = false; } } buf[4] = event_code; buf[5] = media_status; /* These fields are reserved, just clear them. */ buf[6] = 0; buf[7] = 0; return 8; /* We wrote to 4 extra bytes from the header */ }"} {"target": 0, "idx": 20994, "func": "static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev); VirtIOSCSI *s = VIRTIO_SCSI(vdev); SCSIDevice *sd = SCSI_DEVICE(dev); if ((vdev->guest_features >> VIRTIO_SCSI_F_HOTPLUG) & 1) { virtio_scsi_push_event(s, sd, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_REMOVED); } if (s->ctx) { blk_op_unblock_all(sd->conf.blk, s->blocker); } qdev_simple_device_unplug_cb(hotplug_dev, dev, errp); }"} {"target": 0, "idx": 20996, "func": "int qio_channel_socket_connect_sync(QIOChannelSocket *ioc, SocketAddress *addr, Error **errp) { int fd; trace_qio_channel_socket_connect_sync(ioc, addr); fd = socket_connect(addr, NULL, NULL, errp); if (fd < 0) { trace_qio_channel_socket_connect_fail(ioc); return -1; } trace_qio_channel_socket_connect_complete(ioc, fd); if (qio_channel_socket_set_fd(ioc, fd, errp) < 0) { close(fd); return -1; } return 0; }"} {"target": 1, "idx": 21007, "func": "static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len, int append) { int buflen; /** when the frame data does not need to be concatenated, the input buffer is resetted and additional bits from the previous frame are copyed and skipped later so that a fast byte copy is possible */ if (!append) { s->frame_offset = get_bits_count(gb) & 7; s->num_saved_bits = s->frame_offset; init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE); buflen = (s->num_saved_bits + len + 8) >> 3; if (len <= 0 || buflen > MAX_FRAMESIZE) { avpriv_request_sample(s->avctx, \"Too small input buffer\"); s->num_saved_bits += len; if (!append) { avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3), s->num_saved_bits); } else { int align = 8 - (get_bits_count(gb) & 7); align = FFMIN(align, len); put_bits(&s->pb, align, get_bits(gb, align)); len -= align; avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3), len); skip_bits_long(gb, len); { PutBitContext tmp = s->pb; flush_put_bits(&tmp); init_get_bits(&s->gb, s->frame_data, s->num_saved_bits); skip_bits(&s->gb, s->frame_offset);"} {"target": 1, "idx": 21012, "func": "static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx, const ReadInterval *interval, int64_t *cur_ts) { AVPacket pkt, pkt1; AVFrame *frame = NULL; int ret = 0, i = 0, frame_count = 0; int64_t start = -INT64_MAX, end = interval->end; int has_start = 0, has_end = interval->has_end && !interval->end_is_offset; av_init_packet(&pkt); av_log(NULL, AV_LOG_VERBOSE, \"Processing read interval \"); log_read_interval(interval, NULL, AV_LOG_VERBOSE); if (interval->has_start) { int64_t target; if (interval->start_is_offset) { if (*cur_ts == AV_NOPTS_VALUE) { av_log(NULL, AV_LOG_ERROR, \"Could not seek to relative position since current \" \"timestamp is not defined\\n\"); ret = AVERROR(EINVAL); target = *cur_ts + interval->start; } else { target = interval->start; av_log(NULL, AV_LOG_VERBOSE, \"Seeking to read interval start point %s\\n\", 
av_ts2timestr(target, &AV_TIME_BASE_Q)); if ((ret = avformat_seek_file(fmt_ctx, -1, -INT64_MAX, target, INT64_MAX, 0)) < 0) { av_log(NULL, AV_LOG_ERROR, \"Could not seek to position %\"PRId64\": %s\\n\", interval->start, av_err2str(ret)); frame = av_frame_alloc(); while (!av_read_frame(fmt_ctx, &pkt)) { if (selected_streams[pkt.stream_index]) { AVRational tb = fmt_ctx->streams[pkt.stream_index]->time_base; if (pkt.pts != AV_NOPTS_VALUE) *cur_ts = av_rescale_q(pkt.pts, tb, AV_TIME_BASE_Q); if (!has_start && *cur_ts != AV_NOPTS_VALUE) { start = *cur_ts; has_start = 1; if (has_start && !has_end && interval->end_is_offset) { end = start + interval->end; has_end = 1; if (interval->end_is_offset && interval->duration_frames) { if (frame_count >= interval->end) break; } else if (has_end && *cur_ts != AV_NOPTS_VALUE && *cur_ts >= end) { break; frame_count++; if (do_read_packets) { if (do_show_packets) show_packet(w, fmt_ctx, &pkt, i++); nb_streams_packets[pkt.stream_index]++; if (do_read_frames) { pkt1 = pkt; while (pkt1.size && process_frame(w, fmt_ctx, frame, &pkt1) > 0); av_free_packet(&pkt); av_init_packet(&pkt); pkt.data = NULL; pkt.size = 0; //Flush remaining frames that are cached in the decoder for (i = 0; i < fmt_ctx->nb_streams; i++) { pkt.stream_index = i; if (do_read_frames) while (process_frame(w, fmt_ctx, frame, &pkt) > 0); end: av_frame_free(&frame); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, \"Could not read packets in interval \"); log_read_interval(interval, NULL, AV_LOG_ERROR); return ret;"} {"target": 1, "idx": 21030, "func": "int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size, const short *samples) { AVPacket pkt; AVFrame *frame; int ret, samples_size, got_packet; av_init_packet(&pkt); pkt.data = buf; pkt.size = buf_size; if (samples) { frame = av_frame_alloc(); if (!frame) return AVERROR(ENOMEM); if (avctx->frame_size) { frame->nb_samples = avctx->frame_size; } else { /* if frame_size is not set, the number of samples must be * calculated from the buffer size */ int64_t nb_samples; if (!av_get_bits_per_sample(avctx->codec_id)) { av_log(avctx, AV_LOG_ERROR, \"avcodec_encode_audio() does not \" \"support this codec\\n\"); av_frame_free(&frame); return AVERROR(EINVAL); } nb_samples = (int64_t)buf_size * 8 / (av_get_bits_per_sample(avctx->codec_id) * avctx->channels); if (nb_samples >= INT_MAX) { av_frame_free(&frame); return AVERROR(EINVAL); } frame->nb_samples = nb_samples; } /* it is assumed that the samples buffer is large enough based on the * relevant parameters */ samples_size = av_samples_get_buffer_size(NULL, avctx->channels, frame->nb_samples, avctx->sample_fmt, 1); if ((ret = avcodec_fill_audio_frame(frame, avctx->channels, avctx->sample_fmt, (const uint8_t *)samples, samples_size, 1)) < 0) { av_frame_free(&frame); return ret; } /* fabricate frame pts from sample count. 
* this is needed because the avcodec_encode_audio() API does not have * a way for the user to provide pts */ if (avctx->sample_rate && avctx->time_base.num) frame->pts = ff_samples_to_time_base(avctx, avctx->internal->sample_count); else frame->pts = AV_NOPTS_VALUE; avctx->internal->sample_count += frame->nb_samples; } else { frame = NULL; } got_packet = 0; ret = avcodec_encode_audio2(avctx, &pkt, frame, &got_packet); if (!ret && got_packet && avctx->coded_frame) { avctx->coded_frame->pts = pkt.pts; avctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY); } /* free any side data since we cannot return it */ av_packet_free_side_data(&pkt); if (frame && frame->extended_data != frame->data) av_freep(&frame->extended_data); av_frame_free(&frame); return ret ? ret : pkt.size; }"} {"target": 1, "idx": 21034, "func": "static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) { #if defined(CONFIG_KVM) uint32_t vec[4]; #ifdef __x86_64__ asm volatile(\"cpuid\" : \"=a\"(vec[0]), \"=b\"(vec[1]), \"=c\"(vec[2]), \"=d\"(vec[3]) : \"0\"(function), \"c\"(count) : \"cc\"); #else asm volatile(\"pusha \\n\\t\" \"cpuid \\n\\t\" \"mov %%eax, 0(%1) \\n\\t\" \"mov %%ebx, 4(%1) \\n\\t\" \"mov %%ecx, 8(%1) \\n\\t\" \"mov %%edx, 12(%1) \\n\\t\" \"popa\" : : \"a\"(function), \"c\"(count), \"S\"(vec) : \"memory\", \"cc\"); #endif if (eax) *eax = vec[0]; if (ebx) *ebx = vec[1]; if (ecx) *ecx = vec[2]; if (edx) *edx = vec[3]; #endif }"} {"target": 1, "idx": 21037, "func": "static int dxva2_map_frame(AVHWFramesContext *ctx, AVFrame *dst, const AVFrame *src, int flags) { IDirect3DSurface9 *surface = (IDirect3DSurface9*)src->data[3]; DXVA2Mapping *map; D3DSURFACE_DESC surfaceDesc; D3DLOCKED_RECT LockedRect; HRESULT hr; int i, err, nb_planes; int lock_flags = 0; nb_planes = av_pix_fmt_count_planes(dst->format); hr = IDirect3DSurface9_GetDesc(surface, &surfaceDesc); if (FAILED(hr)) { av_log(ctx, AV_LOG_ERROR, \"Error getting a surface description\\n\"); return AVERROR_UNKNOWN; } if (!(flags & AV_HWFRAME_MAP_WRITE)) lock_flags |= D3DLOCK_READONLY; if (flags & AV_HWFRAME_MAP_OVERWRITE) lock_flags |= D3DLOCK_DISCARD; hr = IDirect3DSurface9_LockRect(surface, &LockedRect, NULL, lock_flags); if (FAILED(hr)) { av_log(ctx, AV_LOG_ERROR, \"Unable to lock DXVA2 surface\\n\"); return AVERROR_UNKNOWN; } map = av_mallocz(sizeof(*map)); if (!map) goto fail; err = ff_hwframe_map_create(src->hw_frames_ctx, dst, src, dxva2_unmap_frame, map); if (err < 0) { av_freep(&map); goto fail; } for (i = 0; i < nb_planes; i++) dst->linesize[i] = LockedRect.Pitch; av_image_fill_pointers(dst->data, dst->format, surfaceDesc.Height, (uint8_t*)LockedRect.pBits, dst->linesize); if (dst->format == AV_PIX_FMT_PAL8) dst->data[1] = (uint8_t*)map->palette_dummy; return 0; fail: IDirect3DSurface9_UnlockRect(surface); return err; }"} {"target": 1, "idx": 21050, "func": "void show_help(void) { const char *prog; const OptionDef *po; int i, expert; prog = do_play ? \"ffplay\" : \"ffmpeg\"; printf(\"%s version \" FFMPEG_VERSION \", Copyright (c) 2000, 2001, 2002 Gerard Lantau\\n\", prog); if (!do_play) { printf(\"usage: ffmpeg [[options] -i input_file]... 
{[options] outfile}...\\n\" \"Hyper fast MPEG1/MPEG4/H263/RV and AC3/MPEG audio encoder\\n\"); } else { printf(\"usage: ffplay [options] input_file...\\n\" \"Simple audio player\\n\"); } printf(\"\\n\" \"Main options are:\\n\"); for(i=0;i<2;i++) { if (i == 1) printf(\"\\nAdvanced options are:\\n\"); for(po = options; po->name != NULL; po++) { char buf[64]; expert = (po->flags & OPT_EXPERT) != 0; if (expert == i) { strcpy(buf, po->name); if (po->flags & HAS_ARG) { strcat(buf, \" \"); strcat(buf, po->argname); } printf(\"-%-17s %s\\n\", buf, po->help); } } } exit(1); }"} {"target": 1, "idx": 21061, "func": "av_cold int ffv1_common_init(AVCodecContext *avctx) { FFV1Context *s = avctx->priv_data; if (!avctx->width || !avctx->height) return AVERROR_INVALIDDATA; s->avctx = avctx; s->flags = avctx->flags; s->picture.f = avcodec_alloc_frame(); s->last_picture.f = av_frame_alloc(); ff_dsputil_init(&s->dsp, avctx); s->width = avctx->width; s->height = avctx->height; // defaults s->num_h_slices = 1; s->num_v_slices = 1; return 0; }"} {"target": 0, "idx": 21070, "func": "static int vqa_decode_chunk(VqaContext *s) { unsigned int chunk_type; unsigned int chunk_size; int byte_skip; unsigned int index = 0; int i; unsigned char r, g, b; int index_shift; int res; int cbf0_chunk = -1; int cbfz_chunk = -1; int cbp0_chunk = -1; int cbpz_chunk = -1; int cpl0_chunk = -1; int cplz_chunk = -1; int vptz_chunk = -1; int x, y; int lines = 0; int pixel_ptr; int vector_index = 0; int lobyte = 0; int hibyte = 0; int lobytes = 0; int hibytes = s->decode_buffer_size / 2; /* first, traverse through the frame and find the subchunks */ while (bytestream2_get_bytes_left(&s->gb) >= 8) { chunk_type = bytestream2_get_be32u(&s->gb); index = bytestream2_tell(&s->gb); chunk_size = bytestream2_get_be32u(&s->gb); switch (chunk_type) { case CBF0_TAG: cbf0_chunk = index; break; case CBFZ_TAG: cbfz_chunk = index; break; case CBP0_TAG: cbp0_chunk = index; break; case CBPZ_TAG: cbpz_chunk = index; break; case CPL0_TAG: cpl0_chunk = index; break; case CPLZ_TAG: cplz_chunk = index; break; case VPTZ_TAG: vptz_chunk = index; break; default: av_log(s->avctx, AV_LOG_ERROR, \" VQA video: Found unknown chunk type: %c%c%c%c (%08X)\\n\", (chunk_type >> 24) & 0xFF, (chunk_type >> 16) & 0xFF, (chunk_type >> 8) & 0xFF, (chunk_type >> 0) & 0xFF, chunk_type); break; } byte_skip = chunk_size & 0x01; bytestream2_skip(&s->gb, chunk_size + byte_skip); } /* next, deal with the palette */ if ((cpl0_chunk != -1) && (cplz_chunk != -1)) { /* a chunk should not have both chunk types */ av_log(s->avctx, AV_LOG_ERROR, \" VQA video: problem: found both CPL0 and CPLZ chunks\\n\"); return AVERROR_INVALIDDATA; } /* decompress the palette chunk */ if (cplz_chunk != -1) { /* yet to be handled */ } /* convert the RGB palette into the machine's endian format */ if (cpl0_chunk != -1) { bytestream2_seek(&s->gb, cpl0_chunk, SEEK_SET); chunk_size = bytestream2_get_be32(&s->gb); /* sanity check the palette size */ if (chunk_size / 3 > 256 || chunk_size > bytestream2_get_bytes_left(&s->gb)) { av_log(s->avctx, AV_LOG_ERROR, \" VQA video: problem: found a palette chunk with %d colors\\n\", chunk_size / 3); return AVERROR_INVALIDDATA; } for (i = 0; i < chunk_size / 3; i++) { /* scale by 4 to transform 6-bit palette -> 8-bit */ r = bytestream2_get_byteu(&s->gb) * 4; g = bytestream2_get_byteu(&s->gb) * 4; b = bytestream2_get_byteu(&s->gb) * 4; s->palette[i] = (r << 16) | (g << 8) | (b); } } /* next, look for a full codebook */ if ((cbf0_chunk != -1) && (cbfz_chunk != -1)) { /* a 
chunk should not have both chunk types */ av_log(s->avctx, AV_LOG_ERROR, \" VQA video: problem: found both CBF0 and CBFZ chunks\\n\"); return AVERROR_INVALIDDATA; } /* decompress the full codebook chunk */ if (cbfz_chunk != -1) { bytestream2_seek(&s->gb, cbfz_chunk, SEEK_SET); chunk_size = bytestream2_get_be32(&s->gb); if ((res = decode_format80(&s->gb, chunk_size, s->codebook, s->codebook_size, 0)) < 0) return res; } /* copy a full codebook */ if (cbf0_chunk != -1) { bytestream2_seek(&s->gb, cbf0_chunk, SEEK_SET); chunk_size = bytestream2_get_be32(&s->gb); /* sanity check the full codebook size */ if (chunk_size > MAX_CODEBOOK_SIZE) { av_log(s->avctx, AV_LOG_ERROR, \" VQA video: problem: CBF0 chunk too large (0x%X bytes)\\n\", chunk_size); return AVERROR_INVALIDDATA; } bytestream2_get_buffer(&s->gb, s->codebook, chunk_size); } /* decode the frame */ if (vptz_chunk == -1) { /* something is wrong if there is no VPTZ chunk */ av_log(s->avctx, AV_LOG_ERROR, \" VQA video: problem: no VPTZ chunk found\\n\"); return AVERROR_INVALIDDATA; } bytestream2_seek(&s->gb, vptz_chunk, SEEK_SET); chunk_size = bytestream2_get_be32(&s->gb); if ((res = decode_format80(&s->gb, chunk_size, s->decode_buffer, s->decode_buffer_size, 1)) < 0) return res; /* render the final PAL8 frame */ if (s->vector_height == 4) index_shift = 4; else index_shift = 3; for (y = 0; y < s->frame.linesize[0] * s->height; y += s->frame.linesize[0] * s->vector_height) { for (x = y; x < y + s->width; x += 4, lobytes++, hibytes++) { pixel_ptr = x; /* get the vector index, the method for which varies according to * VQA file version */ switch (s->vqa_version) { case 1: lobyte = s->decode_buffer[lobytes * 2]; hibyte = s->decode_buffer[(lobytes * 2) + 1]; vector_index = ((hibyte << 8) | lobyte) >> 3; vector_index <<= index_shift; lines = s->vector_height; /* uniform color fill - a quick hack */ if (hibyte == 0xFF) { while (lines--) { s->frame.data[0][pixel_ptr + 0] = 255 - lobyte; s->frame.data[0][pixel_ptr + 1] = 255 - lobyte; s->frame.data[0][pixel_ptr + 2] = 255 - lobyte; s->frame.data[0][pixel_ptr + 3] = 255 - lobyte; pixel_ptr += s->frame.linesize[0]; } lines=0; } break; case 2: lobyte = s->decode_buffer[lobytes]; hibyte = s->decode_buffer[hibytes]; vector_index = (hibyte << 8) | lobyte; vector_index <<= index_shift; lines = s->vector_height; break; case 3: /* not implemented yet */ lines = 0; break; } while (lines--) { s->frame.data[0][pixel_ptr + 0] = s->codebook[vector_index++]; s->frame.data[0][pixel_ptr + 1] = s->codebook[vector_index++]; s->frame.data[0][pixel_ptr + 2] = s->codebook[vector_index++]; s->frame.data[0][pixel_ptr + 3] = s->codebook[vector_index++]; pixel_ptr += s->frame.linesize[0]; } } } /* handle partial codebook */ if ((cbp0_chunk != -1) && (cbpz_chunk != -1)) { /* a chunk should not have both chunk types */ av_log(s->avctx, AV_LOG_ERROR, \" VQA video: problem: found both CBP0 and CBPZ chunks\\n\"); return AVERROR_INVALIDDATA; } if (cbp0_chunk != -1) { bytestream2_seek(&s->gb, cbp0_chunk, SEEK_SET); chunk_size = bytestream2_get_be32(&s->gb); /* accumulate partial codebook */ bytestream2_get_buffer(&s->gb, &s->next_codebook_buffer[s->next_codebook_buffer_index], chunk_size); s->next_codebook_buffer_index += chunk_size; s->partial_countdown--; if (s->partial_countdown == 0) { /* time to replace codebook */ memcpy(s->codebook, s->next_codebook_buffer, s->next_codebook_buffer_index); /* reset accounting */ s->next_codebook_buffer_index = 0; s->partial_countdown = s->partial_count; } } if (cbpz_chunk != -1) { 
bytestream2_seek(&s->gb, cbpz_chunk, SEEK_SET); chunk_size = bytestream2_get_be32(&s->gb); /* accumulate partial codebook */ bytestream2_get_buffer(&s->gb, &s->next_codebook_buffer[s->next_codebook_buffer_index], chunk_size); s->next_codebook_buffer_index += chunk_size; s->partial_countdown--; if (s->partial_countdown == 0) { GetByteContext gb; bytestream2_init(&gb, s->next_codebook_buffer, s->next_codebook_buffer_index); /* decompress codebook */ if ((res = decode_format80(&gb, s->next_codebook_buffer_index, s->codebook, s->codebook_size, 0)) < 0) return res; /* reset accounting */ s->next_codebook_buffer_index = 0; s->partial_countdown = s->partial_count; } } return 0; }"} {"target": 0, "idx": 21092, "func": "static void platform_mmio_map(PCIDevice *d, int region_num, pcibus_t addr, pcibus_t size, int type) { int mmio_io_addr; mmio_io_addr = cpu_register_io_memory_simple(&platform_mmio_handler, DEVICE_NATIVE_ENDIAN); cpu_register_physical_memory(addr, size, mmio_io_addr); }"} {"target": 0, "idx": 21100, "func": "static void bt_submit_hci(struct HCIInfo *info, const uint8_t *data, int length) { struct bt_hci_s *hci = hci_from_info(info); uint16_t cmd; int paramlen, i; if (length < HCI_COMMAND_HDR_SIZE) goto short_hci; memcpy(&hci->last_cmd, data, 2); cmd = (data[1] << 8) | data[0]; paramlen = data[2]; if (cmd_opcode_ogf(cmd) == 0 || cmd_opcode_ocf(cmd) == 0) /* NOP */ return; data += HCI_COMMAND_HDR_SIZE; length -= HCI_COMMAND_HDR_SIZE; if (paramlen > length) return; #define PARAM(cmd, param) (((cmd##_cp *) data)->param) #define PARAM16(cmd, param) le16_to_cpup(&PARAM(cmd, param)) #define PARAMHANDLE(cmd) HNDL(PARAM(cmd, handle)) #define LENGTH_CHECK(cmd) if (length < sizeof(cmd##_cp)) goto short_hci /* Note: the supported commands bitmask in bt_hci_read_local_commands_rp * needs to be updated every time a command is implemented here! 
*/ switch (cmd) { case cmd_opcode_pack(OGF_LINK_CTL, OCF_INQUIRY): LENGTH_CHECK(inquiry); if (PARAM(inquiry, length) < 1) { bt_hci_event_complete_status(hci, HCI_INVALID_PARAMETERS); break; } hci->lm.inquire = 1; hci->lm.periodic = 0; hci->lm.responses_left = PARAM(inquiry, num_rsp) ?: INT_MAX; hci->lm.responses = 0; bt_hci_event_status(hci, HCI_SUCCESS); bt_hci_inquiry_start(hci, PARAM(inquiry, length)); break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_INQUIRY_CANCEL): if (!hci->lm.inquire || hci->lm.periodic) { fprintf(stderr, \"%s: Inquiry Cancel should only be issued after \" \"the Inquiry command has been issued, a Command \" \"Status event has been received for the Inquiry \" \"command, and before the Inquiry Complete event \" \"occurs\", __FUNCTION__); bt_hci_event_complete_status(hci, HCI_COMMAND_DISALLOWED); break; } hci->lm.inquire = 0; qemu_del_timer(hci->lm.inquiry_done); bt_hci_event_complete_status(hci, HCI_SUCCESS); break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_PERIODIC_INQUIRY): LENGTH_CHECK(periodic_inquiry); if (!(PARAM(periodic_inquiry, length) < PARAM16(periodic_inquiry, min_period) && PARAM16(periodic_inquiry, min_period) < PARAM16(periodic_inquiry, max_period)) || PARAM(periodic_inquiry, length) < 1 || PARAM16(periodic_inquiry, min_period) < 2 || PARAM16(periodic_inquiry, max_period) < 3) { bt_hci_event_complete_status(hci, HCI_INVALID_PARAMETERS); break; } hci->lm.inquire = 1; hci->lm.periodic = 1; hci->lm.responses_left = PARAM(periodic_inquiry, num_rsp); hci->lm.responses = 0; hci->lm.inquiry_period = PARAM16(periodic_inquiry, max_period); bt_hci_event_complete_status(hci, HCI_SUCCESS); bt_hci_inquiry_start(hci, PARAM(periodic_inquiry, length)); break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_EXIT_PERIODIC_INQUIRY): if (!hci->lm.inquire || !hci->lm.periodic) { fprintf(stderr, \"%s: Inquiry Cancel should only be issued after \" \"the Inquiry command has been issued, a Command \" \"Status event has been received for the Inquiry \" \"command, and before the Inquiry Complete event \" \"occurs\", __FUNCTION__); bt_hci_event_complete_status(hci, HCI_COMMAND_DISALLOWED); break; } hci->lm.inquire = 0; qemu_del_timer(hci->lm.inquiry_done); qemu_del_timer(hci->lm.inquiry_next); bt_hci_event_complete_status(hci, HCI_SUCCESS); break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_CREATE_CONN): LENGTH_CHECK(create_conn); if (hci->lm.connecting >= HCI_HANDLES_MAX) { bt_hci_event_status(hci, HCI_REJECTED_LIMITED_RESOURCES); break; } bt_hci_event_status(hci, HCI_SUCCESS); if (bt_hci_connect(hci, &PARAM(create_conn, bdaddr))) bt_hci_connection_reject_event(hci, &PARAM(create_conn, bdaddr)); break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_DISCONNECT): LENGTH_CHECK(disconnect); if (bt_hci_handle_bad(hci, PARAMHANDLE(disconnect))) { bt_hci_event_status(hci, HCI_NO_CONNECTION); break; } bt_hci_event_status(hci, HCI_SUCCESS); bt_hci_disconnect(hci, PARAMHANDLE(disconnect), PARAM(disconnect, reason)); break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_CREATE_CONN_CANCEL): LENGTH_CHECK(create_conn_cancel); if (bt_hci_lmp_connection_ready(hci, &PARAM(create_conn_cancel, bdaddr))) { for (i = 0; i < HCI_HANDLES_MAX; i ++) if (bt_hci_role_master(hci, i) && hci->lm.handle[i].link && !bacmp(&hci->lm.handle[i].link->slave->bd_addr, &PARAM(create_conn_cancel, bdaddr))) break; bt_hci_event_complete_conn_cancel(hci, i < HCI_HANDLES_MAX ? 
HCI_ACL_CONNECTION_EXISTS : HCI_NO_CONNECTION, &PARAM(create_conn_cancel, bdaddr)); } else bt_hci_event_complete_conn_cancel(hci, HCI_SUCCESS, &PARAM(create_conn_cancel, bdaddr)); break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_ACCEPT_CONN_REQ): LENGTH_CHECK(accept_conn_req); if (!hci->conn_req_host || bacmp(&PARAM(accept_conn_req, bdaddr), &hci->conn_req_host->bd_addr)) { bt_hci_event_status(hci, HCI_INVALID_PARAMETERS); break; } bt_hci_event_status(hci, HCI_SUCCESS); bt_hci_connection_accept(hci, hci->conn_req_host); hci->conn_req_host = 0; break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_REJECT_CONN_REQ): LENGTH_CHECK(reject_conn_req); if (!hci->conn_req_host || bacmp(&PARAM(reject_conn_req, bdaddr), &hci->conn_req_host->bd_addr)) { bt_hci_event_status(hci, HCI_INVALID_PARAMETERS); break; } bt_hci_event_status(hci, HCI_SUCCESS); bt_hci_connection_reject(hci, hci->conn_req_host, PARAM(reject_conn_req, reason)); bt_hci_connection_reject_event(hci, &hci->conn_req_host->bd_addr); hci->conn_req_host = 0; break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_AUTH_REQUESTED): LENGTH_CHECK(auth_requested); if (bt_hci_handle_bad(hci, PARAMHANDLE(auth_requested))) bt_hci_event_status(hci, HCI_NO_CONNECTION); else { bt_hci_event_status(hci, HCI_SUCCESS); bt_hci_event_auth_complete(hci, PARAMHANDLE(auth_requested)); } break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_SET_CONN_ENCRYPT): LENGTH_CHECK(set_conn_encrypt); if (bt_hci_handle_bad(hci, PARAMHANDLE(set_conn_encrypt))) bt_hci_event_status(hci, HCI_NO_CONNECTION); else { bt_hci_event_status(hci, HCI_SUCCESS); bt_hci_event_encrypt_change(hci, PARAMHANDLE(set_conn_encrypt), PARAM(set_conn_encrypt, encrypt)); } break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_REMOTE_NAME_REQ): LENGTH_CHECK(remote_name_req); if (bt_hci_name_req(hci, &PARAM(remote_name_req, bdaddr))) bt_hci_event_status(hci, HCI_NO_CONNECTION); break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_REMOTE_NAME_REQ_CANCEL): LENGTH_CHECK(remote_name_req_cancel); bt_hci_event_complete_name_cancel(hci, &PARAM(remote_name_req_cancel, bdaddr)); break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_READ_REMOTE_FEATURES): LENGTH_CHECK(read_remote_features); if (bt_hci_features_req(hci, PARAMHANDLE(read_remote_features))) bt_hci_event_status(hci, HCI_NO_CONNECTION); break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_READ_REMOTE_EXT_FEATURES): LENGTH_CHECK(read_remote_ext_features); if (bt_hci_handle_bad(hci, PARAMHANDLE(read_remote_ext_features))) bt_hci_event_status(hci, HCI_NO_CONNECTION); else { bt_hci_event_status(hci, HCI_SUCCESS); bt_hci_event_read_remote_ext_features(hci, PARAMHANDLE(read_remote_ext_features)); } break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_READ_REMOTE_VERSION): LENGTH_CHECK(read_remote_version); if (bt_hci_version_req(hci, PARAMHANDLE(read_remote_version))) bt_hci_event_status(hci, HCI_NO_CONNECTION); break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_READ_CLOCK_OFFSET): LENGTH_CHECK(read_clock_offset); if (bt_hci_clkoffset_req(hci, PARAMHANDLE(read_clock_offset))) bt_hci_event_status(hci, HCI_NO_CONNECTION); break; case cmd_opcode_pack(OGF_LINK_CTL, OCF_READ_LMP_HANDLE): LENGTH_CHECK(read_lmp_handle); /* TODO: */ bt_hci_event_complete_lmp_handle(hci, PARAMHANDLE(read_lmp_handle)); break; case cmd_opcode_pack(OGF_LINK_POLICY, OCF_HOLD_MODE): LENGTH_CHECK(hold_mode); if (PARAM16(hold_mode, min_interval) > PARAM16(hold_mode, max_interval) || PARAM16(hold_mode, min_interval) < 0x0002 || PARAM16(hold_mode, max_interval) > 0xff00 || (PARAM16(hold_mode, min_interval) & 1) || (PARAM16(hold_mode, max_interval) & 1)) { 
bt_hci_event_status(hci, HCI_INVALID_PARAMETERS); break; } if (bt_hci_mode_change(hci, PARAMHANDLE(hold_mode), PARAM16(hold_mode, max_interval), acl_hold)) bt_hci_event_status(hci, HCI_NO_CONNECTION); break; case cmd_opcode_pack(OGF_LINK_POLICY, OCF_PARK_MODE): LENGTH_CHECK(park_mode); if (PARAM16(park_mode, min_interval) > PARAM16(park_mode, max_interval) || PARAM16(park_mode, min_interval) < 0x000e || (PARAM16(park_mode, min_interval) & 1) || (PARAM16(park_mode, max_interval) & 1)) { bt_hci_event_status(hci, HCI_INVALID_PARAMETERS); break; } if (bt_hci_mode_change(hci, PARAMHANDLE(park_mode), PARAM16(park_mode, max_interval), acl_parked)) bt_hci_event_status(hci, HCI_NO_CONNECTION); break; case cmd_opcode_pack(OGF_LINK_POLICY, OCF_EXIT_PARK_MODE): LENGTH_CHECK(exit_park_mode); if (bt_hci_mode_cancel(hci, PARAMHANDLE(exit_park_mode), acl_parked)) bt_hci_event_status(hci, HCI_NO_CONNECTION); break; case cmd_opcode_pack(OGF_LINK_POLICY, OCF_ROLE_DISCOVERY): LENGTH_CHECK(role_discovery); if (bt_hci_handle_bad(hci, PARAMHANDLE(role_discovery))) bt_hci_event_complete_role_discovery(hci, HCI_NO_CONNECTION, PARAMHANDLE(role_discovery), 0); else bt_hci_event_complete_role_discovery(hci, HCI_SUCCESS, PARAMHANDLE(role_discovery), bt_hci_role_master(hci, PARAMHANDLE(role_discovery))); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_SET_EVENT_MASK): LENGTH_CHECK(set_event_mask); memcpy(hci->event_mask, PARAM(set_event_mask, mask), 8); bt_hci_event_complete_status(hci, HCI_SUCCESS); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_RESET): bt_hci_reset(hci); bt_hci_event_status(hci, HCI_SUCCESS); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_SET_EVENT_FLT): if (length >= 1 && PARAM(set_event_flt, flt_type) == FLT_CLEAR_ALL) /* No length check */; else LENGTH_CHECK(set_event_flt); /* Filters are not implemented */ bt_hci_event_complete_status(hci, HCI_SUCCESS); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_FLUSH): LENGTH_CHECK(flush); if (bt_hci_handle_bad(hci, PARAMHANDLE(flush))) bt_hci_event_complete_flush(hci, HCI_NO_CONNECTION, PARAMHANDLE(flush)); else { /* TODO: ordering? 
*/ bt_hci_event(hci, EVT_FLUSH_OCCURRED, &PARAM(flush, handle), EVT_FLUSH_OCCURRED_SIZE); bt_hci_event_complete_flush(hci, HCI_SUCCESS, PARAMHANDLE(flush)); } break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_CHANGE_LOCAL_NAME): LENGTH_CHECK(change_local_name); if (hci->device.lmp_name) free((void *) hci->device.lmp_name); hci->device.lmp_name = strndup(PARAM(change_local_name, name), sizeof(PARAM(change_local_name, name))); bt_hci_event_complete_status(hci, HCI_SUCCESS); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_READ_LOCAL_NAME): bt_hci_event_complete_read_local_name(hci); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_READ_CONN_ACCEPT_TIMEOUT): bt_hci_event_complete_read_conn_accept_timeout(hci); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_WRITE_CONN_ACCEPT_TIMEOUT): /* TODO */ LENGTH_CHECK(write_conn_accept_timeout); if (PARAM16(write_conn_accept_timeout, timeout) < 0x0001 || PARAM16(write_conn_accept_timeout, timeout) > 0xb540) { bt_hci_event_complete_status(hci, HCI_INVALID_PARAMETERS); break; } hci->conn_accept_tout = PARAM16(write_conn_accept_timeout, timeout); bt_hci_event_complete_status(hci, HCI_SUCCESS); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_READ_SCAN_ENABLE): bt_hci_event_complete_read_scan_enable(hci); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE): LENGTH_CHECK(write_scan_enable); /* TODO: check that the remaining bits are all 0 */ hci->device.inquiry_scan = !!(PARAM(write_scan_enable, scan_enable) & SCAN_INQUIRY); hci->device.page_scan = !!(PARAM(write_scan_enable, scan_enable) & SCAN_PAGE); bt_hci_event_complete_status(hci, HCI_SUCCESS); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_READ_CLASS_OF_DEV): bt_hci_event_complete_read_local_class(hci); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_WRITE_CLASS_OF_DEV): LENGTH_CHECK(write_class_of_dev); memcpy(hci->device.class, PARAM(write_class_of_dev, dev_class), sizeof(PARAM(write_class_of_dev, dev_class))); bt_hci_event_complete_status(hci, HCI_SUCCESS); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_READ_VOICE_SETTING): bt_hci_event_complete_voice_setting(hci); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_WRITE_VOICE_SETTING): LENGTH_CHECK(write_voice_setting); hci->voice_setting = PARAM(write_voice_setting, voice_setting); bt_hci_event_complete_status(hci, HCI_SUCCESS); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_HOST_NUMBER_OF_COMPLETED_PACKETS): if (length < data[0] * 2 + 1) goto short_hci; for (i = 0; i < data[0]; i ++) if (bt_hci_handle_bad(hci, data[i * 2 + 1] | (data[i * 2 + 2] << 8))) bt_hci_event_complete_status(hci, HCI_INVALID_PARAMETERS); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_READ_INQUIRY_MODE): /* Only if (local_features[3] & 0x40) && (local_commands[12] & 0x40) * else * goto unknown_command */ bt_hci_event_complete_read_inquiry_mode(hci); break; case cmd_opcode_pack(OGF_HOST_CTL, OCF_WRITE_INQUIRY_MODE): /* Only if (local_features[3] & 0x40) && (local_commands[12] & 0x80) * else * goto unknown_command */ LENGTH_CHECK(write_inquiry_mode); if (PARAM(write_inquiry_mode, mode) > 0x01) { bt_hci_event_complete_status(hci, HCI_INVALID_PARAMETERS); break; } hci->lm.inquiry_mode = PARAM(write_inquiry_mode, mode); bt_hci_event_complete_status(hci, HCI_SUCCESS); break; case cmd_opcode_pack(OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION): bt_hci_read_local_version_rp(hci); break; case cmd_opcode_pack(OGF_INFO_PARAM, OCF_READ_LOCAL_COMMANDS): bt_hci_read_local_commands_rp(hci); break; case cmd_opcode_pack(OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES): bt_hci_read_local_features_rp(hci); break; case 
cmd_opcode_pack(OGF_INFO_PARAM, OCF_READ_LOCAL_EXT_FEATURES): LENGTH_CHECK(read_local_ext_features); bt_hci_read_local_ext_features_rp(hci, PARAM(read_local_ext_features, page_num)); break; case cmd_opcode_pack(OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE): bt_hci_read_buffer_size_rp(hci); break; case cmd_opcode_pack(OGF_INFO_PARAM, OCF_READ_COUNTRY_CODE): bt_hci_read_country_code_rp(hci); break; case cmd_opcode_pack(OGF_INFO_PARAM, OCF_READ_BD_ADDR): bt_hci_read_bd_addr_rp(hci); break; case cmd_opcode_pack(OGF_STATUS_PARAM, OCF_READ_LINK_QUALITY): LENGTH_CHECK(read_link_quality); bt_hci_link_quality_rp(hci, PARAMHANDLE(read_link_quality)); break; default: bt_hci_event_status(hci, HCI_UNKNOWN_COMMAND); break; short_hci: fprintf(stderr, \"%s: HCI packet too short (%iB)\\n\", __FUNCTION__, length); bt_hci_event_status(hci, HCI_INVALID_PARAMETERS); break; } }"} {"target": 0, "idx": 21102, "func": "static int kvm_s390_register_io_adapter(S390FLICState *fs, uint32_t id, uint8_t isc, bool swap, bool is_maskable) { struct kvm_s390_io_adapter adapter = { .id = id, .isc = isc, .maskable = is_maskable, .swap = swap, }; KVMS390FLICState *flic = KVM_S390_FLIC(fs); int r, ret; struct kvm_device_attr attr = { .group = KVM_DEV_FLIC_ADAPTER_REGISTER, .addr = (uint64_t)&adapter, }; if (!kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING)) { /* nothing to do */ return 0; } r = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr); ret = r ? -errno : 0; return ret; }"} {"target": 0, "idx": 21106, "func": "static void dec_calc(DisasContext *dc, uint32_t insn) { uint32_t op0, op1, op2; uint32_t ra, rb, rd; op0 = extract32(insn, 0, 4); op1 = extract32(insn, 8, 2); op2 = extract32(insn, 6, 2); ra = extract32(insn, 16, 5); rb = extract32(insn, 11, 5); rd = extract32(insn, 21, 5); switch (op0) { case 0x0000: switch (op1) { case 0x00: /* l.add */ LOG_DIS(\"l.add r%d, r%d, r%d\\n\", rd, ra, rb); { int lab = gen_new_label(); TCGv_i64 ta = tcg_temp_new_i64(); TCGv_i64 tb = tcg_temp_new_i64(); TCGv_i64 td = tcg_temp_local_new_i64(); TCGv_i32 res = tcg_temp_local_new_i32(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); tcg_gen_extu_i32_i64(ta, cpu_R[ra]); tcg_gen_extu_i32_i64(tb, cpu_R[rb]); tcg_gen_add_i64(td, ta, tb); tcg_gen_trunc_i64_i32(res, td); tcg_gen_shri_i64(td, td, 31); tcg_gen_andi_i64(td, td, 0x3); /* Jump to lab when no overflow. 
*/ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab); tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab); tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab); gen_exception(dc, EXCP_RANGE); gen_set_label(lab); tcg_gen_mov_i32(cpu_R[rd], res); tcg_temp_free_i64(ta); tcg_temp_free_i64(tb); tcg_temp_free_i64(td); tcg_temp_free_i32(res); tcg_temp_free_i32(sr_ove); } break; default: gen_illegal_exception(dc); break; } break; case 0x0001: /* l.addc */ switch (op1) { case 0x00: LOG_DIS(\"l.addc r%d, r%d, r%d\\n\", rd, ra, rb); { int lab = gen_new_label(); TCGv_i64 ta = tcg_temp_new_i64(); TCGv_i64 tb = tcg_temp_new_i64(); TCGv_i64 tcy = tcg_temp_local_new_i64(); TCGv_i64 td = tcg_temp_local_new_i64(); TCGv_i32 res = tcg_temp_local_new_i32(); TCGv_i32 sr_cy = tcg_temp_local_new_i32(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); tcg_gen_extu_i32_i64(ta, cpu_R[ra]); tcg_gen_extu_i32_i64(tb, cpu_R[rb]); tcg_gen_andi_i32(sr_cy, cpu_sr, SR_CY); tcg_gen_extu_i32_i64(tcy, sr_cy); tcg_gen_shri_i64(tcy, tcy, 10); tcg_gen_add_i64(td, ta, tb); tcg_gen_add_i64(td, td, tcy); tcg_gen_trunc_i64_i32(res, td); tcg_gen_shri_i64(td, td, 32); tcg_gen_andi_i64(td, td, 0x3); /* Jump to lab when no overflow. */ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab); tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab); tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab); gen_exception(dc, EXCP_RANGE); gen_set_label(lab); tcg_gen_mov_i32(cpu_R[rd], res); tcg_temp_free_i64(ta); tcg_temp_free_i64(tb); tcg_temp_free_i64(tcy); tcg_temp_free_i64(td); tcg_temp_free_i32(res); tcg_temp_free_i32(sr_cy); tcg_temp_free_i32(sr_ove); } break; default: gen_illegal_exception(dc); break; } break; case 0x0002: /* l.sub */ switch (op1) { case 0x00: LOG_DIS(\"l.sub r%d, r%d, r%d\\n\", rd, ra, rb); { int lab = gen_new_label(); TCGv_i64 ta = tcg_temp_new_i64(); TCGv_i64 tb = tcg_temp_new_i64(); TCGv_i64 td = tcg_temp_local_new_i64(); TCGv_i32 res = tcg_temp_local_new_i32(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); tcg_gen_extu_i32_i64(ta, cpu_R[ra]); tcg_gen_extu_i32_i64(tb, cpu_R[rb]); tcg_gen_sub_i64(td, ta, tb); tcg_gen_trunc_i64_i32(res, td); tcg_gen_shri_i64(td, td, 31); tcg_gen_andi_i64(td, td, 0x3); /* Jump to lab when no overflow. 
*/ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab); tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab); tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab); gen_exception(dc, EXCP_RANGE); gen_set_label(lab); tcg_gen_mov_i32(cpu_R[rd], res); tcg_temp_free_i64(ta); tcg_temp_free_i64(tb); tcg_temp_free_i64(td); tcg_temp_free_i32(res); tcg_temp_free_i32(sr_ove); } break; default: gen_illegal_exception(dc); break; } break; case 0x0003: /* l.and */ switch (op1) { case 0x00: LOG_DIS(\"l.and r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_and_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; default: gen_illegal_exception(dc); break; } break; case 0x0004: /* l.or */ switch (op1) { case 0x00: LOG_DIS(\"l.or r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_or_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; default: gen_illegal_exception(dc); break; } break; case 0x0005: switch (op1) { case 0x00: /* l.xor */ LOG_DIS(\"l.xor r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_xor_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; default: gen_illegal_exception(dc); break; } break; case 0x0006: switch (op1) { case 0x03: /* l.mul */ LOG_DIS(\"l.mul r%d, r%d, r%d\\n\", rd, ra, rb); if (ra != 0 && rb != 0) { gen_helper_mul32(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); } else { tcg_gen_movi_tl(cpu_R[rd], 0x0); } break; default: gen_illegal_exception(dc); break; } break; case 0x0009: switch (op1) { case 0x03: /* l.div */ LOG_DIS(\"l.div r%d, r%d, r%d\\n\", rd, ra, rb); { int lab0 = gen_new_label(); int lab1 = gen_new_label(); int lab2 = gen_new_label(); int lab3 = gen_new_label(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); if (rb == 0) { tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab0); gen_exception(dc, EXCP_RANGE); gen_set_label(lab0); } else { tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[rb], 0x00000000, lab1); tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[ra], 0x80000000, lab2); tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[rb], 0xffffffff, lab2); gen_set_label(lab1); tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab3); gen_exception(dc, EXCP_RANGE); gen_set_label(lab2); tcg_gen_div_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); gen_set_label(lab3); } tcg_temp_free_i32(sr_ove); } break; default: gen_illegal_exception(dc); break; } break; case 0x000a: switch (op1) { case 0x03: /* l.divu */ LOG_DIS(\"l.divu r%d, r%d, r%d\\n\", rd, ra, rb); { int lab0 = gen_new_label(); int lab1 = gen_new_label(); int lab2 = gen_new_label(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); if (rb == 0) { tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab0); gen_exception(dc, EXCP_RANGE); gen_set_label(lab0); } else { tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[rb], 0x00000000, lab1); tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab2); gen_exception(dc, EXCP_RANGE); gen_set_label(lab1); tcg_gen_divu_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); gen_set_label(lab2); } tcg_temp_free_i32(sr_ove); } break; default: gen_illegal_exception(dc); break; } break; case 0x000b: switch (op1) { case 0x03: /* l.mulu */ LOG_DIS(\"l.mulu r%d, r%d, r%d\\n\", rd, ra, rb); if (rb != 0 && ra != 0) { TCGv_i64 result = tcg_temp_local_new_i64(); TCGv_i64 tra = tcg_temp_local_new_i64(); 
TCGv_i64 trb = tcg_temp_local_new_i64(); TCGv_i64 high = tcg_temp_new_i64(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); int lab = gen_new_label(); /* Calculate each result. */ tcg_gen_extu_i32_i64(tra, cpu_R[ra]); tcg_gen_extu_i32_i64(trb, cpu_R[rb]); tcg_gen_mul_i64(result, tra, trb); tcg_temp_free_i64(tra); tcg_temp_free_i64(trb); tcg_gen_shri_i64(high, result, TARGET_LONG_BITS); /* Overflow or not. */ tcg_gen_brcondi_i64(TCG_COND_EQ, high, 0x00000000, lab); tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab); gen_exception(dc, EXCP_RANGE); gen_set_label(lab); tcg_temp_free_i64(high); tcg_gen_trunc_i64_tl(cpu_R[rd], result); tcg_temp_free_i64(result); tcg_temp_free_i32(sr_ove); } else { tcg_gen_movi_tl(cpu_R[rd], 0); } break; default: gen_illegal_exception(dc); break; } break; case 0x000e: switch (op1) { case 0x00: /* l.cmov */ LOG_DIS(\"l.cmov r%d, r%d, r%d\\n\", rd, ra, rb); { int lab = gen_new_label(); TCGv res = tcg_temp_local_new(); TCGv sr_f = tcg_temp_new(); tcg_gen_andi_tl(sr_f, cpu_sr, SR_F); tcg_gen_mov_tl(res, cpu_R[rb]); tcg_gen_brcondi_tl(TCG_COND_NE, sr_f, SR_F, lab); tcg_gen_mov_tl(res, cpu_R[ra]); gen_set_label(lab); tcg_gen_mov_tl(cpu_R[rd], res); tcg_temp_free(sr_f); tcg_temp_free(res); } break; default: gen_illegal_exception(dc); break; } break; case 0x000f: switch (op1) { case 0x00: /* l.ff1 */ LOG_DIS(\"l.ff1 r%d, r%d, r%d\\n\", rd, ra, rb); gen_helper_ff1(cpu_R[rd], cpu_R[ra]); break; case 0x01: /* l.fl1 */ LOG_DIS(\"l.fl1 r%d, r%d, r%d\\n\", rd, ra, rb); gen_helper_fl1(cpu_R[rd], cpu_R[ra]); break; default: gen_illegal_exception(dc); break; } break; case 0x0008: switch (op1) { case 0x00: switch (op2) { case 0x00: /* l.sll */ LOG_DIS(\"l.sll r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_shl_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; case 0x01: /* l.srl */ LOG_DIS(\"l.srl r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_shr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; case 0x02: /* l.sra */ LOG_DIS(\"l.sra r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_sar_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; case 0x03: /* l.ror */ LOG_DIS(\"l.ror r%d, r%d, r%d\\n\", rd, ra, rb); tcg_gen_rotr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; default: gen_illegal_exception(dc); break; } break; default: gen_illegal_exception(dc); break; } break; case 0x000c: switch (op1) { case 0x00: switch (op2) { case 0x00: /* l.exths */ LOG_DIS(\"l.exths r%d, r%d\\n\", rd, ra); tcg_gen_ext16s_tl(cpu_R[rd], cpu_R[ra]); break; case 0x01: /* l.extbs */ LOG_DIS(\"l.extbs r%d, r%d\\n\", rd, ra); tcg_gen_ext8s_tl(cpu_R[rd], cpu_R[ra]); break; case 0x02: /* l.exthz */ LOG_DIS(\"l.exthz r%d, r%d\\n\", rd, ra); tcg_gen_ext16u_tl(cpu_R[rd], cpu_R[ra]); break; case 0x03: /* l.extbz */ LOG_DIS(\"l.extbz r%d, r%d\\n\", rd, ra); tcg_gen_ext8u_tl(cpu_R[rd], cpu_R[ra]); break; default: gen_illegal_exception(dc); break; } break; default: gen_illegal_exception(dc); break; } break; case 0x000d: switch (op1) { case 0x00: switch (op2) { case 0x00: /* l.extws */ LOG_DIS(\"l.extws r%d, r%d\\n\", rd, ra); tcg_gen_ext32s_tl(cpu_R[rd], cpu_R[ra]); break; case 0x01: /* l.extwz */ LOG_DIS(\"l.extwz r%d, r%d\\n\", rd, ra); tcg_gen_ext32u_tl(cpu_R[rd], cpu_R[ra]); break; default: gen_illegal_exception(dc); break; } break; default: gen_illegal_exception(dc); break; } break; default: gen_illegal_exception(dc); break; } }"} {"target": 0, "idx": 21110, "func": "QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list) { return timer_list->clock->type; }"} 
{"target": 1, "idx": 21118, "func": "void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette) { long i; for(i=0; ireadfd != -1) { close(ioc->readfd); ioc->readfd = -1; } if (ioc->writefd != -1) { close(ioc->writefd); ioc->writefd = -1; } if (ioc->pid > 0) { #ifndef WIN32 qio_channel_command_abort(ioc, NULL); #endif } }"} {"target": 1, "idx": 21155, "func": "static int read_gab2_sub(AVStream *st, AVPacket *pkt) { if (!strcmp(pkt->data, \"GAB2\") && AV_RL16(pkt->data+5) == 2) { uint8_t desc[256]; int score = AVPROBE_SCORE_MAX / 2, ret; AVIStream *ast = st->priv_data; AVInputFormat *sub_demuxer; AVRational time_base; AVIOContext *pb = avio_alloc_context( pkt->data + 7, pkt->size - 7, 0, NULL, NULL, NULL, NULL); AVProbeData pd; unsigned int desc_len = avio_rl32(pb); if (desc_len > pb->buf_end - pb->buf_ptr) goto error; ret = avio_get_str16le(pb, desc_len, desc, sizeof(desc)); avio_skip(pb, desc_len - ret); if (*desc) av_dict_set(&st->metadata, \"title\", desc, 0); avio_rl16(pb); /* flags? */ avio_rl32(pb); /* data size */ pd = (AVProbeData) { .buf = pb->buf_ptr, .buf_size = pb->buf_end - pb->buf_ptr }; if (!(sub_demuxer = av_probe_input_format2(&pd, 1, &score))) goto error; if (!(ast->sub_ctx = avformat_alloc_context())) goto error; ast->sub_ctx->pb = pb; if (!avformat_open_input(&ast->sub_ctx, \"\", sub_demuxer, NULL)) { ff_read_packet(ast->sub_ctx, &ast->sub_pkt); *st->codec = *ast->sub_ctx->streams[0]->codec; ast->sub_ctx->streams[0]->codec->extradata = NULL; time_base = ast->sub_ctx->streams[0]->time_base; avpriv_set_pts_info(st, 64, time_base.num, time_base.den); } ast->sub_buffer = pkt->data; memset(pkt, 0, sizeof(*pkt)); return 1; error: av_freep(&pb); } return 0; }"} {"target": 0, "idx": 21166, "func": "int av_set_string3(void *obj, const char *name, const char *val, int alloc, const AVOption **o_out) { int ret; const AVOption *o = av_opt_find(obj, name, NULL, 0, 0); if (o_out) *o_out = o; if (!o) return AVERROR_OPTION_NOT_FOUND; if (!val || o->offset<=0) return AVERROR(EINVAL); if (o->type == FF_OPT_TYPE_BINARY) { uint8_t **dst = (uint8_t **)(((uint8_t*)obj) + o->offset); int *lendst = (int *)(dst + 1); uint8_t *bin, *ptr; int len = strlen(val); av_freep(dst); *lendst = 0; if (len & 1) return AVERROR(EINVAL); len /= 2; ptr = bin = av_malloc(len); while (*val) { int a = hexchar2int(*val++); int b = hexchar2int(*val++); if (a < 0 || b < 0) { av_free(bin); return AVERROR(EINVAL); } *ptr++ = (a << 4) | b; } *dst = bin; *lendst = len; return 0; } if (o->type != FF_OPT_TYPE_STRING) { int notfirst=0; for (;;) { int i; char buf[256]; int cmd=0; double d; if (*val == '+' || *val == '-') cmd= *(val++); for (i=0; iunit, 0, 0); if (o_named && o_named->type == FF_OPT_TYPE_CONST) d= o_named->default_val.dbl; else if (!strcmp(buf, \"default\")) d= o->default_val.dbl; else if (!strcmp(buf, \"max\" )) d= o->max; else if (!strcmp(buf, \"min\" )) d= o->min; else if (!strcmp(buf, \"none\" )) d= 0; else if (!strcmp(buf, \"all\" )) d= ~0; else { int res = av_expr_parse_and_eval(&d, buf, const_names, const_values, NULL, NULL, NULL, NULL, NULL, 0, obj); if (res < 0) { av_log(obj, AV_LOG_ERROR, \"Unable to parse option value \\\"%s\\\"\\n\", val); return res; } } } if (o->type == FF_OPT_TYPE_FLAGS) { if (cmd=='+') d= av_get_int(obj, name, NULL) | (int64_t)d; else if (cmd=='-') d= av_get_int(obj, name, NULL) &~(int64_t)d; } else { if (cmd=='+') d= notfirst*av_get_double(obj, name, NULL) + d; else if (cmd=='-') d= notfirst*av_get_double(obj, name, NULL) - d; } if ((ret = 
av_set_number2(obj, name, d, 1, 1, o_out)) < 0) return ret; val+= i; if (!*val) return 0; notfirst=1; } return AVERROR(EINVAL); } if (alloc) { av_free(*(void**)(((uint8_t*)obj) + o->offset)); val= av_strdup(val); } memcpy(((uint8_t*)obj) + o->offset, &val, sizeof(val)); return 0; }"} {"target": 0, "idx": 21168, "func": "void ff_h264_h_lpf_chroma_inter_msa(uint8_t *data, int img_width, int alpha, int beta, int8_t *tc) { uint8_t bs0 = 1; uint8_t bs1 = 1; uint8_t bs2 = 1; uint8_t bs3 = 1; if (tc[0] < 0) bs0 = 0; if (tc[1] < 0) bs1 = 0; if (tc[2] < 0) bs2 = 0; if (tc[3] < 0) bs3 = 0; avc_loopfilter_cb_or_cr_inter_edge_ver_msa(data, bs0, bs1, bs2, bs3, tc[0], tc[1], tc[2], tc[3], alpha, beta, img_width); }"} {"target": 0, "idx": 21202, "func": "void slirp_select_poll(fd_set *readfds, fd_set *writefds, fd_set *xfds, int select_error) { Slirp *slirp; struct socket *so, *so_next; int ret; if (QTAILQ_EMPTY(&slirp_instances)) { return; } global_readfds = readfds; global_writefds = writefds; global_xfds = xfds; curtime = qemu_get_clock_ms(rt_clock); QTAILQ_FOREACH(slirp, &slirp_instances, entry) { /* * See if anything has timed out */ if (time_fasttimo && ((curtime - time_fasttimo) >= 2)) { tcp_fasttimo(slirp); time_fasttimo = 0; } if (do_slowtimo && ((curtime - last_slowtimo) >= 499)) { ip_slowtimo(slirp); tcp_slowtimo(slirp); last_slowtimo = curtime; } /* * Check sockets */ if (!select_error) { /* * Check TCP sockets */ for (so = slirp->tcb.so_next; so != &slirp->tcb; so = so_next) { so_next = so->so_next; /* * FD_ISSET is meaningless on these sockets * (and they can crash the program) */ if (so->so_state & SS_NOFDREF || so->s == -1) continue; /* * Check for URG data * This will soread as well, so no need to * test for readfds below if this succeeds */ if (FD_ISSET(so->s, xfds)) sorecvoob(so); /* * Check sockets for reading */ else if (FD_ISSET(so->s, readfds)) { /* * Check for incoming connections */ if (so->so_state & SS_FACCEPTCONN) { tcp_connect(so); continue; } /* else */ ret = soread(so); /* Output it if we read something */ if (ret > 0) tcp_output(sototcpcb(so)); } /* * Check sockets for writing */ if (FD_ISSET(so->s, writefds)) { /* * Check for non-blocking, still-connecting sockets */ if (so->so_state & SS_ISFCONNECTING) { /* Connected */ so->so_state &= ~SS_ISFCONNECTING; ret = send(so->s, (const void *) &ret, 0, 0); if (ret < 0) { /* XXXXX Must fix, zero bytes is a NOP */ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINPROGRESS || errno == ENOTCONN) continue; /* else failed */ so->so_state &= SS_PERSISTENT_MASK; so->so_state |= SS_NOFDREF; } /* else so->so_state &= ~SS_ISFCONNECTING; */ /* * Continue tcp_input */ tcp_input((struct mbuf *)NULL, sizeof(struct ip), so); /* continue; */ } else ret = sowrite(so); /* * XXXXX If we wrote something (a lot), there * could be a need for a window update. 
* In the worst case, the remote will send * a window probe to get things going again */ } /* * Probe a still-connecting, non-blocking socket * to check if it's still alive */ #ifdef PROBE_CONN if (so->so_state & SS_ISFCONNECTING) { ret = qemu_recv(so->s, &ret, 0,0); if (ret < 0) { /* XXX */ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINPROGRESS || errno == ENOTCONN) continue; /* Still connecting, continue */ /* else failed */ so->so_state &= SS_PERSISTENT_MASK; so->so_state |= SS_NOFDREF; /* tcp_input will take care of it */ } else { ret = send(so->s, &ret, 0,0); if (ret < 0) { /* XXX */ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINPROGRESS || errno == ENOTCONN) continue; /* else failed */ so->so_state &= SS_PERSISTENT_MASK; so->so_state |= SS_NOFDREF; } else so->so_state &= ~SS_ISFCONNECTING; } tcp_input((struct mbuf *)NULL, sizeof(struct ip),so); } /* SS_ISFCONNECTING */ #endif } /* * Now UDP sockets. * Incoming packets are sent straight away, they're not buffered. * Incoming UDP data isn't buffered either. */ for (so = slirp->udb.so_next; so != &slirp->udb; so = so_next) { so_next = so->so_next; if (so->s != -1 && FD_ISSET(so->s, readfds)) { sorecvfrom(so); } } /* * Check incoming ICMP relies. */ for (so = slirp->icmp.so_next; so != &slirp->icmp; so = so_next) { so_next = so->so_next; if (so->s != -1 && FD_ISSET(so->s, readfds)) { icmp_receive(so); } } } /* * See if we can start outputting */ if (slirp->if_queued) { if_start(slirp); } } /* clear global file descriptor sets. * these reside on the stack in vl.c * so they're unusable if we're not in * slirp_select_fill or slirp_select_poll. */ global_readfds = NULL; global_writefds = NULL; global_xfds = NULL; }"} {"target": 0, "idx": 21218, "func": "uint64_t helper_cmpbge (uint64_t op1, uint64_t op2) { uint8_t opa, opb, res; int i; res = 0; for (i = 0; i < 7; i++) { opa = op1 >> (i * 8); opb = op2 >> (i * 8); if (opa >= opb) res |= 1 << i; } return res; }"} {"target": 0, "idx": 21219, "func": "static int s390_ipl_init(SysBusDevice *dev) { S390IPLState *ipl = S390_IPL(dev); uint64_t pentry = KERN_IMAGE_START; int kernel_size; int bios_size; char *bios_filename; /* * Always load the bios if it was enforced, * even if an external kernel has been defined. */ if (!ipl->kernel || ipl->enforce_bios) { uint64_t fwbase = (MIN(ram_size, 0x80000000U) - 0x200000) & ~0xffffUL; if (bios_name == NULL) { bios_name = ipl->firmware; } bios_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (bios_filename == NULL) { hw_error(\"could not find stage1 bootloader\\n\"); } bios_size = load_elf(bios_filename, bios_translate_addr, &fwbase, &ipl->bios_start_addr, NULL, NULL, 1, ELF_MACHINE, 0); if (bios_size > 0) { /* Adjust ELF start address to final location */ ipl->bios_start_addr += fwbase; } else { /* Try to load non-ELF file (e.g. 
s390-zipl.rom) */ bios_size = load_image_targphys(bios_filename, ZIPL_IMAGE_START, 4096); ipl->bios_start_addr = ZIPL_IMAGE_START; if (bios_size > 4096) { hw_error(\"stage1 bootloader is > 4k\\n\"); } } g_free(bios_filename); if (bios_size == -1) { hw_error(\"could not load bootloader '%s'\\n\", bios_name); } /* default boot target is the bios */ ipl->start_addr = ipl->bios_start_addr; } if (ipl->kernel) { kernel_size = load_elf(ipl->kernel, NULL, NULL, &pentry, NULL, NULL, 1, ELF_MACHINE, 0); if (kernel_size < 0) { kernel_size = load_image_targphys(ipl->kernel, 0, ram_size); } if (kernel_size < 0) { fprintf(stderr, \"could not load kernel '%s'\\n\", ipl->kernel); return -1; } /* * Is it a Linux kernel (starting at 0x10000)? If yes, we fill in the * kernel parameters here as well. Note: For old kernels (up to 3.2) * we can not rely on the ELF entry point - it was 0x800 (the SALIPL * loader) and it won't work. For this case we force it to 0x10000, too. */ if (pentry == KERN_IMAGE_START || pentry == 0x800) { ipl->start_addr = KERN_IMAGE_START; /* Overwrite parameters in the kernel image, which are \"rom\" */ strcpy(rom_ptr(KERN_PARM_AREA), ipl->cmdline); } else { ipl->start_addr = pentry; } if (ipl->initrd) { ram_addr_t initrd_offset; int initrd_size; initrd_offset = INITRD_START; while (kernel_size + 0x100000 > initrd_offset) { initrd_offset += 0x100000; } initrd_size = load_image_targphys(ipl->initrd, initrd_offset, ram_size - initrd_offset); if (initrd_size == -1) { fprintf(stderr, \"qemu: could not load initrd '%s'\\n\", ipl->initrd); exit(1); } /* * we have to overwrite values in the kernel image, * which are \"rom\" */ stq_p(rom_ptr(INITRD_PARM_START), initrd_offset); stq_p(rom_ptr(INITRD_PARM_SIZE), initrd_size); } } return 0; }"} {"target": 1, "idx": 21250, "func": "static int virtcon_parse(const char *devname) { QemuOptsList *device = qemu_find_opts(\"device\"); static int index = 0; char label[32]; QemuOpts *bus_opts, *dev_opts; if (strcmp(devname, \"none\") == 0) return 0; if (index == MAX_VIRTIO_CONSOLES) { fprintf(stderr, \"qemu: too many virtio consoles\\n\"); exit(1); } bus_opts = qemu_opts_create(device, NULL, 0); if (arch_type == QEMU_ARCH_S390X) { qemu_opt_set(bus_opts, \"driver\", \"virtio-serial-s390\"); } else { qemu_opt_set(bus_opts, \"driver\", \"virtio-serial-pci\"); } dev_opts = qemu_opts_create(device, NULL, 0); qemu_opt_set(dev_opts, \"driver\", \"virtconsole\"); snprintf(label, sizeof(label), \"virtcon%d\", index); virtcon_hds[index] = qemu_chr_new(label, devname, NULL); if (!virtcon_hds[index]) { fprintf(stderr, \"qemu: could not open virtio console '%s': %s\\n\", devname, strerror(errno)); return -1; } qemu_opt_set(dev_opts, \"chardev\", label); index++; return 0; }"} {"target": 0, "idx": 21256, "func": "static void monitor_puts(Monitor *mon, const char *str) { char c; for(;;) { c = *str++; if (c == '\\0') break; if (c == '\\n') { qstring_append_chr(mon->outbuf, '\\r'); } qstring_append_chr(mon->outbuf, c); if (c == '\\n') { monitor_flush(mon); } } }"} {"target": 0, "idx": 21261, "func": "static void gen_movl_seg_T0(DisasContext *s, int seg_reg) { if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32); /* abort translation because the addseg value may change or because ss32 may change. 
For R_SS, translation must always stop as a special handling must be done to disable hardware interrupts for the next instruction */ if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) s->is_jmp = DISAS_TB_JUMP; } else { gen_op_movl_seg_T0_vm(seg_reg); if (seg_reg == R_SS) s->is_jmp = DISAS_TB_JUMP; } }"} {"target": 0, "idx": 21267, "func": "void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb) { CPUState *cs = CPU(cpu); CPUARMState *env = &cpu->env; DisasContext dc1, *dc = &dc1; target_ulong pc_start; target_ulong next_page_start; int num_insns; int max_insns; pc_start = tb->pc; dc->tb = tb; dc->is_jmp = DISAS_NEXT; dc->pc = pc_start; dc->singlestep_enabled = cs->singlestep_enabled; dc->condjmp = 0; dc->aarch64 = 1; /* If we are coming from secure EL0 in a system with a 32-bit EL3, then * there is no secure EL1, so we route exceptions to EL3. */ dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3); dc->thumb = 0; dc->bswap_code = 0; dc->condexec_mask = 0; dc->condexec_cond = 0; dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags); dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); #if !defined(CONFIG_USER_ONLY) dc->user = (dc->current_el == 0); #endif dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags); dc->vec_len = 0; dc->vec_stride = 0; dc->cp_regs = cpu->cp_regs; dc->features = env->features; /* Single step state. The code-generation logic here is: * SS_ACTIVE == 0: * generate code with no special handling for single-stepping (except * that anything that can make us go to SS_ACTIVE == 1 must end the TB; * this happens anyway because those changes are all system register or * PSTATE writes). * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) * emit code for one insn * emit code to clear PSTATE.SS * emit code to generate software step exception for completed step * end TB (as usual for having generated an exception) * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) * emit code to generate a software step exception * end the TB */ dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags); dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags); dc->is_ldex = false; dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el); init_tmp_a64_array(dc); next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) { max_insns = CF_COUNT_MASK; } if (max_insns > TCG_MAX_INSNS) { max_insns = TCG_MAX_INSNS; } gen_tb_start(tb); tcg_clear_temp_count(); do { tcg_gen_insn_start(dc->pc, 0); num_insns++; if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { CPUBreakpoint *bp; QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { if (bp->pc == dc->pc) { gen_exception_internal_insn(dc, 0, EXCP_DEBUG); /* Advance PC so that clearing the breakpoint will invalidate this TB. */ dc->pc += 2; goto done_generating; } } } if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { gen_io_start(); } if (dc->ss_active && !dc->pstate_ss) { /* Singlestep state is Active-pending. * If we're in this state at the start of a TB then either * a) we just took an exception to an EL which is being debugged * and this is the first insn in the exception handler * b) debug exceptions were masked and we just unmasked them * without changing EL (eg by clearing PSTATE.D) * In either case we're going to take a swstep exception in the * \"did not step an insn\" case, and so the syndrome ISV and EX * bits should be zero. 
*/ assert(num_insns == 1); gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0), default_exception_el(dc)); dc->is_jmp = DISAS_EXC; break; } disas_a64_insn(env, dc); if (tcg_check_temp_count()) { fprintf(stderr, \"TCG temporary leak before \"TARGET_FMT_lx\"\\n\", dc->pc); } /* Translation stops when a conditional branch is encountered. * Otherwise the subsequent code could get translated several times. * Also stop translation when a page boundary is reached. This * ensures prefetch aborts occur at the right place. */ } while (!dc->is_jmp && !tcg_op_buf_full() && !cs->singlestep_enabled && !singlestep && !dc->ss_active && dc->pc < next_page_start && num_insns < max_insns); if (tb->cflags & CF_LAST_IO) { gen_io_end(); } if (unlikely(cs->singlestep_enabled || dc->ss_active) && dc->is_jmp != DISAS_EXC) { /* Note that this means single stepping WFI doesn't halt the CPU. * For conditional branch insns this is harmless unreachable code as * gen_goto_tb() has already handled emitting the debug exception * (and thus a tb-jump is not possible when singlestepping). */ assert(dc->is_jmp != DISAS_TB_JUMP); if (dc->is_jmp != DISAS_JUMP) { gen_a64_set_pc_im(dc->pc); } if (cs->singlestep_enabled) { gen_exception_internal(EXCP_DEBUG); } else { gen_step_complete_exception(dc); } } else { switch (dc->is_jmp) { case DISAS_NEXT: gen_goto_tb(dc, 1, dc->pc); break; default: case DISAS_UPDATE: gen_a64_set_pc_im(dc->pc); /* fall through */ case DISAS_JUMP: /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(0); break; case DISAS_TB_JUMP: case DISAS_EXC: case DISAS_SWI: break; case DISAS_WFE: gen_a64_set_pc_im(dc->pc); gen_helper_wfe(cpu_env); break; case DISAS_YIELD: gen_a64_set_pc_im(dc->pc); gen_helper_yield(cpu_env); break; case DISAS_WFI: /* This is a special case because we don't want to just halt the CPU * if trying to debug across a WFI. */ gen_a64_set_pc_im(dc->pc); gen_helper_wfi(cpu_env); /* The helper doesn't necessarily throw an exception, but we * must go back to the main loop to check for interrupts anyway. 
*/ tcg_gen_exit_tb(0); break; } } done_generating: gen_tb_end(tb, num_insns); #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log(\"----------------\\n\"); qemu_log(\"IN: %s\\n\", lookup_symbol(pc_start)); log_target_disas(cs, pc_start, dc->pc - pc_start, 4 | (dc->bswap_code << 1)); qemu_log(\"\\n\"); } #endif tb->size = dc->pc - pc_start; tb->icount = num_insns; }"} {"target": 0, "idx": 21281, "func": "static int mov_read_stsc(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; MOVStreamContext *sc; unsigned int i, entries; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; avio_r8(pb); /* version */ avio_rb24(pb); /* flags */ entries = avio_rb32(pb); av_log(c->fc, AV_LOG_TRACE, \"track[%i].stsc.entries = %i\\n\", c->fc->nb_streams-1, entries); if (!entries) return 0; if (entries >= UINT_MAX / sizeof(*sc->stsc_data)) return AVERROR_INVALIDDATA; sc->stsc_data = av_malloc(entries * sizeof(*sc->stsc_data)); if (!sc->stsc_data) return AVERROR(ENOMEM); for (i = 0; i < entries && !pb->eof_reached; i++) { sc->stsc_data[i].first = avio_rb32(pb); sc->stsc_data[i].count = avio_rb32(pb); sc->stsc_data[i].id = avio_rb32(pb); if (sc->stsc_data[i].id < 0 || sc->stsc_data[i].id > sc->stsd_count) { sc->stsc_data[i].id = 0; if (c->fc->error_recognition & AV_EF_EXPLODE) { av_log(c->fc, AV_LOG_ERROR, \"Invalid stsc index.\\n\"); return AVERROR_INVALIDDATA; } } } sc->stsc_count = i; if (pb->eof_reached) return AVERROR_EOF; return 0; }"} {"target": 1, "idx": 21284, "func": "static int decode_copy(uint8_t *frame, int width, int height, const uint8_t *src, const uint8_t *src_end) { const int size = width * height; if (src_end - src < size) return -1; bytestream_get_buffer(&src, frame, size); return 0; }"} {"target": 1, "idx": 21296, "func": "static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req) { NvmeNamespace *ns; uint32_t nsid = le32_to_cpu(cmd->nsid); if (nsid == 0 || nsid > n->num_namespaces) { return NVME_INVALID_NSID | NVME_DNR; } ns = &n->namespaces[nsid - 1]; switch (cmd->opcode) { case NVME_CMD_FLUSH: return nvme_flush(n, ns, cmd, req); case NVME_CMD_WRITE_ZEROS: return nvme_write_zeros(n, ns, cmd, req); case NVME_CMD_WRITE: case NVME_CMD_READ: return nvme_rw(n, ns, cmd, req); default: return NVME_INVALID_OPCODE | NVME_DNR; } }"} {"target": 1, "idx": 21302, "func": "void avfilter_link_free(AVFilterLink **link) { if (!*link) return; if ((*link)->pool) { int i; for (i = 0; i < POOL_SIZE; i++) { if ((*link)->pool->pic[i]) { AVFilterBufferRef *picref = (*link)->pool->pic[i]; /* free buffer: picrefs stored in the pool are not * supposed to contain a free callback */ av_freep(&picref->buf->data[0]); av_freep(&picref->buf); av_freep(&picref->audio); av_freep(&picref->video); av_freep(&picref); } } av_freep(&(*link)->pool); } av_freep(link); }"} {"target": 0, "idx": 21306, "func": "static uint64_t pl110_read(void *opaque, hwaddr offset, unsigned size) { pl110_state *s = (pl110_state *)opaque; if (offset >= 0xfe0 && offset < 0x1000) { return idregs[s->version][(offset - 0xfe0) >> 2]; } if (offset >= 0x200 && offset < 0x400) { return s->raw_palette[(offset - 0x200) >> 2]; } switch (offset >> 2) { case 0: /* LCDTiming0 */ return s->timing[0]; case 1: /* LCDTiming1 */ return s->timing[1]; case 2: /* LCDTiming2 */ return s->timing[2]; case 3: /* LCDTiming3 */ return s->timing[3]; case 4: /* LCDUPBASE */ return s->upbase; case 5: /* LCDLPBASE */ return s->lpbase; case 6: /* LCDIMSC */ if (s->version != PL110) { return 
s->cr; } return s->int_mask; case 7: /* LCDControl */ if (s->version != PL110) { return s->int_mask; } return s->cr; case 8: /* LCDRIS */ return s->int_status; case 9: /* LCDMIS */ return s->int_status & s->int_mask; case 11: /* LCDUPCURR */ /* TODO: Implement vertical refresh. */ return s->upbase; case 12: /* LCDLPCURR */ return s->lpbase; default: hw_error(\"pl110_read: Bad offset %x\\n\", (int)offset); return 0; } }"} {"target": 0, "idx": 21311, "func": "static void fill_coding_method_array (sb_int8_array tone_level_idx, sb_int8_array tone_level_idx_temp, sb_int8_array coding_method, int nb_channels, int c, int superblocktype_2_3, int cm_table_select) { int ch, sb, j; int tmp, acc, esp_40, comp; int add1, add2, add3, add4; int64_t multres; // This should never happen if (nb_channels <= 0) return; if (!superblocktype_2_3) { /* This case is untested, no samples available */ SAMPLES_NEEDED for (ch = 0; ch < nb_channels; ch++) for (sb = 0; sb < 30; sb++) { for (j = 1; j < 63; j++) { // The loop only iterates to 63 so the code doesn't overflow the buffer add1 = tone_level_idx[ch][sb][j] - 10; if (add1 < 0) add1 = 0; add2 = add3 = add4 = 0; if (sb > 1) { add2 = tone_level_idx[ch][sb - 2][j] + tone_level_idx_offset_table[sb][0] - 6; if (add2 < 0) add2 = 0; } if (sb > 0) { add3 = tone_level_idx[ch][sb - 1][j] + tone_level_idx_offset_table[sb][1] - 6; if (add3 < 0) add3 = 0; } if (sb < 29) { add4 = tone_level_idx[ch][sb + 1][j] + tone_level_idx_offset_table[sb][3] - 6; if (add4 < 0) add4 = 0; } tmp = tone_level_idx[ch][sb][j + 1] * 2 - add4 - add3 - add2 - add1; if (tmp < 0) tmp = 0; tone_level_idx_temp[ch][sb][j + 1] = tmp & 0xff; } tone_level_idx_temp[ch][sb][0] = tone_level_idx_temp[ch][sb][1]; } acc = 0; for (ch = 0; ch < nb_channels; ch++) for (sb = 0; sb < 30; sb++) for (j = 0; j < 64; j++) acc += tone_level_idx_temp[ch][sb][j]; multres = 0x66666667 * (acc * 10); esp_40 = (multres >> 32) / 8 + ((multres & 0xffffffff) >> 31); for (ch = 0; ch < nb_channels; ch++) for (sb = 0; sb < 30; sb++) for (j = 0; j < 64; j++) { comp = tone_level_idx_temp[ch][sb][j]* esp_40 * 10; if (comp < 0) comp += 0xff; comp /= 256; // signed shift switch(sb) { case 0: if (comp < 30) comp = 30; comp += 15; break; case 1: if (comp < 24) comp = 24; comp += 10; break; case 2: case 3: case 4: if (comp < 16) comp = 16; } if (comp <= 5) tmp = 0; else if (comp <= 10) tmp = 10; else if (comp <= 16) tmp = 16; else if (comp <= 24) tmp = -1; else tmp = 0; coding_method[ch][sb][j] = ((tmp & 0xfffa) + 30 )& 0xff; } for (sb = 0; sb < 30; sb++) fix_coding_method_array(sb, nb_channels, coding_method); for (ch = 0; ch < nb_channels; ch++) for (sb = 0; sb < 30; sb++) for (j = 0; j < 64; j++) if (sb >= 10) { if (coding_method[ch][sb][j] < 10) coding_method[ch][sb][j] = 10; } else { if (sb >= 2) { if (coding_method[ch][sb][j] < 16) coding_method[ch][sb][j] = 16; } else { if (coding_method[ch][sb][j] < 30) coding_method[ch][sb][j] = 30; } } } else { // superblocktype_2_3 != 0 for (ch = 0; ch < nb_channels; ch++) for (sb = 0; sb < 30; sb++) for (j = 0; j < 64; j++) coding_method[ch][sb][j] = coding_method_table[cm_table_select][sb]; } return; }"} {"target": 0, "idx": 21324, "func": "void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, int flags) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; int i; if (is_a64(env)) { aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags); return; } for(i=0;i<16;i++) { cpu_fprintf(f, \"R%02d=%08x\", i, env->regs[i]); if ((i % 4) == 3) cpu_fprintf(f, \"\\n\"); else 
cpu_fprintf(f, \" \"); } if (arm_feature(env, ARM_FEATURE_M)) { uint32_t xpsr = xpsr_read(env); const char *mode; if (xpsr & XPSR_EXCP) { mode = \"handler\"; } else { if (env->v7m.control & R_V7M_CONTROL_NPRIV_MASK) { mode = \"unpriv-thread\"; } else { mode = \"priv-thread\"; } } cpu_fprintf(f, \"XPSR=%08x %c%c%c%c %c %s\\n\", xpsr, xpsr & XPSR_N ? 'N' : '-', xpsr & XPSR_Z ? 'Z' : '-', xpsr & XPSR_C ? 'C' : '-', xpsr & XPSR_V ? 'V' : '-', xpsr & XPSR_T ? 'T' : 'A', mode); } else { uint32_t psr = cpsr_read(env); const char *ns_status = \"\"; if (arm_feature(env, ARM_FEATURE_EL3) && (psr & CPSR_M) != ARM_CPU_MODE_MON) { ns_status = env->cp15.scr_el3 & SCR_NS ? \"NS \" : \"S \"; } cpu_fprintf(f, \"PSR=%08x %c%c%c%c %c %s%s%d\\n\", psr, psr & CPSR_N ? 'N' : '-', psr & CPSR_Z ? 'Z' : '-', psr & CPSR_C ? 'C' : '-', psr & CPSR_V ? 'V' : '-', psr & CPSR_T ? 'T' : 'A', ns_status, cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26); } if (flags & CPU_DUMP_FPU) { int numvfpregs = 0; if (arm_feature(env, ARM_FEATURE_VFP)) { numvfpregs += 16; } if (arm_feature(env, ARM_FEATURE_VFP3)) { numvfpregs += 16; } for (i = 0; i < numvfpregs; i++) { uint64_t v = float64_val(env->vfp.regs[i]); cpu_fprintf(f, \"s%02d=%08x s%02d=%08x d%02d=%016\" PRIx64 \"\\n\", i * 2, (uint32_t)v, i * 2 + 1, (uint32_t)(v >> 32), i, v); } cpu_fprintf(f, \"FPSCR: %08x\\n\", (int)env->vfp.xregs[ARM_VFP_FPSCR]); } }"} {"target": 1, "idx": 21335, "func": "int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, void *opaque, int version_id) { VMStateField *field = vmsd->fields; int ret = 0; trace_vmstate_load_state(vmsd->name, version_id); if (version_id > vmsd->version_id) { error_report(\"%s: incoming version_id %d is too new \" \"for local version_id %d\", vmsd->name, version_id, vmsd->version_id); trace_vmstate_load_state_end(vmsd->name, \"too new\", -EINVAL); return -EINVAL; } if (version_id < vmsd->minimum_version_id) { if (vmsd->load_state_old && version_id >= vmsd->minimum_version_id_old) { ret = vmsd->load_state_old(f, opaque, version_id); trace_vmstate_load_state_end(vmsd->name, \"old path\", ret); return ret; } error_report(\"%s: incoming version_id %d is too old \" \"for local minimum version_id %d\", vmsd->name, version_id, vmsd->minimum_version_id); trace_vmstate_load_state_end(vmsd->name, \"too old\", -EINVAL); return -EINVAL; } if (vmsd->pre_load) { int ret = vmsd->pre_load(opaque); if (ret) { return ret; } } while (field->name) { trace_vmstate_load_state_field(vmsd->name, field->name); if ((field->field_exists && field->field_exists(opaque, version_id)) || (!field->field_exists && field->version_id <= version_id)) { void *first_elem = opaque + field->offset; int i, n_elems = vmstate_n_elems(opaque, field); int size = vmstate_size(opaque, field); vmstate_handle_alloc(first_elem, field, opaque); if (field->flags & VMS_POINTER) { first_elem = *(void **)first_elem; assert(first_elem || !n_elems); } for (i = 0; i < n_elems; i++) { void *curr_elem = first_elem + size * i; if (field->flags & VMS_ARRAY_OF_POINTER) { curr_elem = *(void **)curr_elem; } if (field->flags & VMS_STRUCT) { ret = vmstate_load_state(f, field->vmsd, curr_elem, field->vmsd->version_id); } else { ret = field->info->get(f, curr_elem, size, field); } if (ret >= 0) { ret = qemu_file_get_error(f); } if (ret < 0) { qemu_file_set_error(f, ret); error_report(\"Failed to load %s:%s\", vmsd->name, field->name); trace_vmstate_load_field_error(field->name, ret); return ret; } } } else if (field->flags & VMS_MUST_EXIST) { error_report(\"Input validation 
failed: %s/%s\", vmsd->name, field->name); return -1; } field++; } ret = vmstate_subsection_load(f, vmsd, opaque); if (ret != 0) { return ret; } if (vmsd->post_load) { ret = vmsd->post_load(opaque, version_id); } trace_vmstate_load_state_end(vmsd->name, \"end\", ret); return ret; }"} {"target": 0, "idx": 21348, "func": "static void xen_log_start(MemoryListener *listener, MemoryRegionSection *section) { XenIOState *state = container_of(listener, XenIOState, memory_listener); xen_sync_dirty_bitmap(state, section->offset_within_address_space, int128_get64(section->size)); }"} {"target": 1, "idx": 21374, "func": "static void mpegvideo_extract_headers(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t *buf, int buf_size) { ParseContext1 *pc = s->priv_data; const uint8_t *buf_end; uint32_t start_code; int frame_rate_index, ext_type, bytes_left; int frame_rate_ext_n, frame_rate_ext_d; int picture_structure, top_field_first, repeat_first_field, progressive_frame; int horiz_size_ext, vert_size_ext, bit_rate_ext; //FIXME replace the crap with get_bits() s->repeat_pict = 0; buf_end = buf + buf_size; while (buf < buf_end) { start_code= -1; buf= ff_find_start_code(buf, buf_end, &start_code); bytes_left = buf_end - buf; switch(start_code) { case PICTURE_START_CODE: ff_fetch_timestamp(s, buf-buf_start-4, 1); if (bytes_left >= 2) { s->pict_type = (buf[1] >> 3) & 7; } break; case SEQ_START_CODE: if (bytes_left >= 7) { pc->width = (buf[0] << 4) | (buf[1] >> 4); pc->height = ((buf[1] & 0x0f) << 8) | buf[2]; avcodec_set_dimensions(avctx, pc->width, pc->height); frame_rate_index = buf[3] & 0xf; pc->frame_rate.den = avctx->time_base.den = ff_frame_rate_tab[frame_rate_index].num; pc->frame_rate.num = avctx->time_base.num = ff_frame_rate_tab[frame_rate_index].den; avctx->bit_rate = ((buf[4]<<10) | (buf[5]<<2) | (buf[6]>>6))*400; avctx->codec_id = CODEC_ID_MPEG1VIDEO; avctx->sub_id = 1; } break; case EXT_START_CODE: if (bytes_left >= 1) { ext_type = (buf[0] >> 4); switch(ext_type) { case 0x1: /* sequence extension */ if (bytes_left >= 6) { horiz_size_ext = ((buf[1] & 1) << 1) | (buf[2] >> 7); vert_size_ext = (buf[2] >> 5) & 3; bit_rate_ext = ((buf[2] & 0x1F)<<7) | (buf[3]>>1); frame_rate_ext_n = (buf[5] >> 5) & 3; frame_rate_ext_d = (buf[5] & 0x1f); pc->progressive_sequence = buf[1] & (1 << 3); avctx->has_b_frames= !(buf[5] >> 7); pc->width |=(horiz_size_ext << 12); pc->height |=( vert_size_ext << 12); avctx->bit_rate += (bit_rate_ext << 18) * 400; avcodec_set_dimensions(avctx, pc->width, pc->height); avctx->time_base.den = pc->frame_rate.den * (frame_rate_ext_n + 1); avctx->time_base.num = pc->frame_rate.num * (frame_rate_ext_d + 1); avctx->codec_id = CODEC_ID_MPEG2VIDEO; avctx->sub_id = 2; /* forces MPEG2 */ } break; case 0x8: /* picture coding extension */ if (bytes_left >= 5) { picture_structure = buf[2]&3; top_field_first = buf[3] & (1 << 7); repeat_first_field = buf[3] & (1 << 1); progressive_frame = buf[4] & (1 << 7); /* check if we must repeat the frame */ if (repeat_first_field) { if (pc->progressive_sequence) { if (top_field_first) s->repeat_pict = 4; else s->repeat_pict = 2; } else if (progressive_frame) { s->repeat_pict = 1; } } } break; } } break; case -1: goto the_end; default: /* we stop parsing when we encounter a slice. 
It ensures that this function takes a negligible amount of time */ if (start_code >= SLICE_MIN_START_CODE && start_code <= SLICE_MAX_START_CODE) goto the_end; break; } } the_end: ; }"} {"target": 1, "idx": 21383, "func": "static void test_smbios_ep_address(test_data *data) { uint32_t off; /* find smbios entry point structure */ for (off = 0xf0000; off < 0x100000; off += 0x10) { uint8_t sig[] = \"_SM_\"; int i; for (i = 0; i < sizeof sig - 1; ++i) { sig[i] = readb(off + i); } if (!memcmp(sig, \"_SM_\", sizeof sig)) { break; } } g_assert_cmphex(off, <, 0x100000); data->smbios_ep_addr = off; }"} {"target": 1, "idx": 21385, "func": "static void mov_update_dts_shift(MOVStreamContext *sc, int duration) { if (duration < 0) { sc->dts_shift = FFMAX(sc->dts_shift, -duration);"} {"target": 1, "idx": 21386, "func": "static void fft_calc_c(FFTContext *s, FFTComplex *z) { int nbits, i, n, num_transforms, offset, step; int n4, n2, n34; FFTSample tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; FFTComplex *tmpz; const int fft_size = (1 << s->nbits); int64_t accu; num_transforms = (0x2aab >> (16 - s->nbits)) | 1; for (n=0; n> 1) | 1; for (n=0; n> 31); accu = (int64_t)Q31(M_SQRT1_2)*(tmp3 - tmp4); tmp7 = (int32_t)((accu + 0x40000000) >> 31); accu = (int64_t)Q31(M_SQRT1_2)*(tmp2 - tmp1); tmp6 = (int32_t)((accu + 0x40000000) >> 31); accu = (int64_t)Q31(M_SQRT1_2)*(tmp3 + tmp4); tmp8 = (int32_t)((accu + 0x40000000) >> 31); tmp1 = tmp5 + tmp7; tmp3 = tmp5 - tmp7; tmp2 = tmp6 + tmp8; tmp4 = tmp6 - tmp8; tmpz[5].re = tmpz[1].re - tmp1; tmpz[1].re = tmpz[1].re + tmp1; tmpz[5].im = tmpz[1].im - tmp2; tmpz[1].im = tmpz[1].im + tmp2; tmpz[7].re = tmpz[3].re - tmp4; tmpz[3].re = tmpz[3].re + tmp4; tmpz[7].im = tmpz[3].im + tmp3; tmpz[3].im = tmpz[3].im - tmp3; } step = 1 << ((MAX_LOG2_NFFT-4) - 4); n4 = 4; for (nbits=4; nbits<=s->nbits; nbits++){ n2 = 2*n4; n34 = 3*n4; num_transforms = (num_transforms >> 1) | 1; for (n=0; n> 31); accu = (int64_t)w_re*tmpz[ n2+i].im; accu -= (int64_t)w_im*tmpz[ n2+i].re; tmp2 = (int32_t)((accu + 0x40000000) >> 31); accu = (int64_t)w_re*tmpz[n34+i].re; accu -= (int64_t)w_im*tmpz[n34+i].im; tmp3 = (int32_t)((accu + 0x40000000) >> 31); accu = (int64_t)w_re*tmpz[n34+i].im; accu += (int64_t)w_im*tmpz[n34+i].re; tmp4 = (int32_t)((accu + 0x40000000) >> 31); tmp5 = tmp1 + tmp3; tmp1 = tmp1 - tmp3; tmp6 = tmp2 + tmp4; tmp2 = tmp2 - tmp4; tmpz[ n2+i].re = tmpz[ i].re - tmp5; tmpz[ i].re = tmpz[ i].re + tmp5; tmpz[ n2+i].im = tmpz[ i].im - tmp6; tmpz[ i].im = tmpz[ i].im + tmp6; tmpz[n34+i].re = tmpz[n4+i].re - tmp2; tmpz[ n4+i].re = tmpz[n4+i].re + tmp2; tmpz[n34+i].im = tmpz[n4+i].im + tmp1; tmpz[ n4+i].im = tmpz[n4+i].im - tmp1; w_re_ptr += step; w_im_ptr -= step; } } step >>= 1; n4 <<= 1; } }"} {"target": 1, "idx": 21387, "func": "int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw, int mmu_idx) { mmu_ctx_t ctx; int access_type; int ret = 0; if (rw == 2) { /* code access */ rw = 0; access_type = ACCESS_CODE; } else { /* data access */ access_type = env->access_type; } ret = get_physical_address(env, &ctx, address, rw, access_type); if (ret == 0) { tlb_set_page(env, address & TARGET_PAGE_MASK, ctx.raddr & TARGET_PAGE_MASK, ctx.prot, mmu_idx, TARGET_PAGE_SIZE); ret = 0; } else if (ret < 0) { LOG_MMU_STATE(env); if (access_type == ACCESS_CODE) { switch (ret) { case -1: /* No matches in page tables or TLB */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: env->exception_index = POWERPC_EXCP_IFTLB; env->error_code = 1 << 18; env->spr[SPR_IMISS] = address; 
env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; goto tlb_miss; case POWERPC_MMU_SOFT_74xx: env->exception_index = POWERPC_EXCP_IFTLB; goto tlb_miss_74xx; case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: env->exception_index = POWERPC_EXCP_ITLB; env->error_code = 0; env->spr[SPR_40x_DEAR] = address; env->spr[SPR_40x_ESR] = 0x00000000; break; case POWERPC_MMU_32B: case POWERPC_MMU_601: #if defined(TARGET_PPC64) case POWERPC_MMU_620: case POWERPC_MMU_64B: case POWERPC_MMU_2_06: #endif env->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x40000000; break; case POWERPC_MMU_BOOKE206: booke206_update_mas_tlb_miss(env, address, rw); /* fall through */ case POWERPC_MMU_BOOKE: env->exception_index = POWERPC_EXCP_ITLB; env->error_code = 0; env->spr[SPR_BOOKE_DEAR] = address; return -1; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ cpu_abort(env, \"MPC8xx MMU model is not implemented\\n\"); break; case POWERPC_MMU_REAL: cpu_abort(env, \"PowerPC in real mode should never raise \" \"any MMU exceptions\\n\"); return -1; default: cpu_abort(env, \"Unknown or invalid MMU model\\n\"); return -1; } break; case -2: /* Access rights violation */ env->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x08000000; break; case -3: /* No execute protection violation */ if ((env->mmu_model == POWERPC_MMU_BOOKE) || (env->mmu_model == POWERPC_MMU_BOOKE206)) { env->spr[SPR_BOOKE_ESR] = 0x00000000; } env->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x10000000; break; case -4: /* Direct store exception */ /* No code fetch is allowed in direct-store areas */ env->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x10000000; break; #if defined(TARGET_PPC64) case -5: /* No match in segment table */ if (env->mmu_model == POWERPC_MMU_620) { env->exception_index = POWERPC_EXCP_ISI; /* XXX: this might be incorrect */ env->error_code = 0x40000000; } else { env->exception_index = POWERPC_EXCP_ISEG; env->error_code = 0; } break; #endif } } else { switch (ret) { case -1: /* No matches in page tables or TLB */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: if (rw == 1) { env->exception_index = POWERPC_EXCP_DSTLB; env->error_code = 1 << 16; } else { env->exception_index = POWERPC_EXCP_DLTLB; env->error_code = 0; } env->spr[SPR_DMISS] = address; env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; tlb_miss: env->error_code |= ctx.key << 19; env->spr[SPR_HASH1] = env->htab_base + get_pteg_offset(env, ctx.hash[0], HASH_PTE_SIZE_32); env->spr[SPR_HASH2] = env->htab_base + get_pteg_offset(env, ctx.hash[1], HASH_PTE_SIZE_32); break; case POWERPC_MMU_SOFT_74xx: if (rw == 1) { env->exception_index = POWERPC_EXCP_DSTLB; } else { env->exception_index = POWERPC_EXCP_DLTLB; } tlb_miss_74xx: /* Implement LRU algorithm */ env->error_code = ctx.key << 19; env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) | ((env->last_way + 1) & (env->nb_ways - 1)); env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem; break; case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: env->exception_index = POWERPC_EXCP_DTLB; env->error_code = 0; env->spr[SPR_40x_DEAR] = address; if (rw) { env->spr[SPR_40x_ESR] = 0x00800000; } else { env->spr[SPR_40x_ESR] = 0x00000000; } break; case POWERPC_MMU_32B: case POWERPC_MMU_601: #if defined(TARGET_PPC64) case POWERPC_MMU_620: case POWERPC_MMU_64B: case POWERPC_MMU_2_06: #endif env->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = address; if (rw == 1) { env->spr[SPR_DSISR] = 0x42000000; } else { env->spr[SPR_DSISR] = 0x40000000; } break; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ 
cpu_abort(env, \"MPC8xx MMU model is not implemented\\n\"); break; case POWERPC_MMU_BOOKE206: booke206_update_mas_tlb_miss(env, address, rw); /* fall through */ case POWERPC_MMU_BOOKE: env->exception_index = POWERPC_EXCP_DTLB; env->error_code = 0; env->spr[SPR_BOOKE_DEAR] = address; env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0; return -1; case POWERPC_MMU_REAL: cpu_abort(env, \"PowerPC in real mode should never raise \" \"any MMU exceptions\\n\"); return -1; default: cpu_abort(env, \"Unknown or invalid MMU model\\n\"); return -1; } break; case -2: /* Access rights violation */ env->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; if (env->mmu_model == POWERPC_MMU_SOFT_4xx || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { env->spr[SPR_40x_DEAR] = address; if (rw) { env->spr[SPR_40x_ESR] |= 0x00800000; } } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || (env->mmu_model == POWERPC_MMU_BOOKE206)) { env->spr[SPR_BOOKE_DEAR] = address; env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0; } else { env->spr[SPR_DAR] = address; if (rw == 1) { env->spr[SPR_DSISR] = 0x0A000000; } else { env->spr[SPR_DSISR] = 0x08000000; } } break; case -4: /* Direct store exception */ switch (access_type) { case ACCESS_FLOAT: /* Floating point load/store */ env->exception_index = POWERPC_EXCP_ALIGN; env->error_code = POWERPC_EXCP_ALIGN_FP; env->spr[SPR_DAR] = address; break; case ACCESS_RES: /* lwarx, ldarx or stwcx. */ env->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = address; if (rw == 1) { env->spr[SPR_DSISR] = 0x06000000; } else { env->spr[SPR_DSISR] = 0x04000000; } break; case ACCESS_EXT: /* eciwx or ecowx */ env->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = address; if (rw == 1) { env->spr[SPR_DSISR] = 0x06100000; } else { env->spr[SPR_DSISR] = 0x04100000; } break; default: printf(\"DSI: invalid exception (%d)\\n\", ret); env->exception_index = POWERPC_EXCP_PROGRAM; env->error_code = POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; env->spr[SPR_DAR] = address; break; } break; #if defined(TARGET_PPC64) case -5: /* No match in segment table */ if (env->mmu_model == POWERPC_MMU_620) { env->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = address; /* XXX: this might be incorrect */ if (rw == 1) { env->spr[SPR_DSISR] = 0x42000000; } else { env->spr[SPR_DSISR] = 0x40000000; } } else { env->exception_index = POWERPC_EXCP_DSEG; env->error_code = 0; env->spr[SPR_DAR] = address; } break; #endif } } #if 0 printf(\"%s: set exception to %d %02x\\n\", __func__, env->exception, env->error_code); #endif ret = 1; } return ret; }"} {"target": 1, "idx": 21392, "func": "void qemu_spice_display_init(DisplayState *ds) { assert(sdpy.ds == NULL); qemu_spice_display_init_common(&sdpy, ds); register_displaychangelistener(ds, &display_listener); sdpy.qxl.base.sif = &dpy_interface.base; qemu_spice_add_interface(&sdpy.qxl.base); assert(sdpy.worker); qemu_spice_create_host_memslot(&sdpy); qemu_spice_create_host_primary(&sdpy); }"} {"target": 0, "idx": 21411, "func": "static int qcow2_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum) { uint64_t cluster_offset; int ret; *pnum = nb_sectors; /* FIXME We can get errors here, but the bdrv_is_allocated interface can't * pass them on today */ ret = qcow2_get_cluster_offset(bs, sector_num << 9, pnum, &cluster_offset); if (ret < 0) { *pnum = 0; } return (cluster_offset != 0); }"} {"target": 0, "idx": 21413, "func": "static void migrate_params_test_apply(MigrateSetParameters *params, 
MigrationParameters *dest) { *dest = migrate_get_current()->parameters; /* TODO use QAPI_CLONE() instead of duplicating it inline */ if (params->has_compress_level) { dest->compress_level = params->compress_level; } if (params->has_compress_threads) { dest->compress_threads = params->compress_threads; } if (params->has_decompress_threads) { dest->decompress_threads = params->decompress_threads; } if (params->has_cpu_throttle_initial) { dest->cpu_throttle_initial = params->cpu_throttle_initial; } if (params->has_cpu_throttle_increment) { dest->cpu_throttle_increment = params->cpu_throttle_increment; } if (params->has_tls_creds) { dest->tls_creds = g_strdup(params->tls_creds); } if (params->has_tls_hostname) { dest->tls_hostname = g_strdup(params->tls_hostname); } if (params->has_max_bandwidth) { dest->max_bandwidth = params->max_bandwidth; } if (params->has_downtime_limit) { dest->downtime_limit = params->downtime_limit; } if (params->has_x_checkpoint_delay) { dest->x_checkpoint_delay = params->x_checkpoint_delay; } if (params->has_block_incremental) { dest->block_incremental = params->block_incremental; } }"} {"target": 0, "idx": 21427, "func": "void cpu_state_reset(CPUMIPSState *env) { if (qemu_loglevel_mask(CPU_LOG_RESET)) { qemu_log(\"CPU Reset (CPU %d)\\n\", env->cpu_index); log_cpu_state(env, 0); } memset(env, 0, offsetof(CPUMIPSState, breakpoints)); tlb_flush(env, 1); /* Reset registers to their default values */ env->CP0_PRid = env->cpu_model->CP0_PRid; env->CP0_Config0 = env->cpu_model->CP0_Config0; #ifdef TARGET_WORDS_BIGENDIAN env->CP0_Config0 |= (1 << CP0C0_BE); #endif env->CP0_Config1 = env->cpu_model->CP0_Config1; env->CP0_Config2 = env->cpu_model->CP0_Config2; env->CP0_Config3 = env->cpu_model->CP0_Config3; env->CP0_Config6 = env->cpu_model->CP0_Config6; env->CP0_Config7 = env->cpu_model->CP0_Config7; env->CP0_LLAddr_rw_bitmask = env->cpu_model->CP0_LLAddr_rw_bitmask << env->cpu_model->CP0_LLAddr_shift; env->CP0_LLAddr_shift = env->cpu_model->CP0_LLAddr_shift; env->SYNCI_Step = env->cpu_model->SYNCI_Step; env->CCRes = env->cpu_model->CCRes; env->CP0_Status_rw_bitmask = env->cpu_model->CP0_Status_rw_bitmask; env->CP0_TCStatus_rw_bitmask = env->cpu_model->CP0_TCStatus_rw_bitmask; env->CP0_SRSCtl = env->cpu_model->CP0_SRSCtl; env->current_tc = 0; env->SEGBITS = env->cpu_model->SEGBITS; env->SEGMask = (target_ulong)((1ULL << env->cpu_model->SEGBITS) - 1); #if defined(TARGET_MIPS64) if (env->cpu_model->insn_flags & ISA_MIPS3) { env->SEGMask |= 3ULL << 62; } #endif env->PABITS = env->cpu_model->PABITS; env->PAMask = (target_ulong)((1ULL << env->cpu_model->PABITS) - 1); env->CP0_SRSConf0_rw_bitmask = env->cpu_model->CP0_SRSConf0_rw_bitmask; env->CP0_SRSConf0 = env->cpu_model->CP0_SRSConf0; env->CP0_SRSConf1_rw_bitmask = env->cpu_model->CP0_SRSConf1_rw_bitmask; env->CP0_SRSConf1 = env->cpu_model->CP0_SRSConf1; env->CP0_SRSConf2_rw_bitmask = env->cpu_model->CP0_SRSConf2_rw_bitmask; env->CP0_SRSConf2 = env->cpu_model->CP0_SRSConf2; env->CP0_SRSConf3_rw_bitmask = env->cpu_model->CP0_SRSConf3_rw_bitmask; env->CP0_SRSConf3 = env->cpu_model->CP0_SRSConf3; env->CP0_SRSConf4_rw_bitmask = env->cpu_model->CP0_SRSConf4_rw_bitmask; env->CP0_SRSConf4 = env->cpu_model->CP0_SRSConf4; env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0; env->insn_flags = env->cpu_model->insn_flags; #if defined(CONFIG_USER_ONLY) env->hflags = MIPS_HFLAG_UM; /* Enable access to the CPUNum, SYNCI_Step, CC, and CCRes RDHWR hardware registers. 
*/ env->CP0_HWREna |= 0x0000000F; if (env->CP0_Config1 & (1 << CP0C1_FP)) { env->hflags |= MIPS_HFLAG_FPU; } #ifdef TARGET_MIPS64 if (env->active_fpu.fcr0 & (1 << FCR0_F64)) { env->hflags |= MIPS_HFLAG_F64; } #endif #else if (env->hflags & MIPS_HFLAG_BMASK) { /* If the exception was raised from a delay slot, come back to the jump. */ env->CP0_ErrorEPC = env->active_tc.PC - 4; } else { env->CP0_ErrorEPC = env->active_tc.PC; } env->active_tc.PC = (int32_t)0xBFC00000; env->CP0_Random = env->tlb->nb_tlb - 1; env->tlb->tlb_in_use = env->tlb->nb_tlb; env->CP0_Wired = 0; env->CP0_EBase = 0x80000000 | (env->cpu_index & 0x3FF); env->CP0_Status = (1 << CP0St_BEV) | (1 << CP0St_ERL); /* vectored interrupts not implemented, timer on int 7, no performance counters. */ env->CP0_IntCtl = 0xe0000000; { int i; for (i = 0; i < 7; i++) { env->CP0_WatchLo[i] = 0; env->CP0_WatchHi[i] = 0x80000000; } env->CP0_WatchLo[7] = 0; env->CP0_WatchHi[7] = 0; } /* Count register increments in debug mode, EJTAG version 1 */ env->CP0_Debug = (1 << CP0DB_CNT) | (0x1 << CP0DB_VER); env->hflags = MIPS_HFLAG_CP0; if (env->CP0_Config3 & (1 << CP0C3_MT)) { int i; /* Only TC0 on VPE 0 starts as active. */ for (i = 0; i < ARRAY_SIZE(env->tcs); i++) { env->tcs[i].CP0_TCBind = env->cpu_index << CP0TCBd_CurVPE; env->tcs[i].CP0_TCHalt = 1; } env->active_tc.CP0_TCHalt = 1; env->halted = 1; if (!env->cpu_index) { /* VPE0 starts up enabled. */ env->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); env->CP0_VPEConf0 |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); /* TC0 starts up unhalted. */ env->halted = 0; env->active_tc.CP0_TCHalt = 0; env->tcs[0].CP0_TCHalt = 0; /* With thread 0 active. */ env->active_tc.CP0_TCStatus = (1 << CP0TCSt_A); env->tcs[0].CP0_TCStatus = (1 << CP0TCSt_A); } } #endif #if defined(TARGET_MIPS64) if (env->cpu_model->insn_flags & ISA_MIPS3) { env->hflags |= MIPS_HFLAG_64; } #endif env->exception_index = EXCP_NONE; }"} {"target": 0, "idx": 21430, "func": "bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp) { BdrvOpBlocker *blocker; assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); if (!QLIST_EMPTY(&bs->op_blockers[op])) { blocker = QLIST_FIRST(&bs->op_blockers[op]); if (errp) { *errp = error_copy(blocker->reason); error_prepend(errp, \"Node '%s' is busy: \", bdrv_get_device_or_node_name(bs)); } return true; } return false; }"} {"target": 0, "idx": 21435, "func": "static BlockJob *find_block_job(const char *device, AioContext **aio_context, Error **errp) { BlockBackend *blk; BlockDriverState *bs; *aio_context = NULL; blk = blk_by_name(device); if (!blk) { goto notfound; } *aio_context = blk_get_aio_context(blk); aio_context_acquire(*aio_context); if (!blk_is_available(blk)) { goto notfound; } bs = blk_bs(blk); if (!bs->job) { goto notfound; } return bs->job; notfound: error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE, \"No active block job on device '%s'\", device); if (*aio_context) { aio_context_release(*aio_context); *aio_context = NULL; } return NULL; }"} {"target": 0, "idx": 21450, "func": "static int bt_hci_parse(const char *str) { struct HCIInfo *hci; bdaddr_t bdaddr; if (nb_hcis >= MAX_NICS) { fprintf(stderr, \"qemu: Too many bluetooth HCIs (max %i).\\n\", MAX_NICS); return -1; } hci = hci_init(str); if (!hci) return -1; bdaddr.b[0] = 0x52; bdaddr.b[1] = 0x54; bdaddr.b[2] = 0x00; bdaddr.b[3] = 0x12; bdaddr.b[4] = 0x34; bdaddr.b[5] = 0x56 + nb_hcis; hci->bdaddr_set(hci, bdaddr.b); hci_table[nb_hcis++] = hci; return 0; }"} {"target": 0, "idx": 21459, "func": "static int 
cris_mmu_translate_page(struct cris_mmu_result *res, CPUState *env, uint32_t vaddr, int rw, int usermode, int debug) { unsigned int vpage; unsigned int idx; uint32_t pid, lo, hi; uint32_t tlb_vpn, tlb_pfn = 0; int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x; int cfg_v, cfg_k, cfg_w, cfg_x; int set, match = 0; uint32_t r_cause; uint32_t r_cfg; int rwcause; int mmu = 1; /* Data mmu is default. */ int vect_base; r_cause = env->sregs[SFR_R_MM_CAUSE]; r_cfg = env->sregs[SFR_RW_MM_CFG]; pid = env->pregs[PR_PID] & 0xff; switch (rw) { case 2: rwcause = CRIS_MMU_ERR_EXEC; mmu = 0; break; case 1: rwcause = CRIS_MMU_ERR_WRITE; break; default: case 0: rwcause = CRIS_MMU_ERR_READ; break; } /* I exception vectors 4 - 7, D 8 - 11. */ vect_base = (mmu + 1) * 4; vpage = vaddr >> 13; /* We know the index which to check on each set. Scan both I and D. */ #if 0 for (set = 0; set < 4; set++) { for (idx = 0; idx < 16; idx++) { lo = env->tlbsets[mmu][set][idx].lo; hi = env->tlbsets[mmu][set][idx].hi; tlb_vpn = EXTRACT_FIELD(hi, 13, 31); tlb_pfn = EXTRACT_FIELD(lo, 13, 31); printf (\"TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\\n\", set, idx, hi, lo, tlb_vpn, tlb_pfn); } } #endif idx = vpage & 15; for (set = 0; set < 4; set++) { lo = env->tlbsets[mmu][set][idx].lo; hi = env->tlbsets[mmu][set][idx].hi; tlb_vpn = hi >> 13; tlb_pid = EXTRACT_FIELD(hi, 0, 7); tlb_g = EXTRACT_FIELD(lo, 4, 4); D_LOG(\"TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\\n\", mmu, set, idx, tlb_vpn, vpage, lo, hi); if ((tlb_g || (tlb_pid == pid)) && tlb_vpn == vpage) { match = 1; break; } } res->bf_vec = vect_base; if (match) { cfg_w = EXTRACT_FIELD(r_cfg, 19, 19); cfg_k = EXTRACT_FIELD(r_cfg, 18, 18); cfg_x = EXTRACT_FIELD(r_cfg, 17, 17); cfg_v = EXTRACT_FIELD(r_cfg, 16, 16); tlb_pfn = EXTRACT_FIELD(lo, 13, 31); tlb_v = EXTRACT_FIELD(lo, 3, 3); tlb_k = EXTRACT_FIELD(lo, 2, 2); tlb_w = EXTRACT_FIELD(lo, 1, 1); tlb_x = EXTRACT_FIELD(lo, 0, 0); /* set_exception_vector(0x04, i_mmu_refill); set_exception_vector(0x05, i_mmu_invalid); set_exception_vector(0x06, i_mmu_access); set_exception_vector(0x07, i_mmu_execute); set_exception_vector(0x08, d_mmu_refill); set_exception_vector(0x09, d_mmu_invalid); set_exception_vector(0x0a, d_mmu_access); set_exception_vector(0x0b, d_mmu_write); */ if (cfg_k && tlb_k && usermode) { D(printf (\"tlb: kernel protected %x lo=%x pc=%x\\n\", vaddr, lo, env->pc)); match = 0; res->bf_vec = vect_base + 2; } else if (rw == 1 && cfg_w && !tlb_w) { D(printf (\"tlb: write protected %x lo=%x pc=%x\\n\", vaddr, lo, env->pc)); match = 0; /* write accesses never go through the I mmu. */ res->bf_vec = vect_base + 3; } else if (rw == 2 && cfg_x && !tlb_x) { D(printf (\"tlb: exec protected %x lo=%x pc=%x\\n\", vaddr, lo, env->pc)); match = 0; res->bf_vec = vect_base + 3; } else if (cfg_v && !tlb_v) { D(printf (\"tlb: invalid %x\\n\", vaddr)); match = 0; res->bf_vec = vect_base + 1; } res->prot = 0; if (match) { res->prot |= PAGE_READ; if (tlb_w) res->prot |= PAGE_WRITE; if (tlb_x) res->prot |= PAGE_EXEC; } else D(dump_tlb(env, mmu)); } else { /* If refill, provide a randomized set. */ set = env->mmu_rand_lfsr & 3; } if (!match && !debug) { cris_mmu_update_rand_lfsr(env); /* Compute index. */ idx = vpage & 15; /* Update RW_MM_TLB_SEL. */ env->sregs[SFR_RW_MM_TLB_SEL] = 0; set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4); set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2); /* Update RW_MM_CAUSE. 
*/ set_field(&r_cause, rwcause, 8, 2); set_field(&r_cause, vpage, 13, 19); set_field(&r_cause, pid, 0, 8); env->sregs[SFR_R_MM_CAUSE] = r_cause; D(printf(\"refill vaddr=%x pc=%x\\n\", vaddr, env->pc)); } D(printf (\"%s rw=%d mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x\" \" %x cause=%x sel=%x sp=%x %x %x\\n\", __func__, rw, match, env->pc, vaddr, vpage, tlb_vpn, tlb_pfn, tlb_pid, pid, r_cause, env->sregs[SFR_RW_MM_TLB_SEL], env->regs[R_SP], env->pregs[PR_USP], env->ksp)); res->phy = tlb_pfn << TARGET_PAGE_BITS; return !match; }"} {"target": 1, "idx": 21466, "func": "static void gen_dst(DisasContext *ctx) { if (rA(ctx->opcode) == 0) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX); } else { /* interpreted as no-op */ } }"} {"target": 0, "idx": 21473, "func": "static void x86_cpu_enable_xsave_components(X86CPU *cpu) { CPUX86State *env = &cpu->env; int i; if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { return; } env->xsave_components = (XSTATE_FP_MASK | XSTATE_SSE_MASK); for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { const ExtSaveArea *esa = &x86_ext_save_areas[i]; if (env->features[esa->feature] & esa->bits) { env->xsave_components |= (1ULL << i); } } if (kvm_enabled()) { KVMState *s = kvm_state; uint64_t kvm_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX); kvm_mask <<= 32; kvm_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX); env->xsave_components &= kvm_mask; } }"} {"target": 0, "idx": 21487, "func": "int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw, target_ulong *addr, int *flags) { /* TODO: low address protection once we flush the tlb on cr changes */ *flags = PAGE_READ | PAGE_WRITE; *addr = mmu_real2abs(env, raddr); /* TODO: storage key handling */ return 0; }"} {"target": 0, "idx": 21492, "func": "static void nic_selective_reset(EEPRO100State * s) { size_t i; uint16_t *eeprom_contents = eeprom93xx_data(s->eeprom); //~ eeprom93xx_reset(s->eeprom); memcpy(eeprom_contents, s->conf.macaddr.a, 6); eeprom_contents[0xa] = 0x4000; if (s->device == i82557B || s->device == i82557C) eeprom_contents[5] = 0x0100; uint16_t sum = 0; for (i = 0; i < EEPROM_SIZE - 1; i++) { sum += eeprom_contents[i]; } eeprom_contents[EEPROM_SIZE - 1] = 0xbaba - sum; TRACE(EEPROM, logout(\"checksum=0x%04x\\n\", eeprom_contents[EEPROM_SIZE - 1])); memset(s->mem, 0, sizeof(s->mem)); uint32_t val = BIT(21); memcpy(&s->mem[SCBCtrlMDI], &val, sizeof(val)); assert(sizeof(s->mdimem) == sizeof(eepro100_mdi_default)); memcpy(&s->mdimem[0], &eepro100_mdi_default[0], sizeof(s->mdimem)); }"} {"target": 1, "idx": 21508, "func": "void pci_bridge_reset_reg(PCIDevice *dev) { uint8_t *conf = dev->config; conf[PCI_PRIMARY_BUS] = 0; conf[PCI_SECONDARY_BUS] = 0; conf[PCI_SUBORDINATE_BUS] = 0; conf[PCI_SEC_LATENCY_TIMER] = 0; conf[PCI_IO_BASE] = 0; conf[PCI_IO_LIMIT] = 0; pci_set_word(conf + PCI_MEMORY_BASE, 0); pci_set_word(conf + PCI_MEMORY_LIMIT, 0); pci_set_word(conf + PCI_PREF_MEMORY_BASE, 0); pci_set_word(conf + PCI_PREF_MEMORY_LIMIT, 0); pci_set_word(conf + PCI_PREF_BASE_UPPER32, 0); pci_set_word(conf + PCI_PREF_LIMIT_UPPER32, 0); pci_set_word(conf + PCI_BRIDGE_CONTROL, 0); }"} {"target": 1, "idx": 21511, "func": "static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client, target_phys_addr_t start_addr, target_phys_addr_t end_addr) { return kvm_physical_sync_dirty_bitmap(start_addr, end_addr); }"} {"target": 0, "idx": 21513, "func": "static uint32_t bonito_readl(void *opaque, target_phys_addr_t addr) { PCIBonitoState *s = opaque; uint32_t saddr; saddr = (addr - 
BONITO_REGBASE) >> 2; DPRINTF(\"bonito_readl \"TARGET_FMT_plx\" \\n\", addr); switch (saddr) { case BONITO_INTISR: return s->regs[saddr]; default: return s->regs[saddr]; } }"} {"target": 0, "idx": 21538, "func": "struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *system_memory, unsigned long sdram_size, const char *core) { int i; struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) g_malloc0(sizeof(struct omap_mpu_state_s)); qemu_irq dma_irqs[6]; DriveInfo *dinfo; SysBusDevice *busdev; if (!core) core = \"ti925t\"; /* Core */ s->mpu_model = omap310; s->cpu = cpu_arm_init(core); if (s->cpu == NULL) { fprintf(stderr, \"Unable to find CPU definition\\n\"); exit(1); } s->sdram_size = sdram_size; s->sram_size = OMAP15XX_SRAM_SIZE; s->wakeup = qemu_allocate_irq(omap_mpu_wakeup, s, 0); /* Clocks */ omap_clk_init(s); /* Memory-mapped stuff */ memory_region_init_ram(&s->emiff_ram, NULL, \"omap1.dram\", s->sdram_size, &error_abort); vmstate_register_ram_global(&s->emiff_ram); memory_region_add_subregion(system_memory, OMAP_EMIFF_BASE, &s->emiff_ram); memory_region_init_ram(&s->imif_ram, NULL, \"omap1.sram\", s->sram_size, &error_abort); vmstate_register_ram_global(&s->imif_ram); memory_region_add_subregion(system_memory, OMAP_IMIF_BASE, &s->imif_ram); omap_clkm_init(system_memory, 0xfffece00, 0xe1008000, s); s->ih[0] = qdev_create(NULL, \"omap-intc\"); qdev_prop_set_uint32(s->ih[0], \"size\", 0x100); qdev_prop_set_ptr(s->ih[0], \"clk\", omap_findclk(s, \"arminth_ck\")); qdev_init_nofail(s->ih[0]); busdev = SYS_BUS_DEVICE(s->ih[0]); sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ)); sysbus_connect_irq(busdev, 1, qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_FIQ)); sysbus_mmio_map(busdev, 0, 0xfffecb00); s->ih[1] = qdev_create(NULL, \"omap-intc\"); qdev_prop_set_uint32(s->ih[1], \"size\", 0x800); qdev_prop_set_ptr(s->ih[1], \"clk\", omap_findclk(s, \"arminth_ck\")); qdev_init_nofail(s->ih[1]); busdev = SYS_BUS_DEVICE(s->ih[1]); sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(s->ih[0], OMAP_INT_15XX_IH2_IRQ)); /* The second interrupt controller's FIQ output is not wired up */ sysbus_mmio_map(busdev, 0, 0xfffe0000); for (i = 0; i < 6; i++) { dma_irqs[i] = qdev_get_gpio_in(s->ih[omap1_dma_irq_map[i].ih], omap1_dma_irq_map[i].intr); } s->dma = omap_dma_init(0xfffed800, dma_irqs, system_memory, qdev_get_gpio_in(s->ih[0], OMAP_INT_DMA_LCD), s, omap_findclk(s, \"dma_ck\"), omap_dma_3_1); s->port[emiff ].addr_valid = omap_validate_emiff_addr; s->port[emifs ].addr_valid = omap_validate_emifs_addr; s->port[imif ].addr_valid = omap_validate_imif_addr; s->port[tipb ].addr_valid = omap_validate_tipb_addr; s->port[local ].addr_valid = omap_validate_local_addr; s->port[tipb_mpui].addr_valid = omap_validate_tipb_mpui_addr; /* Register SDRAM and SRAM DMA ports for fast transfers. 
*/ soc_dma_port_add_mem(s->dma, memory_region_get_ram_ptr(&s->emiff_ram), OMAP_EMIFF_BASE, s->sdram_size); soc_dma_port_add_mem(s->dma, memory_region_get_ram_ptr(&s->imif_ram), OMAP_IMIF_BASE, s->sram_size); s->timer[0] = omap_mpu_timer_init(system_memory, 0xfffec500, qdev_get_gpio_in(s->ih[0], OMAP_INT_TIMER1), omap_findclk(s, \"mputim_ck\")); s->timer[1] = omap_mpu_timer_init(system_memory, 0xfffec600, qdev_get_gpio_in(s->ih[0], OMAP_INT_TIMER2), omap_findclk(s, \"mputim_ck\")); s->timer[2] = omap_mpu_timer_init(system_memory, 0xfffec700, qdev_get_gpio_in(s->ih[0], OMAP_INT_TIMER3), omap_findclk(s, \"mputim_ck\")); s->wdt = omap_wd_timer_init(system_memory, 0xfffec800, qdev_get_gpio_in(s->ih[0], OMAP_INT_WD_TIMER), omap_findclk(s, \"armwdt_ck\")); s->os_timer = omap_os_timer_init(system_memory, 0xfffb9000, qdev_get_gpio_in(s->ih[1], OMAP_INT_OS_TIMER), omap_findclk(s, \"clk32-kHz\")); s->lcd = omap_lcdc_init(system_memory, 0xfffec000, qdev_get_gpio_in(s->ih[0], OMAP_INT_LCD_CTRL), omap_dma_get_lcdch(s->dma), omap_findclk(s, \"lcd_ck\")); omap_ulpd_pm_init(system_memory, 0xfffe0800, s); omap_pin_cfg_init(system_memory, 0xfffe1000, s); omap_id_init(system_memory, s); omap_mpui_init(system_memory, 0xfffec900, s); s->private_tipb = omap_tipb_bridge_init(system_memory, 0xfffeca00, qdev_get_gpio_in(s->ih[0], OMAP_INT_BRIDGE_PRIV), omap_findclk(s, \"tipb_ck\")); s->public_tipb = omap_tipb_bridge_init(system_memory, 0xfffed300, qdev_get_gpio_in(s->ih[0], OMAP_INT_BRIDGE_PUB), omap_findclk(s, \"tipb_ck\")); omap_tcmi_init(system_memory, 0xfffecc00, s); s->uart[0] = omap_uart_init(0xfffb0000, qdev_get_gpio_in(s->ih[1], OMAP_INT_UART1), omap_findclk(s, \"uart1_ck\"), omap_findclk(s, \"uart1_ck\"), s->drq[OMAP_DMA_UART1_TX], s->drq[OMAP_DMA_UART1_RX], \"uart1\", serial_hds[0]); s->uart[1] = omap_uart_init(0xfffb0800, qdev_get_gpio_in(s->ih[1], OMAP_INT_UART2), omap_findclk(s, \"uart2_ck\"), omap_findclk(s, \"uart2_ck\"), s->drq[OMAP_DMA_UART2_TX], s->drq[OMAP_DMA_UART2_RX], \"uart2\", serial_hds[0] ? serial_hds[1] : NULL); s->uart[2] = omap_uart_init(0xfffb9800, qdev_get_gpio_in(s->ih[0], OMAP_INT_UART3), omap_findclk(s, \"uart3_ck\"), omap_findclk(s, \"uart3_ck\"), s->drq[OMAP_DMA_UART3_TX], s->drq[OMAP_DMA_UART3_RX], \"uart3\", serial_hds[0] && serial_hds[1] ? 
serial_hds[2] : NULL); s->dpll[0] = omap_dpll_init(system_memory, 0xfffecf00, omap_findclk(s, \"dpll1\")); s->dpll[1] = omap_dpll_init(system_memory, 0xfffed000, omap_findclk(s, \"dpll2\")); s->dpll[2] = omap_dpll_init(system_memory, 0xfffed100, omap_findclk(s, \"dpll3\")); dinfo = drive_get(IF_SD, 0, 0); if (!dinfo) { fprintf(stderr, \"qemu: missing SecureDigital device\\n\"); exit(1); } s->mmc = omap_mmc_init(0xfffb7800, system_memory, blk_bs(blk_by_legacy_dinfo(dinfo)), qdev_get_gpio_in(s->ih[1], OMAP_INT_OQN), &s->drq[OMAP_DMA_MMC_TX], omap_findclk(s, \"mmc_ck\")); s->mpuio = omap_mpuio_init(system_memory, 0xfffb5000, qdev_get_gpio_in(s->ih[1], OMAP_INT_KEYBOARD), qdev_get_gpio_in(s->ih[1], OMAP_INT_MPUIO), s->wakeup, omap_findclk(s, \"clk32-kHz\")); s->gpio = qdev_create(NULL, \"omap-gpio\"); qdev_prop_set_int32(s->gpio, \"mpu_model\", s->mpu_model); qdev_prop_set_ptr(s->gpio, \"clk\", omap_findclk(s, \"arm_gpio_ck\")); qdev_init_nofail(s->gpio); sysbus_connect_irq(SYS_BUS_DEVICE(s->gpio), 0, qdev_get_gpio_in(s->ih[0], OMAP_INT_GPIO_BANK1)); sysbus_mmio_map(SYS_BUS_DEVICE(s->gpio), 0, 0xfffce000); s->microwire = omap_uwire_init(system_memory, 0xfffb3000, qdev_get_gpio_in(s->ih[1], OMAP_INT_uWireTX), qdev_get_gpio_in(s->ih[1], OMAP_INT_uWireRX), s->drq[OMAP_DMA_UWIRE_TX], omap_findclk(s, \"mpuper_ck\")); s->pwl = omap_pwl_init(system_memory, 0xfffb5800, omap_findclk(s, \"armxor_ck\")); s->pwt = omap_pwt_init(system_memory, 0xfffb6000, omap_findclk(s, \"armxor_ck\")); s->i2c[0] = qdev_create(NULL, \"omap_i2c\"); qdev_prop_set_uint8(s->i2c[0], \"revision\", 0x11); qdev_prop_set_ptr(s->i2c[0], \"fclk\", omap_findclk(s, \"mpuper_ck\")); qdev_init_nofail(s->i2c[0]); busdev = SYS_BUS_DEVICE(s->i2c[0]); sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(s->ih[1], OMAP_INT_I2C)); sysbus_connect_irq(busdev, 1, s->drq[OMAP_DMA_I2C_TX]); sysbus_connect_irq(busdev, 2, s->drq[OMAP_DMA_I2C_RX]); sysbus_mmio_map(busdev, 0, 0xfffb3800); s->rtc = omap_rtc_init(system_memory, 0xfffb4800, qdev_get_gpio_in(s->ih[1], OMAP_INT_RTC_TIMER), qdev_get_gpio_in(s->ih[1], OMAP_INT_RTC_ALARM), omap_findclk(s, \"clk32-kHz\")); s->mcbsp1 = omap_mcbsp_init(system_memory, 0xfffb1800, qdev_get_gpio_in(s->ih[1], OMAP_INT_McBSP1TX), qdev_get_gpio_in(s->ih[1], OMAP_INT_McBSP1RX), &s->drq[OMAP_DMA_MCBSP1_TX], omap_findclk(s, \"dspxor_ck\")); s->mcbsp2 = omap_mcbsp_init(system_memory, 0xfffb1000, qdev_get_gpio_in(s->ih[0], OMAP_INT_310_McBSP2_TX), qdev_get_gpio_in(s->ih[0], OMAP_INT_310_McBSP2_RX), &s->drq[OMAP_DMA_MCBSP2_TX], omap_findclk(s, \"mpuper_ck\")); s->mcbsp3 = omap_mcbsp_init(system_memory, 0xfffb7000, qdev_get_gpio_in(s->ih[1], OMAP_INT_McBSP3TX), qdev_get_gpio_in(s->ih[1], OMAP_INT_McBSP3RX), &s->drq[OMAP_DMA_MCBSP3_TX], omap_findclk(s, \"dspxor_ck\")); s->led[0] = omap_lpg_init(system_memory, 0xfffbd000, omap_findclk(s, \"clk32-kHz\")); s->led[1] = omap_lpg_init(system_memory, 0xfffbd800, omap_findclk(s, \"clk32-kHz\")); /* Register mappings not currenlty implemented: * MCSI2 Comm fffb2000 - fffb27ff (not mapped on OMAP310) * MCSI1 Bluetooth fffb2800 - fffb2fff (not mapped on OMAP310) * USB W2FC fffb4000 - fffb47ff * Camera Interface fffb6800 - fffb6fff * USB Host fffba000 - fffba7ff * FAC fffba800 - fffbafff * HDQ/1-Wire fffbc000 - fffbc7ff * TIPB switches fffbc800 - fffbcfff * Mailbox fffcf000 - fffcf7ff * Local bus IF fffec100 - fffec1ff * Local bus MMU fffec200 - fffec2ff * DSP MMU fffed200 - fffed2ff */ omap_setup_dsp_mapping(system_memory, omap15xx_dsp_mm); omap_setup_mpui_io(system_memory, s); 
qemu_register_reset(omap1_mpu_reset, s); return s; }"} {"target": 0, "idx": 21549, "func": "void nbd_client_close(NBDClient *client) { if (client->closing) { return; } client->closing = true; /* Force requests to finish. They will drop their own references, * then we'll close the socket and free the NBDClient. */ shutdown(client->sock, 2); /* Also tell the client, so that they release their reference. */ if (client->close) { client->close(client); } }"} {"target": 1, "idx": 21567, "func": "static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again, ram_addr_t *ram_addr_abs) { pss->offset = migration_bitmap_find_dirty(rs, pss->block, pss->offset, ram_addr_abs); if (pss->complete_round && pss->block == rs->last_seen_block && pss->offset >= rs->last_offset) { /* * We've been once around the RAM and haven't found anything. * Give up. */ *again = false; return false; } if (pss->offset >= pss->block->used_length) { /* Didn't find anything in this RAM Block */ pss->offset = 0; pss->block = QLIST_NEXT_RCU(pss->block, next); if (!pss->block) { /* Hit the end of the list */ pss->block = QLIST_FIRST_RCU(&ram_list.blocks); /* Flag that we've looped */ pss->complete_round = true; rs->ram_bulk_stage = false; if (migrate_use_xbzrle()) { /* If xbzrle is on, stop using the data compression at this * point. In theory, xbzrle can do better than compression. */ flush_compressed_data(rs); } } /* Didn't find anything this time, but try again on the new block */ *again = true; return false; } else { /* Can go around again, but... */ *again = true; /* We've found something so probably don't need to */ return true; } }"} {"target": 1, "idx": 21569, "func": "void OPPROTO op_POWER_doz (void) { if (Ts1 > Ts0) T0 = T1 - T0; else T0 = 0; RETURN(); }"} {"target": 0, "idx": 21576, "func": "int ff_wmv2_decode_secondary_picture_header(MpegEncContext *s) { Wmv2Context *const w = (Wmv2Context *) s; if (s->pict_type == AV_PICTURE_TYPE_I) { if (w->j_type_bit) w->j_type = get_bits1(&s->gb); else w->j_type = 0; // FIXME check if (!w->j_type) { if (w->per_mb_rl_bit) s->per_mb_rl_table = get_bits1(&s->gb); else s->per_mb_rl_table = 0; if (!s->per_mb_rl_table) { s->rl_chroma_table_index = decode012(&s->gb); s->rl_table_index = decode012(&s->gb); } s->dc_table_index = get_bits1(&s->gb); } s->inter_intra_pred = 0; s->no_rounding = 1; if (s->avctx->debug & FF_DEBUG_PICT_INFO) { av_log(s->avctx, AV_LOG_DEBUG, \"qscale:%d rlc:%d rl:%d dc:%d mbrl:%d j_type:%d \\n\", s->qscale, s->rl_chroma_table_index, s->rl_table_index, s->dc_table_index, s->per_mb_rl_table, w->j_type); } } else { int cbp_index; w->j_type = 0; parse_mb_skip(w); cbp_index = decode012(&s->gb); if (s->qscale <= 10) { int map[3] = { 0, 2, 1 }; w->cbp_table_index = map[cbp_index]; } else if (s->qscale <= 20) { int map[3] = { 1, 0, 2 }; w->cbp_table_index = map[cbp_index]; } else { int map[3] = {2,1,0}; w->cbp_table_index = map[cbp_index]; } if (w->mspel_bit) s->mspel = get_bits1(&s->gb); else s->mspel = 0; // FIXME check if (w->abt_flag) { w->per_mb_abt = get_bits1(&s->gb) ^ 1; if (!w->per_mb_abt) w->abt_type = decode012(&s->gb); } if (w->per_mb_rl_bit) s->per_mb_rl_table = get_bits1(&s->gb); else s->per_mb_rl_table = 0; if (!s->per_mb_rl_table) { s->rl_table_index = decode012(&s->gb); s->rl_chroma_table_index = s->rl_table_index; } s->dc_table_index = get_bits1(&s->gb); s->mv_table_index = get_bits1(&s->gb); s->inter_intra_pred = 0; // (s->width * s->height < 320 * 240 && s->bit_rate <= II_BITRATE); s->no_rounding ^= 1; if (s->avctx->debug & 
FF_DEBUG_PICT_INFO) { av_log(s->avctx, AV_LOG_DEBUG, \"rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d mspel:%d \" \"per_mb_abt:%d abt_type:%d cbp:%d ii:%d\\n\", s->rl_table_index, s->rl_chroma_table_index, s->dc_table_index, s->mv_table_index, s->per_mb_rl_table, s->qscale, s->mspel, w->per_mb_abt, w->abt_type, w->cbp_table_index, s->inter_intra_pred); } } s->esc3_level_length = 0; s->esc3_run_length = 0; s->picture_number++; // FIXME ? if (w->j_type) { ff_intrax8_decode_picture(&w->x8, 2 * s->qscale, (s->qscale - 1) | 1); ff_er_add_slice(&w->s.er, 0, 0, (w->s.mb_x >> 1) - 1, (w->s.mb_y >> 1) - 1, ER_MB_END); return 1; } return 0; }"} {"target": 1, "idx": 21583, "func": "static void fill_table(uint8_t* table[256 + 2*YUVRGB_TABLE_HEADROOM], const int elemsize, const int inc, void *y_tab) { int i; uint8_t *y_table = y_tab; y_table -= elemsize * (inc >> 9); for (i = 0; i < 256 + 2*YUVRGB_TABLE_HEADROOM; i++) { int64_t cb = av_clip(i-YUVRGB_TABLE_HEADROOM, 0, 255)*inc; table[i] = y_table + elemsize * (cb >> 16); } }"} {"target": 0, "idx": 21584, "func": "static int allocate_buffers(ALACContext *alac) { int ch; for (ch = 0; ch < FFMIN(alac->channels, 2); ch++) { int buf_size = alac->max_samples_per_frame * sizeof(int32_t); FF_ALLOC_OR_GOTO(alac->avctx, alac->predict_error_buffer[ch], buf_size, buf_alloc_fail); if (alac->sample_size == 16) { FF_ALLOC_OR_GOTO(alac->avctx, alac->output_samples_buffer[ch], buf_size, buf_alloc_fail); } FF_ALLOC_OR_GOTO(alac->avctx, alac->extra_bits_buffer[ch], buf_size, buf_alloc_fail); } return 0; buf_alloc_fail: alac_decode_close(alac->avctx); return AVERROR(ENOMEM); }"} {"target": 0, "idx": 21585, "func": "static int put_cod(Jpeg2000EncoderContext *s) { Jpeg2000CodingStyle *codsty = &s->codsty; if (s->buf_end - s->buf < 14) return -1; bytestream_put_be16(&s->buf, JPEG2000_COD); bytestream_put_be16(&s->buf, 12); // Lcod bytestream_put_byte(&s->buf, 0); // Scod // SGcod bytestream_put_byte(&s->buf, 0); // progression level bytestream_put_be16(&s->buf, 1); // num of layers if(s->avctx->pix_fmt == AV_PIX_FMT_YUV444P){ bytestream_put_byte(&s->buf, 2); // ICT }else{ bytestream_put_byte(&s->buf, 0); // unspecified } // SPcod bytestream_put_byte(&s->buf, codsty->nreslevels - 1); // num of decomp. 
levels bytestream_put_byte(&s->buf, codsty->log2_cblk_width-2); // cblk width bytestream_put_byte(&s->buf, codsty->log2_cblk_height-2); // cblk height bytestream_put_byte(&s->buf, 0); // cblk style bytestream_put_byte(&s->buf, codsty->transform); // transformation return 0; }"} {"target": 0, "idx": 21612, "func": "bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr, hwaddr size, unsigned client) { assert(mr->terminates); return cpu_physical_memory_test_and_clear_dirty(mr->ram_addr + addr, size, client); }"} {"target": 0, "idx": 21622, "func": "static int bdrv_get_cluster_size(BlockDriverState *bs) { BlockDriverInfo bdi; int ret; ret = bdrv_get_info(bs, &bdi); if (ret < 0 || bdi.cluster_size == 0) { return bs->request_alignment; } else { return bdi.cluster_size; } }"} {"target": 0, "idx": 21641, "func": "static int libschroedinger_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet) { int enc_size = 0; SchroEncoderParams *p_schro_params = avctx->priv_data; SchroEncoder *encoder = p_schro_params->encoder; struct FFSchroEncodedFrame *p_frame_output = NULL; int go = 1; SchroBuffer *enc_buf; int presentation_frame; int parse_code; int last_frame_in_sequence = 0; int pkt_size, ret; if (!frame) { /* Push end of sequence if not already signalled. */ if (!p_schro_params->eos_signalled) { schro_encoder_end_of_stream(encoder); p_schro_params->eos_signalled = 1; } } else { /* Allocate frame data to schro input buffer. */ SchroFrame *in_frame = libschroedinger_frame_from_data(avctx, frame); if (!in_frame) return AVERROR(ENOMEM); /* Load next frame. */ schro_encoder_push_frame(encoder, in_frame); } if (p_schro_params->eos_pulled) go = 0; /* Now check to see if we have any output from the encoder. */ while (go) { int err; SchroStateEnum state; state = schro_encoder_wait(encoder); switch (state) { case SCHRO_STATE_HAVE_BUFFER: case SCHRO_STATE_END_OF_STREAM: enc_buf = schro_encoder_pull(encoder, &presentation_frame); if (enc_buf->length <= 0) return AVERROR_BUG; parse_code = enc_buf->data[4]; /* All non-frame data is prepended to actual frame data to * be able to set the pts correctly. So we don't write data * to the frame output queue until we actually have a frame */ if ((err = av_reallocp(&p_schro_params->enc_buf, p_schro_params->enc_buf_size + enc_buf->length)) < 0) { p_schro_params->enc_buf_size = 0; return err; } memcpy(p_schro_params->enc_buf + p_schro_params->enc_buf_size, enc_buf->data, enc_buf->length); p_schro_params->enc_buf_size += enc_buf->length; if (state == SCHRO_STATE_END_OF_STREAM) { p_schro_params->eos_pulled = 1; go = 0; } if (!SCHRO_PARSE_CODE_IS_PICTURE(parse_code)) { schro_buffer_unref(enc_buf); break; } /* Create output frame. */ p_frame_output = av_mallocz(sizeof(FFSchroEncodedFrame)); if (!p_frame_output) return AVERROR(ENOMEM); /* Set output data. */ p_frame_output->size = p_schro_params->enc_buf_size; p_frame_output->p_encbuf = p_schro_params->enc_buf; if (SCHRO_PARSE_CODE_IS_INTRA(parse_code) && SCHRO_PARSE_CODE_IS_REFERENCE(parse_code)) p_frame_output->key_frame = 1; /* Parse the coded frame number from the bitstream. Bytes 14 * through 17 represesent the frame number. 
*/ p_frame_output->frame_num = AV_RB32(enc_buf->data + 13); ff_schro_queue_push_back(&p_schro_params->enc_frame_queue, p_frame_output); p_schro_params->enc_buf_size = 0; p_schro_params->enc_buf = NULL; schro_buffer_unref(enc_buf); break; case SCHRO_STATE_NEED_FRAME: go = 0; break; case SCHRO_STATE_AGAIN: break; default: av_log(avctx, AV_LOG_ERROR, \"Unknown Schro Encoder state\\n\"); return -1; } } /* Copy 'next' frame in queue. */ if (p_schro_params->enc_frame_queue.size == 1 && p_schro_params->eos_pulled) last_frame_in_sequence = 1; p_frame_output = ff_schro_queue_pop(&p_schro_params->enc_frame_queue); if (!p_frame_output) return 0; pkt_size = p_frame_output->size; if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) pkt_size += p_schro_params->enc_buf_size; if ((ret = ff_alloc_packet(pkt, pkt_size)) < 0) { av_log(avctx, AV_LOG_ERROR, \"Error getting output packet of size %d.\\n\", pkt_size); goto error; } memcpy(pkt->data, p_frame_output->p_encbuf, p_frame_output->size); avctx->coded_frame->key_frame = p_frame_output->key_frame; /* Use the frame number of the encoded frame as the pts. It is OK to * do so since Dirac is a constant frame rate codec. It expects input * to be of constant frame rate. */ pkt->pts = avctx->coded_frame->pts = p_frame_output->frame_num; pkt->dts = p_schro_params->dts++; enc_size = p_frame_output->size; /* Append the end of sequence information to the last frame in the * sequence. */ if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) { memcpy(pkt->data + enc_size, p_schro_params->enc_buf, p_schro_params->enc_buf_size); enc_size += p_schro_params->enc_buf_size; av_freep(&p_schro_params->enc_buf); p_schro_params->enc_buf_size = 0; } if (p_frame_output->key_frame) pkt->flags |= AV_PKT_FLAG_KEY; *got_packet = 1; error: /* free frame */ libschroedinger_free_frame(p_frame_output); return ret; }"} {"target": 0, "idx": 21665, "func": "static int fill_default_ref_list(H264Context *h){ MpegEncContext * const s = &h->s; int i; int smallest_poc_greater_than_current = -1; int structure_sel; Picture sorted_short_ref[32]; Picture field_entry_list[2][32]; Picture *frame_list[2]; if (FIELD_PICTURE) { structure_sel = PICT_FRAME; frame_list[0] = field_entry_list[0]; frame_list[1] = field_entry_list[1]; } else { structure_sel = 0; frame_list[0] = h->default_ref_list[0]; frame_list[1] = h->default_ref_list[1]; } if(h->slice_type_nos==FF_B_TYPE){ int list; int len[2]; int short_len[2]; int out_i; int limit= INT_MIN; /* sort frame according to POC in B slice */ for(out_i=0; out_i<h->short_ref_count; out_i++){ int best_i=INT_MIN; int best_poc=INT_MAX; for(i=0; i<h->short_ref_count; i++){ const int poc= h->short_ref[i]->poc; if(poc > limit && poc < best_poc){ best_poc= poc; best_i= i; } } assert(best_i != INT_MIN); limit= best_poc; sorted_short_ref[out_i]= *h->short_ref[best_i]; tprintf(h->s.avctx, \"sorted poc: %d->%d poc:%d fn:%d\\n\", best_i, out_i, sorted_short_ref[out_i].poc, sorted_short_ref[out_i].frame_num); if (-1 == smallest_poc_greater_than_current) { if (h->short_ref[best_i]->poc >= s->current_picture_ptr->poc) { smallest_poc_greater_than_current = out_i; } } } tprintf(h->s.avctx, \"current poc: %d, smallest_poc_greater_than_current: %d\\n\", s->current_picture_ptr->poc, smallest_poc_greater_than_current); // find the largest POC for(list=0; list<2; list++){ int index = 0; int j= -99; int step= list ? -1 : 1; for(i=0; i<h->short_ref_count && index < h->ref_count[list]; i++, j+=step) { int sel; while(j<0 || j>= h->short_ref_count){ if(j != -99 && step == (list ?
-1 : 1)) return -1; step = -step; j= smallest_poc_greater_than_current + (step>>1); } sel = sorted_short_ref[j].reference | structure_sel; if(sel != PICT_FRAME) continue; frame_list[list][index ]= sorted_short_ref[j]; frame_list[list][index++].pic_id= sorted_short_ref[j].frame_num; } short_len[list] = index; for(i = 0; i < 16 && index < h->ref_count[ list ]; i++){ int sel; if(h->long_ref[i] == NULL) continue; sel = h->long_ref[i]->reference | structure_sel; if(sel != PICT_FRAME) continue; frame_list[ list ][index ]= *h->long_ref[i]; frame_list[ list ][index++].pic_id= i; } len[list] = index; } for(list=0; list<2; list++){ if (FIELD_PICTURE) len[list] = split_field_ref_list(h->default_ref_list[list], h->ref_count[list], frame_list[list], len[list], s->picture_structure, short_len[list]); // swap the two first elements of L1 when L0 and L1 are identical if(list && len[0] > 1 && len[0] == len[1]) for(i=0; h->default_ref_list[0][i].data[0] == h->default_ref_list[1][i].data[0]; i++) if(i == len[0]){ FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]); break; } if(len[list] < h->ref_count[ list ]) memset(&h->default_ref_list[list][len[list]], 0, sizeof(Picture)*(h->ref_count[ list ] - len[list])); } }else{ int index=0; int short_len; for(i=0; i<h->short_ref_count; i++){ int sel; sel = h->short_ref[i]->reference | structure_sel; if(sel != PICT_FRAME) continue; frame_list[0][index ]= *h->short_ref[i]; frame_list[0][index++].pic_id= h->short_ref[i]->frame_num; } short_len = index; for(i = 0; i < 16; i++){ int sel; if(h->long_ref[i] == NULL) continue; sel = h->long_ref[i]->reference | structure_sel; if(sel != PICT_FRAME) continue; frame_list[0][index ]= *h->long_ref[i]; frame_list[0][index++].pic_id= i; } if (FIELD_PICTURE) index = split_field_ref_list(h->default_ref_list[0], h->ref_count[0], frame_list[0], index, s->picture_structure, short_len); if(index < h->ref_count[0]) memset(&h->default_ref_list[0][index], 0, sizeof(Picture)*(h->ref_count[0] - index)); } #ifdef TRACE for (i=0; i<h->ref_count[0]; i++) { tprintf(h->s.avctx, \"List0: %s fn:%d 0x%p\\n\", (h->default_ref_list[0][i].long_ref ? \"LT\" : \"ST\"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]); } if(h->slice_type_nos==FF_B_TYPE){ for (i=0; i<h->ref_count[1]; i++) { tprintf(h->s.avctx, \"List1: %s fn:%d 0x%p\\n\", (h->default_ref_list[1][i].long_ref ?
\"LT\" : \"ST\"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].data[0]); } } #endif return 0; }"} {"target": 1, "idx": 21670, "func": "void qemu_savevm_state_complete(QEMUFile *f) { QJSON *vmdesc; int vmdesc_len; SaveStateEntry *se; int ret; trace_savevm_state_complete(); cpu_synchronize_all_states(); QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { if (!se->ops || !se->ops->save_live_complete) { continue; } if (se->ops && se->ops->is_active) { if (!se->ops->is_active(se->opaque)) { continue; } } trace_savevm_section_start(se->idstr, se->section_id); save_section_header(f, se, QEMU_VM_SECTION_END); ret = se->ops->save_live_complete(f, se->opaque); trace_savevm_section_end(se->idstr, se->section_id, ret); if (ret < 0) { qemu_file_set_error(f, ret); return; } } vmdesc = qjson_new(); json_prop_int(vmdesc, \"page_size\", TARGET_PAGE_SIZE); json_start_array(vmdesc, \"devices\"); QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { if ((!se->ops || !se->ops->save_state) && !se->vmsd) { continue; } trace_savevm_section_start(se->idstr, se->section_id); json_start_object(vmdesc, NULL); json_prop_str(vmdesc, \"name\", se->idstr); json_prop_int(vmdesc, \"instance_id\", se->instance_id); save_section_header(f, se, QEMU_VM_SECTION_FULL); vmstate_save(f, se, vmdesc); json_end_object(vmdesc); trace_savevm_section_end(se->idstr, se->section_id, 0); } qemu_put_byte(f, QEMU_VM_EOF); json_end_array(vmdesc); qjson_finish(vmdesc); vmdesc_len = strlen(qjson_get_str(vmdesc)); if (should_send_vmdesc()) { qemu_put_byte(f, QEMU_VM_VMDESCRIPTION); qemu_put_be32(f, vmdesc_len); qemu_put_buffer(f, (uint8_t *)qjson_get_str(vmdesc), vmdesc_len); } object_unref(OBJECT(vmdesc)); qemu_fflush(f); }"} {"target": 1, "idx": 21694, "func": "static void mvp_init (CPUMIPSState *env, const mips_def_t *def) { env->mvp = g_malloc0(sizeof(CPUMIPSMVPContext)); /* MVPConf1 implemented, TLB sharable, no gating storage support, programmable cache partitioning implemented, number of allocatable and sharable TLB entries, MVP has allocatable TCs, 2 VPEs implemented, 5 TCs implemented. */ env->mvp->CP0_MVPConf0 = (1 << CP0MVPC0_M) | (1 << CP0MVPC0_TLBS) | (0 << CP0MVPC0_GS) | (1 << CP0MVPC0_PCP) | // TODO: actually do 2 VPEs. // (1 << CP0MVPC0_TCA) | (0x1 << CP0MVPC0_PVPE) | // (0x04 << CP0MVPC0_PTC); (1 << CP0MVPC0_TCA) | (0x0 << CP0MVPC0_PVPE) | (0x00 << CP0MVPC0_PTC); #if !defined(CONFIG_USER_ONLY) /* Usermode has no TLB support */ env->mvp->CP0_MVPConf0 |= (env->tlb->nb_tlb << CP0MVPC0_PTLBE); #endif /* Allocatable CP1 have media extensions, allocatable CP1 have FP support, no UDI implemented, no CP2 implemented, 1 CP1 implemented. 
*/ env->mvp->CP0_MVPConf1 = (1 << CP0MVPC1_CIM) | (1 << CP0MVPC1_CIF) | (0x0 << CP0MVPC1_PCX) | (0x0 << CP0MVPC1_PCP2) | (0x1 << CP0MVPC1_PCP1); }"} {"target": 1, "idx": 21708, "func": "static int decode_band(IVI45DecContext *ctx, int plane_num, IVIBandDesc *band, AVCodecContext *avctx) { int result, i, t, idx1, idx2, pos; IVITile *tile; band->buf = band->bufs[ctx->dst_buf]; if (!band->buf) { av_log(avctx, AV_LOG_ERROR, \"Band buffer points to no data!\\n\"); return AVERROR_INVALIDDATA; } band->ref_buf = band->bufs[ctx->ref_buf]; band->data_ptr = ctx->frame_data + (get_bits_count(&ctx->gb) >> 3); result = ctx->decode_band_hdr(ctx, band, avctx); if (result) { av_log(avctx, AV_LOG_ERROR, \"Error while decoding band header: %d\\n\", result); return result; } if (band->is_empty) { av_log(avctx, AV_LOG_ERROR, \"Empty band encountered!\\n\"); return AVERROR_INVALIDDATA; } band->rv_map = &ctx->rvmap_tabs[band->rvmap_sel]; /* apply corrections to the selected rvmap table if present */ for (i = 0; i < band->num_corr; i++) { idx1 = band->corr[i * 2]; idx2 = band->corr[i * 2 + 1]; FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]); FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]); } pos = get_bits_count(&ctx->gb); for (t = 0; t < band->num_tiles; t++) { tile = &band->tiles[t]; if (tile->mb_size != band->mb_size) { av_log(avctx, AV_LOG_ERROR, \"MB sizes mismatch: %d vs. %d\\n\", band->mb_size, tile->mb_size); return AVERROR_INVALIDDATA; } tile->is_empty = get_bits1(&ctx->gb); if (tile->is_empty) { ivi_process_empty_tile(avctx, band, tile, (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3)); av_dlog(avctx, \"Empty tile encountered!\\n\"); } else { tile->data_size = ff_ivi_dec_tile_data_size(&ctx->gb); if (!tile->data_size) { av_log(avctx, AV_LOG_ERROR, \"Tile data size is zero!\\n\"); return AVERROR_INVALIDDATA; } result = ctx->decode_mb_info(ctx, band, tile, avctx); if (result < 0) break; result = ff_ivi_decode_blocks(&ctx->gb, band, tile); if (result < 0 || ((get_bits_count(&ctx->gb) - pos) >> 3) != tile->data_size) { av_log(avctx, AV_LOG_ERROR, \"Corrupted tile data encountered!\\n\"); break; } pos += tile->data_size << 3; // skip to next tile } } /* restore the selected rvmap table by applying its corrections in reverse order */ for (i = band->num_corr-1; i >= 0; i--) { idx1 = band->corr[i*2]; idx2 = band->corr[i*2+1]; FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]); FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]); } #ifdef DEBUG if (band->checksum_present) { uint16_t chksum = ivi_calc_band_checksum(band); if (chksum != band->checksum) { av_log(avctx, AV_LOG_ERROR, \"Band checksum mismatch! 
Plane %d, band %d, received: %x, calculated: %x\\n\", band->plane, band->band_num, band->checksum, chksum); } } #endif align_get_bits(&ctx->gb); return result; }"} {"target": 0, "idx": 21723, "func": "static int s390_virtio_device_init(VirtIOS390Device *dev, VirtIODevice *vdev) { VirtIOS390Bus *bus; int dev_len; bus = DO_UPCAST(VirtIOS390Bus, bus, dev->qdev.parent_bus); dev->vdev = vdev; dev->dev_offs = bus->dev_offs; dev->feat_len = sizeof(uint32_t); /* always keep 32 bits features */ dev_len = VIRTIO_DEV_OFFS_CONFIG; dev_len += s390_virtio_device_num_vq(dev) * VIRTIO_VQCONFIG_LEN; dev_len += dev->feat_len * 2; dev_len += virtio_bus_get_vdev_config_len(&dev->bus); bus->dev_offs += dev_len; dev->host_features = virtio_bus_get_vdev_features(&dev->bus, dev->host_features); s390_virtio_device_sync(dev); s390_virtio_reset_idx(dev); if (dev->qdev.hotplugged) { S390CPU *cpu = s390_cpu_addr2state(0); s390_virtio_irq(cpu, VIRTIO_PARAM_DEV_ADD, dev->dev_offs); } return 0; }"} {"target": 0, "idx": 21731, "func": "RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, void (*resized)(const char*, uint64_t length, void *host), void *host, bool resizeable, MemoryRegion *mr, Error **errp) { RAMBlock *new_block; Error *local_err = NULL; size = HOST_PAGE_ALIGN(size); max_size = HOST_PAGE_ALIGN(max_size); new_block = g_malloc0(sizeof(*new_block)); new_block->mr = mr; new_block->resized = resized; new_block->used_length = size; new_block->max_length = max_size; assert(max_size >= size); new_block->fd = -1; new_block->host = host; if (host) { new_block->flags |= RAM_PREALLOC; } if (resizeable) { new_block->flags |= RAM_RESIZEABLE; } ram_block_add(new_block, &local_err); if (local_err) { g_free(new_block); error_propagate(errp, local_err); return NULL; } mr->ram_block = new_block; return new_block; }"} {"target": 0, "idx": 21739, "func": "static inline void tcg_out_movi(TCGContext *s, TCGType type, int ret, tcg_target_long arg) { #if defined(__sparc_v9__) && !defined(__sparc_v8plus__) if (arg != (arg & 0xffffffff)) fprintf(stderr, \"unimplemented %s with constant %ld\\n\", __func__, arg); #endif if (arg == (arg & 0xfff)) tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(TCG_REG_G0) | INSN_IMM13(arg)); else { tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10)); if (arg & 0x3ff) tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(ret) | INSN_IMM13(arg & 0x3ff)); } }"} {"target": 0, "idx": 21740, "func": "qcrypto_block_luks_create(QCryptoBlock *block, QCryptoBlockCreateOptions *options, QCryptoBlockInitFunc initfunc, QCryptoBlockWriteFunc writefunc, void *opaque, Error **errp) { QCryptoBlockLUKS *luks; QCryptoBlockCreateOptionsLUKS luks_opts; Error *local_err = NULL; uint8_t *masterkey = NULL; uint8_t *slotkey = NULL; uint8_t *splitkey = NULL; size_t splitkeylen = 0; size_t i; QCryptoCipher *cipher = NULL; QCryptoIVGen *ivgen = NULL; char *password; const char *cipher_alg; const char *cipher_mode; const char *ivgen_alg; const char *ivgen_hash_alg = NULL; const char *hash_alg; char *cipher_mode_spec = NULL; QCryptoCipherAlgorithm ivcipheralg = 0; uint64_t iters; memcpy(&luks_opts, &options->u.luks, sizeof(luks_opts)); if (!luks_opts.has_iter_time) { luks_opts.iter_time = 2000; } if (!luks_opts.has_cipher_alg) { luks_opts.cipher_alg = QCRYPTO_CIPHER_ALG_AES_256; } if (!luks_opts.has_cipher_mode) { luks_opts.cipher_mode = QCRYPTO_CIPHER_MODE_XTS; } if (!luks_opts.has_ivgen_alg) { luks_opts.ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64; } if (!luks_opts.has_hash_alg) { luks_opts.hash_alg = 
QCRYPTO_HASH_ALG_SHA256; } if (luks_opts.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) { if (!luks_opts.has_ivgen_hash_alg) { luks_opts.ivgen_hash_alg = QCRYPTO_HASH_ALG_SHA256; luks_opts.has_ivgen_hash_alg = true; } } /* Note we're allowing ivgen_hash_alg to be set even for * non-essiv iv generators that don't need a hash. It will * be silently ignored, for compatibility with dm-crypt */ if (!options->u.luks.key_secret) { error_setg(errp, \"Parameter 'key-secret' is required for cipher\"); return -1; } password = qcrypto_secret_lookup_as_utf8(luks_opts.key_secret, errp); if (!password) { return -1; } luks = g_new0(QCryptoBlockLUKS, 1); block->opaque = luks; memcpy(luks->header.magic, qcrypto_block_luks_magic, QCRYPTO_BLOCK_LUKS_MAGIC_LEN); /* We populate the header in native endianness initially and * then convert everything to big endian just before writing * it out to disk */ luks->header.version = QCRYPTO_BLOCK_LUKS_VERSION; qcrypto_block_luks_uuid_gen(luks->header.uuid); cipher_alg = qcrypto_block_luks_cipher_alg_lookup(luks_opts.cipher_alg, errp); if (!cipher_alg) { goto error; } cipher_mode = QCryptoCipherMode_lookup[luks_opts.cipher_mode]; ivgen_alg = QCryptoIVGenAlgorithm_lookup[luks_opts.ivgen_alg]; if (luks_opts.has_ivgen_hash_alg) { ivgen_hash_alg = QCryptoHashAlgorithm_lookup[luks_opts.ivgen_hash_alg]; cipher_mode_spec = g_strdup_printf(\"%s-%s:%s\", cipher_mode, ivgen_alg, ivgen_hash_alg); } else { cipher_mode_spec = g_strdup_printf(\"%s-%s\", cipher_mode, ivgen_alg); } hash_alg = QCryptoHashAlgorithm_lookup[luks_opts.hash_alg]; if (strlen(cipher_alg) >= QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN) { error_setg(errp, \"Cipher name '%s' is too long for LUKS header\", cipher_alg); goto error; } if (strlen(cipher_mode_spec) >= QCRYPTO_BLOCK_LUKS_CIPHER_MODE_LEN) { error_setg(errp, \"Cipher mode '%s' is too long for LUKS header\", cipher_mode_spec); goto error; } if (strlen(hash_alg) >= QCRYPTO_BLOCK_LUKS_HASH_SPEC_LEN) { error_setg(errp, \"Hash name '%s' is too long for LUKS header\", hash_alg); goto error; } if (luks_opts.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) { ivcipheralg = qcrypto_block_luks_essiv_cipher(luks_opts.cipher_alg, luks_opts.ivgen_hash_alg, &local_err); if (local_err) { error_propagate(errp, local_err); goto error; } } else { ivcipheralg = luks_opts.cipher_alg; } strcpy(luks->header.cipher_name, cipher_alg); strcpy(luks->header.cipher_mode, cipher_mode_spec); strcpy(luks->header.hash_spec, hash_alg); luks->header.key_bytes = qcrypto_cipher_get_key_len(luks_opts.cipher_alg); if (luks_opts.cipher_mode == QCRYPTO_CIPHER_MODE_XTS) { luks->header.key_bytes *= 2; } /* Generate the salt used for hashing the master key * with PBKDF later */ if (qcrypto_random_bytes(luks->header.master_key_salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, errp) < 0) { goto error; } /* Generate random master key */ masterkey = g_new0(uint8_t, luks->header.key_bytes); if (qcrypto_random_bytes(masterkey, luks->header.key_bytes, errp) < 0) { goto error; } /* Setup the block device payload encryption objects */ block->cipher = qcrypto_cipher_new(luks_opts.cipher_alg, luks_opts.cipher_mode, masterkey, luks->header.key_bytes, errp); if (!block->cipher) { goto error; } block->kdfhash = luks_opts.hash_alg; block->niv = qcrypto_cipher_get_iv_len(luks_opts.cipher_alg, luks_opts.cipher_mode); block->ivgen = qcrypto_ivgen_new(luks_opts.ivgen_alg, ivcipheralg, luks_opts.ivgen_hash_alg, masterkey, luks->header.key_bytes, errp); if (!block->ivgen) { goto error; } /* Determine how many iterations we need to hash the master * key, in order 
to have 1 second of compute time used */ iters = qcrypto_pbkdf2_count_iters(luks_opts.hash_alg, masterkey, luks->header.key_bytes, luks->header.master_key_salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, QCRYPTO_BLOCK_LUKS_DIGEST_LEN, &local_err); if (local_err) { error_propagate(errp, local_err); goto error; } if (iters > (ULLONG_MAX / luks_opts.iter_time)) { error_setg_errno(errp, ERANGE, \"PBKDF iterations %llu too large to scale\", (unsigned long long)iters); goto error; } /* iter_time was in millis, but count_iters reported for secs */ iters = iters * luks_opts.iter_time / 1000; /* Why /= 8 ? That matches cryptsetup, but there's no * explanation why they chose /= 8... Probably so that * if all 8 keyslots are active we only spend 1 second * in total time to check all keys */ iters /= 8; if (iters > UINT32_MAX) { error_setg_errno(errp, ERANGE, \"PBKDF iterations %llu larger than %u\", (unsigned long long)iters, UINT32_MAX); goto error; } iters = MAX(iters, QCRYPTO_BLOCK_LUKS_MIN_MASTER_KEY_ITERS); luks->header.master_key_iterations = iters; /* Hash the master key, saving the result in the LUKS * header. This hash is used when opening the encrypted * device to verify that the user password unlocked a * valid master key */ if (qcrypto_pbkdf2(luks_opts.hash_alg, masterkey, luks->header.key_bytes, luks->header.master_key_salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, luks->header.master_key_iterations, luks->header.master_key_digest, QCRYPTO_BLOCK_LUKS_DIGEST_LEN, errp) < 0) { goto error; } /* Although LUKS has multiple key slots, we're just going * to use the first key slot */ splitkeylen = luks->header.key_bytes * QCRYPTO_BLOCK_LUKS_STRIPES; for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { luks->header.key_slots[i].active = i == 0 ? QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED : QCRYPTO_BLOCK_LUKS_KEY_SLOT_DISABLED; luks->header.key_slots[i].stripes = QCRYPTO_BLOCK_LUKS_STRIPES; /* This calculation doesn't match that shown in the spec, * but instead follows the cryptsetup implementation. 
*/ luks->header.key_slots[i].key_offset = (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / QCRYPTO_BLOCK_LUKS_SECTOR_SIZE) + (ROUND_UP(DIV_ROUND_UP(splitkeylen, QCRYPTO_BLOCK_LUKS_SECTOR_SIZE), (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) * i); } if (qcrypto_random_bytes(luks->header.key_slots[0].salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, errp) < 0) { goto error; } /* Again we determine how many iterations are required to * hash the user password while consuming 1 second of compute * time */ iters = qcrypto_pbkdf2_count_iters(luks_opts.hash_alg, (uint8_t *)password, strlen(password), luks->header.key_slots[0].salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, luks->header.key_bytes, &local_err); if (local_err) { error_propagate(errp, local_err); goto error; } if (iters > (ULLONG_MAX / luks_opts.iter_time)) { error_setg_errno(errp, ERANGE, \"PBKDF iterations %llu too large to scale\", (unsigned long long)iters); goto error; } /* iter_time was in millis, but count_iters reported for secs */ iters = iters * luks_opts.iter_time / 1000; if (iters > UINT32_MAX) { error_setg_errno(errp, ERANGE, \"PBKDF iterations %llu larger than %u\", (unsigned long long)iters, UINT32_MAX); goto error; } luks->header.key_slots[0].iterations = MAX(iters, QCRYPTO_BLOCK_LUKS_MIN_SLOT_KEY_ITERS); /* Generate a key that we'll use to encrypt the master * key, from the user's password */ slotkey = g_new0(uint8_t, luks->header.key_bytes); if (qcrypto_pbkdf2(luks_opts.hash_alg, (uint8_t *)password, strlen(password), luks->header.key_slots[0].salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, luks->header.key_slots[0].iterations, slotkey, luks->header.key_bytes, errp) < 0) { goto error; } /* Setup the encryption objects needed to encrypt the * master key material */ cipher = qcrypto_cipher_new(luks_opts.cipher_alg, luks_opts.cipher_mode, slotkey, luks->header.key_bytes, errp); if (!cipher) { goto error; } ivgen = qcrypto_ivgen_new(luks_opts.ivgen_alg, ivcipheralg, luks_opts.ivgen_hash_alg, slotkey, luks->header.key_bytes, errp); if (!ivgen) { goto error; } /* Before storing the master key, we need to vastly * increase its size, as protection against forensic * disk data recovery */ splitkey = g_new0(uint8_t, splitkeylen); if (qcrypto_afsplit_encode(luks_opts.hash_alg, luks->header.key_bytes, luks->header.key_slots[0].stripes, masterkey, splitkey, errp) < 0) { goto error; } /* Now we encrypt the split master key with the key generated * from the user's password, before storing it */ if (qcrypto_block_encrypt_helper(cipher, block->niv, ivgen, QCRYPTO_BLOCK_LUKS_SECTOR_SIZE, 0, splitkey, splitkeylen, errp) < 0) { goto error; } /* The total size of the LUKS headers is the partition header + key * slot headers, rounded up to the nearest sector, combined with * the size of each master key material region, also rounded up * to the nearest sector */ luks->header.payload_offset = (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / QCRYPTO_BLOCK_LUKS_SECTOR_SIZE) + (ROUND_UP(DIV_ROUND_UP(splitkeylen, QCRYPTO_BLOCK_LUKS_SECTOR_SIZE), (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) * QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS); block->payload_offset = luks->header.payload_offset * QCRYPTO_BLOCK_LUKS_SECTOR_SIZE; /* Reserve header space to match payload offset */ initfunc(block, block->payload_offset, &local_err, opaque); if (local_err) { error_propagate(errp, local_err); goto error; } /* Everything on disk uses Big Endian, so flip header fields * before writing them */ cpu_to_be16s(&luks->header.version); cpu_to_be32s(&luks->header.payload_offset); 
cpu_to_be32s(&luks->header.key_bytes); cpu_to_be32s(&luks->header.master_key_iterations); for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { cpu_to_be32s(&luks->header.key_slots[i].active); cpu_to_be32s(&luks->header.key_slots[i].iterations); cpu_to_be32s(&luks->header.key_slots[i].key_offset); cpu_to_be32s(&luks->header.key_slots[i].stripes); } /* Write out the partition header and key slot headers */ writefunc(block, 0, (const uint8_t *)&luks->header, sizeof(luks->header), &local_err, opaque); /* Delay checking local_err until we've byte-swapped */ /* Byte swap the header back to native, in case we need * to read it again later */ be16_to_cpus(&luks->header.version); be32_to_cpus(&luks->header.payload_offset); be32_to_cpus(&luks->header.key_bytes); be32_to_cpus(&luks->header.master_key_iterations); for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { be32_to_cpus(&luks->header.key_slots[i].active); be32_to_cpus(&luks->header.key_slots[i].iterations); be32_to_cpus(&luks->header.key_slots[i].key_offset); be32_to_cpus(&luks->header.key_slots[i].stripes); } if (local_err) { error_propagate(errp, local_err); goto error; } /* Write out the master key material, starting at the * sector immediately following the partition header. */ if (writefunc(block, luks->header.key_slots[0].key_offset * QCRYPTO_BLOCK_LUKS_SECTOR_SIZE, splitkey, splitkeylen, errp, opaque) != splitkeylen) { goto error; } luks->cipher_alg = luks_opts.cipher_alg; luks->cipher_mode = luks_opts.cipher_mode; luks->ivgen_alg = luks_opts.ivgen_alg; luks->ivgen_hash_alg = luks_opts.ivgen_hash_alg; luks->hash_alg = luks_opts.hash_alg; memset(masterkey, 0, luks->header.key_bytes); g_free(masterkey); memset(slotkey, 0, luks->header.key_bytes); g_free(slotkey); g_free(splitkey); g_free(password); g_free(cipher_mode_spec); qcrypto_ivgen_free(ivgen); qcrypto_cipher_free(cipher); return 0; error: if (masterkey) { memset(masterkey, 0, luks->header.key_bytes); } g_free(masterkey); if (slotkey) { memset(slotkey, 0, luks->header.key_bytes); } g_free(slotkey); g_free(splitkey); g_free(password); g_free(cipher_mode_spec); qcrypto_ivgen_free(ivgen); qcrypto_cipher_free(cipher); g_free(luks); return -1; }"} {"target": 1, "idx": 21750, "func": "static int swf_write_trailer(AVFormatContext *s) { SWFContext *swf = s->priv_data; ByteIOContext *pb = &s->pb; AVCodecContext *enc, *video_enc; int file_size, i; video_enc = NULL; for(i=0;i<s->nb_streams;i++) { enc = &s->streams[i]->codec; if (enc->codec_type == CODEC_TYPE_VIDEO) video_enc = enc; } put_swf_tag(s, TAG_END); put_swf_end_tag(s); put_flush_packet(&s->pb); /* patch file size and number of frames if not streamed */ if (!url_is_streamed(&s->pb) && video_enc) { file_size = url_ftell(pb); url_fseek(pb, 4, SEEK_SET); put_le32(pb, file_size); url_fseek(pb, swf->duration_pos, SEEK_SET); put_le16(pb, video_enc->frame_number); } av_free(swf); return 0; }"} {"target": 1, "idx": 21762, "func": "static int xpm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { XPMDecContext *x = avctx->priv_data; AVFrame *p=data; const uint8_t *end, *ptr = avpkt->data; int ncolors, cpp, ret, i, j; int64_t size; uint32_t *dst; avctx->pix_fmt = AV_PIX_FMT_BGRA; end = avpkt->data + avpkt->size; while (memcmp(ptr, \"/* XPM */\\n\", 10) && ptr < end - 10) ptr++; if (ptr >= end) { av_log(avctx, AV_LOG_ERROR, \"missing signature\\n\"); return AVERROR_INVALIDDATA; } ptr += mod_strcspn(ptr, \"\\\"\"); if (sscanf(ptr, \"\\\"%u %u %u %u\\\",\", &avctx->width, &avctx->height, &ncolors, &cpp) != 4) {
av_log(avctx, AV_LOG_ERROR, \"missing image parameters\\n\"); return AVERROR_INVALIDDATA; } if ((ret = ff_set_dimensions(avctx, avctx->width, avctx->height)) < 0) return ret; if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; if (cpp <= 0 || cpp >= 5) { av_log(avctx, AV_LOG_ERROR, \"unsupported/invalid number of chars per pixel: %d\\n\", cpp); return AVERROR_INVALIDDATA; } size = 1; for (i = 0; i < cpp; i++) size *= 94; if (ncolors <= 0 || ncolors > size) { av_log(avctx, AV_LOG_ERROR, \"invalid number of colors: %d\\n\", ncolors); return AVERROR_INVALIDDATA; } size *= 4; av_fast_padded_malloc(&x->pixels, &x->pixels_size, size); if (!x->pixels) return AVERROR(ENOMEM); ptr += mod_strcspn(ptr, \",\") + 1; for (i = 0; i < ncolors; i++) { const uint8_t *index; int len; ptr += mod_strcspn(ptr, \"\\\"\") + 1; if (ptr + cpp > end) return AVERROR_INVALIDDATA; index = ptr; ptr += cpp; ptr = strstr(ptr, \"c \"); if (ptr) { ptr += 2; } else { return AVERROR_INVALIDDATA; } len = strcspn(ptr, \"\\\" \"); if ((ret = ascii2index(index, cpp)) < 0) return ret; x->pixels[ret] = color_string_to_rgba(ptr, len); ptr += mod_strcspn(ptr, \",\") + 1; } for (i = 0; i < avctx->height; i++) { dst = (uint32_t *)(p->data[0] + i * p->linesize[0]); ptr += mod_strcspn(ptr, \"\\\"\") + 1; for (j = 0; j < avctx->width; j++) { if (ptr + cpp > end) return AVERROR_INVALIDDATA; if ((ret = ascii2index(ptr, cpp)) < 0) return ret; *dst++ = x->pixels[ret]; ptr += cpp; } ptr += mod_strcspn(ptr, \",\") + 1; } p->key_frame = 1; p->pict_type = AV_PICTURE_TYPE_I; *got_frame = 1; return avpkt->size; }"} {"target": 0, "idx": 21766, "func": "static void ripemd160_transform(uint32_t *state, const uint8_t buffer[64], int ext) { uint32_t a, b, c, d, e, f, g, h, i, j; uint32_t block[16]; int n; if (ext) { a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; f = state[5]; g = state[6]; h = state[7]; i = state[8]; j = state[9]; } else { a = f = state[0]; b = g = state[1]; c = h = state[2]; d = i = state[3]; e = j = state[4]; } for (n = 0; n < 16; n++) block[n] = AV_RL32(buffer + 4 * n); for (n = 0; n < 16 - 1;) { ROUND160_0_TO_15(a,b,c,d,e,f,g,h,i,j); ROUND160_0_TO_15(e,a,b,c,d,j,f,g,h,i); ROUND160_0_TO_15(d,e,a,b,c,i,j,f,g,h); ROUND160_0_TO_15(c,d,e,a,b,h,i,j,f,g); ROUND160_0_TO_15(b,c,d,e,a,g,h,i,j,f); } ROUND160_0_TO_15(a,b,c,d,e,f,g,h,i,j); SWAP(a,f) for (; n < 32 - 1;) { ROUND160_16_TO_31(e,a,b,c,d,j,f,g,h,i); ROUND160_16_TO_31(d,e,a,b,c,i,j,f,g,h); ROUND160_16_TO_31(c,d,e,a,b,h,i,j,f,g); ROUND160_16_TO_31(b,c,d,e,a,g,h,i,j,f); ROUND160_16_TO_31(a,b,c,d,e,f,g,h,i,j); } ROUND160_16_TO_31(e,a,b,c,d,j,f,g,h,i); SWAP(b,g) for (; n < 48 - 1;) { ROUND160_32_TO_47(d,e,a,b,c,i,j,f,g,h); ROUND160_32_TO_47(c,d,e,a,b,h,i,j,f,g); ROUND160_32_TO_47(b,c,d,e,a,g,h,i,j,f); ROUND160_32_TO_47(a,b,c,d,e,f,g,h,i,j); ROUND160_32_TO_47(e,a,b,c,d,j,f,g,h,i); } ROUND160_32_TO_47(d,e,a,b,c,i,j,f,g,h); SWAP(c,h) for (; n < 64 - 1;) { ROUND160_48_TO_63(c,d,e,a,b,h,i,j,f,g); ROUND160_48_TO_63(b,c,d,e,a,g,h,i,j,f); ROUND160_48_TO_63(a,b,c,d,e,f,g,h,i,j); ROUND160_48_TO_63(e,a,b,c,d,j,f,g,h,i); ROUND160_48_TO_63(d,e,a,b,c,i,j,f,g,h); } ROUND160_48_TO_63(c,d,e,a,b,h,i,j,f,g); SWAP(d,i) for (; n < 75;) { ROUND160_64_TO_79(b,c,d,e,a,g,h,i,j,f); ROUND160_64_TO_79(a,b,c,d,e,f,g,h,i,j); ROUND160_64_TO_79(e,a,b,c,d,j,f,g,h,i); ROUND160_64_TO_79(d,e,a,b,c,i,j,f,g,h); ROUND160_64_TO_79(c,d,e,a,b,h,i,j,f,g); } ROUND160_64_TO_79(b,c,d,e,a,g,h,i,j,f); SWAP(e,j) if (ext) { state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; 
state[6] += g; state[7] += h; state[8] += i; state[9] += j; } else { i += c + state[1]; state[1] = state[2] + d + j; state[2] = state[3] + e + f; state[3] = state[4] + a + g; state[4] = state[0] + b + h; state[0] = i; } }"} {"target": 0, "idx": 21767, "func": "static void opt_frame_size(const char *arg) { if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) { fprintf(stderr, \"Incorrect frame size\\n\"); av_exit(1); } if ((frame_width % 2) != 0 || (frame_height % 2) != 0) { fprintf(stderr, \"Frame size must be a multiple of 2\\n\"); av_exit(1); } }"} {"target": 1, "idx": 21776, "func": "static int sdl_write_trailer(AVFormatContext *s) { SDLContext *sdl = s->priv_data; sdl->quit = 1; if (sdl->overlay) SDL_FreeYUVOverlay(sdl->overlay); if (sdl->event_thread) SDL_WaitThread(sdl->event_thread, NULL); if (sdl->mutex) SDL_DestroyMutex(sdl->mutex); if (sdl->init_cond) SDL_DestroyCond(sdl->init_cond); if (!sdl->sdl_was_already_inited) SDL_Quit(); return 0; }"} {"target": 0, "idx": 21779, "func": "void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) { int mb_x, mb_y; int dct_linesize, dct_offset; op_pixels_func *op_pix; qpel_mc_func *op_qpix; mb_x = s->mb_x; mb_y = s->mb_y; #ifdef FF_POSTPROCESS quant_store[mb_y][mb_x]=s->qscale; //printf(\"[%02d][%02d] %d\\n\",mb_x,mb_y,s->qscale); #endif /* update DC predictors for P macroblocks */ if (!s->mb_intra) { if (s->h263_pred || s->h263_aic) { if(s->mbintra_table[mb_x + mb_y*s->mb_width]) { int wrap, xy, v; s->mbintra_table[mb_x + mb_y*s->mb_width]=0; wrap = 2 * s->mb_width + 2; xy = 2 * mb_x + 1 + (2 * mb_y + 1) * wrap; v = 1024; s->dc_val[0][xy] = v; s->dc_val[0][xy + 1] = v; s->dc_val[0][xy + wrap] = v; s->dc_val[0][xy + 1 + wrap] = v; /* ac pred */ memset(s->ac_val[0][xy], 0, 16 * sizeof(INT16)); memset(s->ac_val[0][xy + 1], 0, 16 * sizeof(INT16)); memset(s->ac_val[0][xy + wrap], 0, 16 * sizeof(INT16)); memset(s->ac_val[0][xy + 1 + wrap], 0, 16 * sizeof(INT16)); if (s->h263_msmpeg4) { s->coded_block[xy] = 0; s->coded_block[xy + 1] = 0; s->coded_block[xy + wrap] = 0; s->coded_block[xy + 1 + wrap] = 0; } /* chroma */ wrap = s->mb_width + 2; xy = mb_x + 1 + (mb_y + 1) * wrap; s->dc_val[1][xy] = v; s->dc_val[2][xy] = v; /* ac pred */ memset(s->ac_val[1][xy], 0, 16 * sizeof(INT16)); memset(s->ac_val[2][xy], 0, 16 * sizeof(INT16)); } } else { s->last_dc[0] = 128 << s->intra_dc_precision; s->last_dc[1] = 128 << s->intra_dc_precision; s->last_dc[2] = 128 << s->intra_dc_precision; } } else if (s->h263_pred || s->h263_aic) s->mbintra_table[mb_x + mb_y*s->mb_width]=1; /* update motion predictor, not for B-frames as they need the motion_val from the last P/S-Frame */ if (s->out_format == FMT_H263) { //FIXME move into h263.c if possible, format specific stuff shouldnt be here if(s->pict_type!=B_TYPE){ int xy, wrap, motion_x, motion_y; wrap = 2 * s->mb_width + 2; xy = 2 * mb_x + 1 + (2 * mb_y + 1) * wrap; if (s->mb_intra) { motion_x = 0; motion_y = 0; goto motion_init; } else if (s->mv_type == MV_TYPE_16X16) { motion_x = s->mv[0][0][0]; motion_y = s->mv[0][0][1]; motion_init: /* no update if 8X8 because it has been done during parsing */ s->motion_val[xy][0] = motion_x; s->motion_val[xy][1] = motion_y; s->motion_val[xy + 1][0] = motion_x; s->motion_val[xy + 1][1] = motion_y; s->motion_val[xy + wrap][0] = motion_x; s->motion_val[xy + wrap][1] = motion_y; s->motion_val[xy + 1 + wrap][0] = motion_x; s->motion_val[xy + 1 + wrap][1] = motion_y; } } } if (!(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { UINT8 *dest_y, *dest_cb, 
*dest_cr; UINT8 *mbskip_ptr; /* avoid copy if macroblock skipped in last frame too dont touch it for B-frames as they need the skip info from the next p-frame */ if (s->pict_type != B_TYPE) { mbskip_ptr = &s->mbskip_table[s->mb_y * s->mb_width + s->mb_x]; if (s->mb_skiped) { s->mb_skiped = 0; /* if previous was skipped too, then nothing to do ! skip only during decoding as we might trash the buffers during encoding a bit */ if (*mbskip_ptr != 0 && !s->encoding) goto the_end; *mbskip_ptr = 1; /* indicate that this time we skiped it */ } else { *mbskip_ptr = 0; /* not skipped */ } } dest_y = s->current_picture[0] + (mb_y * 16 * s->linesize) + mb_x * 16; dest_cb = s->current_picture[1] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8; dest_cr = s->current_picture[2] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8; if (s->interlaced_dct) { dct_linesize = s->linesize * 2; dct_offset = s->linesize; } else { dct_linesize = s->linesize; dct_offset = s->linesize * 8; } if (!s->mb_intra) { /* motion handling */ if((s->flags&CODEC_FLAG_HQ) || (!s->encoding)){ if ((!s->no_rounding) || s->pict_type==B_TYPE){ op_pix = put_pixels_tab; op_qpix= qpel_mc_rnd_tab; }else{ op_pix = put_no_rnd_pixels_tab; op_qpix= qpel_mc_no_rnd_tab; } if (s->mv_dir & MV_DIR_FORWARD) { MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix, op_qpix); if ((!s->no_rounding) || s->pict_type==B_TYPE) op_pix = avg_pixels_tab; else op_pix = avg_no_rnd_pixels_tab; } if (s->mv_dir & MV_DIR_BACKWARD) { MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix, op_qpix); } } /* add dct residue */ add_dct(s, block[0], 0, dest_y, dct_linesize); add_dct(s, block[1], 1, dest_y + 8, dct_linesize); add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize); add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize); add_dct(s, block[4], 4, dest_cb, s->linesize >> 1); add_dct(s, block[5], 5, dest_cr, s->linesize >> 1); } else { /* dct only in intra block */ put_dct(s, block[0], 0, dest_y, dct_linesize); put_dct(s, block[1], 1, dest_y + 8, dct_linesize); put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize); put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize); put_dct(s, block[4], 4, dest_cb, s->linesize >> 1); put_dct(s, block[5], 5, dest_cr, s->linesize >> 1); } } the_end: emms_c(); //FIXME remove }"} {"target": 1, "idx": 21794, "func": "static void kvm_hwpoison_page_add(ram_addr_t ram_addr) { HWPoisonPage *page; QLIST_FOREACH(page, &hwpoison_page_list, list) { if (page->ram_addr == ram_addr) { return; } } page = g_malloc(sizeof(HWPoisonPage)); page->ram_addr = ram_addr; QLIST_INSERT_HEAD(&hwpoison_page_list, page, list); }"} {"target": 1, "idx": 21796, "func": "static int decode_i_picture_primary_header(VC9Context *v) { GetBitContext *gb = &v->s.gb; int pqindex; /* Prolog common to all frametypes should be done in caller */ //BF = Buffer Fullness if (v->profile <= PROFILE_MAIN && get_bits(gb, 7)) { av_log(v->s.avctx, AV_LOG_DEBUG, \"I BufferFullness not 0\\n\"); } /* Quantizer stuff */ pqindex = get_bits(gb, 5); if (v->quantizer_mode == QUANT_FRAME_IMPLICIT) v->pq = pquant_table[0][pqindex]; else { v->pq = pquant_table[v->quantizer_mode-1][pqindex]; } if (pqindex < 9) v->halfpq = get_bits(gb, 1); if (v->quantizer_mode == QUANT_FRAME_EXPLICIT) v->pquantizer = get_bits(gb, 1); av_log(v->s.avctx, AV_LOG_DEBUG, \"I frame: QP=%i (+%i/2)\\n\", v->pq, v->halfpq); return 0; }"} {"target": 1, "idx": 21820, "func": "static void pcie_mmcfg_data_write(PCIBus *s, uint32_t mmcfg_addr, uint32_t val, int len) { PCIDevice 
*pci_dev = pcie_dev_find_by_mmcfg_addr(s, mmcfg_addr); if (!pci_dev) { return; } pci_host_config_write_common(pci_dev, PCIE_MMCFG_CONFOFFSET(mmcfg_addr), pci_config_size(pci_dev), val, len); }"} {"target": 1, "idx": 21822, "func": "static int get_bits(Jpeg2000DecoderContext *s, int n) { int res = 0; if (s->buf_end - s->buf < ((n - s->bit_index) >> 8)) return AVERROR_INVALIDDATA; while (--n >= 0) { res <<= 1; if (s->bit_index == 0) { s->bit_index = 7 + (*s->buf != 0xff); s->buf++; } s->bit_index--; res |= (*s->buf >> s->bit_index) & 1; } return res; }"} {"target": 0, "idx": 21828, "func": "static int mov_read_udta_string(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { char *str = NULL; int size; uint16_t str_size; if (c->itunes_metadata) { int data_size = get_be32(pb); int tag = get_le32(pb); if (tag == MKTAG('d','a','t','a')) { get_be32(pb); // type get_be32(pb); // unknown str_size = data_size - 16; } else return 0; } else { str_size = get_be16(pb); // string length get_be16(pb); // language } switch (atom.type) { case MKTAG(0xa9,'n','a','m'): str = c->fc->title; size = sizeof(c->fc->title); break; case MKTAG(0xa9,'A','R','T'): case MKTAG(0xa9,'w','r','t'): str = c->fc->author; size = sizeof(c->fc->author); break; case MKTAG(0xa9,'c','p','y'): str = c->fc->copyright; size = sizeof(c->fc->copyright); break; case MKTAG(0xa9,'c','m','t'): case MKTAG(0xa9,'i','n','f'): str = c->fc->comment; size = sizeof(c->fc->comment); break; case MKTAG(0xa9,'a','l','b'): str = c->fc->album; size = sizeof(c->fc->album); break; } if (!str) return 0; get_buffer(pb, str, FFMIN(size, str_size)); dprintf(c->fc, \"%.4s %s\\n\", (char*)&atom.type, str); return 0; }"} {"target": 1, "idx": 21836, "func": "void aio_context_setup(AioContext *ctx, Error **errp) { #ifdef CONFIG_EPOLL assert(!ctx->epollfd); ctx->epollfd = epoll_create1(EPOLL_CLOEXEC); if (ctx->epollfd == -1) { ctx->epoll_available = false; } else { ctx->epoll_available = true; } #endif }"} {"target": 0, "idx": 21841, "func": "static int dvbsub_display_end_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size, AVSubtitle *sub) { DVBSubContext *ctx = avctx->priv_data; DVBSubDisplayDefinition *display_def = ctx->display_definition; DVBSubRegion *region; DVBSubRegionDisplay *display; AVSubtitleRect *rect; DVBSubCLUT *clut; uint32_t *clut_table; int i; int offset_x=0, offset_y=0; sub->rects = NULL; sub->start_display_time = 0; sub->end_display_time = ctx->time_out * 1000; sub->format = 0; if (display_def) { offset_x = display_def->x; offset_y = display_def->y; } sub->num_rects = ctx->display_list_size; if (sub->num_rects <= 0) return AVERROR_INVALIDDATA; sub->rects = av_mallocz_array(sub->num_rects * sub->num_rects, sizeof(*sub->rects)); if (!sub->rects) return AVERROR(ENOMEM); i = 0; for (display = ctx->display_list; display; display = display->next) { region = get_region(ctx, display->region_id); rect = sub->rects[i]; if (!region) continue; rect->x = display->x_pos + offset_x; rect->y = display->y_pos + offset_y; rect->w = region->width; rect->h = region->height; rect->nb_colors = 16; rect->type = SUBTITLE_BITMAP; rect->pict.linesize[0] = region->width; clut = get_clut(ctx, region->clut); if (!clut) clut = &default_clut; switch (region->depth) { case 2: clut_table = clut->clut4; break; case 8: clut_table = clut->clut256; break; case 4: default: clut_table = clut->clut16; break; } rect->pict.data[1] = av_mallocz(AVPALETTE_SIZE); if (!rect->pict.data[1]) { av_free(sub->rects); return AVERROR(ENOMEM); } memcpy(rect->pict.data[1], clut_table, (1 << 
region->depth) * sizeof(uint32_t)); rect->pict.data[0] = av_malloc(region->buf_size); if (!rect->pict.data[0]) { av_free(rect->pict.data[1]); av_free(sub->rects); return AVERROR(ENOMEM); } memcpy(rect->pict.data[0], region->pbuf, region->buf_size); i++; } sub->num_rects = i; #ifdef DEBUG save_display_set(ctx); #endif return 1; }"} {"target": 0, "idx": 21851, "func": "static QUANT_FN(pvq_encode_band) { return quant_band_template(pvq, f, rc, band, X, Y, N, b, blocks, lowband, duration, lowband_out, level, gain, lowband_scratch, fill, 1); }"} {"target": 0, "idx": 21863, "func": "static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size, ram_addr_t phys_offset) { KVMState *s = kvm_state; ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK; KVMSlot *mem, old; int err; /* kvm works in page size chunks, but the function may be called with sub-page size and unaligned start address. */ size = TARGET_PAGE_ALIGN(size); start_addr = TARGET_PAGE_ALIGN(start_addr); /* KVM does not support read-only slots */ phys_offset &= ~IO_MEM_ROM; while (1) { mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size); if (!mem) { break; } if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr && (start_addr + size <= mem->start_addr + mem->memory_size) && (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) { /* The new slot fits into the existing one and comes with * identical parameters - nothing to be done. */ return; } old = *mem; /* unregister the overlapping slot */ mem->memory_size = 0; err = kvm_set_user_memory_region(s, mem); if (err) { fprintf(stderr, \"%s: error unregistering overlapping slot: %s\\n\", __func__, strerror(-err)); abort(); } /* Workaround for older KVM versions: we can't join slots, even not by * unregistering the previous ones and then registering the larger * slot. We have to maintain the existing fragmentation. Sigh. * * This workaround assumes that the new slot starts at the same * address as the first existing one. If not or if some overlapping * slot comes around later, we will fail (not seen in practice so far) * - and actually require a recent KVM version. 
*/ if (s->broken_set_mem_region && old.start_addr == start_addr && old.memory_size < size && flags < IO_MEM_UNASSIGNED) { mem = kvm_alloc_slot(s); mem->memory_size = old.memory_size; mem->start_addr = old.start_addr; mem->phys_offset = old.phys_offset; mem->flags = 0; err = kvm_set_user_memory_region(s, mem); if (err) { fprintf(stderr, \"%s: error updating slot: %s\\n\", __func__, strerror(-err)); abort(); } start_addr += old.memory_size; phys_offset += old.memory_size; size -= old.memory_size; continue; } /* register prefix slot */ if (old.start_addr < start_addr) { mem = kvm_alloc_slot(s); mem->memory_size = start_addr - old.start_addr; mem->start_addr = old.start_addr; mem->phys_offset = old.phys_offset; mem->flags = 0; err = kvm_set_user_memory_region(s, mem); if (err) { fprintf(stderr, \"%s: error registering prefix slot: %s\\n\", __func__, strerror(-err)); abort(); } } /* register suffix slot */ if (old.start_addr + old.memory_size > start_addr + size) { ram_addr_t size_delta; mem = kvm_alloc_slot(s); mem->start_addr = start_addr + size; size_delta = mem->start_addr - old.start_addr; mem->memory_size = old.memory_size - size_delta; mem->phys_offset = old.phys_offset + size_delta; mem->flags = 0; err = kvm_set_user_memory_region(s, mem); if (err) { fprintf(stderr, \"%s: error registering suffix slot: %s\\n\", __func__, strerror(-err)); abort(); } } } /* in case the KVM bug workaround already \"consumed\" the new slot */ if (!size) return; /* KVM does not need to know about this memory */ if (flags >= IO_MEM_UNASSIGNED) return; mem = kvm_alloc_slot(s); mem->memory_size = size; mem->start_addr = start_addr; mem->phys_offset = phys_offset; mem->flags = 0; err = kvm_set_user_memory_region(s, mem); if (err) { fprintf(stderr, \"%s: error registering slot: %s\\n\", __func__, strerror(-err)); abort(); } }"} {"target": 1, "idx": 21889, "func": "static void master_abort_mem_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { }"} {"target": 1, "idx": 21899, "func": "void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){ int w_align= 1; int h_align= 1; switch(s->pix_fmt){ case PIX_FMT_YUV420P: case PIX_FMT_YUYV422: case PIX_FMT_UYVY422: case PIX_FMT_YUV422P: case PIX_FMT_YUV444P: case PIX_FMT_GRAY8: case PIX_FMT_GRAY16BE: case PIX_FMT_GRAY16LE: case PIX_FMT_YUVJ420P: case PIX_FMT_YUVJ422P: case PIX_FMT_YUVJ444P: case PIX_FMT_YUVA420P: w_align= 16; //FIXME check for non mpeg style codecs and use less alignment h_align= 16; break; case PIX_FMT_YUV411P: case PIX_FMT_UYYVYY411: w_align=32; h_align=8; break; case PIX_FMT_YUV410P: if(s->codec_id == CODEC_ID_SVQ1){ w_align=64; h_align=64; } case PIX_FMT_RGB555: if(s->codec_id == CODEC_ID_RPZA){ w_align=4; h_align=4; } case PIX_FMT_PAL8: case PIX_FMT_BGR8: case PIX_FMT_RGB8: if(s->codec_id == CODEC_ID_SMC){ w_align=4; h_align=4; } break; case PIX_FMT_BGR24: if((s->codec_id == CODEC_ID_MSZH) || (s->codec_id == CODEC_ID_ZLIB)){ w_align=4; h_align=4; } break; default: w_align= 1; h_align= 1; break; } *width = ALIGN(*width , w_align); *height= ALIGN(*height, h_align); if(s->codec_id == CODEC_ID_H264) *height+=2; // some of the optimized chroma MC reads one line too much }"} {"target": 1, "idx": 21919, "func": "void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size){ c->bytestream_start= c->bytestream= buf; c->bytestream_end= buf + buf_size; #if CABAC_BITS == 16 c->low = (*c->bytestream++)<<18; c->low+= (*c->bytestream++)<<10; #else c->low = (*c->bytestream++)<<10; #endif c->low+= ((*c->bytestream++)<<2) + 
2; c->range= 0x1FE; }"} {"target": 1, "idx": 21925, "func": "static int hls_read_header(AVFormatContext *s) { void *u = (s->flags & AVFMT_FLAG_CUSTOM_IO) ? NULL : s->pb; HLSContext *c = s->priv_data; int ret = 0, i; int highest_cur_seq_no = 0; c->ctx = s; c->interrupt_callback = &s->interrupt_callback; c->strict_std_compliance = s->strict_std_compliance; c->first_packet = 1; c->first_timestamp = AV_NOPTS_VALUE; c->cur_timestamp = AV_NOPTS_VALUE; if (u) { // get the previous user agent & set back to null if string size is zero update_options(&c->user_agent, \"user-agent\", u); // get the previous cookies & set back to null if string size is zero update_options(&c->cookies, \"cookies\", u); // get the previous headers & set back to null if string size is zero update_options(&c->headers, \"headers\", u); // get the previous http proxt & set back to null if string size is zero update_options(&c->http_proxy, \"http_proxy\", u); } if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0) goto fail; if ((ret = save_avio_options(s)) < 0) goto fail; /* Some HLS servers don't like being sent the range header */ av_dict_set(&c->avio_opts, \"seekable\", \"0\", 0); if (c->n_variants == 0) { av_log(NULL, AV_LOG_WARNING, \"Empty playlist\\n\"); ret = AVERROR_EOF; goto fail; } /* If the playlist only contained playlists (Master Playlist), * parse each individual playlist. */ if (c->n_playlists > 1 || c->playlists[0]->n_segments == 0) { for (i = 0; i < c->n_playlists; i++) { struct playlist *pls = c->playlists[i]; if ((ret = parse_playlist(c, pls->url, pls, NULL)) < 0) goto fail; } } if (c->variants[0]->playlists[0]->n_segments == 0) { av_log(NULL, AV_LOG_WARNING, \"Empty playlist\\n\"); ret = AVERROR_EOF; goto fail; } /* If this isn't a live stream, calculate the total duration of the * stream. */ if (c->variants[0]->playlists[0]->finished) { int64_t duration = 0; for (i = 0; i < c->variants[0]->playlists[0]->n_segments; i++) duration += c->variants[0]->playlists[0]->segments[i]->duration; s->duration = duration; } /* Associate renditions with variants */ for (i = 0; i < c->n_variants; i++) { struct variant *var = c->variants[i]; if (var->audio_group[0]) add_renditions_to_variant(c, var, AVMEDIA_TYPE_AUDIO, var->audio_group); if (var->video_group[0]) add_renditions_to_variant(c, var, AVMEDIA_TYPE_VIDEO, var->video_group); if (var->subtitles_group[0]) add_renditions_to_variant(c, var, AVMEDIA_TYPE_SUBTITLE, var->subtitles_group); } /* Create a program for each variant */ for (i = 0; i < c->n_variants; i++) { struct variant *v = c->variants[i]; AVProgram *program; program = av_new_program(s, i); if (!program) goto fail; av_dict_set_int(&program->metadata, \"variant_bitrate\", v->bandwidth, 0); } /* Select the starting segments */ for (i = 0; i < c->n_playlists; i++) { struct playlist *pls = c->playlists[i]; if (pls->n_segments == 0) continue; pls->cur_seq_no = select_cur_seq_no(c, pls); highest_cur_seq_no = FFMAX(highest_cur_seq_no, pls->cur_seq_no); } /* Open the demuxer for each playlist */ for (i = 0; i < c->n_playlists; i++) { struct playlist *pls = c->playlists[i]; AVInputFormat *in_fmt = NULL; if (!(pls->ctx = avformat_alloc_context())) { ret = AVERROR(ENOMEM); goto fail; } if (pls->n_segments == 0) continue; pls->index = i; pls->needed = 1; pls->parent = s; /* * If this is a live stream and this playlist looks like it is one segment * behind, try to sync it up so that every substream starts at the same * time position (so e.g. 
avformat_find_stream_info() will see packets from * all active streams within the first few seconds). This is not very generic, * though, as the sequence numbers are technically independent. */ if (!pls->finished && pls->cur_seq_no == highest_cur_seq_no - 1 && highest_cur_seq_no < pls->start_seq_no + pls->n_segments) { pls->cur_seq_no = highest_cur_seq_no; } pls->read_buffer = av_malloc(INITIAL_BUFFER_SIZE); if (!pls->read_buffer){ ret = AVERROR(ENOMEM); avformat_free_context(pls->ctx); pls->ctx = NULL; goto fail; } ffio_init_context(&pls->pb, pls->read_buffer, INITIAL_BUFFER_SIZE, 0, pls, read_data, NULL, NULL); pls->pb.seekable = 0; ret = av_probe_input_buffer(&pls->pb, &in_fmt, pls->segments[0]->url, NULL, 0, 0); if (ret < 0) { /* Free the ctx - it isn't initialized properly at this point, * so avformat_close_input shouldn't be called. If * avformat_open_input fails below, it frees and zeros the * context, so it doesn't need any special treatment like this. */ av_log(s, AV_LOG_ERROR, \"Error when loading first segment '%s'\\n\", pls->segments[0]->url); avformat_free_context(pls->ctx); pls->ctx = NULL; goto fail; } pls->ctx->pb = &pls->pb; pls->ctx->io_open = nested_io_open; if ((ret = ff_copy_whiteblacklists(pls->ctx, s)) < 0) goto fail; ret = avformat_open_input(&pls->ctx, pls->segments[0]->url, in_fmt, NULL); if (ret < 0) goto fail; if (pls->id3_deferred_extra && pls->ctx->nb_streams == 1) { ff_id3v2_parse_apic(pls->ctx, &pls->id3_deferred_extra); avformat_queue_attached_pictures(pls->ctx); ff_id3v2_free_extra_meta(&pls->id3_deferred_extra); pls->id3_deferred_extra = NULL; } if (pls->is_id3_timestamped == -1) av_log(s, AV_LOG_WARNING, \"No expected HTTP requests have been made\\n\"); /* * For ID3 timestamped raw audio streams we need to detect the packet * durations to calculate timestamps in fill_timing_for_id3_timestamped_stream(), * but for other streams we can rely on our user calling avformat_find_stream_info() * on us if they want to. */ if (pls->is_id3_timestamped) { ret = avformat_find_stream_info(pls->ctx, NULL); if (ret < 0) goto fail; } pls->has_noheader_flag = !!(pls->ctx->ctx_flags & AVFMTCTX_NOHEADER); /* Create new AVStreams for each stream in this playlist */ ret = update_streams_from_subdemuxer(s, pls); if (ret < 0) goto fail; add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_AUDIO); add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_VIDEO); add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_SUBTITLE); } update_noheader_flag(s); return 0; fail: free_playlist_list(c); free_variant_list(c); free_rendition_list(c); return ret; }"} {"target": 0, "idx": 21938, "func": "AVBitStreamFilterContext *av_bitstream_filter_init(const char *name) { AVBitStreamFilter *bsf = first_bitstream_filter; while (bsf) { if (!strcmp(name, bsf->name)) { AVBitStreamFilterContext *bsfc = av_mallocz(sizeof(AVBitStreamFilterContext)); bsfc->filter = bsf; bsfc->priv_data = bsf->priv_data_size ? 
av_mallocz(bsf->priv_data_size) : NULL; return bsfc; } bsf = bsf->next; } return NULL; }"} {"target": 1, "idx": 21957, "func": "PPC_OP(divw) { if ((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0) { T0 = (int32_t)((-1) * (T0 >> 31)); } else { T0 = (Ts0 / Ts1); } RETURN(); }"} {"target": 1, "idx": 21958, "func": "BlockInterfaceErrorAction drive_get_onerror(BlockDriverState *bdrv) { int index; for (index = 0; index < nb_drives; index++) if (drives_table[index].bdrv == bdrv) return drives_table[index].onerror; return BLOCK_ERR_REPORT; }"} {"target": 1, "idx": 21962, "func": "static void netfilter_finalize(Object *obj) { NetFilterState *nf = NETFILTER(obj); NetFilterClass *nfc = NETFILTER_GET_CLASS(obj); if (nfc->cleanup) { nfc->cleanup(nf); } if (nf->netdev && !QTAILQ_EMPTY(&nf->netdev->filters)) { QTAILQ_REMOVE(&nf->netdev->filters, nf, next); } }"} {"target": 1, "idx": 21967, "func": "static void pc_compat_1_4(QEMUMachineInitArgs *args) { pc_compat_1_5(args); has_pvpanic = false; x86_cpu_compat_set_features(\"n270\", FEAT_1_ECX, 0, CPUID_EXT_MOVBE); x86_cpu_compat_set_features(\"Westmere\", FEAT_1_ECX, 0, CPUID_EXT_PCLMULQDQ); }"} {"target": 1, "idx": 21969, "func": "static void amdvi_realize(DeviceState *dev, Error **err) { int ret = 0; AMDVIState *s = AMD_IOMMU_DEVICE(dev); X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev); PCIBus *bus = PC_MACHINE(qdev_get_machine())->bus; s->iotlb = g_hash_table_new_full(amdvi_uint64_hash, amdvi_uint64_equal, g_free, g_free); /* This device should take care of IOMMU PCI properties */ x86_iommu->type = TYPE_AMD; qdev_set_parent_bus(DEVICE(&s->pci), &bus->qbus); object_property_set_bool(OBJECT(&s->pci), true, \"realized\", err); s->capab_offset = pci_add_capability(&s->pci.dev, AMDVI_CAPAB_ID_SEC, 0, AMDVI_CAPAB_SIZE); assert(s->capab_offset > 0); ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_MSI, 0, AMDVI_CAPAB_REG_SIZE); assert(ret > 0); ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_HT, 0, AMDVI_CAPAB_REG_SIZE); assert(ret > 0); /* set up MMIO */ memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, \"amdvi-mmio\", AMDVI_MMIO_SIZE); sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio); sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR); pci_setup_iommu(bus, amdvi_host_dma_iommu, s); s->devid = object_property_get_int(OBJECT(&s->pci), \"addr\", err); msi_init(&s->pci.dev, 0, 1, true, false, err); amdvi_init(s); }"} {"target": 1, "idx": 21971, "func": "int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer) { DynBuffer *d = s->opaque; int size; static const char padbuf[FF_INPUT_BUFFER_PADDING_SIZE] = {0}; int padding = 0; if (!s) { *pbuffer = NULL; return 0; } /* don't attempt to pad fixed-size packet buffers */ if (!s->max_packet_size) { avio_write(s, padbuf, sizeof(padbuf)); padding = FF_INPUT_BUFFER_PADDING_SIZE; } avio_flush(s); *pbuffer = d->buffer; size = d->size; av_free(d); av_free(s); return size - padding; }"} {"target": 1, "idx": 21995, "func": "static void qemu_net_queue_append(NetQueue *queue, NetClientState *sender, unsigned flags, const uint8_t *buf, size_t size, NetPacketSent *sent_cb) { NetPacket *packet; if (queue->nq_count >= queue->nq_maxlen && !sent_cb) { return; /* drop if queue full and no callback */ } packet = g_malloc(sizeof(NetPacket) + size); packet->sender = sender; packet->flags = flags; packet->size = size; packet->sent_cb = sent_cb; memcpy(packet->data, buf, size); QTAILQ_INSERT_TAIL(&queue->packets, packet, entry); }"} {"target": 1, "idx": 22003, "func": "static void decode_subframe_lpc(ShortenContext *s, int 
channel, int residual_size, int pred_order) { int sum, i, j; int coeffs[pred_order]; for (i=0; i<pred_order; i++) coeffs[i] = get_sr_golomb_shorten(&s->gb, LPCQUANT); for (i=0; i < s->blocksize; i++) { sum = s->lpcqoffset; for (j=0; j<pred_order; j++) sum += coeffs[j] * s->decoded[channel][i-j-1]; s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + (sum >> LPCQUANT); } }"} {"target": 1, "idx": 22006, "func": "bool timerlist_expired(QEMUTimerList *timer_list) { int64_t expire_time; if (!atomic_read(&timer_list->active_timers)) { return false; } qemu_mutex_lock(&timer_list->active_timers_lock); if (!timer_list->active_timers) { qemu_mutex_unlock(&timer_list->active_timers_lock); return false; } expire_time = timer_list->active_timers->expire_time; qemu_mutex_unlock(&timer_list->active_timers_lock); return expire_time < qemu_clock_get_ns(timer_list->clock->type); }"} {"target": 1, "idx": 22009, "func": "static int pxr24_uncompress(EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td) { unsigned long dest_len, expected_len = 0; const uint8_t *in = td->tmp; uint8_t *out; int c, i, j; for (i = 0; i < s->nb_channels; i++) { if (s->channels[i].pixel_type == EXR_FLOAT) { expected_len += (td->xsize * td->ysize * 3);/* PRX 24 store float in 24 bit instead of 32 */ } else if (s->channels[i].pixel_type == EXR_HALF) { expected_len += (td->xsize * td->ysize * 2); } else {//UINT 32 expected_len += (td->xsize * td->ysize * 4); } } dest_len = expected_len; if (uncompress(td->tmp, &dest_len, src, compressed_size) != Z_OK) { return AVERROR_INVALIDDATA; } else if (dest_len != expected_len) { return AVERROR_INVALIDDATA; } out = td->uncompressed_data; for (i = 0; i < td->ysize; i++) for (c = 0; c < s->nb_channels; c++) { EXRChannel *channel = &s->channels[c]; const uint8_t *ptr[4]; uint32_t pixel = 0; switch (channel->pixel_type) { case EXR_FLOAT: ptr[0] = in; ptr[1] = ptr[0] + td->xsize; ptr[2] = ptr[1] + td->xsize; in = ptr[2] + td->xsize; for (j = 0; j < td->xsize; ++j) { uint32_t diff = (*(ptr[0]++) << 24) | (*(ptr[1]++) << 16) | (*(ptr[2]++) << 8); pixel += diff; bytestream_put_le32(&out, pixel); } break; case EXR_HALF: ptr[0] = in; ptr[1] = ptr[0] + td->xsize; in = ptr[1] + td->xsize; for (j = 0; j < td->xsize; j++) { uint32_t diff = (*(ptr[0]++) << 8) | *(ptr[1]++); pixel += diff; bytestream_put_le16(&out, pixel); } break; case EXR_UINT: ptr[0] = in; ptr[1] = ptr[0] + s->xdelta; ptr[2] = ptr[1] + s->xdelta; ptr[3] = ptr[2] + s->xdelta; in = ptr[3] + s->xdelta; for (j = 0; j < s->xdelta; ++j) { uint32_t diff = (*(ptr[0]++) << 24) | (*(ptr[1]++) << 16) | (*(ptr[2]++) << 8 ) | (*(ptr[3]++)); pixel += diff; bytestream_put_le32(&out, pixel); } break; default: return AVERROR_INVALIDDATA; } } return 0; }"} {"target": 1, "idx": 22013, "func": "static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom) { int64_t total_size = 0; MOVAtom a; int i; if (atom.size < 0) atom.size = INT64_MAX; while (total_size + 8 <= atom.size && !avio_feof(pb)) { int (*parse)(MOVContext*, AVIOContext*, MOVAtom) = NULL; a.size = atom.size; a.type=0; if (atom.size >= 8) { a.size = avio_rb32(pb); a.type = avio_rl32(pb); if (a.type == MKTAG('f','r','e','e') && a.size >= 8 && c->moov_retry) { uint8_t buf[8]; uint32_t *type = (uint32_t *)buf + 1; avio_read(pb, buf, 8); avio_seek(pb, -8, SEEK_CUR); if (*type == MKTAG('m','v','h','d') || *type == MKTAG('c','m','o','v')) { av_log(c->fc, AV_LOG_ERROR, \"Detected moov in a free atom.\\n\"); a.type = MKTAG('m','o','o','v'); } } if (atom.type != MKTAG('r','o','o','t') && atom.type != MKTAG('m','o','o','v')) { if
(a.type == MKTAG('t','r','a','k') || a.type == MKTAG('m','d','a','t')) { av_log(c->fc, AV_LOG_ERROR, \"Broken file, trak/mdat not at top-level\\n\"); avio_skip(pb, -8); return 0; } } total_size += 8; if (a.size == 1) { /* 64 bit extended size */ a.size = avio_rb64(pb) - 8; total_size += 8; } } av_dlog(c->fc, \"type: %08x '%.4s' parent:'%.4s' sz: %\"PRId64\" %\"PRId64\" %\"PRId64\"\\n\", a.type, (char*)&a.type, (char*)&atom.type, a.size, total_size, atom.size); if (a.size == 0) { a.size = atom.size - total_size + 8; } a.size -= 8; if (a.size < 0) break; a.size = FFMIN(a.size, atom.size - total_size); for (i = 0; mov_default_parse_table[i].type; i++) if (mov_default_parse_table[i].type == a.type) { parse = mov_default_parse_table[i].parse; break; } // container is user data if (!parse && (atom.type == MKTAG('u','d','t','a') || atom.type == MKTAG('i','l','s','t'))) parse = mov_read_udta_string; if (!parse) { /* skip leaf atoms data */ avio_skip(pb, a.size); } else { int64_t start_pos = avio_tell(pb); int64_t left; int err = parse(c, pb, a); if (err < 0) return err; if (c->found_moov && c->found_mdat && ((!pb->seekable || c->fc->flags & AVFMT_FLAG_IGNIDX) || start_pos + a.size == avio_size(pb))) { if (!pb->seekable || c->fc->flags & AVFMT_FLAG_IGNIDX) c->next_root_atom = start_pos + a.size; return 0; } left = a.size - avio_tell(pb) + start_pos; if (left > 0) /* skip garbage at atom end */ avio_skip(pb, left); else if (left < 0) { av_log(c->fc, AV_LOG_WARNING, \"overread end of atom '%.4s' by %\"PRId64\" bytes\\n\", (char*)&a.type, -left); avio_seek(pb, left, SEEK_CUR); } } total_size += a.size; } if (total_size < atom.size && atom.size < 0x7ffff) avio_skip(pb, atom.size - total_size); return 0; }"} {"target": 1, "idx": 22016, "func": "static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist, AHCICmdHdr *cmd, int64_t limit, int32_t offset) { uint16_t opts = le16_to_cpu(cmd->opts); uint16_t prdtl = le16_to_cpu(cmd->prdtl); uint64_t cfis_addr = le64_to_cpu(cmd->tbl_addr); uint64_t prdt_addr = cfis_addr + 0x80; dma_addr_t prdt_len = (prdtl * sizeof(AHCI_SG)); dma_addr_t real_prdt_len = prdt_len; uint8_t *prdt; int i; int r = 0; uint64_t sum = 0; int off_idx = -1; int64_t off_pos = -1; int tbl_entry_size; IDEBus *bus = &ad->port; BusState *qbus = BUS(bus); /* * Note: AHCI PRDT can describe up to 256GiB. SATA/ATA only support * transactions of up to 32MiB as of ATA8-ACS3 rev 1b, assuming a * 512 byte sector size. We limit the PRDT in this implementation to * a reasonably large 2GiB, which can accommodate the maximum transfer * request for sector sizes up to 32K. */ if (!prdtl) { DPRINTF(ad->port_no, \"no sg list given by guest: 0x%08x\\n\", opts); return -1; } /* map PRDT */ if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len, DMA_DIRECTION_TO_DEVICE))){ DPRINTF(ad->port_no, \"map failed\\n\"); return -1; } if (prdt_len < real_prdt_len) { DPRINTF(ad->port_no, \"mapped less than expected\\n\"); r = -1; goto out; } /* Get entries in the PRDT, init a qemu sglist accordingly */ if (prdtl > 0) { AHCI_SG *tbl = (AHCI_SG *)prdt; sum = 0; for (i = 0; i < prdtl; i++) { tbl_entry_size = prdt_tbl_entry_size(&tbl[i]); if (offset < (sum + tbl_entry_size)) { off_idx = i; off_pos = offset - sum; break; } sum += tbl_entry_size; } if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) { DPRINTF(ad->port_no, \"%s: Incorrect offset! 
\" \"off_idx: %d, off_pos: %\"PRId64\"\\n\", __func__, off_idx, off_pos); r = -1; goto out; } qemu_sglist_init(sglist, qbus->parent, (prdtl - off_idx), ad->hba->as); qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos, MIN(prdt_tbl_entry_size(&tbl[off_idx]) - off_pos, limit)); for (i = off_idx + 1; i < prdtl && sglist->size < limit; i++) { qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr), MIN(prdt_tbl_entry_size(&tbl[i]), limit - sglist->size)); if (sglist->size > INT32_MAX) { error_report(\"AHCI Physical Region Descriptor Table describes \" \"more than 2 GiB.\"); qemu_sglist_destroy(sglist); r = -1; goto out; } } } out: dma_memory_unmap(ad->hba->as, prdt, prdt_len, DMA_DIRECTION_TO_DEVICE, prdt_len); return r; }"} {"target": 1, "idx": 22034, "func": "static void dwt_encode97_int(DWTContext *s, int *t) { int lev, w = s->linelen[s->ndeclevels-1][0]; int *line = s->i_linebuf; line += 5; for (lev = s->ndeclevels-1; lev >= 0; lev--){ int lh = s->linelen[lev][0], lv = s->linelen[lev][1], mh = s->mod[lev][0], mv = s->mod[lev][1], lp; int *l; // VER_SD l = line + mv; for (lp = 0; lp < lh; lp++) { int i, j = 0; for (i = 0; i < lv; i++) l[i] = t[w*i + lp]; sd_1d97_int(line, mv, mv + lv); // copy back and deinterleave for (i = mv; i < lv; i+=2, j++) t[w*j + lp] = ((l[i] * I_LFTG_X) + (1 << 16)) >> 17; for (i = 1-mv; i < lv; i+=2, j++) t[w*j + lp] = ((l[i] * I_LFTG_K) + (1 << 16)) >> 17; } // HOR_SD l = line + mh; for (lp = 0; lp < lv; lp++){ int i, j = 0; for (i = 0; i < lh; i++) l[i] = t[w*lp + i]; sd_1d97_int(line, mh, mh + lh); // copy back and deinterleave for (i = mh; i < lh; i+=2, j++) t[w*lp + j] = ((l[i] * I_LFTG_X) + (1 << 16)) >> 17; for (i = 1-mh; i < lh; i+=2, j++) t[w*lp + j] = ((l[i] * I_LFTG_K) + (1 << 16)) >> 17; } } }"} {"target": 1, "idx": 22037, "func": "static void decode_postinit(H264Context *h){ MpegEncContext * const s = &h->s; Picture *out = s->current_picture_ptr; Picture *cur = s->current_picture_ptr; int i, pics, out_of_order, out_idx; s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264; s->current_picture_ptr->pict_type= s->pict_type; if (h->next_output_pic) return; if (cur->field_poc[0]==INT_MAX || cur->field_poc[1]==INT_MAX) { //FIXME this allows the next thread to start once we encounter the first field of a PAFF packet //This works if the next packet contains the second field. It does not work if both fields are //in the same packet. //ff_thread_finish_setup(s->avctx); return; } cur->interlaced_frame = 0; cur->repeat_pict = 0; /* Signal interlacing information externally. */ /* Prioritize picture timing SEI information over used decoding process if it exists. */ if(h->sps.pic_struct_present_flag){ switch (h->sei_pic_struct) { case SEI_PIC_STRUCT_FRAME: break; case SEI_PIC_STRUCT_TOP_FIELD: case SEI_PIC_STRUCT_BOTTOM_FIELD: cur->interlaced_frame = 1; break; case SEI_PIC_STRUCT_TOP_BOTTOM: case SEI_PIC_STRUCT_BOTTOM_TOP: if (FIELD_OR_MBAFF_PICTURE) cur->interlaced_frame = 1; else // try to flag soft telecine progressive cur->interlaced_frame = h->prev_interlaced_frame; break; case SEI_PIC_STRUCT_TOP_BOTTOM_TOP: case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM: // Signal the possibility of telecined film externally (pic_struct 5,6) // From these hints, let the applications decide if they apply deinterlacing. cur->repeat_pict = 1; break; case SEI_PIC_STRUCT_FRAME_DOUBLING: // Force progressive here, as doubling interlaced frame is a bad idea. 
cur->repeat_pict = 2; break; case SEI_PIC_STRUCT_FRAME_TRIPLING: cur->repeat_pict = 4; break; } if ((h->sei_ct_type & 3) && h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP) cur->interlaced_frame = (h->sei_ct_type & (1<<1)) != 0; }else{ /* Derive interlacing flag from used decoding process. */ cur->interlaced_frame = FIELD_OR_MBAFF_PICTURE; } h->prev_interlaced_frame = cur->interlaced_frame; if (cur->field_poc[0] != cur->field_poc[1]){ /* Derive top_field_first from field pocs. */ cur->top_field_first = cur->field_poc[0] < cur->field_poc[1]; }else{ if(cur->interlaced_frame || h->sps.pic_struct_present_flag){ /* Use picture timing SEI information. Even if it is a information of a past frame, better than nothing. */ if(h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM || h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP) cur->top_field_first = 1; else cur->top_field_first = 0; }else{ /* Most likely progressive */ cur->top_field_first = 0; } } //FIXME do something with unavailable reference frames /* Sort B-frames into display order */ if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames < h->sps.num_reorder_frames){ s->avctx->has_b_frames = h->sps.num_reorder_frames; s->low_delay = 0; } if( s->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT && !h->sps.bitstream_restriction_flag){ s->avctx->has_b_frames= MAX_DELAYED_PIC_COUNT; s->low_delay= 0; } pics = 0; while(h->delayed_pic[pics]) pics++; assert(pics <= MAX_DELAYED_PIC_COUNT); h->delayed_pic[pics++] = cur; if(cur->reference == 0) cur->reference = DELAYED_PIC_REF; out = h->delayed_pic[0]; out_idx = 0; for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++) if(h->delayed_pic[i]->poc < out->poc){ out = h->delayed_pic[i]; out_idx = i; } if(s->avctx->has_b_frames == 0 && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset)) h->next_outputed_poc= INT_MIN; out_of_order = out->poc < h->next_outputed_poc; if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames >= h->sps.num_reorder_frames) { } else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) || (s->low_delay && ((h->next_outputed_poc != INT_MIN && out->poc > h->next_outputed_poc + 2) || cur->pict_type == AV_PICTURE_TYPE_B))) { s->low_delay = 0; s->avctx->has_b_frames++; } if(out_of_order || pics > s->avctx->has_b_frames){ out->reference &= ~DELAYED_PIC_REF; out->owner2 = s; // for frame threading, the owner must be the second field's thread // or else the first thread can release the picture and reuse it unsafely for(i=out_idx; h->delayed_pic[i]; i++) h->delayed_pic[i] = h->delayed_pic[i+1]; } if(!out_of_order && pics > s->avctx->has_b_frames){ h->next_output_pic = out; if(out_idx==0 && h->delayed_pic[0] && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset)) { h->next_outputed_poc = INT_MIN; } else h->next_outputed_poc = out->poc; }else{ av_log(s->avctx, AV_LOG_DEBUG, \"no picture\\n\"); } ff_thread_finish_setup(s->avctx); }"} {"target": 1, "idx": 22047, "func": "void error_propagate(Error **dst_errp, Error *local_err) { if (local_err && dst_errp == &error_abort) { error_report_err(local_err); abort(); } else if (dst_errp && !*dst_errp) { *dst_errp = local_err; } else if (local_err) { error_free(local_err); } }"} {"target": 1, "idx": 22058, "func": "gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, uint32_t up_shift, CPUTriCoreState *env) { TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); TCGv temp3 = tcg_temp_new(); TCGv_i64 t1 = 
tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = tcg_temp_new_i64(); TCGv_i64 t4 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(t2, arg2); tcg_gen_ext_i32_i64(t3, arg3); tcg_gen_mul_i64(t2, t2, t3); tcg_gen_ext_i32_i64(t1, arg1); /* if we shift part of the fraction out, we need to round up */ tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1); tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0); tcg_gen_sari_i64(t2, t2, up_shift - n); tcg_gen_add_i64(t2, t2, t4); tcg_gen_sub_i64(t3, t1, t2); tcg_gen_trunc_i64_i32(temp3, t3); /* calc v bit */ tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL); tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL); tcg_gen_or_i64(t1, t1, t2); tcg_gen_trunc_i64_i32(cpu_PSW_V, t1); tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); /* We produce an overflow on the host if the mul before was (0x80000000 * 0x80000000) << 1). If this is the case, we negate the ovf. */ if (n == 1) { tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000); tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3); tcg_gen_and_tl(temp, temp, temp2); tcg_gen_shli_tl(temp, temp, 31); /* negate v bit, if special condition */ tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp); } /* Calc SV bit */ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3); tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, temp3); tcg_temp_free(temp); tcg_temp_free(temp2); tcg_temp_free(temp3); tcg_temp_free_i64(t1); tcg_temp_free_i64(t2); tcg_temp_free_i64(t3); tcg_temp_free_i64(t4); }"} {"target": 1, "idx": 22065, "func": "int32 float64_to_int32_round_to_zero( float64 a STATUS_PARAM ) { flag aSign; int16 aExp, shiftCount; uint64_t aSig, savedASig; int32 z; a = float64_squash_input_denormal(a STATUS_VAR); aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); aSign = extractFloat64Sign( a ); if ( 0x41E < aExp ) { if ( ( aExp == 0x7FF ) && aSig ) aSign = 0; goto invalid; } else if ( aExp < 0x3FF ) { if ( aExp || aSig ) STATUS(float_exception_flags) |= float_flag_inexact; return 0; } aSig |= LIT64( 0x0010000000000000 ); shiftCount = 0x433 - aExp; savedASig = aSig; aSig >>= shiftCount; z = aSig; if ( aSign ) z = - z; if ( ( z < 0 ) ^ aSign ) { invalid: float_raise( float_flag_invalid STATUS_VAR); return aSign ? (int32_t) 0x80000000 : 0x7FFFFFFF; } if ( ( aSig<file) { bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags)); } bs_entry = g_new0(BlockReopenQueueEntry, 1); QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry); bs_entry->state.bs = bs; bs_entry->state.flags = flags; return bs_queue; }"} {"target": 0, "idx": 22086, "func": "static void format_line(void *ptr, int level, const char *fmt, va_list vl, AVBPrint part[3], int *print_prefix, int type[2]) { AVClass* avc = ptr ? 
*(AVClass **) ptr : NULL; av_bprint_init(part+0, 0, 1); av_bprint_init(part+1, 0, 1); av_bprint_init(part+2, 0, 65536); if(type) type[0] = type[1] = AV_CLASS_CATEGORY_NA + 16; if (*print_prefix && avc) { if (avc->parent_log_context_offset) { AVClass** parent = *(AVClass ***) (((uint8_t *) ptr) + avc->parent_log_context_offset); if (parent && *parent) { av_bprintf(part+0, \"[%s @ %p] \", (*parent)->item_name(parent), parent); if(type) type[0] = get_category(parent); } } av_bprintf(part+1, \"[%s @ %p] \", avc->item_name(ptr), ptr); if(type) type[1] = get_category(ptr); } av_vbprintf(part+2, fmt, vl); if(*part[0].str || *part[1].str || *part[2].str) { char lastc = part[2].len ? part[2].str[part[2].len - 1] : 0; *print_prefix = lastc == '\\n' || lastc == '\\r'; } }"} {"target": 0, "idx": 22089, "func": "static int dnxhd_init_vlc(DNXHDContext *ctx, uint32_t cid) { if (cid != ctx->cid) { int index; if ((index = ff_dnxhd_get_cid_table(cid)) < 0) { av_log(ctx->avctx, AV_LOG_ERROR, \"unsupported cid %d\\n\", cid); return AVERROR(ENOSYS); } if (ff_dnxhd_cid_table[index].bit_depth != ctx->bit_depth) { av_log(ctx->avctx, AV_LOG_ERROR, \"bit depth mismatches %d %d\\n\", ff_dnxhd_cid_table[index].bit_depth, ctx->bit_depth); return AVERROR_INVALIDDATA; } ctx->cid_table = &ff_dnxhd_cid_table[index]; av_log(ctx->avctx, AV_LOG_VERBOSE, \"Profile cid %d.\\n\", cid); ff_free_vlc(&ctx->ac_vlc); ff_free_vlc(&ctx->dc_vlc); ff_free_vlc(&ctx->run_vlc); init_vlc(&ctx->ac_vlc, DNXHD_VLC_BITS, 257, ctx->cid_table->ac_bits, 1, 1, ctx->cid_table->ac_codes, 2, 2, 0); init_vlc(&ctx->dc_vlc, DNXHD_DC_VLC_BITS, ctx->bit_depth + 4, ctx->cid_table->dc_bits, 1, 1, ctx->cid_table->dc_codes, 1, 1, 0); init_vlc(&ctx->run_vlc, DNXHD_VLC_BITS, 62, ctx->cid_table->run_bits, 1, 1, ctx->cid_table->run_codes, 2, 2, 0); ctx->cid = cid; } return 0; }"} {"target": 0, "idx": 22093, "func": "static void check_default_settings(AVCodecContext *avctx) { X264Context *x4 = avctx->priv_data; int score = 0; score += x4->params.analyse.i_me_range == 0; score += x4->params.rc.i_qp_step == 3; score += x4->params.i_keyint_max == 12; score += x4->params.rc.i_qp_min == 2; score += x4->params.rc.i_qp_max == 31; score += x4->params.rc.f_qcompress == 0.5; score += fabs(x4->params.rc.f_ip_factor - 1.25) < 0.01; score += fabs(x4->params.rc.f_pb_factor - 1.25) < 0.01; score += x4->params.analyse.inter == 0 && x4->params.analyse.i_subpel_refine == 8; if (score >= 5) { av_log(avctx, AV_LOG_ERROR, \"Default settings detected, using medium profile\\n\"); x4->preset = av_strdup(\"medium\"); if (avctx->bit_rate == 200*1000) avctx->crf = 23; } }"} {"target": 0, "idx": 22094, "func": "int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset) { struct kvm_signal_mask *sigmask; int r; if (!sigset) return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL); sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset)); sigmask->len = 8; memcpy(sigmask->sigset, sigset, sizeof(*sigset)); r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask); free(sigmask); return r; }"} {"target": 1, "idx": 22132, "func": "static int unix_close(void *opaque) { QEMUFileSocket *s = opaque; close(s->fd); g_free(s); return 0; }"} {"target": 0, "idx": 22137, "func": "static int mpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; Mpeg1Context *s = avctx->priv_data; AVFrame *picture = data; MpegEncContext *s2 = &s->mpeg_enc_ctx; av_dlog(avctx, \"fill_buffer\\n\"); if (buf_size == 0 || (buf_size == 4 && 
AV_RB32(buf) == SEQ_END_CODE)) { /* special case for last picture */ if (s2->low_delay == 0 && s2->next_picture_ptr) { *picture = s2->next_picture_ptr->f; s2->next_picture_ptr = NULL; *data_size = sizeof(AVFrame); } return buf_size; } if (s2->flags & CODEC_FLAG_TRUNCATED) { int next = ff_mpeg1_find_frame_end(&s2->parse_context, buf, buf_size, NULL); if (ff_combine_frame(&s2->parse_context, next, (const uint8_t **)&buf, &buf_size) < 0) return buf_size; } s2->codec_tag = avpriv_toupper4(avctx->codec_tag); if (s->mpeg_enc_ctx_allocated == 0 && ( s2->codec_tag == AV_RL32(\"VCR2\") || s2->codec_tag == AV_RL32(\"BW10\") )) vcr2_init_sequence(avctx); s->slice_count = 0; if (avctx->extradata && !avctx->frame_number) { int ret = decode_chunks(avctx, picture, data_size, avctx->extradata, avctx->extradata_size); if(*data_size) { av_log(avctx, AV_LOG_ERROR, \"picture in extradata\\n\"); *data_size = 0; } if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) return ret; } return decode_chunks(avctx, picture, data_size, buf, buf_size); }"} {"target": 0, "idx": 22140, "func": "static void fd_chr_read(void *opaque) { CharDriverState *chr = opaque; FDCharDriver *s = chr->opaque; int size, len; uint8_t buf[1024]; len = sizeof(buf); if (len > s->max_size) len = s->max_size; if (len == 0) return; size = read(s->fd_in, buf, len); if (size == 0) { /* FD has been closed. Remove it from the active list. */ qemu_set_fd_handler2(s->fd_in, NULL, NULL, NULL, NULL); qemu_chr_event(chr, CHR_EVENT_CLOSED); return; } if (size > 0) { qemu_chr_read(chr, buf, size); } }"} {"target": 0, "idx": 22151, "func": "static void smbios_build_type_1_fields(QemuOpts *opts) { const char *val; val = qemu_opt_get(opts, \"manufacturer\"); if (val) { smbios_add_field(1, offsetof(struct smbios_type_1, manufacturer_str), val, strlen(val) + 1); } val = qemu_opt_get(opts, \"product\"); if (val) { smbios_add_field(1, offsetof(struct smbios_type_1, product_name_str), val, strlen(val) + 1); } val = qemu_opt_get(opts, \"version\"); if (val) { smbios_add_field(1, offsetof(struct smbios_type_1, version_str), val, strlen(val) + 1); } val = qemu_opt_get(opts, \"serial\"); if (val) { smbios_add_field(1, offsetof(struct smbios_type_1, serial_number_str), val, strlen(val) + 1); } val = qemu_opt_get(opts, \"uuid\"); if (val) { if (qemu_uuid_parse(val, qemu_uuid) != 0) { error_report(\"Invalid UUID\"); exit(1); } } val = qemu_opt_get(opts, \"sku\"); if (val) { smbios_add_field(1, offsetof(struct smbios_type_1, sku_number_str), val, strlen(val) + 1); } val = qemu_opt_get(opts, \"family\"); if (val) { smbios_add_field(1, offsetof(struct smbios_type_1, family_str), val, strlen(val) + 1); } }"} {"target": 0, "idx": 22155, "func": "static void vmsvga_fifo_run(struct vmsvga_state_s *s) { uint32_t cmd, colour; int args, len; int x, y, dx, dy, width, height; struct vmsvga_cursor_definition_s cursor; uint32_t cmd_start; len = vmsvga_fifo_length(s); while (len > 0) { /* May need to go back to the start of the command if incomplete */ cmd_start = s->cmd->stop; switch (cmd = vmsvga_fifo_read(s)) { case SVGA_CMD_UPDATE: case SVGA_CMD_UPDATE_VERBOSE: len -= 5; if (len < 0) { goto rewind; } x = vmsvga_fifo_read(s); y = vmsvga_fifo_read(s); width = vmsvga_fifo_read(s); height = vmsvga_fifo_read(s); vmsvga_update_rect_delayed(s, x, y, width, height); break; case SVGA_CMD_RECT_FILL: len -= 6; if (len < 0) { goto rewind; } colour = vmsvga_fifo_read(s); x = vmsvga_fifo_read(s); y = vmsvga_fifo_read(s); width = vmsvga_fifo_read(s); height = vmsvga_fifo_read(s); #ifdef 
HW_FILL_ACCEL if (vmsvga_fill_rect(s, colour, x, y, width, height) == 0) { break; } #endif args = 0; goto badcmd; case SVGA_CMD_RECT_COPY: len -= 7; if (len < 0) { goto rewind; } x = vmsvga_fifo_read(s); y = vmsvga_fifo_read(s); dx = vmsvga_fifo_read(s); dy = vmsvga_fifo_read(s); width = vmsvga_fifo_read(s); height = vmsvga_fifo_read(s); #ifdef HW_RECT_ACCEL if (vmsvga_copy_rect(s, x, y, dx, dy, width, height) == 0) { break; } #endif args = 0; goto badcmd; case SVGA_CMD_DEFINE_CURSOR: len -= 8; if (len < 0) { goto rewind; } cursor.id = vmsvga_fifo_read(s); cursor.hot_x = vmsvga_fifo_read(s); cursor.hot_y = vmsvga_fifo_read(s); cursor.width = x = vmsvga_fifo_read(s); cursor.height = y = vmsvga_fifo_read(s); vmsvga_fifo_read(s); cursor.bpp = vmsvga_fifo_read(s); args = SVGA_BITMAP_SIZE(x, y) + SVGA_PIXMAP_SIZE(x, y, cursor.bpp); if (SVGA_BITMAP_SIZE(x, y) > sizeof cursor.mask || SVGA_PIXMAP_SIZE(x, y, cursor.bpp) > sizeof cursor.image) { goto badcmd; } len -= args; if (len < 0) { goto rewind; } for (args = 0; args < SVGA_BITMAP_SIZE(x, y); args++) { cursor.mask[args] = vmsvga_fifo_read_raw(s); } for (args = 0; args < SVGA_PIXMAP_SIZE(x, y, cursor.bpp); args++) { cursor.image[args] = vmsvga_fifo_read_raw(s); } #ifdef HW_MOUSE_ACCEL vmsvga_cursor_define(s, &cursor); break; #else args = 0; goto badcmd; #endif /* * Other commands that we at least know the number of arguments * for so we can avoid FIFO desync if driver uses them illegally. */ case SVGA_CMD_DEFINE_ALPHA_CURSOR: len -= 6; if (len < 0) { goto rewind; } vmsvga_fifo_read(s); vmsvga_fifo_read(s); vmsvga_fifo_read(s); x = vmsvga_fifo_read(s); y = vmsvga_fifo_read(s); args = x * y; goto badcmd; case SVGA_CMD_RECT_ROP_FILL: args = 6; goto badcmd; case SVGA_CMD_RECT_ROP_COPY: args = 7; goto badcmd; case SVGA_CMD_DRAW_GLYPH_CLIPPED: len -= 4; if (len < 0) { goto rewind; } vmsvga_fifo_read(s); vmsvga_fifo_read(s); args = 7 + (vmsvga_fifo_read(s) >> 2); goto badcmd; case SVGA_CMD_SURFACE_ALPHA_BLEND: args = 12; goto badcmd; /* * Other commands that are not listed as depending on any * CAPABILITIES bits, but are not described in the README either. */ case SVGA_CMD_SURFACE_FILL: case SVGA_CMD_SURFACE_COPY: case SVGA_CMD_FRONT_ROP_FILL: case SVGA_CMD_FENCE: case SVGA_CMD_INVALID_CMD: break; /* Nop */ default: args = 0; badcmd: len -= args; if (len < 0) { goto rewind; } while (args--) { vmsvga_fifo_read(s); } printf(\"%s: Unknown command 0x%02x in SVGA command FIFO\\n\", __func__, cmd); break; rewind: s->cmd->stop = cmd_start; break; } } s->syncing = 0; }"} {"target": 1, "idx": 22167, "func": "static int hdev_get_max_segments(const struct stat *st) { #ifdef CONFIG_LINUX char buf[32]; const char *end; char *sysfspath; int ret; int fd = -1; long max_segments; sysfspath = g_strdup_printf(\"/sys/dev/block/%u:%u/queue/max_segments\", major(st->st_rdev), minor(st->st_rdev)); fd = open(sysfspath, O_RDONLY); if (fd == -1) { ret = -errno; goto out; } do { ret = read(fd, buf, sizeof(buf)); } while (ret == -1 && errno == EINTR); if (ret < 0) { ret = -errno; goto out; } else if (ret == 0) { ret = -EIO; goto out; } buf[ret] = 0; /* The file is ended with '\\n', pass 'end' to accept that. 
*/ ret = qemu_strtol(buf, &end, 10, &max_segments); if (ret == 0 && end && *end == '\\n') { ret = max_segments; } out: g_free(sysfspath); return ret; #else return -ENOTSUP; #endif }"} {"target": 0, "idx": 22201, "func": "static void apic_update_irq(APICCommonState *s) { if (!(s->spurious_vec & APIC_SV_ENABLE)) { return; } if (apic_irq_pending(s) > 0) { cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD); } else if (apic_accept_pic_intr(&s->busdev.qdev) && pic_get_output(isa_pic)) { apic_deliver_pic_intr(&s->busdev.qdev, 1); } }"} {"target": 1, "idx": 22250, "func": "PPC_OP(extsh) { T0 = (int32_t)((int16_t)(Ts0)); RETURN(); }"} {"target": 1, "idx": 22265, "func": "static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { NFSClient *client = bs->opaque; int64_t ret; client->aio_context = bdrv_get_aio_context(bs); ret = nfs_client_open(client, options, (flags & BDRV_O_RDWR) ? O_RDWR : O_RDONLY, bs->open_flags, errp); if (ret < 0) { return ret; } qemu_mutex_init(&client->mutex); bs->total_sectors = ret; ret = 0; return ret; }"} {"target": 0, "idx": 22270, "func": "static uint32_t pmac_ide_readb (void *opaque,target_phys_addr_t addr) { uint8_t retval; MACIOIDEState *d = opaque; addr = (addr & 0xFFF) >> 4; switch (addr) { case 1 ... 7: retval = ide_ioport_read(&d->bus, addr); break; case 8: case 22: retval = ide_status_read(&d->bus, 0); break; default: retval = 0xFF; break; } return retval; }"} {"target": 0, "idx": 22276, "func": "target_phys_addr_t booke206_tlb_to_page_size(CPUState *env, ppcmas_tlb_t *tlb) { uint32_t tlbncfg; int tlbn = booke206_tlbm_to_tlbn(env, tlb); int tlbm_size; tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn]; if (tlbncfg & TLBnCFG_AVAIL) { tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; } else { tlbm_size = (tlbncfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; tlbm_size <<= 1; } return 1024ULL << tlbm_size; }"} {"target": 0, "idx": 22279, "func": "static int xhci_setup_packet(XHCITransfer *xfer, XHCIPort *port, int ep) { usb_packet_setup(&xfer->packet, xfer->in_xfer ? USB_TOKEN_IN : USB_TOKEN_OUT, xfer->xhci->slots[xfer->slotid-1].devaddr, ep & 0x7f); usb_packet_addbuf(&xfer->packet, xfer->data, xfer->data_length); DPRINTF(\"xhci: setup packet pid 0x%x addr %d ep %d\\n\", xfer->packet.pid, xfer->packet.devaddr, xfer->packet.devep); return 0; }"} {"target": 0, "idx": 22280, "func": "static void thread_pool_cancel(BlockAIOCB *acb) { ThreadPoolElement *elem = (ThreadPoolElement *)acb; ThreadPool *pool = elem->pool; trace_thread_pool_cancel(elem, elem->common.opaque); qemu_mutex_lock(&pool->lock); if (elem->state == THREAD_QUEUED && /* No thread has yet started working on elem. we can try to \"steal\" * the item from the worker if we can get a signal from the * semaphore. Because this is non-blocking, we can do it with * the lock taken and ensure that elem will remain THREAD_QUEUED. 
*/ qemu_sem_timedwait(&pool->sem, 0) == 0) { QTAILQ_REMOVE(&pool->request_list, elem, reqs); qemu_bh_schedule(pool->completion_bh); elem->state = THREAD_DONE; elem->ret = -ECANCELED; } qemu_mutex_unlock(&pool->lock); }"} {"target": 1, "idx": 22300, "func": "static void monitor_protocol_event_init(void) { qemu_mutex_init(&monitor_event_state_lock); /* Limit RTC & BALLOON events to 1 per second */ monitor_protocol_event_throttle(QEVENT_RTC_CHANGE, 1000); monitor_protocol_event_throttle(QEVENT_BALLOON_CHANGE, 1000); monitor_protocol_event_throttle(QEVENT_WATCHDOG, 1000); }"} {"target": 1, "idx": 22316, "func": "static int seek_test(const char *input_filename, const char *start, const char *end) { AVCodec *codec = NULL; AVCodecContext *ctx= NULL; AVCodecParameters *origin_par = NULL; AVFrame *fr = NULL; AVFormatContext *fmt_ctx = NULL; int video_stream; int result; int i, j; long int start_ts, end_ts; size_of_array = 0; number_of_elements = 0; crc_array = pts_array = NULL; result = avformat_open_input(&fmt_ctx, input_filename, NULL, NULL); if (result < 0) { av_log(NULL, AV_LOG_ERROR, \"Can't open file\\n\"); return result; } result = avformat_find_stream_info(fmt_ctx, NULL); if (result < 0) { av_log(NULL, AV_LOG_ERROR, \"Can't get stream info\\n\"); return result; } start_ts = read_seek_range(start); end_ts = read_seek_range(end); if ((start_ts < 0) || (end_ts < 0)) return -1; //TODO: add ability to work with audio format video_stream = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0); if (video_stream < 0) { av_log(NULL, AV_LOG_ERROR, \"Can't find video stream in input file\\n\"); return -1; } origin_par = fmt_ctx->streams[video_stream]->codecpar; codec = avcodec_find_decoder(origin_par->codec_id); if (!codec) { av_log(NULL, AV_LOG_ERROR, \"Can't find decoder\\n\"); return -1; } ctx = avcodec_alloc_context3(codec); if (!ctx) { av_log(NULL, AV_LOG_ERROR, \"Can't allocate decoder context\\n\"); return AVERROR(ENOMEM); } result = avcodec_parameters_to_context(ctx, origin_par); if (result) { av_log(NULL, AV_LOG_ERROR, \"Can't copy decoder context\\n\"); return result; } result = avcodec_open2(ctx, codec, NULL); if (result < 0) { av_log(ctx, AV_LOG_ERROR, \"Can't open decoder\\n\"); return result; } fr = av_frame_alloc(); if (!fr) { av_log(NULL, AV_LOG_ERROR, \"Can't allocate frame\\n\"); return AVERROR(ENOMEM); } result = compute_crc_of_packets(fmt_ctx, video_stream, ctx, fr, i, j, 1); if (result != 0) return -1; for (i = start_ts; i < end_ts; i += 100) { for (j = i + 100; j < end_ts; j += 100) result = compute_crc_of_packets(fmt_ctx, video_stream, ctx, fr, i, j, 0); if (result != 0) return -1; } av_freep(&crc_array); av_freep(&pts_array); av_frame_free(&fr); avcodec_close(ctx); avformat_close_input(&fmt_ctx); avcodec_free_context(&ctx); return 0; }"} {"target": 0, "idx": 22330, "func": "AUXReply aux_request(AUXBus *bus, AUXCommand cmd, uint32_t address, uint8_t len, uint8_t *data) { AUXReply ret = AUX_NACK; I2CBus *i2c_bus = aux_get_i2c_bus(bus); size_t i; bool is_write = false; DPRINTF(\"request at address 0x%\" PRIX32 \", command %u, len %u\\n\", address, cmd, len); switch (cmd) { /* * Forward the request on the AUX bus.. */ case WRITE_AUX: case READ_AUX: is_write = cmd == READ_AUX ? false : true; for (i = 0; i < len; i++) { if (!address_space_rw(&bus->aux_addr_space, address++, MEMTXATTRS_UNSPECIFIED, data++, 1, is_write)) { ret = AUX_I2C_ACK; } else { ret = AUX_NACK; break; } } break; /* * Classic I2C transactions.. 
*/ case READ_I2C: case WRITE_I2C: is_write = cmd == READ_I2C ? false : true; if (i2c_bus_busy(i2c_bus)) { i2c_end_transfer(i2c_bus); } if (i2c_start_transfer(i2c_bus, address, is_write)) { ret = AUX_I2C_NACK; break; } ret = AUX_I2C_ACK; while (len > 0) { if (i2c_send_recv(i2c_bus, data++, is_write) < 0) { ret = AUX_I2C_NACK; break; } len--; } i2c_end_transfer(i2c_bus); break; /* * I2C MOT transactions. * * Here we send a start when: * - We didn't start transaction yet. * - We had a READ and we do a WRITE. * - We changed the address. */ case WRITE_I2C_MOT: case READ_I2C_MOT: is_write = cmd == READ_I2C_MOT ? false : true; if (!i2c_bus_busy(i2c_bus)) { /* * No transactions started.. */ if (i2c_start_transfer(i2c_bus, address, is_write)) { ret = AUX_I2C_NACK; break; } } else if ((address != bus->last_i2c_address) || (bus->last_transaction != cmd)) { /* * Transaction started but we need to restart.. */ i2c_end_transfer(i2c_bus); if (i2c_start_transfer(i2c_bus, address, is_write)) { ret = AUX_I2C_NACK; break; } } while (len > 0) { if (i2c_send_recv(i2c_bus, data++, is_write) < 0) { ret = AUX_I2C_NACK; i2c_end_transfer(i2c_bus); break; } len--; } bus->last_transaction = cmd; bus->last_i2c_address = address; ret = AUX_I2C_ACK; break; default: DPRINTF(\"Not implemented!\\n\"); return AUX_NACK; } DPRINTF(\"reply: %u\\n\", ret); return ret; }"} {"target": 1, "idx": 22361, "func": "static inline void RENAME(yuv2yuvX_ar)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW) { if (uDest) { YSCALEYUV2YV12X_ACCURATE(CHR_MMX_FILTER_OFFSET, uDest, chrDstW, 0) YSCALEYUV2YV12X_ACCURATE(CHR_MMX_FILTER_OFFSET, vDest, chrDstW + c->uv_off, c->uv_off) } if (CONFIG_SWSCALE_ALPHA && aDest) { YSCALEYUV2YV12X_ACCURATE(ALP_MMX_FILTER_OFFSET, aDest, dstW, 0) } YSCALEYUV2YV12X_ACCURATE(LUM_MMX_FILTER_OFFSET, dest, dstW, 0) }"} {"target": 1, "idx": 22364, "func": "MigrationInfo *qmp_query_migrate(Error **errp) { MigrationInfo *info = g_malloc0(sizeof(*info)); MigrationState *s = migrate_get_current(); switch (s->state) { case MIG_STATE_NONE: /* no migration has happened ever */ break; case MIG_STATE_SETUP: info->has_status = true; info->status = g_strdup(\"setup\"); info->has_total_time = false; break; case MIG_STATE_ACTIVE: case MIG_STATE_CANCELLING: info->has_status = true; info->status = g_strdup(\"active\"); info->has_total_time = true; info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->total_time; info->has_expected_downtime = true; info->expected_downtime = s->expected_downtime; info->has_setup_time = true; info->setup_time = s->setup_time; info->has_ram = true; info->ram = g_malloc0(sizeof(*info->ram)); info->ram->transferred = ram_bytes_transferred(); info->ram->remaining = ram_bytes_remaining(); info->ram->total = ram_bytes_total(); info->ram->duplicate = dup_mig_pages_transferred(); info->ram->skipped = skipped_mig_pages_transferred(); info->ram->normal = norm_mig_pages_transferred(); info->ram->normal_bytes = norm_mig_bytes_transferred(); info->ram->dirty_pages_rate = s->dirty_pages_rate; info->ram->mbps = s->mbps; info->ram->dirty_sync_count = s->dirty_sync_count; if (blk_mig_active()) { info->has_disk = true; info->disk = g_malloc0(sizeof(*info->disk)); info->disk->transferred = blk_mig_bytes_transferred(); info->disk->remaining = blk_mig_bytes_remaining(); 
info->disk->total = blk_mig_bytes_total(); } get_xbzrle_cache_stats(info); break; case MIG_STATE_COMPLETED: get_xbzrle_cache_stats(info); info->has_status = true; info->status = g_strdup(\"completed\"); info->has_total_time = true; info->total_time = s->total_time; info->has_downtime = true; info->downtime = s->downtime; info->has_setup_time = true; info->setup_time = s->setup_time; info->has_ram = true; info->ram = g_malloc0(sizeof(*info->ram)); info->ram->transferred = ram_bytes_transferred(); info->ram->remaining = 0; info->ram->total = ram_bytes_total(); info->ram->duplicate = dup_mig_pages_transferred(); info->ram->skipped = skipped_mig_pages_transferred(); info->ram->normal = norm_mig_pages_transferred(); info->ram->normal_bytes = norm_mig_bytes_transferred(); info->ram->mbps = s->mbps; info->ram->dirty_sync_count = s->dirty_sync_count; break; case MIG_STATE_ERROR: info->has_status = true; info->status = g_strdup(\"failed\"); break; case MIG_STATE_CANCELLED: info->has_status = true; info->status = g_strdup(\"cancelled\"); break; } return info; }"} {"target": 1, "idx": 22365, "func": "static inline int cow_set_bit(BlockDriverState *bs, int64_t bitnum) { uint64_t offset = sizeof(struct cow_header_v2) + bitnum / 8; uint8_t bitmap; if (bdrv_pread(bs->file, offset, &bitmap, sizeof(bitmap)) != sizeof(bitmap)) { return -errno; } bitmap |= (1 << (bitnum % 8)); if (bdrv_pwrite(bs->file, offset, &bitmap, sizeof(bitmap)) != sizeof(bitmap)) { return -errno; } return 0; }"} {"target": 1, "idx": 22369, "func": "PPC_OP(cmpl) { if (T0 < T1) { T0 = 0x08; } else if (T0 > T1) { T0 = 0x04; } else { T0 = 0x02; } RETURN(); }"} {"target": 0, "idx": 22379, "func": "static int voc_probe(AVProbeData *p) { int version, check; if (p->buf_size < 26) return 0; if (memcmp(p->buf, voc_magic, sizeof(voc_magic) - 1)) return 0; version = p->buf[22] | (p->buf[23] << 8); check = p->buf[24] | (p->buf[25] << 8); if (~version + 0x1234 != check) return 10; return AVPROBE_SCORE_MAX; }"} {"target": 1, "idx": 22381, "func": "int ram_load(QEMUFile *f, void *opaque, int version_id) { ram_addr_t addr; int flags; if (version_id < 3 || version_id > 4) { return -EINVAL; } do { addr = qemu_get_be64(f); flags = addr & ~TARGET_PAGE_MASK; addr &= TARGET_PAGE_MASK; if (flags & RAM_SAVE_FLAG_MEM_SIZE) { if (version_id == 3) { if (addr != ram_bytes_total()) { return -EINVAL; } } else { /* Synchronize RAM block list */ char id[256]; ram_addr_t length; ram_addr_t total_ram_bytes = addr; while (total_ram_bytes) { RAMBlock *block; uint8_t len; len = qemu_get_byte(f); qemu_get_buffer(f, (uint8_t *)id, len); id[len] = 0; length = qemu_get_be64(f); QLIST_FOREACH(block, &ram_list.blocks, next) { if (!strncmp(id, block->idstr, sizeof(id))) { if (block->length != length) return -EINVAL; break; } } if (!block) { fprintf(stderr, \"Unknown ramblock \\\"%s\\\", cannot \" \"accept migration\\n\", id); return -EINVAL; } total_ram_bytes -= length; } } } if (flags & RAM_SAVE_FLAG_COMPRESS) { void *host; uint8_t ch; if (version_id == 3) host = qemu_get_ram_ptr(addr); else host = host_from_stream_offset(f, addr, flags); ch = qemu_get_byte(f); memset(host, ch, TARGET_PAGE_SIZE); #ifndef _WIN32 if (ch == 0 && (!kvm_enabled() || kvm_has_sync_mmu())) { madvise(host, TARGET_PAGE_SIZE, MADV_DONTNEED); } #endif } else if (flags & RAM_SAVE_FLAG_PAGE) { void *host; if (version_id == 3) host = qemu_get_ram_ptr(addr); else host = host_from_stream_offset(f, addr, flags); qemu_get_buffer(f, host, TARGET_PAGE_SIZE); } if (qemu_file_has_error(f)) { return -EIO; } } while 
(!(flags & RAM_SAVE_FLAG_EOS)); return 0; }"} {"target": 1, "idx": 22387, "func": "static void drive_backup_prepare(BlkActionState *common, Error **errp) { DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); BlockDriverState *bs; DriveBackup *backup; Error *local_err = NULL; assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP); backup = common->action->u.drive_backup.data; bs = qmp_get_root_bs(backup->device, errp); if (!bs) { return; } /* AioContext is released in .clean() */ state->aio_context = bdrv_get_aio_context(bs); aio_context_acquire(state->aio_context); bdrv_drained_begin(bs); state->bs = bs; do_drive_backup(backup, common->block_job_txn, &local_err); if (local_err) { error_propagate(errp, local_err); return; } state->job = state->bs->job; }"} {"target": 1, "idx": 22389, "func": "static inline void RET_STOP (DisasContext *ctx) { gen_op_update_nip((ctx)->nip); ctx->exception = EXCP_MTMSR; }"} {"target": 0, "idx": 22392, "func": "static void term_exit(void) { #ifndef __MINGW32__ tcsetattr (0, TCSANOW, &oldtty); #endif }"} {"target": 0, "idx": 22399, "func": "static int get_segment64(CPUPPCState *env, struct mmu_ctx_hash64 *ctx, target_ulong eaddr, int rw, int type) { hwaddr hash; target_ulong vsid; int pr, target_page_bits; int ret, ret2; pr = msr_pr; ctx->eaddr = eaddr; ppc_slb_t *slb; target_ulong pageaddr; int segment_bits; LOG_MMU(\"Check SLBs\\n\"); slb = slb_lookup(env, eaddr); if (!slb) { return -5; } if (slb->vsid & SLB_VSID_B) { vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T; segment_bits = 40; } else { vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; segment_bits = 28; } target_page_bits = (slb->vsid & SLB_VSID_L) ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS; ctx->key = !!(pr ? (slb->vsid & SLB_VSID_KP) : (slb->vsid & SLB_VSID_KS)); ctx->nx = !!(slb->vsid & SLB_VSID_N); pageaddr = eaddr & ((1ULL << segment_bits) - (1ULL << target_page_bits)); if (slb->vsid & SLB_VSID_B) { hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits); } else { hash = vsid ^ (pageaddr >> target_page_bits); } /* Only 5 bits of the page index are used in the AVPN */ ctx->ptem = (slb->vsid & SLB_VSID_PTEM) | ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80)); LOG_MMU(\"pte segment: key=%d nx %d vsid \" TARGET_FMT_lx \"\\n\", ctx->key, ctx->nx, vsid); ret = -1; /* Check if instruction fetch is allowed, if needed */ if (type != ACCESS_CODE || ctx->nx == 0) { /* Page address translation */ LOG_MMU(\"htab_base \" TARGET_FMT_plx \" htab_mask \" TARGET_FMT_plx \" hash \" TARGET_FMT_plx \"\\n\", env->htab_base, env->htab_mask, hash); ctx->hash[0] = hash; ctx->hash[1] = ~hash; /* Initialize real address with an invalid value */ ctx->raddr = (hwaddr)-1ULL; LOG_MMU(\"0 htab=\" TARGET_FMT_plx \"/\" TARGET_FMT_plx \" vsid=\" TARGET_FMT_lx \" ptem=\" TARGET_FMT_lx \" hash=\" TARGET_FMT_plx \"\\n\", env->htab_base, env->htab_mask, vsid, ctx->ptem, ctx->hash[0]); /* Primary table lookup */ ret = find_pte64(env, ctx, 0, rw, type, target_page_bits); if (ret < 0) { /* Secondary table lookup */ LOG_MMU(\"1 htab=\" TARGET_FMT_plx \"/\" TARGET_FMT_plx \" vsid=\" TARGET_FMT_lx \" api=\" TARGET_FMT_lx \" hash=\" TARGET_FMT_plx \"\\n\", env->htab_base, env->htab_mask, vsid, ctx->ptem, ctx->hash[1]); ret2 = find_pte64(env, ctx, 1, rw, type, target_page_bits); if (ret2 != -1) { ret = ret2; } } } else { LOG_MMU(\"No access allowed\\n\"); ret = -3; } return ret; }"} {"target": 0, "idx": 22413, "func": "static int v9fs_receivefd(int sockfd, int *status) { struct iovec iov; 
struct msghdr msg; struct cmsghdr *cmsg; int retval, data, fd; union MsgControl msg_control; iov.iov_base = &data; iov.iov_len = sizeof(data); memset(&msg, 0, sizeof(msg)); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = &msg_control; msg.msg_controllen = sizeof(msg_control); do { retval = recvmsg(sockfd, &msg, 0); } while (retval < 0 && errno == EINTR); if (retval <= 0) { return retval; } /* * data is set to V9FS_FD_VALID, if ancillary data is sent. If this * request doesn't need ancillary data (fd) or an error occurred, * data is set to negative errno value. */ if (data != V9FS_FD_VALID) { *status = data; return 0; } /* * File descriptor (fd) is sent in the ancillary data. Check if we * indeed received it. One of the reasons to fail to receive it is if * we exceeded the maximum number of file descriptors! */ for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) { if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)) || cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { continue; } fd = *((int *)CMSG_DATA(cmsg)); *status = fd; return 0; } *status = -ENFILE; /* Ancillary data sent but not received */ return 0; }"} {"target": 0, "idx": 22417, "func": "static void kvm_inject_x86_mce_on(CPUState *env, struct kvm_x86_mce *mce, int flag) { struct kvm_x86_mce_data data = { .env = env, .mce = mce, .abort_on_error = (flag & ABORT_ON_ERROR), }; if (!env->mcg_cap) { fprintf(stderr, \"MCE support is not enabled!\\n\"); return; } run_on_cpu(env, kvm_do_inject_x86_mce, &data); }"} {"target": 0, "idx": 22418, "func": "static AioHandler *find_aio_handler(AioContext *ctx, int fd) { AioHandler *node; QLIST_FOREACH(node, &ctx->aio_handlers, node) { if (node->pfd.fd == fd) if (!node->deleted) return node; } return NULL; }"} {"target": 0, "idx": 22421, "func": "static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) { #define HAS_OPTION_BITS(opt) do { \\ if (!option_bits_enabled(dc, opt)) { \\ qemu_log(\"Option is not enabled %s:%d\\n\", \\ __FILE__, __LINE__); \\ goto invalid_opcode; \\ } \\ } while (0) #define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt)) #define TBD() qemu_log(\"TBD(pc = %08x): %s:%d\\n\", dc->pc, __FILE__, __LINE__) #define RESERVED() do { \\ qemu_log(\"RESERVED(pc = %08x, %02x%02x%02x): %s:%d\\n\", \\ dc->pc, b0, b1, b2, __FILE__, __LINE__); \\ goto invalid_opcode; \\ } while (0) #ifdef TARGET_WORDS_BIGENDIAN #define OP0 (((b0) & 0xf0) >> 4) #define OP1 (((b2) & 0xf0) >> 4) #define OP2 ((b2) & 0xf) #define RRR_R ((b1) & 0xf) #define RRR_S (((b1) & 0xf0) >> 4) #define RRR_T ((b0) & 0xf) #else #define OP0 (((b0) & 0xf)) #define OP1 (((b2) & 0xf)) #define OP2 (((b2) & 0xf0) >> 4) #define RRR_R (((b1) & 0xf0) >> 4) #define RRR_S (((b1) & 0xf)) #define RRR_T (((b0) & 0xf0) >> 4) #endif #define RRR_X ((RRR_R & 0x4) >> 2) #define RRR_Y ((RRR_T & 0x4) >> 2) #define RRR_W (RRR_R & 0x3) #define RRRN_R RRR_R #define RRRN_S RRR_S #define RRRN_T RRR_T #define RRI4_R RRR_R #define RRI4_S RRR_S #define RRI4_T RRR_T #ifdef TARGET_WORDS_BIGENDIAN #define RRI4_IMM4 ((b2) & 0xf) #else #define RRI4_IMM4 (((b2) & 0xf0) >> 4) #endif #define RRI8_R RRR_R #define RRI8_S RRR_S #define RRI8_T RRR_T #define RRI8_IMM8 (b2) #define RRI8_IMM8_SE ((((b2) & 0x80) ? 
0xffffff00 : 0) | RRI8_IMM8) #ifdef TARGET_WORDS_BIGENDIAN #define RI16_IMM16 (((b1) << 8) | (b2)) #else #define RI16_IMM16 (((b2) << 8) | (b1)) #endif #ifdef TARGET_WORDS_BIGENDIAN #define CALL_N (((b0) & 0xc) >> 2) #define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2)) #else #define CALL_N (((b0) & 0x30) >> 4) #define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10)) #endif #define CALL_OFFSET_SE \\ (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET) #define CALLX_N CALL_N #ifdef TARGET_WORDS_BIGENDIAN #define CALLX_M ((b0) & 0x3) #else #define CALLX_M (((b0) & 0xc0) >> 6) #endif #define CALLX_S RRR_S #define BRI12_M CALLX_M #define BRI12_S RRR_S #ifdef TARGET_WORDS_BIGENDIAN #define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2)) #else #define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4)) #endif #define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12) #define BRI8_M BRI12_M #define BRI8_R RRI8_R #define BRI8_S RRI8_S #define BRI8_IMM8 RRI8_IMM8 #define BRI8_IMM8_SE RRI8_IMM8_SE #define RSR_SR (b1) uint8_t b0 = cpu_ldub_code(env, dc->pc); uint8_t b1 = cpu_ldub_code(env, dc->pc + 1); uint8_t b2 = 0; unsigned len = xtensa_op0_insn_len(OP0); static const uint32_t B4CONST[] = { 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 }; static const uint32_t B4CONSTU[] = { 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 }; switch (len) { case 2: HAS_OPTION(XTENSA_OPTION_CODE_DENSITY); break; case 3: b2 = cpu_ldub_code(env, dc->pc + 2); break; default: RESERVED(); } dc->next_pc = dc->pc + len; switch (OP0) { case 0: /*QRST*/ switch (OP1) { case 0: /*RST0*/ switch (OP2) { case 0: /*ST0*/ if ((RRR_R & 0xc) == 0x8) { HAS_OPTION(XTENSA_OPTION_BOOLEAN); } switch (RRR_R) { case 0: /*SNM0*/ switch (CALLX_M) { case 0: /*ILL*/ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); break; case 1: /*reserved*/ RESERVED(); break; case 2: /*JR*/ switch (CALLX_N) { case 0: /*RET*/ case 2: /*JX*/ if (gen_window_check1(dc, CALLX_S)) { gen_jump(dc, cpu_R[CALLX_S]); } break; case 1: /*RETWw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_const_i32(dc->pc); gen_advance_ccount(dc); gen_helper_retw(tmp, cpu_env, tmp); gen_jump(dc, tmp); tcg_temp_free(tmp); } break; case 3: /*reserved*/ RESERVED(); break; } break; case 3: /*CALLX*/ if (!gen_window_check2(dc, CALLX_S, CALLX_N << 2)) { break; } switch (CALLX_N) { case 0: /*CALLX0*/ { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]); tcg_gen_movi_i32(cpu_R[0], dc->next_pc); gen_jump(dc, tmp); tcg_temp_free(tmp); } break; case 1: /*CALLX4w*/ case 2: /*CALLX8w*/ case 3: /*CALLX12w*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]); gen_callw(dc, CALLX_N, tmp); tcg_temp_free(tmp); } break; } break; } break; case 1: /*MOVSPw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); if (gen_window_check2(dc, RRR_T, RRR_S)) { TCGv_i32 pc = tcg_const_i32(dc->pc); gen_advance_ccount(dc); gen_helper_movsp(cpu_env, pc); tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]); tcg_temp_free(pc); } break; case 2: /*SYNC*/ switch (RRR_T) { case 0: /*ISYNC*/ break; case 1: /*RSYNC*/ break; case 2: /*ESYNC*/ break; case 3: /*DSYNC*/ break; case 8: /*EXCW*/ HAS_OPTION(XTENSA_OPTION_EXCEPTION); break; case 12: /*MEMW*/ break; case 13: /*EXTW*/ break; case 15: /*NOP*/ break; default: /*reserved*/ RESERVED(); break; } break; case 3: /*RFEIx*/ switch (RRR_T) { case 0: /*RFETx*/ HAS_OPTION(XTENSA_OPTION_EXCEPTION); switch (RRR_S) 
{ case 0: /*RFEx*/ if (gen_check_privilege(dc)) { tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1]); } break; case 1: /*RFUEx*/ RESERVED(); break; case 2: /*RFDEx*/ if (gen_check_privilege(dc)) { gen_jump(dc, cpu_SR[ dc->config->ndepc ? DEPC : EPC1]); } break; case 4: /*RFWOw*/ case 5: /*RFWUw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); if (gen_check_privilege(dc)) { TCGv_i32 tmp = tcg_const_i32(1); tcg_gen_andi_i32( cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]); if (RRR_S == 4) { tcg_gen_andc_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp); } else { tcg_gen_or_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp); } gen_helper_restore_owb(cpu_env); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1]); tcg_temp_free(tmp); } break; default: /*reserved*/ RESERVED(); break; } break; case 1: /*RFIx*/ HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT); if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) { if (gen_check_privilege(dc)) { tcg_gen_mov_i32(cpu_SR[PS], cpu_SR[EPS2 + RRR_S - 2]); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]); } } else { qemu_log(\"RFI %d is illegal\\n\", RRR_S); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); } break; case 2: /*RFME*/ TBD(); break; default: /*reserved*/ RESERVED(); break; } break; case 4: /*BREAKx*/ HAS_OPTION(XTENSA_OPTION_DEBUG); if (dc->debug) { gen_debug_exception(dc, DEBUGCAUSE_BI); } break; case 5: /*SYSCALLx*/ HAS_OPTION(XTENSA_OPTION_EXCEPTION); switch (RRR_S) { case 0: /*SYSCALLx*/ gen_exception_cause(dc, SYSCALL_CAUSE); break; case 1: /*SIMCALL*/ if (semihosting_enabled) { if (gen_check_privilege(dc)) { gen_helper_simcall(cpu_env); } } else { qemu_log(\"SIMCALL but semihosting is disabled\\n\"); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); } break; default: RESERVED(); break; } break; case 6: /*RSILx*/ HAS_OPTION(XTENSA_OPTION_INTERRUPT); if (gen_check_privilege(dc) && gen_window_check1(dc, RRR_T)) { tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]); tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL); tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S); gen_helper_check_interrupts(cpu_env); gen_jumpi_check_loop_end(dc, 0); } break; case 7: /*WAITIx*/ HAS_OPTION(XTENSA_OPTION_INTERRUPT); if (gen_check_privilege(dc)) { gen_waiti(dc, RRR_S); } break; case 8: /*ANY4p*/ case 9: /*ALL4p*/ case 10: /*ANY8p*/ case 11: /*ALL8p*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); { const unsigned shift = (RRR_R & 2) ? 
8 : 4; TCGv_i32 mask = tcg_const_i32( ((1 << shift) - 1) << RRR_S); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, cpu_SR[BR], mask); if (RRR_R & 1) { /*ALL*/ tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S); } else { /*ANY*/ tcg_gen_add_i32(tmp, tmp, mask); } tcg_gen_shri_i32(tmp, tmp, RRR_S + shift); tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp, RRR_T, 1); tcg_temp_free(mask); tcg_temp_free(tmp); } break; default: /*reserved*/ RESERVED(); break; } break; case 1: /*AND*/ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } break; case 2: /*OR*/ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } break; case 3: /*XOR*/ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } break; case 4: /*ST1*/ switch (RRR_R) { case 0: /*SSR*/ if (gen_window_check1(dc, RRR_S)) { gen_right_shift_sar(dc, cpu_R[RRR_S]); } break; case 1: /*SSL*/ if (gen_window_check1(dc, RRR_S)) { gen_left_shift_sar(dc, cpu_R[RRR_S]); } break; case 2: /*SSA8L*/ if (gen_window_check1(dc, RRR_S)) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3); gen_right_shift_sar(dc, tmp); tcg_temp_free(tmp); } break; case 3: /*SSA8B*/ if (gen_window_check1(dc, RRR_S)) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3); gen_left_shift_sar(dc, tmp); tcg_temp_free(tmp); } break; case 4: /*SSAI*/ { TCGv_i32 tmp = tcg_const_i32( RRR_S | ((RRR_T & 1) << 4)); gen_right_shift_sar(dc, tmp); tcg_temp_free(tmp); } break; case 6: /*RER*/ TBD(); break; case 7: /*WER*/ TBD(); break; case 8: /*ROTWw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); if (gen_check_privilege(dc)) { TCGv_i32 tmp = tcg_const_i32( RRR_T | ((RRR_T & 8) ? 
0xfffffff0 : 0)); gen_helper_rotw(cpu_env, tmp); tcg_temp_free(tmp); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); } break; case 14: /*NSAu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA); if (gen_window_check2(dc, RRR_S, RRR_T)) { gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]); } break; case 15: /*NSAUu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA); if (gen_window_check2(dc, RRR_S, RRR_T)) { gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]); } break; default: /*reserved*/ RESERVED(); break; } break; case 5: /*TLB*/ HAS_OPTION_BITS( XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION)); if (gen_check_privilege(dc) && gen_window_check2(dc, RRR_S, RRR_T)) { TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0); switch (RRR_R & 7) { case 3: /*RITLB0*/ /*RDTLB0*/ gen_helper_rtlb0(cpu_R[RRR_T], cpu_env, cpu_R[RRR_S], dtlb); break; case 4: /*IITLB*/ /*IDTLB*/ gen_helper_itlb(cpu_env, cpu_R[RRR_S], dtlb); /* This could change memory mapping, so exit tb */ gen_jumpi_check_loop_end(dc, -1); break; case 5: /*PITLB*/ /*PDTLB*/ tcg_gen_movi_i32(cpu_pc, dc->pc); gen_helper_ptlb(cpu_R[RRR_T], cpu_env, cpu_R[RRR_S], dtlb); break; case 6: /*WITLB*/ /*WDTLB*/ gen_helper_wtlb( cpu_env, cpu_R[RRR_T], cpu_R[RRR_S], dtlb); /* This could change memory mapping, so exit tb */ gen_jumpi_check_loop_end(dc, -1); break; case 7: /*RITLB1*/ /*RDTLB1*/ gen_helper_rtlb1(cpu_R[RRR_T], cpu_env, cpu_R[RRR_S], dtlb); break; default: tcg_temp_free(dtlb); RESERVED(); break; } tcg_temp_free(dtlb); } break; case 6: /*RT0*/ if (!gen_window_check2(dc, RRR_R, RRR_T)) { break; } switch (RRR_S) { case 0: /*NEG*/ tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]); break; case 1: /*ABS*/ { TCGv_i32 zero = tcg_const_i32(0); TCGv_i32 neg = tcg_temp_new_i32(); tcg_gen_neg_i32(neg, cpu_R[RRR_T]); tcg_gen_movcond_i32(TCG_COND_GE, cpu_R[RRR_R], cpu_R[RRR_T], zero, cpu_R[RRR_T], neg); tcg_temp_free(neg); tcg_temp_free(zero); } break; default: /*reserved*/ RESERVED(); break; } break; case 7: /*reserved*/ RESERVED(); break; case 8: /*ADD*/ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } break; case 9: /*ADD**/ case 10: case 11: if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8); tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]); tcg_temp_free(tmp); } break; case 12: /*SUB*/ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } break; case 13: /*SUB**/ case 14: case 15: if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12); tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]); tcg_temp_free(tmp); } break; } break; case 1: /*RST1*/ switch (OP2) { case 0: /*SLLI*/ case 1: if (gen_window_check2(dc, RRR_R, RRR_S)) { tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S], 32 - (RRR_T | ((OP2 & 1) << 4))); } break; case 2: /*SRAI*/ case 3: if (gen_window_check2(dc, RRR_R, RRR_T)) { tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S | ((OP2 & 1) << 4)); } break; case 4: /*SRLI*/ if (gen_window_check2(dc, RRR_R, RRR_T)) { tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S); } break; case 6: /*XSR*/ if (gen_check_sr(dc, RSR_SR, SR_X) && (RSR_SR < 64 || gen_check_privilege(dc)) && gen_window_check1(dc, RRR_T)) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp, cpu_R[RRR_T]); gen_rsr(dc, cpu_R[RRR_T], RSR_SR); 
gen_wsr(dc, RSR_SR, tmp); tcg_temp_free(tmp); } break; /* * Note: 64 bit ops are used here solely because SAR values * have range 0..63 */ #define gen_shift_reg(cmd, reg) do { \\ TCGv_i64 tmp = tcg_temp_new_i64(); \\ tcg_gen_extu_i32_i64(tmp, reg); \\ tcg_gen_##cmd##_i64(v, v, tmp); \\ tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \\ tcg_temp_free_i64(v); \\ tcg_temp_free_i64(tmp); \\ } while (0) #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR]) case 8: /*SRC*/ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { TCGv_i64 v = tcg_temp_new_i64(); tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]); gen_shift(shr); } break; case 9: /*SRL*/ if (!gen_window_check2(dc, RRR_R, RRR_T)) { break; } if (dc->sar_5bit) { tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]); } else { TCGv_i64 v = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]); gen_shift(shr); } break; case 10: /*SLL*/ if (!gen_window_check2(dc, RRR_R, RRR_S)) { break; } if (dc->sar_m32_5bit) { tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32); } else { TCGv_i64 v = tcg_temp_new_i64(); TCGv_i32 s = tcg_const_i32(32); tcg_gen_sub_i32(s, s, cpu_SR[SAR]); tcg_gen_andi_i32(s, s, 0x3f); tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]); gen_shift_reg(shl, s); tcg_temp_free(s); } break; case 11: /*SRA*/ if (!gen_window_check2(dc, RRR_R, RRR_T)) { break; } if (dc->sar_5bit) { tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]); } else { TCGv_i64 v = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]); gen_shift(sar); } break; #undef gen_shift #undef gen_shift_reg case 12: /*MUL16U*/ HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL); if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { TCGv_i32 v1 = tcg_temp_new_i32(); TCGv_i32 v2 = tcg_temp_new_i32(); tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]); tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]); tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2); tcg_temp_free(v2); tcg_temp_free(v1); } break; case 13: /*MUL16S*/ HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL); if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { TCGv_i32 v1 = tcg_temp_new_i32(); TCGv_i32 v2 = tcg_temp_new_i32(); tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]); tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]); tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2); tcg_temp_free(v2); tcg_temp_free(v1); } break; default: /*reserved*/ RESERVED(); break; } break; case 2: /*RST2*/ if (OP2 >= 8 && !gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { break; } if (OP2 >= 12) { HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV); int label = gen_new_label(); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label); gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE); gen_set_label(label); } switch (OP2) { #define BOOLEAN_LOGIC(fn, r, s, t) \\ do { \\ HAS_OPTION(XTENSA_OPTION_BOOLEAN); \\ TCGv_i32 tmp1 = tcg_temp_new_i32(); \\ TCGv_i32 tmp2 = tcg_temp_new_i32(); \\ \\ tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \\ tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \\ tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \\ tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \\ tcg_temp_free(tmp1); \\ tcg_temp_free(tmp2); \\ } while (0) case 0: /*ANDBp*/ BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T); break; case 1: /*ANDBCp*/ BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T); break; case 2: /*ORBp*/ BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T); break; case 3: /*ORBCp*/ BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T); break; case 4: /*XORBp*/ BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T); break; #undef BOOLEAN_LOGIC case 8: /*MULLi*/ HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL); tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 10: /*MULUHi*/ case 11: /*MULSHi*/ 
HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH); { TCGv lo = tcg_temp_new(); if (OP2 == 10) { tcg_gen_mulu2_i32(lo, cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } else { tcg_gen_muls2_i32(lo, cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } tcg_temp_free(lo); } break; case 12: /*QUOUi*/ tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 13: /*QUOSi*/ case 15: /*REMSi*/ { int label1 = gen_new_label(); int label2 = gen_new_label(); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000, label1); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff, label1); tcg_gen_movi_i32(cpu_R[RRR_R], OP2 == 13 ? 0x80000000 : 0); tcg_gen_br(label2); gen_set_label(label1); if (OP2 == 13) { tcg_gen_div_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } else { tcg_gen_rem_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } gen_set_label(label2); } break; case 14: /*REMUi*/ tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; default: /*reserved*/ RESERVED(); break; } break; case 3: /*RST3*/ switch (OP2) { case 0: /*RSR*/ if (gen_check_sr(dc, RSR_SR, SR_R) && (RSR_SR < 64 || gen_check_privilege(dc)) && gen_window_check1(dc, RRR_T)) { gen_rsr(dc, cpu_R[RRR_T], RSR_SR); } break; case 1: /*WSR*/ if (gen_check_sr(dc, RSR_SR, SR_W) && (RSR_SR < 64 || gen_check_privilege(dc)) && gen_window_check1(dc, RRR_T)) { gen_wsr(dc, RSR_SR, cpu_R[RRR_T]); } break; case 2: /*SEXTu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT); if (gen_window_check2(dc, RRR_R, RRR_S)) { int shift = 24 - RRR_T; if (shift == 24) { tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]); } else if (shift == 16) { tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]); } else { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift); tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift); tcg_temp_free(tmp); } } break; case 3: /*CLAMPSu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS); if (gen_window_check2(dc, RRR_R, RRR_S)) { TCGv_i32 tmp1 = tcg_temp_new_i32(); TCGv_i32 tmp2 = tcg_temp_new_i32(); TCGv_i32 zero = tcg_const_i32(0); tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T); tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]); tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7)); tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31); tcg_gen_xori_i32(tmp1, tmp1, 0xffffffff >> (25 - RRR_T)); tcg_gen_movcond_i32(TCG_COND_EQ, cpu_R[RRR_R], tmp2, zero, cpu_R[RRR_S], tmp1); tcg_temp_free(tmp1); tcg_temp_free(tmp2); tcg_temp_free(zero); } break; case 4: /*MINu*/ case 5: /*MAXu*/ case 6: /*MINUu*/ case 7: /*MAXUu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX); if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { static const TCGCond cond[] = { TCG_COND_LE, TCG_COND_GE, TCG_COND_LEU, TCG_COND_GEU }; tcg_gen_movcond_i32(cond[OP2 - 4], cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T], cpu_R[RRR_S], cpu_R[RRR_T]); } break; case 8: /*MOVEQZ*/ case 9: /*MOVNEZ*/ case 10: /*MOVLTZ*/ case 11: /*MOVGEZ*/ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) { static const TCGCond cond[] = { TCG_COND_EQ, TCG_COND_NE, TCG_COND_LT, TCG_COND_GE, }; TCGv_i32 zero = tcg_const_i32(0); tcg_gen_movcond_i32(cond[OP2 - 8], cpu_R[RRR_R], cpu_R[RRR_T], zero, cpu_R[RRR_S], cpu_R[RRR_R]); tcg_temp_free(zero); } break; case 12: /*MOVFp*/ case 13: /*MOVTp*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); if (gen_window_check2(dc, RRR_R, RRR_S)) { TCGv_i32 zero = tcg_const_i32(0); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T); tcg_gen_movcond_i32(OP2 & 1 ? 
TCG_COND_NE : TCG_COND_EQ, cpu_R[RRR_R], tmp, zero, cpu_R[RRR_S], cpu_R[RRR_R]); tcg_temp_free(tmp); tcg_temp_free(zero); } break; case 14: /*RUR*/ if (gen_window_check1(dc, RRR_R)) { int st = (RRR_S << 4) + RRR_T; if (uregnames[st].name) { tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]); } else { qemu_log(\"RUR %d not implemented, \", st); TBD(); } } break; case 15: /*WUR*/ if (gen_window_check1(dc, RRR_T)) { if (uregnames[RSR_SR].name) { gen_wur(RSR_SR, cpu_R[RRR_T]); } else { qemu_log(\"WUR %d not implemented, \", RSR_SR); TBD(); } } break; } break; case 4: /*EXTUI*/ case 5: if (gen_window_check2(dc, RRR_R, RRR_T)) { int shiftimm = RRR_S | ((OP1 & 1) << 4); int maskimm = (1 << (OP2 + 1)) - 1; TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm); tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm); tcg_temp_free(tmp); } break; case 6: /*CUST0*/ RESERVED(); break; case 7: /*CUST1*/ RESERVED(); break; case 8: /*LSCXp*/ switch (OP2) { case 0: /*LSXf*/ case 1: /*LSXUf*/ case 4: /*SSXf*/ case 5: /*SSXUf*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); if (gen_window_check2(dc, RRR_S, RRR_T) && gen_check_cpenable(dc, 0)) { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_add_i32(addr, cpu_R[RRR_S], cpu_R[RRR_T]); gen_load_store_alignment(dc, 2, addr, false); if (OP2 & 0x4) { tcg_gen_qemu_st32(cpu_FR[RRR_R], addr, dc->cring); } else { tcg_gen_qemu_ld32u(cpu_FR[RRR_R], addr, dc->cring); } if (OP2 & 0x1) { tcg_gen_mov_i32(cpu_R[RRR_S], addr); } tcg_temp_free(addr); } break; default: /*reserved*/ RESERVED(); break; } break; case 9: /*LSC4*/ if (!gen_window_check2(dc, RRR_S, RRR_T)) { break; } switch (OP2) { case 0: /*L32E*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); if (gen_check_privilege(dc)) { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_addi_i32(addr, cpu_R[RRR_S], (0xffffffc0 | (RRR_R << 2))); tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring); tcg_temp_free(addr); } break; case 4: /*S32E*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); if (gen_check_privilege(dc)) { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_addi_i32(addr, cpu_R[RRR_S], (0xffffffc0 | (RRR_R << 2))); tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring); tcg_temp_free(addr); } break; default: RESERVED(); break; } break; case 10: /*FP0*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); switch (OP2) { case 0: /*ADD.Sf*/ if (gen_check_cpenable(dc, 0)) { gen_helper_add_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_S], cpu_FR[RRR_T]); } break; case 1: /*SUB.Sf*/ if (gen_check_cpenable(dc, 0)) { gen_helper_sub_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_S], cpu_FR[RRR_T]); } break; case 2: /*MUL.Sf*/ if (gen_check_cpenable(dc, 0)) { gen_helper_mul_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_S], cpu_FR[RRR_T]); } break; case 4: /*MADD.Sf*/ if (gen_check_cpenable(dc, 0)) { gen_helper_madd_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]); } break; case 5: /*MSUB.Sf*/ if (gen_check_cpenable(dc, 0)) { gen_helper_msub_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]); } break; case 8: /*ROUND.Sf*/ case 9: /*TRUNC.Sf*/ case 10: /*FLOOR.Sf*/ case 11: /*CEIL.Sf*/ case 14: /*UTRUNC.Sf*/ if (gen_window_check1(dc, RRR_R) && gen_check_cpenable(dc, 0)) { static const unsigned rounding_mode_const[] = { float_round_nearest_even, float_round_to_zero, float_round_down, float_round_up, [6] = float_round_to_zero, }; TCGv_i32 rounding_mode = tcg_const_i32( rounding_mode_const[OP2 & 7]); TCGv_i32 scale = tcg_const_i32(RRR_T); if (OP2 == 14) { gen_helper_ftoui(cpu_R[RRR_R], cpu_FR[RRR_S], rounding_mode, scale); } else { 
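/* signed float-to-int path shared by ROUND/TRUNC/FLOOR/CEIL.S; only UTRUNC.S (OP2 == 14) uses the unsigned helper above */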
gen_helper_ftoi(cpu_R[RRR_R], cpu_FR[RRR_S], rounding_mode, scale); } tcg_temp_free(rounding_mode); tcg_temp_free(scale); } break; case 12: /*FLOAT.Sf*/ case 13: /*UFLOAT.Sf*/ if (gen_window_check1(dc, RRR_S) && gen_check_cpenable(dc, 0)) { TCGv_i32 scale = tcg_const_i32(-RRR_T); if (OP2 == 13) { gen_helper_uitof(cpu_FR[RRR_R], cpu_env, cpu_R[RRR_S], scale); } else { gen_helper_itof(cpu_FR[RRR_R], cpu_env, cpu_R[RRR_S], scale); } tcg_temp_free(scale); } break; case 15: /*FP1OP*/ switch (RRR_T) { case 0: /*MOV.Sf*/ if (gen_check_cpenable(dc, 0)) { tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]); } break; case 1: /*ABS.Sf*/ if (gen_check_cpenable(dc, 0)) { gen_helper_abs_s(cpu_FR[RRR_R], cpu_FR[RRR_S]); } break; case 4: /*RFRf*/ if (gen_window_check1(dc, RRR_R) && gen_check_cpenable(dc, 0)) { tcg_gen_mov_i32(cpu_R[RRR_R], cpu_FR[RRR_S]); } break; case 5: /*WFRf*/ if (gen_window_check1(dc, RRR_S) && gen_check_cpenable(dc, 0)) { tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_R[RRR_S]); } break; case 6: /*NEG.Sf*/ if (gen_check_cpenable(dc, 0)) { gen_helper_neg_s(cpu_FR[RRR_R], cpu_FR[RRR_S]); } break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } break; case 11: /*FP1*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); #define gen_compare(rel, br, a, b) \\ do { \\ if (gen_check_cpenable(dc, 0)) { \\ TCGv_i32 bit = tcg_const_i32(1 << br); \\ \\ gen_helper_##rel(cpu_env, bit, cpu_FR[a], cpu_FR[b]); \\ tcg_temp_free(bit); \\ } \\ } while (0) switch (OP2) { case 1: /*UN.Sf*/ gen_compare(un_s, RRR_R, RRR_S, RRR_T); break; case 2: /*OEQ.Sf*/ gen_compare(oeq_s, RRR_R, RRR_S, RRR_T); break; case 3: /*UEQ.Sf*/ gen_compare(ueq_s, RRR_R, RRR_S, RRR_T); break; case 4: /*OLT.Sf*/ gen_compare(olt_s, RRR_R, RRR_S, RRR_T); break; case 5: /*ULT.Sf*/ gen_compare(ult_s, RRR_R, RRR_S, RRR_T); break; case 6: /*OLE.Sf*/ gen_compare(ole_s, RRR_R, RRR_S, RRR_T); break; case 7: /*ULE.Sf*/ gen_compare(ule_s, RRR_R, RRR_S, RRR_T); break; #undef gen_compare case 8: /*MOVEQZ.Sf*/ case 9: /*MOVNEZ.Sf*/ case 10: /*MOVLTZ.Sf*/ case 11: /*MOVGEZ.Sf*/ if (gen_window_check1(dc, RRR_T) && gen_check_cpenable(dc, 0)) { static const TCGCond cond[] = { TCG_COND_EQ, TCG_COND_NE, TCG_COND_LT, TCG_COND_GE, }; TCGv_i32 zero = tcg_const_i32(0); tcg_gen_movcond_i32(cond[OP2 - 8], cpu_FR[RRR_R], cpu_R[RRR_T], zero, cpu_FR[RRR_S], cpu_FR[RRR_R]); tcg_temp_free(zero); } break; case 12: /*MOVF.Sf*/ case 13: /*MOVT.Sf*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); if (gen_check_cpenable(dc, 0)) { TCGv_i32 zero = tcg_const_i32(0); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T); tcg_gen_movcond_i32(OP2 & 1 ? TCG_COND_NE : TCG_COND_EQ, cpu_FR[RRR_R], tmp, zero, cpu_FR[RRR_S], cpu_FR[RRR_R]); tcg_temp_free(tmp); tcg_temp_free(zero); } break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } break; case 1: /*L32R*/ if (gen_window_check1(dc, RRR_T)) { TCGv_i32 tmp = tcg_const_i32( ((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ? 
0 : ((dc->pc + 3) & ~3)) + (0xfffc0000 | (RI16_IMM16 << 2))); if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) { tcg_gen_add_i32(tmp, tmp, dc->litbase); } tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring); tcg_temp_free(tmp); } break; case 2: /*LSAI*/ #define gen_load_store(type, shift) do { \\ if (gen_window_check2(dc, RRI8_S, RRI8_T)) { \\ TCGv_i32 addr = tcg_temp_new_i32(); \\ \\ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \\ if (shift) { \\ gen_load_store_alignment(dc, shift, addr, false); \\ } \\ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \\ tcg_temp_free(addr); \\ } \\ } while (0) switch (RRI8_R) { case 0: /*L8UI*/ gen_load_store(ld8u, 0); break; case 1: /*L16UI*/ gen_load_store(ld16u, 1); break; case 2: /*L32I*/ gen_load_store(ld32u, 2); break; case 4: /*S8I*/ gen_load_store(st8, 0); break; case 5: /*S16I*/ gen_load_store(st16, 1); break; case 6: /*S32I*/ gen_load_store(st32, 2); break; #define gen_dcache_hit_test(w, shift) do { \\ if (gen_window_check1(dc, RRI##w##_S)) { \\ TCGv_i32 addr = tcg_temp_new_i32(); \\ TCGv_i32 res = tcg_temp_new_i32(); \\ tcg_gen_addi_i32(addr, cpu_R[RRI##w##_S], \\ RRI##w##_IMM##w << shift); \\ tcg_gen_qemu_ld8u(res, addr, dc->cring); \\ tcg_temp_free(addr); \\ tcg_temp_free(res); \\ } \\ } while (0) #define gen_dcache_hit_test4() gen_dcache_hit_test(4, 4) #define gen_dcache_hit_test8() gen_dcache_hit_test(8, 2) case 7: /*CACHEc*/ if (RRI8_T < 8) { HAS_OPTION(XTENSA_OPTION_DCACHE); } switch (RRI8_T) { case 0: /*DPFRc*/ gen_window_check1(dc, RRI8_S); break; case 1: /*DPFWc*/ gen_window_check1(dc, RRI8_S); break; case 2: /*DPFROc*/ gen_window_check1(dc, RRI8_S); break; case 3: /*DPFWOc*/ gen_window_check1(dc, RRI8_S); break; case 4: /*DHWBc*/ gen_dcache_hit_test8(); break; case 5: /*DHWBIc*/ gen_dcache_hit_test8(); break; case 6: /*DHIc*/ if (gen_check_privilege(dc)) { gen_dcache_hit_test8(); } break; case 7: /*DIIc*/ if (gen_check_privilege(dc)) { gen_window_check1(dc, RRI8_S); } break; case 8: /*DCEc*/ switch (OP1) { case 0: /*DPFLl*/ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); if (gen_check_privilege(dc)) { gen_dcache_hit_test4(); } break; case 2: /*DHUl*/ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); if (gen_check_privilege(dc)) { gen_dcache_hit_test4(); } break; case 3: /*DIUl*/ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); if (gen_check_privilege(dc)) { gen_window_check1(dc, RRI4_S); } break; case 4: /*DIWBc*/ HAS_OPTION(XTENSA_OPTION_DCACHE); if (gen_check_privilege(dc)) { gen_window_check1(dc, RRI4_S); } break; case 5: /*DIWBIc*/ HAS_OPTION(XTENSA_OPTION_DCACHE); if (gen_check_privilege(dc)) { gen_window_check1(dc, RRI4_S); } break; default: /*reserved*/ RESERVED(); break; } break; #undef gen_dcache_hit_test #undef gen_dcache_hit_test4 #undef gen_dcache_hit_test8 #define gen_icache_hit_test(w, shift) do { \\ if (gen_window_check1(dc, RRI##w##_S)) { \\ TCGv_i32 addr = tcg_temp_new_i32(); \\ tcg_gen_movi_i32(cpu_pc, dc->pc); \\ tcg_gen_addi_i32(addr, cpu_R[RRI##w##_S], \\ RRI##w##_IMM##w << shift); \\ gen_helper_itlb_hit_test(cpu_env, addr); \\ tcg_temp_free(addr); \\ }\\ } while (0) #define gen_icache_hit_test4() gen_icache_hit_test(4, 4) #define gen_icache_hit_test8() gen_icache_hit_test(8, 2) case 12: /*IPFc*/ HAS_OPTION(XTENSA_OPTION_ICACHE); gen_window_check1(dc, RRI8_S); break; case 13: /*ICEc*/ switch (OP1) { case 0: /*IPFLl*/ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); if (gen_check_privilege(dc)) { gen_icache_hit_test4(); } break; case 2: /*IHUl*/ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); if (gen_check_privilege(dc)) { 
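/* IHU is handled like IPFL above: the icache-index op is modelled as an ITLB hit test on the computed address */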
gen_icache_hit_test4(); } break; case 3: /*IIUl*/ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); if (gen_check_privilege(dc)) { gen_window_check1(dc, RRI4_S); } break; default: /*reserved*/ RESERVED(); break; } break; case 14: /*IHIc*/ HAS_OPTION(XTENSA_OPTION_ICACHE); gen_icache_hit_test8(); break; case 15: /*IIIc*/ HAS_OPTION(XTENSA_OPTION_ICACHE); if (gen_check_privilege(dc)) { gen_window_check1(dc, RRI8_S); } break; default: /*reserved*/ RESERVED(); break; } break; #undef gen_icache_hit_test #undef gen_icache_hit_test4 #undef gen_icache_hit_test8 case 9: /*L16SI*/ gen_load_store(ld16s, 1); break; #undef gen_load_store case 10: /*MOVI*/ if (gen_window_check1(dc, RRI8_T)) { tcg_gen_movi_i32(cpu_R[RRI8_T], RRI8_IMM8 | (RRI8_S << 8) | ((RRI8_S & 0x8) ? 0xfffff000 : 0)); } break; #define gen_load_store_no_hw_align(type) do { \\ if (gen_window_check2(dc, RRI8_S, RRI8_T)) { \\ TCGv_i32 addr = tcg_temp_local_new_i32(); \\ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \\ gen_load_store_alignment(dc, 2, addr, true); \\ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \\ tcg_temp_free(addr); \\ } \\ } while (0) case 11: /*L32AIy*/ HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO); gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/ break; case 12: /*ADDI*/ if (gen_window_check2(dc, RRI8_S, RRI8_T)) { tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE); } break; case 13: /*ADDMI*/ if (gen_window_check2(dc, RRI8_S, RRI8_T)) { tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8); } break; case 14: /*S32C1Iy*/ HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE); if (gen_window_check2(dc, RRI8_S, RRI8_T)) { int label = gen_new_label(); TCGv_i32 tmp = tcg_temp_local_new_i32(); TCGv_i32 addr = tcg_temp_local_new_i32(); TCGv_i32 tpc; tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]); tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); gen_load_store_alignment(dc, 2, addr, true); gen_advance_ccount(dc); tpc = tcg_const_i32(dc->pc); gen_helper_check_atomctl(cpu_env, tpc, addr); tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring); tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T], cpu_SR[SCOMPARE1], label); tcg_gen_qemu_st32(tmp, addr, dc->cring); gen_set_label(label); tcg_temp_free(tpc); tcg_temp_free(addr); tcg_temp_free(tmp); } break; case 15: /*S32RIy*/ HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO); gen_load_store_no_hw_align(st32); /*TODO release?*/ break; #undef gen_load_store_no_hw_align default: /*reserved*/ RESERVED(); break; } break; case 3: /*LSCIp*/ switch (RRI8_R) { case 0: /*LSIf*/ case 4: /*SSIf*/ case 8: /*LSIUf*/ case 12: /*SSIUf*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); if (gen_window_check1(dc, RRI8_S) && gen_check_cpenable(dc, 0)) { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); gen_load_store_alignment(dc, 2, addr, false); if (RRI8_R & 0x4) { tcg_gen_qemu_st32(cpu_FR[RRI8_T], addr, dc->cring); } else { tcg_gen_qemu_ld32u(cpu_FR[RRI8_T], addr, dc->cring); } if (RRI8_R & 0x8) { tcg_gen_mov_i32(cpu_R[RRI8_S], addr); } tcg_temp_free(addr); } break; default: /*reserved*/ RESERVED(); break; } break; case 4: /*MAC16d*/ HAS_OPTION(XTENSA_OPTION_MAC16); { enum { MAC16_UMUL = 0x0, MAC16_MUL = 0x4, MAC16_MULA = 0x8, MAC16_MULS = 0xc, MAC16_NONE = 0xf, } op = OP1 & 0xc; bool is_m1_sr = (OP2 & 0x3) == 2; bool is_m2_sr = (OP2 & 0xc) == 0; uint32_t ld_offset = 0; if (OP2 > 9) { RESERVED(); } switch (OP2 & 2) { case 0: /*MACI?/MACC?*/ is_m1_sr = true; ld_offset = (OP2 & 1) ? 
-4 : 4; if (OP2 >= 8) { /*MACI/MACC*/ if (OP1 == 0) { /*LDINC/LDDEC*/ op = MAC16_NONE; } else { RESERVED(); } } else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/ RESERVED(); } break; case 2: /*MACD?/MACA?*/ if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/ RESERVED(); } break; } if (op != MAC16_NONE) { if (!is_m1_sr && !gen_window_check1(dc, RRR_S)) { break; } if (!is_m2_sr && !gen_window_check1(dc, RRR_T)) { break; } } if (ld_offset && !gen_window_check1(dc, RRR_S)) { break; } { TCGv_i32 vaddr = tcg_temp_new_i32(); TCGv_i32 mem32 = tcg_temp_new_i32(); if (ld_offset) { tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset); gen_load_store_alignment(dc, 2, vaddr, false); tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring); } if (op != MAC16_NONE) { TCGv_i32 m1 = gen_mac16_m( is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S], OP1 & 1, op == MAC16_UMUL); TCGv_i32 m2 = gen_mac16_m( is_m2_sr ? cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T], OP1 & 2, op == MAC16_UMUL); if (op == MAC16_MUL || op == MAC16_UMUL) { tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2); if (op == MAC16_UMUL) { tcg_gen_movi_i32(cpu_SR[ACCHI], 0); } else { tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31); } } else { TCGv_i32 lo = tcg_temp_new_i32(); TCGv_i32 hi = tcg_temp_new_i32(); tcg_gen_mul_i32(lo, m1, m2); tcg_gen_sari_i32(hi, lo, 31); if (op == MAC16_MULA) { tcg_gen_add2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI], cpu_SR[ACCLO], cpu_SR[ACCHI], lo, hi); } else { tcg_gen_sub2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI], cpu_SR[ACCLO], cpu_SR[ACCHI], lo, hi); } tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]); tcg_temp_free_i32(lo); tcg_temp_free_i32(hi); } tcg_temp_free(m1); tcg_temp_free(m2); } if (ld_offset) { tcg_gen_mov_i32(cpu_R[RRR_S], vaddr); tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32); } tcg_temp_free(vaddr); tcg_temp_free(mem32); } } break; case 5: /*CALLN*/ switch (CALL_N) { case 0: /*CALL0*/ tcg_gen_movi_i32(cpu_R[0], dc->next_pc); gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0); break; case 1: /*CALL4w*/ case 2: /*CALL8w*/ case 3: /*CALL12w*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); if (gen_window_check1(dc, CALL_N << 2)) { gen_callwi(dc, CALL_N, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0); } break; } break; case 6: /*SI*/ switch (CALL_N) { case 0: /*J*/ gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0); break; case 1: /*BZ*/ if (gen_window_check1(dc, BRI12_S)) { static const TCGCond cond[] = { TCG_COND_EQ, /*BEQZ*/ TCG_COND_NE, /*BNEZ*/ TCG_COND_LT, /*BLTZ*/ TCG_COND_GE, /*BGEZ*/ }; gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0, 4 + BRI12_IMM12_SE); } break; case 2: /*BI0*/ if (gen_window_check1(dc, BRI8_S)) { static const TCGCond cond[] = { TCG_COND_EQ, /*BEQI*/ TCG_COND_NE, /*BNEI*/ TCG_COND_LT, /*BLTI*/ TCG_COND_GE, /*BGEI*/ }; gen_brcondi(dc, cond[BRI8_M & 3], cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE); } break; case 3: /*BI1*/ switch (BRI8_M) { case 0: /*ENTRYw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 pc = tcg_const_i32(dc->pc); TCGv_i32 s = tcg_const_i32(BRI12_S); TCGv_i32 imm = tcg_const_i32(BRI12_IMM12); gen_advance_ccount(dc); gen_helper_entry(cpu_env, pc, s, imm); tcg_temp_free(imm); tcg_temp_free(s); tcg_temp_free(pc); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); } break; case 1: /*B1*/ switch (BRI8_R) { case 0: /*BFp*/ case 1: /*BTp*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S); gen_brcondi(dc, BRI8_R == 1 ? 
TCG_COND_NE : TCG_COND_EQ, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; case 8: /*LOOP*/ case 9: /*LOOPNEZ*/ case 10: /*LOOPGTZ*/ HAS_OPTION(XTENSA_OPTION_LOOP); if (gen_window_check1(dc, RRI8_S)) { uint32_t lend = dc->pc + RRI8_IMM8 + 4; TCGv_i32 tmp = tcg_const_i32(lend); tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1); tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc); gen_helper_wsr_lend(cpu_env, tmp); tcg_temp_free(tmp); if (BRI8_R > 8) { int label = gen_new_label(); tcg_gen_brcondi_i32( BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT, cpu_R[RRI8_S], 0, label); gen_jumpi(dc, lend, 1); gen_set_label(label); } gen_jumpi(dc, dc->next_pc, 0); } break; default: /*reserved*/ RESERVED(); break; } break; case 2: /*BLTUI*/ case 3: /*BGEUI*/ if (gen_window_check1(dc, BRI8_S)) { gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU, cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE); } break; } break; } break; case 7: /*B*/ { TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ; switch (RRI8_R & 7) { case 0: /*BNONE*/ /*BANY*/ if (gen_window_check2(dc, RRI8_S, RRI8_T)) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]); gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; case 1: /*BEQ*/ /*BNE*/ case 2: /*BLT*/ /*BGE*/ case 3: /*BLTU*/ /*BGEU*/ if (gen_window_check2(dc, RRI8_S, RRI8_T)) { static const TCGCond cond[] = { [1] = TCG_COND_EQ, [2] = TCG_COND_LT, [3] = TCG_COND_LTU, [9] = TCG_COND_NE, [10] = TCG_COND_GE, [11] = TCG_COND_GEU, }; gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T], 4 + RRI8_IMM8_SE); } break; case 4: /*BALL*/ /*BNALL*/ if (gen_window_check2(dc, RRI8_S, RRI8_T)) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]); gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T], 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; case 5: /*BBC*/ /*BBS*/ if (gen_window_check2(dc, RRI8_S, RRI8_T)) { #ifdef TARGET_WORDS_BIGENDIAN TCGv_i32 bit = tcg_const_i32(0x80000000); #else TCGv_i32 bit = tcg_const_i32(0x00000001); #endif TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f); #ifdef TARGET_WORDS_BIGENDIAN tcg_gen_shr_i32(bit, bit, tmp); #else tcg_gen_shl_i32(bit, bit, tmp); #endif tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit); gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); tcg_temp_free(bit); } break; case 6: /*BBCI*/ /*BBSI*/ case 7: if (gen_window_check1(dc, RRI8_S)) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_R[RRI8_S], #ifdef TARGET_WORDS_BIGENDIAN 0x80000000 >> (((RRI8_R & 1) << 4) | RRI8_T)); #else 0x00000001 << (((RRI8_R & 1) << 4) | RRI8_T)); #endif gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; } } break; #define gen_narrow_load_store(type) do { \\ if (gen_window_check2(dc, RRRN_S, RRRN_T)) { \\ TCGv_i32 addr = tcg_temp_new_i32(); \\ tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \\ gen_load_store_alignment(dc, 2, addr, false); \\ tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \\ tcg_temp_free(addr); \\ } \\ } while (0) case 8: /*L32I.Nn*/ gen_narrow_load_store(ld32u); break; case 9: /*S32I.Nn*/ gen_narrow_load_store(st32); break; #undef gen_narrow_load_store case 10: /*ADD.Nn*/ if (gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T)) { tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]); } break; case 11: /*ADDI.Nn*/ if (gen_window_check2(dc, RRRN_R, RRRN_S)) { tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? 
RRRN_T : -1); } break; case 12: /*ST2n*/ if (!gen_window_check1(dc, RRRN_S)) { break; } if (RRRN_T < 8) { /*MOVI.Nn*/ tcg_gen_movi_i32(cpu_R[RRRN_S], RRRN_R | (RRRN_T << 4) | ((RRRN_T & 6) == 6 ? 0xffffff80 : 0)); } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/ TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ; gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0, 4 + (RRRN_R | ((RRRN_T & 3) << 4))); } break; case 13: /*ST3n*/ switch (RRRN_R) { case 0: /*MOV.Nn*/ if (gen_window_check2(dc, RRRN_S, RRRN_T)) { tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]); } break; case 15: /*S3*/ switch (RRRN_T) { case 0: /*RET.Nn*/ gen_jump(dc, cpu_R[0]); break; case 1: /*RETW.Nn*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_const_i32(dc->pc); gen_advance_ccount(dc); gen_helper_retw(tmp, cpu_env, tmp); gen_jump(dc, tmp); tcg_temp_free(tmp); } break; case 2: /*BREAK.Nn*/ HAS_OPTION(XTENSA_OPTION_DEBUG); if (dc->debug) { gen_debug_exception(dc, DEBUGCAUSE_BN); } break; case 3: /*NOP.Nn*/ break; case 6: /*ILL.Nn*/ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } if (dc->is_jmp == DISAS_NEXT) { gen_check_loop_end(dc, 0); } dc->pc = dc->next_pc; return; invalid_opcode: qemu_log(\"INVALID(pc = %08x)\\n\", dc->pc); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); #undef HAS_OPTION }"} {"target": 0, "idx": 22425, "func": "static int end_frame(AVFilterLink *inlink) { AVFilterContext *ctx = inlink->dst; FPSContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; AVFilterBufferRef *buf = inlink->cur_buf; int64_t delta; int i, ret; inlink->cur_buf = NULL; s->frames_in++; /* discard frames until we get the first timestamp */ if (s->pts == AV_NOPTS_VALUE) { if (buf->pts != AV_NOPTS_VALUE) { write_to_fifo(s->fifo, buf); s->first_pts = s->pts = buf->pts; } else { av_log(ctx, AV_LOG_WARNING, \"Discarding initial frame(s) with no \" \"timestamp.\\n\"); avfilter_unref_buffer(buf); s->drop++; } return 0; } /* now wait for the next timestamp */ if (buf->pts == AV_NOPTS_VALUE) { return write_to_fifo(s->fifo, buf); } /* number of output frames */ delta = av_rescale_q(buf->pts - s->pts, inlink->time_base, outlink->time_base); if (delta < 1) { /* drop the frame and everything buffered except the first */ AVFilterBufferRef *tmp; int drop = av_fifo_size(s->fifo)/sizeof(AVFilterBufferRef*); av_log(ctx, AV_LOG_DEBUG, \"Dropping %d frame(s).\\n\", drop); s->drop += drop; av_fifo_generic_read(s->fifo, &tmp, sizeof(tmp), NULL); flush_fifo(s->fifo); ret = write_to_fifo(s->fifo, tmp); avfilter_unref_buffer(buf); return ret; } /* can output >= 1 frames */ for (i = 0; i < delta; i++) { AVFilterBufferRef *buf_out; av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL); /* duplicate the frame if needed */ if (!av_fifo_size(s->fifo) && i < delta - 1) { av_log(ctx, AV_LOG_DEBUG, \"Duplicating frame.\\n\"); write_to_fifo(s->fifo, avfilter_ref_buffer(buf_out, AV_PERM_READ)); s->dup++; } buf_out->pts = av_rescale_q(s->first_pts, inlink->time_base, outlink->time_base) + s->frames_out; if ((ret = ff_start_frame(outlink, buf_out)) < 0 || (ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 || (ret = ff_end_frame(outlink)) < 0) { avfilter_unref_bufferp(&buf); return ret; } s->frames_out++; } flush_fifo(s->fifo); ret = write_to_fifo(s->fifo, buf); s->pts = s->first_pts + av_rescale_q(s->frames_out, outlink->time_base, inlink->time_base); return ret; }"} {"target": 1, "idx": 22426, 
"func": "static int roq_read_packet(AVFormatContext *s, AVPacket *pkt) { RoqDemuxContext *roq = s->priv_data; AVIOContext *pb = s->pb; int ret = 0; unsigned int chunk_size; unsigned int chunk_type; unsigned int codebook_size; unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE]; int packet_read = 0; int64_t codebook_offset; while (!packet_read) { if (avio_feof(s->pb)) return AVERROR(EIO); /* get the next chunk preamble */ if ((ret = avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) != RoQ_CHUNK_PREAMBLE_SIZE) return AVERROR(EIO); chunk_type = AV_RL16(&preamble[0]); chunk_size = AV_RL32(&preamble[2]); if(chunk_size > INT_MAX) return AVERROR_INVALIDDATA; chunk_size = ffio_limit(pb, chunk_size); switch (chunk_type) { case RoQ_INFO: if (roq->video_stream_index == -1) { AVStream *st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); avpriv_set_pts_info(st, 63, 1, roq->frame_rate); roq->video_stream_index = st->index; st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_id = AV_CODEC_ID_ROQ; st->codecpar->codec_tag = 0; /* no fourcc */ if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) != RoQ_CHUNK_PREAMBLE_SIZE) return AVERROR(EIO); st->codecpar->width = roq->width = AV_RL16(preamble); st->codecpar->height = roq->height = AV_RL16(preamble + 2); break; } /* don't care about this chunk anymore */ avio_skip(pb, RoQ_CHUNK_PREAMBLE_SIZE); break; case RoQ_QUAD_CODEBOOK: if (roq->video_stream_index < 0) return AVERROR_INVALIDDATA; /* packet needs to contain both this codebook and next VQ chunk */ codebook_offset = avio_tell(pb) - RoQ_CHUNK_PREAMBLE_SIZE; codebook_size = chunk_size; avio_skip(pb, codebook_size); if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) != RoQ_CHUNK_PREAMBLE_SIZE) return AVERROR(EIO); chunk_size = AV_RL32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 + codebook_size; if (chunk_size > INT_MAX) return AVERROR_INVALIDDATA; /* rewind */ avio_seek(pb, codebook_offset, SEEK_SET); /* load up the packet */ ret= av_get_packet(pb, pkt, chunk_size); if (ret != chunk_size) return AVERROR(EIO); pkt->stream_index = roq->video_stream_index; pkt->pts = roq->video_pts++; packet_read = 1; break; case RoQ_SOUND_MONO: case RoQ_SOUND_STEREO: if (roq->audio_stream_index == -1) { AVStream *st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); avpriv_set_pts_info(st, 32, 1, RoQ_AUDIO_SAMPLE_RATE); roq->audio_stream_index = st->index; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_id = AV_CODEC_ID_ROQ_DPCM; st->codecpar->codec_tag = 0; /* no tag */ if (chunk_type == RoQ_SOUND_STEREO) { st->codecpar->channels = 2; st->codecpar->channel_layout = AV_CH_LAYOUT_STEREO; } else { st->codecpar->channels = 1; st->codecpar->channel_layout = AV_CH_LAYOUT_MONO; } roq->audio_channels = st->codecpar->channels; st->codecpar->sample_rate = RoQ_AUDIO_SAMPLE_RATE; st->codecpar->bits_per_coded_sample = 16; st->codecpar->bit_rate = st->codecpar->channels * st->codecpar->sample_rate * st->codecpar->bits_per_coded_sample; st->codecpar->block_align = st->codecpar->channels * st->codecpar->bits_per_coded_sample; } case RoQ_QUAD_VQ: if (chunk_type == RoQ_QUAD_VQ) { if (roq->video_stream_index < 0) return AVERROR_INVALIDDATA; } /* load up the packet */ if (av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE)) return AVERROR(EIO); /* copy over preamble */ memcpy(pkt->data, preamble, RoQ_CHUNK_PREAMBLE_SIZE); if (chunk_type == RoQ_QUAD_VQ) { pkt->stream_index = roq->video_stream_index; pkt->pts = roq->video_pts++; } else { pkt->stream_index = roq->audio_stream_index; 
pkt->pts = roq->audio_frame_count; roq->audio_frame_count += (chunk_size / roq->audio_channels); } pkt->pos= avio_tell(pb); ret = avio_read(pb, pkt->data + RoQ_CHUNK_PREAMBLE_SIZE, chunk_size); if (ret != chunk_size) ret = AVERROR(EIO); packet_read = 1; break; default: av_log(s, AV_LOG_ERROR, \" unknown RoQ chunk (%04X)\\n\", chunk_type); return AVERROR_INVALIDDATA; } } return ret; }"} {"target": 1, "idx": 22429, "func": "static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, int16_t *filter, int16_t *filterPos, int filterSize) { #ifdef HAVE_MMX assert(filterSize % 4 == 0 && filterSize>0); if(filterSize==4) // allways true for upscaling, sometimes for down too { long counter= -2*dstW; filter-= counter*2; filterPos-= counter/2; dst-= counter/2; asm volatile( \"pxor %%mm7, %%mm7 \\n\\t\" \"movq \"MANGLE(w02)\", %%mm6 \\n\\t\" \"push %%\"REG_BP\" \\n\\t\" // we use 7 regs here ... \"mov %%\"REG_a\", %%\"REG_BP\" \\n\\t\" \".balign 16 \\n\\t\" \"1: \\n\\t\" \"movzwl (%2, %%\"REG_BP\"), %%eax \\n\\t\" \"movzwl 2(%2, %%\"REG_BP\"), %%ebx\\n\\t\" \"movq (%1, %%\"REG_BP\", 4), %%mm1\\n\\t\" \"movq 8(%1, %%\"REG_BP\", 4), %%mm3\\n\\t\" \"movd (%3, %%\"REG_a\"), %%mm0 \\n\\t\" \"movd (%3, %%\"REG_b\"), %%mm2 \\n\\t\" \"punpcklbw %%mm7, %%mm0 \\n\\t\" \"punpcklbw %%mm7, %%mm2 \\n\\t\" \"pmaddwd %%mm1, %%mm0 \\n\\t\" \"pmaddwd %%mm2, %%mm3 \\n\\t\" \"psrad $8, %%mm0 \\n\\t\" \"psrad $8, %%mm3 \\n\\t\" \"packssdw %%mm3, %%mm0 \\n\\t\" \"pmaddwd %%mm6, %%mm0 \\n\\t\" \"packssdw %%mm0, %%mm0 \\n\\t\" \"movd %%mm0, (%4, %%\"REG_BP\") \\n\\t\" \"add $4, %%\"REG_BP\" \\n\\t\" \" jnc 1b \\n\\t\" \"pop %%\"REG_BP\" \\n\\t\" : \"+a\" (counter) : \"c\" (filter), \"d\" (filterPos), \"S\" (src), \"D\" (dst) : \"%\"REG_b ); } else if(filterSize==8) { long counter= -2*dstW; filter-= counter*4; filterPos-= counter/2; dst-= counter/2; asm volatile( \"pxor %%mm7, %%mm7 \\n\\t\" \"movq \"MANGLE(w02)\", %%mm6 \\n\\t\" \"push %%\"REG_BP\" \\n\\t\" // we use 7 regs here ... 
\"mov %%\"REG_a\", %%\"REG_BP\" \\n\\t\" \".balign 16 \\n\\t\" \"1: \\n\\t\" \"movzwl (%2, %%\"REG_BP\"), %%eax \\n\\t\" \"movzwl 2(%2, %%\"REG_BP\"), %%ebx\\n\\t\" \"movq (%1, %%\"REG_BP\", 8), %%mm1\\n\\t\" \"movq 16(%1, %%\"REG_BP\", 8), %%mm3\\n\\t\" \"movd (%3, %%\"REG_a\"), %%mm0 \\n\\t\" \"movd (%3, %%\"REG_b\"), %%mm2 \\n\\t\" \"punpcklbw %%mm7, %%mm0 \\n\\t\" \"punpcklbw %%mm7, %%mm2 \\n\\t\" \"pmaddwd %%mm1, %%mm0 \\n\\t\" \"pmaddwd %%mm2, %%mm3 \\n\\t\" \"movq 8(%1, %%\"REG_BP\", 8), %%mm1\\n\\t\" \"movq 24(%1, %%\"REG_BP\", 8), %%mm5\\n\\t\" \"movd 4(%3, %%\"REG_a\"), %%mm4 \\n\\t\" \"movd 4(%3, %%\"REG_b\"), %%mm2 \\n\\t\" \"punpcklbw %%mm7, %%mm4 \\n\\t\" \"punpcklbw %%mm7, %%mm2 \\n\\t\" \"pmaddwd %%mm1, %%mm4 \\n\\t\" \"pmaddwd %%mm2, %%mm5 \\n\\t\" \"paddd %%mm4, %%mm0 \\n\\t\" \"paddd %%mm5, %%mm3 \\n\\t\" \"psrad $8, %%mm0 \\n\\t\" \"psrad $8, %%mm3 \\n\\t\" \"packssdw %%mm3, %%mm0 \\n\\t\" \"pmaddwd %%mm6, %%mm0 \\n\\t\" \"packssdw %%mm0, %%mm0 \\n\\t\" \"movd %%mm0, (%4, %%\"REG_BP\") \\n\\t\" \"add $4, %%\"REG_BP\" \\n\\t\" \" jnc 1b \\n\\t\" \"pop %%\"REG_BP\" \\n\\t\" : \"+a\" (counter) : \"c\" (filter), \"d\" (filterPos), \"S\" (src), \"D\" (dst) : \"%\"REG_b ); } else { uint8_t *offset = src+filterSize; long counter= -2*dstW; // filter-= counter*filterSize/2; filterPos-= counter/2; dst-= counter/2; asm volatile( \"pxor %%mm7, %%mm7 \\n\\t\" \"movq \"MANGLE(w02)\", %%mm6 \\n\\t\" \".balign 16 \\n\\t\" \"1: \\n\\t\" \"mov %2, %%\"REG_c\" \\n\\t\" \"movzwl (%%\"REG_c\", %0), %%eax \\n\\t\" \"movzwl 2(%%\"REG_c\", %0), %%ebx \\n\\t\" \"mov %5, %%\"REG_c\" \\n\\t\" \"pxor %%mm4, %%mm4 \\n\\t\" \"pxor %%mm5, %%mm5 \\n\\t\" \"2: \\n\\t\" \"movq (%1), %%mm1 \\n\\t\" \"movq (%1, %6), %%mm3 \\n\\t\" \"movd (%%\"REG_c\", %%\"REG_a\"), %%mm0\\n\\t\" \"movd (%%\"REG_c\", %%\"REG_b\"), %%mm2\\n\\t\" \"punpcklbw %%mm7, %%mm0 \\n\\t\" \"punpcklbw %%mm7, %%mm2 \\n\\t\" \"pmaddwd %%mm1, %%mm0 \\n\\t\" \"pmaddwd %%mm2, %%mm3 \\n\\t\" \"paddd %%mm3, %%mm5 \\n\\t\" \"paddd %%mm0, %%mm4 \\n\\t\" \"add $8, %1 \\n\\t\" \"add $4, %%\"REG_c\" \\n\\t\" \"cmp %4, %%\"REG_c\" \\n\\t\" \" jb 2b \\n\\t\" \"add %6, %1 \\n\\t\" \"psrad $8, %%mm4 \\n\\t\" \"psrad $8, %%mm5 \\n\\t\" \"packssdw %%mm5, %%mm4 \\n\\t\" \"pmaddwd %%mm6, %%mm4 \\n\\t\" \"packssdw %%mm4, %%mm4 \\n\\t\" \"mov %3, %%\"REG_a\" \\n\\t\" \"movd %%mm4, (%%\"REG_a\", %0) \\n\\t\" \"add $4, %0 \\n\\t\" \" jnc 1b \\n\\t\" : \"+r\" (counter), \"+r\" (filter) : \"m\" (filterPos), \"m\" (dst), \"m\"(offset), \"m\" (src), \"r\" ((long)filterSize*2) : \"%\"REG_b, \"%\"REG_a, \"%\"REG_c ); } #else #ifdef HAVE_ALTIVEC hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize); #else int i; for(i=0; i>7), (1<<15)-1); // the cubic equation does overflow ... 
// dst[i] = val>>7; } #endif #endif }"} {"target": 1, "idx": 22431, "func": "void qemu_file_set_error(QEMUFile *f, int ret) { if (f->last_error == 0) { f->last_error = ret; } }"} {"target": 0, "idx": 22436, "func": "static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ MpegEncContext * const s = avctx->priv_data; MJpegContext * const m = s->mjpeg_ctx; AVFrame *pict = data; const int width= s->width; const int height= s->height; AVFrame * const p= (AVFrame*)&s->current_picture; const int predictor= avctx->prediction_method+1; init_put_bits(&s->pb, buf, buf_size); *p = *pict; p->pict_type= FF_I_TYPE; p->key_frame= 1; ff_mjpeg_encode_picture_header(s); s->header_bits= put_bits_count(&s->pb); if(avctx->pix_fmt == PIX_FMT_RGB32){ int x, y, i; const int linesize= p->linesize[0]; uint16_t (*buffer)[4]= (void *) s->rd_scratchpad; int left[3], top[3], topleft[3]; for(i=0; i<3; i++){ buffer[0][i]= 1 << (9 - 1); } for(y = 0; y < height; y++) { const int modified_predictor= y ? predictor : 1; uint8_t *ptr = p->data[0] + (linesize * y); if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < width*3*4){ av_log(s->avctx, AV_LOG_ERROR, \"encoded frame too large\\n\"); return -1; } for(i=0; i<3; i++){ top[i]= left[i]= topleft[i]= buffer[0][i]; } for(x = 0; x < width; x++) { buffer[x][1] = ptr[4*x+0] - ptr[4*x+1] + 0x100; buffer[x][2] = ptr[4*x+2] - ptr[4*x+1] + 0x100; buffer[x][0] = (ptr[4*x+0] + 2*ptr[4*x+1] + ptr[4*x+2])>>2; for(i=0;i<3;i++) { int pred, diff; PREDICT(pred, topleft[i], top[i], left[i], modified_predictor); topleft[i]= top[i]; top[i]= buffer[x+1][i]; left[i]= buffer[x][i]; diff= ((left[i] - pred + 0x100)&0x1FF) - 0x100; if(i==0) ff_mjpeg_encode_dc(s, diff, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly else ff_mjpeg_encode_dc(s, diff, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance); } } } }else{ int mb_x, mb_y, i; const int mb_width = (width + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0]; const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0]; for(mb_y = 0; mb_y < mb_height; mb_y++) { if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < mb_width * 4 * 3 * s->mjpeg_hsample[0] * s->mjpeg_vsample[0]){ av_log(s->avctx, AV_LOG_ERROR, \"encoded frame too large\\n\"); return -1; } for(mb_x = 0; mb_x < mb_width; mb_x++) { if(mb_x==0 || mb_y==0){ for(i=0;i<3;i++) { uint8_t *ptr; int x, y, h, v, linesize; h = s->mjpeg_hsample[i]; v = s->mjpeg_vsample[i]; linesize= p->linesize[i]; for(y=0; y<v; y++) { for(x=0; x<h; x++) { int pred; ptr = p->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap if(y==0 && mb_y==0){ if(x==0 && mb_x==0){ pred= 128; }else{ pred= ptr[-1]; } }else{ if(x==0 && mb_x==0){ pred= ptr[-linesize]; }else{ PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor); } } if(i==0) ff_mjpeg_encode_dc(s, (int8_t)(*ptr - pred), m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly else ff_mjpeg_encode_dc(s, (int8_t)(*ptr - pred), m->huff_size_dc_chrominance, m->huff_code_dc_chrominance); } } } }else{ for(i=0;i<3;i++) { uint8_t *ptr; int x, y, h, v, linesize; h = s->mjpeg_hsample[i]; v = s->mjpeg_vsample[i]; linesize= p->linesize[i]; for(y=0; y<v; y++) { for(x=0; x<h; x++) { int pred; ptr = p->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap //printf(\"%d %d %d %d %8X\\n\", mb_x, mb_y, x, y, ptr); PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor); if(i==0) ff_mjpeg_encode_dc(s, (int8_t)(*ptr - pred), m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
else ff_mjpeg_encode_dc(s, (int8_t)(*ptr - pred), m->huff_size_dc_chrominance, m->huff_code_dc_chrominance); } } } } } } } emms_c(); ff_mjpeg_encode_picture_trailer(s); s->picture_number++; flush_put_bits(&s->pb); return pbBufPtr(&s->pb) - s->pb.buf; // return (put_bits_count(&f->pb)+7)/8; }"} {"target": 0, "idx": 22459, "func": "static int opt_input_file(OptionsContext *o, const char *opt, const char *filename) { AVFormatContext *ic; AVInputFormat *file_iformat = NULL; int err, i, ret; int64_t timestamp; uint8_t buf[128]; AVDictionary **opts; int orig_nb_streams; // number of streams before avformat_find_stream_info if (o->format) { if (!(file_iformat = av_find_input_format(o->format))) { av_log(NULL, AV_LOG_FATAL, \"Unknown input format: '%s'\\n\", o->format); exit_program(1); } } if (!strcmp(filename, \"-\")) filename = \"pipe:\"; using_stdin |= !strncmp(filename, \"pipe:\", 5) || !strcmp(filename, \"/dev/stdin\"); /* get default parameters from command line */ ic = avformat_alloc_context(); if (!ic) { print_error(filename, AVERROR(ENOMEM)); exit_program(1); } if (o->nb_audio_sample_rate) { snprintf(buf, sizeof(buf), \"%d\", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i); av_dict_set(&format_opts, \"sample_rate\", buf, 0); } if (o->nb_audio_channels) { snprintf(buf, sizeof(buf), \"%d\", o->audio_channels[o->nb_audio_channels - 1].u.i); av_dict_set(&format_opts, \"channels\", buf, 0); } if (o->nb_frame_rates) { av_dict_set(&format_opts, \"framerate\", o->frame_rates[o->nb_frame_rates - 1].u.str, 0); } if (o->nb_frame_sizes) { av_dict_set(&format_opts, \"video_size\", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0); } if (o->nb_frame_pix_fmts) av_dict_set(&format_opts, \"pixel_format\", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0); ic->video_codec_id = video_codec_name ? find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0)->id : CODEC_ID_NONE; ic->audio_codec_id = audio_codec_name ? find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0)->id : CODEC_ID_NONE; ic->subtitle_codec_id= subtitle_codec_name ? find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0)->id : CODEC_ID_NONE; ic->flags |= AVFMT_FLAG_NONBLOCK; ic->interrupt_callback = int_cb; if (loop_input) { av_log(NULL, AV_LOG_WARNING, \"-loop_input is deprecated, use -loop 1\\n\"); ic->loop_input = loop_input; } /* open the input file with generic avformat function */ err = avformat_open_input(&ic, filename, file_iformat, &format_opts); if (err < 0) { print_error(filename, err); exit_program(1); } assert_avoptions(format_opts); /* apply forced codec ids */ for (i = 0; i < ic->nb_streams; i++) choose_decoder(o, ic, ic->streams[i]); /* Set AVCodecContext options for avformat_find_stream_info */ opts = setup_find_stream_info_opts(ic, codec_opts); orig_nb_streams = ic->nb_streams; /* If not enough info to get the stream parameters, we decode the first frames to get it. 
(used in mpeg case for example) */ ret = avformat_find_stream_info(ic, opts); if (ret < 0) { av_log(NULL, AV_LOG_FATAL, \"%s: could not find codec parameters\\n\", filename); av_close_input_file(ic); exit_program(1); } timestamp = o->start_time; /* add the stream start time */ if (ic->start_time != AV_NOPTS_VALUE) timestamp += ic->start_time; /* if seeking requested, we execute it */ if (o->start_time != 0) { ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD); if (ret < 0) { av_log(NULL, AV_LOG_WARNING, \"%s: could not seek to position %0.3f\\n\", filename, (double)timestamp / AV_TIME_BASE); } } /* update the current parameters so that they match the one of the input stream */ add_input_streams(o, ic); /* dump the file content */ av_dump_format(ic, nb_input_files, filename, 0); input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1); input_files[nb_input_files - 1].ctx = ic; input_files[nb_input_files - 1].ist_index = nb_input_streams - ic->nb_streams; input_files[nb_input_files - 1].ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp); input_files[nb_input_files - 1].nb_streams = ic->nb_streams; input_files[nb_input_files - 1].rate_emu = o->rate_emu; for (i = 0; i < o->nb_dump_attachment; i++) { int j; for (j = 0; j < ic->nb_streams; j++) { AVStream *st = ic->streams[j]; if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1) dump_attachment(st, o->dump_attachment[i].u.str); } } for (i = 0; i < orig_nb_streams; i++) av_dict_free(&opts[i]); av_freep(&opts); reset_options(o, 1); return 0; }"} {"target": 0, "idx": 22462, "func": "uint64_t helper_efdctuidz (uint64_t val) { CPU_DoubleU u; u.ll = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float64_is_nan(u.d))) return 0; return float64_to_uint64_round_to_zero(u.d, &env->vec_status); }"} {"target": 0, "idx": 22465, "func": "void bios_linker_loader_add_checksum(BIOSLinker *linker, const char *file_name, void *start, unsigned size, uint8_t *checksum) { BiosLinkerLoaderEntry entry; const BiosLinkerFileEntry *file = bios_linker_find_file(linker, file_name); ptrdiff_t checksum_offset = (gchar *)checksum - file->blob->data; ptrdiff_t start_offset = (gchar *)start - file->blob->data; assert(checksum_offset >= 0); assert(start_offset >= 0); assert(checksum_offset + 1 <= file->blob->len); assert(start_offset + size <= file->blob->len); assert(*checksum == 0x0); memset(&entry, 0, sizeof entry); strncpy(entry.cksum.file, file_name, sizeof entry.cksum.file - 1); entry.command = cpu_to_le32(BIOS_LINKER_LOADER_COMMAND_ADD_CHECKSUM); entry.cksum.offset = cpu_to_le32(checksum_offset); entry.cksum.start = cpu_to_le32(start_offset); entry.cksum.length = cpu_to_le32(size); g_array_append_vals(linker->cmd_blob, &entry, sizeof entry); }"} {"target": 1, "idx": 22503, "func": "void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f) { PerThreadContext *p = avctx->internal->thread_ctx; FrameThreadContext *fctx; AVFrame *dst, *tmp; FF_DISABLE_DEPRECATION_WARNINGS int can_direct_free = !(avctx->active_thread_type & FF_THREAD_FRAME) || avctx->thread_safe_callbacks || ( #if FF_API_GET_BUFFER !avctx->get_buffer && #endif avctx->get_buffer2 == avcodec_default_get_buffer2); FF_ENABLE_DEPRECATION_WARNINGS if (!f->f->buf[0]) return; if (avctx->debug & FF_DEBUG_BUFFERS) av_log(avctx, AV_LOG_DEBUG, \"thread_release_buffer called on pic %p\\n\", f); av_buffer_unref(&f->progress); f->owner = NULL; if (can_direct_free) { av_frame_unref(f->f); return; } fctx = p->parent; 
pthread_mutex_lock(&fctx->buffer_mutex); if (p->num_released_buffers + 1 >= INT_MAX / sizeof(*p->released_buffers)) goto fail; tmp = av_fast_realloc(p->released_buffers, &p->released_buffers_allocated, (p->num_released_buffers + 1) * sizeof(*p->released_buffers)); if (!tmp) goto fail; p->released_buffers = tmp; dst = &p->released_buffers[p->num_released_buffers]; av_frame_move_ref(dst, f->f); p->num_released_buffers++; fail: pthread_mutex_unlock(&fctx->buffer_mutex); }"} {"target": 1, "idx": 22531, "func": "void ff_schro_queue_free(FFSchroQueue *queue, void (*free_func)(void *)) { while (queue->p_head) free_func(ff_schro_queue_pop(queue)); }"} {"target": 1, "idx": 22534, "func": "void qemu_mutex_lock_iothread(void) { if (!tcg_enabled()) { qemu_mutex_lock(&qemu_global_mutex); } else { iothread_requesting_mutex = true; if (qemu_mutex_trylock(&qemu_global_mutex)) { qemu_cpu_kick_thread(first_cpu); qemu_mutex_lock(&qemu_global_mutex); } iothread_requesting_mutex = false; qemu_cond_broadcast(&qemu_io_proceeded_cond); } }"} {"target": 0, "idx": 22545, "func": "static inline void gen_op_addl_ESP_im(int32_t val) { tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP])); tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val); #ifdef TARGET_X86_64 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff); #endif tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP])); }"} {"target": 0, "idx": 22552, "func": "static unsigned int dec_movu_r(DisasContext *dc) { TCGv t0; int size = memsize_z(dc); DIS(fprintf (logfile, \"movu.%c $r%u, $r%u\\n\", memsize_char(size), dc->op1, dc->op2)); cris_cc_mask(dc, CC_MASK_NZ); t0 = tcg_temp_new(TCG_TYPE_TL); dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); tcg_temp_free(t0); return 2; }"} {"target": 0, "idx": 22559, "func": "AVInputFormat *av_find_input_format(const char *short_name) { AVInputFormat *fmt = NULL; while ((fmt = av_iformat_next(fmt))) if (match_format(short_name, fmt->name)) return fmt; return NULL; }"} {"target": 1, "idx": 22597, "func": "static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx, int mm_flags) { #if HAVE_SSE2_INLINE const int high_bit_depth = avctx->bits_per_raw_sample > 8; if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) { c->idct_put = ff_idct_xvid_sse2_put; c->idct_add = ff_idct_xvid_sse2_add; c->idct = ff_idct_xvid_sse2; c->idct_permutation_type = FF_SSE2_IDCT_PERM; } #endif /* HAVE_SSE2_INLINE */ #if HAVE_SSE2_EXTERNAL c->scalarproduct_int16 = ff_scalarproduct_int16_sse2; c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2; if (mm_flags & AV_CPU_FLAG_ATOM) { c->vector_clip_int32 = ff_vector_clip_int32_int_sse2; } else { c->vector_clip_int32 = ff_vector_clip_int32_sse2; } if (avctx->flags & CODEC_FLAG_BITEXACT) { c->apply_window_int16 = ff_apply_window_int16_sse2; } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) { c->apply_window_int16 = ff_apply_window_int16_round_sse2; } c->bswap_buf = ff_bswap32_buf_sse2; #endif /* HAVE_SSE2_EXTERNAL */ }"} {"target": 0, "idx": 22601, "func": "static int au_probe(AVProbeData *p) { /* check file header */ if (p->buf_size <= 24) return 0; if (p->buf[0] == '.' 
&& p->buf[1] == 's' && p->buf[2] == 'n' && p->buf[3] == 'd') return AVPROBE_SCORE_MAX; else return 0; }"} {"target": 1, "idx": 22615, "func": "static void pc_fw_add_pflash_drv(void) { QemuOpts *opts; QEMUMachine *machine; char *filename; if (bios_name == NULL) { bios_name = BIOS_FILENAME; } filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); opts = drive_add(IF_PFLASH, -1, filename, \"readonly=on\"); g_free(filename); if (opts == NULL) { return; } machine = find_default_machine(); if (machine == NULL) { return; } drive_init(opts, machine->use_scsi); }"} {"target": 0, "idx": 22637, "func": "void ff_af_queue_remove(AudioFrameQueue *afq, int nb_samples, int64_t *pts, int *duration) { int64_t out_pts = AV_NOPTS_VALUE; int removed_samples = 0; #ifdef DEBUG ff_af_queue_log_state(afq); #endif /* get output pts from the next frame or generated pts */ if (afq->frame_queue) { if (afq->frame_queue->pts != AV_NOPTS_VALUE) out_pts = afq->frame_queue->pts - afq->remaining_delay; } else { if (afq->next_pts != AV_NOPTS_VALUE) out_pts = afq->next_pts - afq->remaining_delay; } if (pts) { if (out_pts != AV_NOPTS_VALUE) *pts = ff_samples_to_time_base(afq->avctx, out_pts); else *pts = AV_NOPTS_VALUE; } /* if the delay is larger than the packet duration, we use up delay samples for the output packet and leave all frames in the queue */ if (afq->remaining_delay >= nb_samples) { removed_samples += nb_samples; afq->remaining_delay -= nb_samples; } /* remove frames from the queue until we have enough to cover the requested number of samples or until the queue is empty */ while (removed_samples < nb_samples && afq->frame_queue) { removed_samples += afq->frame_queue->duration; delete_next_frame(afq); } afq->remaining_samples -= removed_samples; /* if there are no frames left and we have room for more samples, use any remaining delay samples */ if (removed_samples < nb_samples && afq->remaining_samples > 0) { int add_samples = FFMIN(afq->remaining_samples, nb_samples - removed_samples); removed_samples += add_samples; afq->remaining_samples -= add_samples; } if (removed_samples > nb_samples) av_log(afq->avctx, AV_LOG_WARNING, \"frame_size is too large\\n\"); if (duration) *duration = ff_samples_to_time_base(afq->avctx, removed_samples); }"} {"target": 1, "idx": 22652, "func": "void stream_start(BlockDriverState *bs, BlockDriverState *base, const char *base_id, int64_t speed, BlockDriverCompletionFunc *cb, void *opaque, Error **errp) { StreamBlockJob *s; Coroutine *co; s = block_job_create(&stream_job_type, bs, speed, cb, opaque, errp); if (!s) { return; } s->base = base; if (base_id) { pstrcpy(s->backing_file_id, sizeof(s->backing_file_id), base_id); } co = qemu_coroutine_create(stream_run); trace_stream_start(bs, base, s, co, opaque); qemu_coroutine_enter(co, s); }"} {"target": 1, "idx": 22655, "func": "static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, double q){ RateControlContext *rcc= &s->rc_context; AVCodecContext *a= s->avctx; const int pict_type= rce->new_pict_type; const double last_p_q = rcc->last_qscale_for[P_TYPE]; const double last_non_b_q= rcc->last_qscale_for[rcc->last_non_b_pict_type]; if (pict_type==I_TYPE && (a->i_quant_factor>0.0 || rcc->last_non_b_pict_type==P_TYPE)) q= last_p_q *FFABS(a->i_quant_factor) + a->i_quant_offset; else if(pict_type==B_TYPE && a->b_quant_factor>0.0) q= last_non_b_q* a->b_quant_factor + a->b_quant_offset; /* last qscale / qdiff stuff */ if(rcc->last_non_b_pict_type==pict_type || pict_type!=I_TYPE){ double last_q= 
rcc->last_qscale_for[pict_type]; const int maxdiff= FF_QP2LAMBDA * a->max_qdiff; if (q > last_q + maxdiff) q= last_q + maxdiff; else if(q < last_q - maxdiff) q= last_q - maxdiff; } rcc->last_qscale_for[pict_type]= q; //Note we cannot do that after blurring if(pict_type!=B_TYPE) rcc->last_non_b_pict_type= pict_type; return q; }"} {"target": 0, "idx": 22662, "func": "static int mov_write_hdlr_tag(AVIOContext *pb, MOVTrack *track) { const char *hdlr, *descr = NULL, *hdlr_type = NULL; int64_t pos = avio_tell(pb); if (!track) { /* no media --> data handler */ hdlr = \"dhlr\"; hdlr_type = \"url \"; descr = \"DataHandler\"; } else { hdlr = (track->mode == MODE_MOV) ? \"mhlr\" : \"\\0\\0\\0\\0\"; if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO) { hdlr_type = \"vide\"; descr = \"VideoHandler\"; } else if (track->enc->codec_type == AVMEDIA_TYPE_AUDIO) { hdlr_type = \"soun\"; descr = \"SoundHandler\"; } else if (track->enc->codec_type == AVMEDIA_TYPE_SUBTITLE) { if (track->tag == MKTAG('t','x','3','g')) hdlr_type = \"sbtl\"; else hdlr_type = \"text\"; descr = \"SubtitleHandler\"; } else if (track->enc->codec_tag == MKTAG('r','t','p',' ')) { hdlr_type = \"hint\"; descr = \"HintHandler\"; } } avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, \"hdlr\"); avio_wb32(pb, 0); /* Version & flags */ avio_write(pb, hdlr, 4); /* handler */ ffio_wfourcc(pb, hdlr_type); /* handler type */ avio_wb32(pb, 0); /* reserved */ avio_wb32(pb, 0); /* reserved */ avio_wb32(pb, 0); /* reserved */ if (!track || track->mode == MODE_MOV) avio_w8(pb, strlen(descr)); /* pascal string */ avio_write(pb, descr, strlen(descr)); /* handler description */ if (track && track->mode != MODE_MOV) avio_w8(pb, 0); /* c string */ return update_size(pb, pos); }"} {"target": 1, "idx": 22683, "func": "PPC_OP(srawi) { T1 = T0; T0 = (Ts0 >> PARAM(1)); if (Ts1 < 0 && (Ts1 & PARAM(2)) != 0) { xer_ca = 1; } else { xer_ca = 0; } RETURN(); }"} {"target": 1, "idx": 22693, "func": "void trace_init_file(const char *file) { #ifdef CONFIG_TRACE_SIMPLE st_set_trace_file(file); #elif defined CONFIG_TRACE_LOG /* If both the simple and the log backends are enabled, \"-trace file\" * only applies to the simple backend; use \"-D\" for the log backend. 
*/ if (file) { qemu_set_log_filename(file); } #else if (file) { fprintf(stderr, \"error: -trace file=...: \" \"option not supported by the selected tracing backends\\n\"); exit(1); } #endif }"} {"target": 1, "idx": 22702, "func": "static int srt_probe(AVProbeData *p) { const unsigned char *ptr = p->buf; int i, v, num = 0; if (AV_RB24(ptr) == 0xEFBBBF) ptr += 3; /* skip UTF-8 BOM */ while (*ptr == '\\r' || *ptr == '\\n') ptr++; for (i=0; i<2; i++) { if ((num == i || num + 1 == i) && sscanf(ptr, \"%*d:%*2d:%*2d%*1[,.]%*3d --> %*d:%*2d:%*2d%*1[,.]%3d\", &v) == 1) return AVPROBE_SCORE_MAX; num = atoi(ptr); ptr += strcspn(ptr, \"\\n\") + 1; } return 0; }"} {"target": 1, "idx": 22705, "func": "static void piix4_update_hotplug(PIIX4PMState *s) { PCIDevice *dev = &s->dev; BusState *bus = qdev_get_parent_bus(&dev->qdev); DeviceState *qdev, *next; s->pci0_hotplug_enable = ~0; QTAILQ_FOREACH_SAFE(qdev, &bus->children, sibling, next) { PCIDevice *pdev = PCI_DEVICE(qdev); PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pdev); int slot = PCI_SLOT(pdev->devfn); if (pc->no_hotplug) { s->pci0_hotplug_enable &= ~(1 << slot); } } }"} {"target": 1, "idx": 22709, "func": "static void stream_set_speed(BlockJob *job, int64_t speed, Error **errp) { StreamBlockJob *s = container_of(job, StreamBlockJob, common); if (speed < 0) { error_setg(errp, QERR_INVALID_PARAMETER, \"speed\"); return; } ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME); }"} {"target": 1, "idx": 22718, "func": "static av_cold int dnxhd_decode_close(AVCodecContext *avctx) { DNXHDContext *ctx = avctx->priv_data; ff_free_vlc(&ctx->ac_vlc); ff_free_vlc(&ctx->dc_vlc); ff_free_vlc(&ctx->run_vlc); av_freep(&ctx->mb_scan_index); av_freep(&ctx->rows); return 0; }"} {"target": 1, "idx": 22741, "func": "int unix_start_incoming_migration(const char *path) { struct sockaddr_un un; int sock; dprintf(\"Attempting to start an incoming migration\\n\"); sock = socket(PF_UNIX, SOCK_STREAM, 0); if (sock < 0) { fprintf(stderr, \"Could not open unix socket: %s\\n\", strerror(errno)); return -EINVAL; } memset(&un, 0, sizeof(un)); un.sun_family = AF_UNIX; snprintf(un.sun_path, sizeof(un.sun_path), \"%s\", path); unlink(un.sun_path); if (bind(sock, (struct sockaddr*) &un, sizeof(un)) < 0) { fprintf(stderr, \"bind(unix:%s): %s\\n\", un.sun_path, strerror(errno)); goto err; } if (listen(sock, 1) < 0) { fprintf(stderr, \"listen(unix:%s): %s\\n\", un.sun_path, strerror(errno)); goto err; } qemu_set_fd_handler2(sock, NULL, unix_accept_incoming_migration, NULL, (void *)(unsigned long)sock); return 0; err: close(sock); return -EINVAL; }"} {"target": 0, "idx": 22752, "func": "void bdrv_refresh_limits(BlockDriverState *bs, Error **errp) { BlockDriver *drv = bs->drv; Error *local_err = NULL; memset(&bs->bl, 0, sizeof(bs->bl)); if (!drv) { return; } /* Default alignment based on whether driver has byte interface */ bs->request_alignment = drv->bdrv_co_preadv ? 
1 : 512; /* Take some limits from the children as a default */ if (bs->file) { bdrv_refresh_limits(bs->file->bs, &local_err); if (local_err) { error_propagate(errp, local_err); return; } bs->bl.opt_transfer = bs->file->bs->bl.opt_transfer; bs->bl.max_transfer = bs->file->bs->bl.max_transfer; bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment; bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment; bs->bl.max_iov = bs->file->bs->bl.max_iov; } else { bs->bl.min_mem_alignment = 512; bs->bl.opt_mem_alignment = getpagesize(); /* Safe default since most protocols use readv()/writev()/etc */ bs->bl.max_iov = IOV_MAX; } if (bs->backing) { bdrv_refresh_limits(bs->backing->bs, &local_err); if (local_err) { error_propagate(errp, local_err); return; } bs->bl.opt_transfer = MAX(bs->bl.opt_transfer, bs->backing->bs->bl.opt_transfer); bs->bl.max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, bs->backing->bs->bl.max_transfer); bs->bl.opt_mem_alignment = MAX(bs->bl.opt_mem_alignment, bs->backing->bs->bl.opt_mem_alignment); bs->bl.min_mem_alignment = MAX(bs->bl.min_mem_alignment, bs->backing->bs->bl.min_mem_alignment); bs->bl.max_iov = MIN(bs->bl.max_iov, bs->backing->bs->bl.max_iov); } /* Then let the driver override it */ if (drv->bdrv_refresh_limits) { drv->bdrv_refresh_limits(bs, errp); } }"} {"target": 0, "idx": 22759, "func": "static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix, bool *rebuild, void **refcount_table, int64_t *nb_clusters) { BDRVQcow2State *s = bs->opaque; int64_t i, size; int ret; for(i = 0; i < s->refcount_table_size; i++) { uint64_t offset, cluster; offset = s->refcount_table[i]; cluster = offset >> s->cluster_bits; /* Refcount blocks are cluster aligned */ if (offset_into_cluster(s, offset)) { fprintf(stderr, \"ERROR refcount block %\" PRId64 \" is not \" \"cluster aligned; refcount table entry corrupted\\n\", i); res->corruptions++; *rebuild = true; continue; } if (cluster >= *nb_clusters) { fprintf(stderr, \"%s refcount block %\" PRId64 \" is outside image\\n\", fix & BDRV_FIX_ERRORS ? 
\"Repairing\" : \"ERROR\", i); if (fix & BDRV_FIX_ERRORS) { int64_t new_nb_clusters; Error *local_err = NULL; if (offset > INT64_MAX - s->cluster_size) { ret = -EINVAL; goto resize_fail; } ret = bdrv_truncate(bs->file, offset + s->cluster_size, &local_err); if (ret < 0) { error_report_err(local_err); goto resize_fail; } size = bdrv_getlength(bs->file->bs); if (size < 0) { ret = size; goto resize_fail; } new_nb_clusters = size_to_clusters(s, size); assert(new_nb_clusters >= *nb_clusters); ret = realloc_refcount_array(s, refcount_table, nb_clusters, new_nb_clusters); if (ret < 0) { res->check_errors++; return ret; } if (cluster >= *nb_clusters) { ret = -EINVAL; goto resize_fail; } res->corruptions_fixed++; ret = inc_refcounts(bs, res, refcount_table, nb_clusters, offset, s->cluster_size); if (ret < 0) { return ret; } /* No need to check whether the refcount is now greater than 1: * This area was just allocated and zeroed, so it can only be * exactly 1 after inc_refcounts() */ continue; resize_fail: res->corruptions++; *rebuild = true; fprintf(stderr, \"ERROR could not resize image: %s\\n\", strerror(-ret)); } else { res->corruptions++; } continue; } if (offset != 0) { ret = inc_refcounts(bs, res, refcount_table, nb_clusters, offset, s->cluster_size); if (ret < 0) { return ret; } if (s->get_refcount(*refcount_table, cluster) != 1) { fprintf(stderr, \"ERROR refcount block %\" PRId64 \" refcount=%\" PRIu64 \"\\n\", i, s->get_refcount(*refcount_table, cluster)); res->corruptions++; *rebuild = true; } } } return 0; }"} {"target": 1, "idx": 22801, "func": "DriveInfo *drive_init(QemuOpts *opts, BlockInterfaceType block_default_type) { const char *buf; const char *file = NULL; const char *serial; const char *mediastr = \"\"; BlockInterfaceType type; enum { MEDIA_DISK, MEDIA_CDROM } media; int bus_id, unit_id; int cyls, heads, secs, translation; BlockDriver *drv = NULL; int max_devs; int index; int ro = 0; int bdrv_flags = 0; int on_read_error, on_write_error; const char *devaddr; DriveInfo *dinfo; BlockIOLimit io_limits; int snapshot = 0; bool copy_on_read; int ret; Error *error = NULL; translation = BIOS_ATA_TRANSLATION_AUTO; media = MEDIA_DISK; /* extract parameters */ bus_id = qemu_opt_get_number(opts, \"bus\", 0); unit_id = qemu_opt_get_number(opts, \"unit\", -1); index = qemu_opt_get_number(opts, \"index\", -1); cyls = qemu_opt_get_number(opts, \"cyls\", 0); heads = qemu_opt_get_number(opts, \"heads\", 0); secs = qemu_opt_get_number(opts, \"secs\", 0); snapshot = qemu_opt_get_bool(opts, \"snapshot\", 0); ro = qemu_opt_get_bool(opts, \"readonly\", 0); copy_on_read = qemu_opt_get_bool(opts, \"copy-on-read\", false); file = qemu_opt_get(opts, \"file\"); serial = qemu_opt_get(opts, \"serial\"); if ((buf = qemu_opt_get(opts, \"if\")) != NULL) { for (type = 0; type < IF_COUNT && strcmp(buf, if_name[type]); type++) ; if (type == IF_COUNT) { error_report(\"unsupported bus type '%s'\", buf); return NULL; } } else { type = block_default_type; } max_devs = if_max_devs[type]; if (cyls || heads || secs) { if (cyls < 1) { error_report(\"invalid physical cyls number\"); return NULL; } if (heads < 1) { error_report(\"invalid physical heads number\"); return NULL; } if (secs < 1) { error_report(\"invalid physical secs number\"); return NULL; } } if ((buf = qemu_opt_get(opts, \"trans\")) != NULL) { if (!cyls) { error_report(\"'%s' trans must be used with cyls, heads and secs\", buf); return NULL; } if (!strcmp(buf, \"none\")) translation = BIOS_ATA_TRANSLATION_NONE; else if (!strcmp(buf, \"lba\")) translation = 
BIOS_ATA_TRANSLATION_LBA; else if (!strcmp(buf, \"auto\")) translation = BIOS_ATA_TRANSLATION_AUTO; else { error_report(\"'%s' invalid translation type\", buf); return NULL; } } if ((buf = qemu_opt_get(opts, \"media\")) != NULL) { if (!strcmp(buf, \"disk\")) { media = MEDIA_DISK; } else if (!strcmp(buf, \"cdrom\")) { if (cyls || secs || heads) { error_report(\"CHS can't be set with media=%s\", buf); return NULL; } media = MEDIA_CDROM; } else { error_report(\"'%s' invalid media\", buf); return NULL; } } if ((buf = qemu_opt_get(opts, \"discard\")) != NULL) { if (bdrv_parse_discard_flags(buf, &bdrv_flags) != 0) { error_report(\"invalid discard option\"); return NULL; } } bdrv_flags |= BDRV_O_CACHE_WB; if ((buf = qemu_opt_get(opts, \"cache\")) != NULL) { if (bdrv_parse_cache_flags(buf, &bdrv_flags) != 0) { error_report(\"invalid cache option\"); return NULL; } } #ifdef CONFIG_LINUX_AIO if ((buf = qemu_opt_get(opts, \"aio\")) != NULL) { if (!strcmp(buf, \"native\")) { bdrv_flags |= BDRV_O_NATIVE_AIO; } else if (!strcmp(buf, \"threads\")) { /* this is the default */ } else { error_report(\"invalid aio option\"); return NULL; } } #endif if ((buf = qemu_opt_get(opts, \"format\")) != NULL) { if (is_help_option(buf)) { error_printf(\"Supported formats:\"); bdrv_iterate_format(bdrv_format_print, NULL); error_printf(\"\\n\"); return NULL; } drv = bdrv_find_whitelisted_format(buf); if (!drv) { error_report(\"'%s' invalid format\", buf); return NULL; } } /* disk I/O throttling */ io_limits.bps[BLOCK_IO_LIMIT_TOTAL] = qemu_opt_get_number(opts, \"bps\", 0); io_limits.bps[BLOCK_IO_LIMIT_READ] = qemu_opt_get_number(opts, \"bps_rd\", 0); io_limits.bps[BLOCK_IO_LIMIT_WRITE] = qemu_opt_get_number(opts, \"bps_wr\", 0); io_limits.iops[BLOCK_IO_LIMIT_TOTAL] = qemu_opt_get_number(opts, \"iops\", 0); io_limits.iops[BLOCK_IO_LIMIT_READ] = qemu_opt_get_number(opts, \"iops_rd\", 0); io_limits.iops[BLOCK_IO_LIMIT_WRITE] = qemu_opt_get_number(opts, \"iops_wr\", 0); if (!do_check_io_limits(&io_limits, &error)) { error_report(\"%s\", error_get_pretty(error)); error_free(error); return NULL; } if (qemu_opt_get(opts, \"boot\") != NULL) { fprintf(stderr, \"qemu-kvm: boot=on|off is deprecated and will be \" \"ignored. Future versions will reject this parameter. 
Please \" \"update your scripts.\\n\"); } on_write_error = BLOCKDEV_ON_ERROR_ENOSPC; if ((buf = qemu_opt_get(opts, \"werror\")) != NULL) { if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO && type != IF_NONE) { error_report(\"werror is not supported by this bus type\"); return NULL; } on_write_error = parse_block_error_action(buf, 0); if (on_write_error < 0) { return NULL; } } on_read_error = BLOCKDEV_ON_ERROR_REPORT; if ((buf = qemu_opt_get(opts, \"rerror\")) != NULL) { if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI && type != IF_NONE) { error_report(\"rerror is not supported by this bus type\"); return NULL; } on_read_error = parse_block_error_action(buf, 1); if (on_read_error < 0) { return NULL; } } if ((devaddr = qemu_opt_get(opts, \"addr\")) != NULL) { if (type != IF_VIRTIO) { error_report(\"addr is not supported by this bus type\"); return NULL; } } /* compute bus and unit according index */ if (index != -1) { if (bus_id != 0 || unit_id != -1) { error_report(\"index cannot be used with bus and unit\"); return NULL; } bus_id = drive_index_to_bus_id(type, index); unit_id = drive_index_to_unit_id(type, index); } /* if user doesn't specify a unit_id, * try to find the first free */ if (unit_id == -1) { unit_id = 0; while (drive_get(type, bus_id, unit_id) != NULL) { unit_id++; if (max_devs && unit_id >= max_devs) { unit_id -= max_devs; bus_id++; } } } /* check unit id */ if (max_devs && unit_id >= max_devs) { error_report(\"unit %d too big (max is %d)\", unit_id, max_devs - 1); return NULL; } /* * catch multiple definitions */ if (drive_get(type, bus_id, unit_id) != NULL) { error_report(\"drive with bus=%d, unit=%d (index=%d) exists\", bus_id, unit_id, index); return NULL; } /* init */ dinfo = g_malloc0(sizeof(*dinfo)); if ((buf = qemu_opts_id(opts)) != NULL) { dinfo->id = g_strdup(buf); } else { /* no id supplied -> create one */ dinfo->id = g_malloc0(32); if (type == IF_IDE || type == IF_SCSI) mediastr = (media == MEDIA_CDROM) ? \"-cd\" : \"-hd\"; if (max_devs) snprintf(dinfo->id, 32, \"%s%i%s%i\", if_name[type], bus_id, mediastr, unit_id); else snprintf(dinfo->id, 32, \"%s%s%i\", if_name[type], mediastr, unit_id); } dinfo->bdrv = bdrv_new(dinfo->id); dinfo->bdrv->open_flags = snapshot ? 
BDRV_O_SNAPSHOT : 0; dinfo->bdrv->read_only = ro; dinfo->devaddr = devaddr; dinfo->type = type; dinfo->bus = bus_id; dinfo->unit = unit_id; dinfo->cyls = cyls; dinfo->heads = heads; dinfo->secs = secs; dinfo->trans = translation; dinfo->opts = opts; dinfo->refcount = 1; dinfo->serial = serial; QTAILQ_INSERT_TAIL(&drives, dinfo, next); bdrv_set_on_error(dinfo->bdrv, on_read_error, on_write_error); /* disk I/O throttling */ bdrv_set_io_limits(dinfo->bdrv, &io_limits); switch(type) { case IF_IDE: case IF_SCSI: case IF_XEN: case IF_NONE: dinfo->media_cd = media == MEDIA_CDROM; break; case IF_SD: case IF_FLOPPY: case IF_PFLASH: case IF_MTD: break; case IF_VIRTIO: /* add virtio block device */ opts = qemu_opts_create_nofail(qemu_find_opts(\"device\")); if (arch_type == QEMU_ARCH_S390X) { qemu_opt_set(opts, \"driver\", \"virtio-blk-s390\"); } else { qemu_opt_set(opts, \"driver\", \"virtio-blk-pci\"); } qemu_opt_set(opts, \"drive\", dinfo->id); if (devaddr) qemu_opt_set(opts, \"addr\", devaddr); break; default: abort(); } if (!file || !*file) { return dinfo; } if (snapshot) { /* always use cache=unsafe with snapshot */ bdrv_flags &= ~BDRV_O_CACHE_MASK; bdrv_flags |= (BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH); } if (copy_on_read) { bdrv_flags |= BDRV_O_COPY_ON_READ; } if (runstate_check(RUN_STATE_INMIGRATE)) { bdrv_flags |= BDRV_O_INCOMING; } if (media == MEDIA_CDROM) { /* CDROM is fine for any interface, don't check. */ ro = 1; } else if (ro == 1) { if (type != IF_SCSI && type != IF_VIRTIO && type != IF_FLOPPY && type != IF_NONE && type != IF_PFLASH) { error_report(\"readonly not supported by this bus type\"); goto err; } } bdrv_flags |= ro ? 0 : BDRV_O_RDWR; if (ro && copy_on_read) { error_report(\"warning: disabling copy_on_read on readonly drive\"); } ret = bdrv_open(dinfo->bdrv, file, NULL, bdrv_flags, drv); if (ret < 0) { if (ret == -EMEDIUMTYPE) { error_report(\"could not open disk image %s: not in %s format\", file, drv->format_name); } else { error_report(\"could not open disk image %s: %s\", file, strerror(-ret)); } goto err; } if (bdrv_key_required(dinfo->bdrv)) autostart = 0; return dinfo; err: bdrv_delete(dinfo->bdrv); g_free(dinfo->id); QTAILQ_REMOVE(&drives, dinfo, next); g_free(dinfo); return NULL; }"} {"target": 1, "idx": 22815, "func": "static const char *keyval_parse_one(QDict *qdict, const char *params, const char *implied_key, Error **errp) { const char *key, *key_end, *s; size_t len; char key_in_cur[128]; QDict *cur; int ret; QObject *next; QString *val; key = params; len = strcspn(params, \"=,\"); if (implied_key && len && key[len] != '=') { /* Desugar implied key */ key = implied_key; len = strlen(implied_key); } key_end = key + len; /* * Loop over key fragments: @s points to current fragment, it * applies to @cur. @key_in_cur[] holds the previous fragment. */ cur = qdict; s = key; for (;;) { ret = parse_qapi_name(s, false); len = ret < 0 ? 0 : ret; assert(s + len <= key_end); if (!len || (s + len < key_end && s[len] != '.')) { assert(key != implied_key); error_setg(errp, \"Invalid parameter '%.*s'\", (int)(key_end - key), key); return NULL; } if (len >= sizeof(key_in_cur)) { assert(key != implied_key); error_setg(errp, \"Parameter%s '%.*s' is too long\", s != key || s + len != key_end ? 
\" fragment\" : \"\", (int)len, s); return NULL; } if (s != key) { next = keyval_parse_put(cur, key_in_cur, NULL, key, s - 1, errp); if (!next) { return NULL; } cur = qobject_to_qdict(next); assert(cur); } memcpy(key_in_cur, s, len); key_in_cur[len] = 0; s += len; if (*s != '.') { break; } s++; } if (key == implied_key) { assert(!*s); s = params; } else { if (*s != '=') { error_setg(errp, \"Expected '=' after parameter '%.*s'\", (int)(s - key), key); return NULL; } s++; } val = qstring_new(); for (;;) { if (!*s) { break; } else if (*s == ',') { s++; if (*s != ',') { break; } } qstring_append_chr(val, *s++); } if (!keyval_parse_put(cur, key_in_cur, val, key, key_end, errp)) { return NULL; } return s; }"} {"target": 0, "idx": 22826, "func": "static void mdct_test(AC3MDCTContext *mdct, AVLFG *lfg) { int16_t input[MDCT_SAMPLES]; int32_t output[AC3_MAX_COEFS]; float input1[MDCT_SAMPLES]; float output1[AC3_MAX_COEFS]; float s, a, err, e, emax; int i, k, n; for (i = 0; i < MDCT_SAMPLES; i++) { input[i] = (av_lfg_get(lfg) % 65535 - 32767) * 9 / 10; input1[i] = input[i]; } mdct512(mdct, output, input); /* do it by hand */ for (k = 0; k < AC3_MAX_COEFS; k++) { s = 0; for (n = 0; n < MDCT_SAMPLES; n++) { a = (2*M_PI*(2*n+1+MDCT_SAMPLES/2)*(2*k+1) / (4 * MDCT_SAMPLES)); s += input1[n] * cos(a); } output1[k] = -2 * s / MDCT_SAMPLES; } err = 0; emax = 0; for (i = 0; i < AC3_MAX_COEFS; i++) { av_log(NULL, AV_LOG_DEBUG, \"%3d: %7d %7.0f\\n\", i, output[i], output1[i]); e = output[i] - output1[i]; if (e > emax) emax = e; err += e * e; } av_log(NULL, AV_LOG_DEBUG, \"err2=%f emax=%f\\n\", err / AC3_MAX_COEFS, emax); }"} {"target": 1, "idx": 22833, "func": "static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h) { // attempt to keep aspect during typical resolution switches if (!sar.num) sar = (AVRational){1, 1}; sar = av_mul_q(sar, (AVRational){new_h * old_w, new_w * old_h}); return sar; }"} {"target": 1, "idx": 22834, "func": "static uint32_t qpci_pc_config_readl(QPCIBus *bus, int devfn, uint8_t offset) { outl(0xcf8, (1 << 31) | (devfn << 8) | offset); return inl(0xcfc); }"} {"target": 1, "idx": 22844, "func": "static int decode_block(BinkAudioContext *s, float **out, int use_dct) { int ch, i, j, k; float q, quant[25]; int width, coeff; GetBitContext *gb = &s->gb; if (use_dct) skip_bits(gb, 2); for (ch = 0; ch < s->channels; ch++) { FFTSample *coeffs = out[ch]; if (s->version_b) { if (get_bits_left(gb) < 64) return AVERROR_INVALIDDATA; coeffs[0] = av_int2float(get_bits_long(gb, 32)) * s->root; coeffs[1] = av_int2float(get_bits_long(gb, 32)) * s->root; } else { if (get_bits_left(gb) < 58) return AVERROR_INVALIDDATA; coeffs[0] = get_float(gb) * s->root; coeffs[1] = get_float(gb) * s->root; } if (get_bits_left(gb) < s->num_bands * 8) return AVERROR_INVALIDDATA; for (i = 0; i < s->num_bands; i++) { int value = get_bits(gb, 8); quant[i] = quant_table[FFMIN(value, 95)]; } k = 0; q = quant[0]; // parse coefficients i = 2; while (i < s->frame_len) { if (s->version_b) { j = i + 16; } else { int v; GET_BITS_SAFE(v, 1); if (v) { GET_BITS_SAFE(v, 4); j = i + rle_length_tab[v] * 8; } else { j = i + 8; } } j = FFMIN(j, s->frame_len); GET_BITS_SAFE(width, 4); if (width == 0) { memset(coeffs + i, 0, (j - i) * sizeof(*coeffs)); i = j; while (s->bands[k] < i) q = quant[k++]; } else { while (i < j) { if (s->bands[k] == i) q = quant[k++]; GET_BITS_SAFE(coeff, width); if (coeff) { int v; GET_BITS_SAFE(v, 1); if (v) coeffs[i] = -q * coeff; else coeffs[i] = q * coeff; } else { coeffs[i] = 0.0f; } 
i++; } } } if (CONFIG_BINKAUDIO_DCT_DECODER && use_dct) { coeffs[0] /= 0.5; s->trans.dct.dct_calc(&s->trans.dct, coeffs); } else if (CONFIG_BINKAUDIO_RDFT_DECODER) s->trans.rdft.rdft_calc(&s->trans.rdft, coeffs); } for (ch = 0; ch < s->channels; ch++) { int j; int count = s->overlap_len * s->channels; if (!s->first) { j = ch; for (i = 0; i < s->overlap_len; i++, j += s->channels) out[ch][i] = (s->previous[ch][i] * (count - j) + out[ch][i] * j) / count; } memcpy(s->previous[ch], &out[ch][s->frame_len - s->overlap_len], s->overlap_len * sizeof(*s->previous[ch])); } s->first = 0; return 0; }"} {"target": 1, "idx": 22875, "func": "static int load_normal_reset(S390CPU *cpu) { S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); pause_all_vcpus(); cpu_synchronize_all_states(); cpu_reset_all(); io_subsystem_reset(); scc->initial_cpu_reset(CPU(cpu)); scc->load_normal(CPU(cpu)); cpu_synchronize_all_post_reset(); resume_all_vcpus(); return 0; }"} {"target": 1, "idx": 22876, "func": "static void v9fs_post_lcreate(V9fsState *s, V9fsLcreateState *vs, int err) { if (err == 0) { v9fs_string_copy(&vs->fidp->path, &vs->fullname); stat_to_qid(&vs->stbuf, &vs->qid); vs->offset += pdu_marshal(vs->pdu, vs->offset, \"Qd\", &vs->qid, &vs->iounit); err = vs->offset; } else { vs->fidp->fid_type = P9_FID_NONE; close(vs->fidp->fs.fd); err = -errno; } complete_pdu(s, vs->pdu, err); v9fs_string_free(&vs->name); v9fs_string_free(&vs->fullname); qemu_free(vs); }"} {"target": 1, "idx": 22906, "func": "static void get_xbzrle_cache_stats(MigrationInfo *info) { if (migrate_use_xbzrle()) { info->has_xbzrle_cache = true; info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache)); info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size(); info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred(); info->xbzrle_cache->pages = xbzrle_mig_pages_transferred(); info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss(); info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate(); info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow(); } }"} {"target": 1, "idx": 22931, "func": "static PowerPCCPU *ppc440_init_xilinx(ram_addr_t *ram_size, int do_init, const char *cpu_model, uint32_t sysclk) { PowerPCCPU *cpu; CPUPPCState *env; qemu_irq *irqs; cpu = POWERPC_CPU(cpu_generic_init(TYPE_POWERPC_CPU, cpu_model)); if (cpu == NULL) { fprintf(stderr, \"Unable to initialize CPU!\\n\"); exit(1); } env = &cpu->env; ppc_booke_timers_init(cpu, sysclk, 0/* no flags */); ppc_dcr_init(env, NULL, NULL); /* interrupt controller */ irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB); irqs[PPCUIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]; irqs[PPCUIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]; ppcuic_init(env, irqs, 0x0C0, 0, 1); return cpu; }"} {"target": 1, "idx": 22944, "func": "static void create_default_qtables(uint8_t *qtables, uint8_t q) { int factor = q; int i; factor = av_clip(q, 1, 99); if (q < 50) q = 5000 / factor; else q = 200 - factor * 2; for (i = 0; i < 128; i++) { int val = (default_quantizers[i] * q + 50) / 100; /* Limit the quantizers to 1 <= q <= 255. 
*/ val = av_clip(val, 1, 255); qtables[i] = val; } }"} {"target": 0, "idx": 22949, "func": "void ff_af_queue_close(AudioFrameQueue *afq) { /* remove/free any remaining frames */ while (afq->frame_queue) delete_next_frame(afq); memset(afq, 0, sizeof(*afq)); }"} {"target": 1, "idx": 22955, "func": "static void lumRangeToJpeg16_c(int16_t *_dst, int width) { int i; int32_t *dst = (int32_t *) _dst; for (i = 0; i < width; i++) dst[i] = (FFMIN(dst[i],30189<<4)*19077 - (39057361<<4))>>14; }"} {"target": 1, "idx": 22956, "func": "static void fill_coding_method_array(sb_int8_array tone_level_idx, sb_int8_array tone_level_idx_temp, sb_int8_array coding_method, int nb_channels, int c, int superblocktype_2_3, int cm_table_select) { int ch, sb, j; int tmp, acc, esp_40, comp; int add1, add2, add3, add4; int64_t multres; if (!superblocktype_2_3) { /* This case is untested, no samples available */ SAMPLES_NEEDED for (ch = 0; ch < nb_channels; ch++) for (sb = 0; sb < 30; sb++) { for (j = 1; j < 63; j++) { // The loop only iterates to 63 so the code doesn't overflow the buffer add1 = tone_level_idx[ch][sb][j] - 10; if (add1 < 0) add1 = 0; add2 = add3 = add4 = 0; if (sb > 1) { add2 = tone_level_idx[ch][sb - 2][j] + tone_level_idx_offset_table[sb][0] - 6; if (add2 < 0) add2 = 0; } if (sb > 0) { add3 = tone_level_idx[ch][sb - 1][j] + tone_level_idx_offset_table[sb][1] - 6; if (add3 < 0) add3 = 0; } if (sb < 29) { add4 = tone_level_idx[ch][sb + 1][j] + tone_level_idx_offset_table[sb][3] - 6; if (add4 < 0) add4 = 0; } tmp = tone_level_idx[ch][sb][j + 1] * 2 - add4 - add3 - add2 - add1; if (tmp < 0) tmp = 0; tone_level_idx_temp[ch][sb][j + 1] = tmp & 0xff; } tone_level_idx_temp[ch][sb][0] = tone_level_idx_temp[ch][sb][1]; } acc = 0; for (ch = 0; ch < nb_channels; ch++) for (sb = 0; sb < 30; sb++) for (j = 0; j < 64; j++) acc += tone_level_idx_temp[ch][sb][j]; multres = 0x66666667 * (acc * 10); esp_40 = (multres >> 32) / 8 + ((multres & 0xffffffff) >> 31); for (ch = 0; ch < nb_channels; ch++) for (sb = 0; sb < 30; sb++) for (j = 0; j < 64; j++) { comp = tone_level_idx_temp[ch][sb][j]* esp_40 * 10; if (comp < 0) comp += 0xff; comp /= 256; // signed shift switch(sb) { case 0: if (comp < 30) comp = 30; comp += 15; break; case 1: if (comp < 24) comp = 24; comp += 10; break; case 2: case 3: case 4: if (comp < 16) comp = 16; } if (comp <= 5) tmp = 0; else if (comp <= 10) tmp = 10; else if (comp <= 16) tmp = 16; else if (comp <= 24) tmp = -1; else tmp = 0; coding_method[ch][sb][j] = ((tmp & 0xfffa) + 30 )& 0xff; } for (sb = 0; sb < 30; sb++) fix_coding_method_array(sb, nb_channels, coding_method); for (ch = 0; ch < nb_channels; ch++) for (sb = 0; sb < 30; sb++) for (j = 0; j < 64; j++) if (sb >= 10) { if (coding_method[ch][sb][j] < 10) coding_method[ch][sb][j] = 10; } else { if (sb >= 2) { if (coding_method[ch][sb][j] < 16) coding_method[ch][sb][j] = 16; } else { if (coding_method[ch][sb][j] < 30) coding_method[ch][sb][j] = 30; } } } else { // superblocktype_2_3 != 0 for (ch = 0; ch < nb_channels; ch++) for (sb = 0; sb < 30; sb++) for (j = 0; j < 64; j++) coding_method[ch][sb][j] = coding_method_table[cm_table_select][sb]; } }"} {"target": 1, "idx": 22959, "func": "static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2, int width, int height, int stride, Transform *t) { int x, y; IntMotionVector mv = {0, 0}; int counts[128][128]; int count_max_value = 0; int contrast; int pos; double *angles = av_malloc(sizeof(*angles) * width * height / (16 * deshake->blocksize)); int center_x = 0, center_y = 0; 
double p_x, p_y; // Reset counts to zero for (x = 0; x < deshake->rx * 2 + 1; x++) { for (y = 0; y < deshake->ry * 2 + 1; y++) { counts[x][y] = 0; } } pos = 0; // Find motion for every block and store the motion vector in the counts for (y = deshake->ry; y < height - deshake->ry - (deshake->blocksize * 2); y += deshake->blocksize * 2) { // We use a width of 16 here to match the libavcodec sad functions for (x = deshake->rx; x < width - deshake->rx - 16; x += 16) { // If the contrast is too low, just skip this block as it probably // won't be very useful to us. contrast = block_contrast(src2, x, y, stride, deshake->blocksize); if (contrast > deshake->contrast) { //av_log(NULL, AV_LOG_ERROR, \"%d\\n\", contrast); find_block_motion(deshake, src1, src2, x, y, stride, &mv); if (mv.x != -1 && mv.y != -1) { counts[mv.x + deshake->rx][mv.y + deshake->ry] += 1; if (x > deshake->rx && y > deshake->ry) angles[pos++] = block_angle(x, y, 0, 0, &mv); center_x += mv.x; center_y += mv.y; } } } } pos = FFMAX(1, pos); center_x /= pos; center_y /= pos; t->angle = clean_mean(angles, pos); if (t->angle < 0.001) t->angle = 0; // Find the most common motion vector in the frame and use it as the gmv for (y = deshake->ry * 2; y >= 0; y--) { for (x = 0; x < deshake->rx * 2 + 1; x++) { //av_log(NULL, AV_LOG_ERROR, \"%5d \", counts[x][y]); if (counts[x][y] > count_max_value) { t->vector.x = x - deshake->rx; t->vector.y = y - deshake->ry; count_max_value = counts[x][y]; } } //av_log(NULL, AV_LOG_ERROR, \"\\n\"); } p_x = (center_x - width / 2); p_y = (center_y - height / 2); t->vector.x += (cos(t->angle)-1)*p_x - sin(t->angle)*p_y; t->vector.y += sin(t->angle)*p_x + (cos(t->angle)-1)*p_y; // Clamp max shift & rotation? t->vector.x = av_clipf(t->vector.x, -deshake->rx * 2, deshake->rx * 2); t->vector.y = av_clipf(t->vector.y, -deshake->ry * 2, deshake->ry * 2); t->angle = av_clipf(t->angle, -0.1, 0.1); //av_log(NULL, AV_LOG_ERROR, \"%d x %d\\n\", avg->x, avg->y); av_free(angles); }"} {"target": 1, "idx": 22966, "func": "static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt) { DiracContext *s = avctx->priv_data; AVFrame *picture = data; uint8_t *buf = pkt->data; int buf_size = pkt->size; int i, data_unit_size, buf_idx = 0; int ret; /* release unused frames */ for (i = 0; i < MAX_FRAMES; i++) if (s->all_frames[i].avframe->data[0] && !s->all_frames[i].avframe->reference) { av_frame_unref(s->all_frames[i].avframe); memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated)); } s->current_picture = NULL; *got_frame = 0; /* end of stream, so flush delayed pics */ if (buf_size == 0) return get_delayed_pic(s, (AVFrame *)data, got_frame); for (;;) { /*[DIRAC_STD] Here starts the code from parse_info() defined in 9.6 [DIRAC_STD] PARSE_INFO_PREFIX = \"BBCD\" as defined in ISO/IEC 646 BBCD start code search */ for (; buf_idx + DATA_UNIT_HEADER_SIZE < buf_size; buf_idx++) { if (buf[buf_idx ] == 'B' && buf[buf_idx+1] == 'B' && buf[buf_idx+2] == 'C' && buf[buf_idx+3] == 'D') break; } /* BBCD found or end of data */ if (buf_idx + DATA_UNIT_HEADER_SIZE >= buf_size) break; data_unit_size = AV_RB32(buf+buf_idx+5); if (buf_idx + data_unit_size > buf_size || !data_unit_size) { if(buf_idx + data_unit_size > buf_size) av_log(s->avctx, AV_LOG_ERROR, \"Data unit with size %d is larger than input buffer, discarding\\n\", data_unit_size); buf_idx += 4; continue; } /* [DIRAC_STD] dirac_decode_data_unit makes reference to the while defined in 9.3 inside the function parse_sequence() */ if 
(dirac_decode_data_unit(avctx, buf+buf_idx, data_unit_size)) { av_log(s->avctx, AV_LOG_ERROR,\"Error in dirac_decode_data_unit\\n\"); return -1; } buf_idx += data_unit_size; } if (!s->current_picture) return buf_size; if (s->current_picture->avframe->display_picture_number > s->frame_number) { DiracFrame *delayed_frame = remove_frame(s->delay_frames, s->frame_number); s->current_picture->avframe->reference |= DELAYED_PIC_REF; if (add_frame(s->delay_frames, MAX_DELAY, s->current_picture)) { int min_num = s->delay_frames[0]->avframe->display_picture_number; /* Too many delayed frames, so we display the frame with the lowest pts */ av_log(avctx, AV_LOG_ERROR, \"Delay frame overflow\\n\"); for (i = 1; s->delay_frames[i]; i++) if (s->delay_frames[i]->avframe->display_picture_number < min_num) min_num = s->delay_frames[i]->avframe->display_picture_number; delayed_frame = remove_frame(s->delay_frames, min_num); add_frame(s->delay_frames, MAX_DELAY, s->current_picture); } if (delayed_frame) { delayed_frame->avframe->reference ^= DELAYED_PIC_REF; if((ret=av_frame_ref(data, delayed_frame->avframe)) < 0) return ret; *got_frame = 1; } } else if (s->current_picture->avframe->display_picture_number == s->frame_number) { /* The right frame at the right time :-) */ if((ret=av_frame_ref(data, s->current_picture->avframe)) < 0) return ret; *got_frame = 1; } if (*got_frame) s->frame_number = picture->display_picture_number + 1; return buf_idx; }"} {"target": 1, "idx": 22969, "func": "static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn, int opsize, TCGv val, TCGv *addrp, ea_what what) { TCGv reg; TCGv result; uint32_t offset; switch ((insn >> 3) & 7) { case 0: /* Data register direct. */ reg = DREG(insn, 0); if (what == EA_STORE) { gen_partset_reg(opsize, reg, val); return store_dummy; } else { return gen_extend(reg, opsize, what == EA_LOADS); } case 1: /* Address register direct. */ reg = AREG(insn, 0); if (what == EA_STORE) { tcg_gen_mov_i32(reg, val); return store_dummy; } else { return gen_extend(reg, opsize, what == EA_LOADS); } case 2: /* Indirect register */ reg = AREG(insn, 0); return gen_ldst(s, opsize, reg, val, what); case 3: /* Indirect postincrement. */ reg = AREG(insn, 0); result = gen_ldst(s, opsize, reg, val, what); /* ??? This is not exception safe. The instruction may still fault after this point. */ if (what == EA_STORE || !addrp) tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize)); return result; case 4: /* Indirect predecrememnt. */ { TCGv tmp; if (addrp && what == EA_STORE) { tmp = *addrp; } else { tmp = gen_lea(env, s, insn, opsize); if (IS_NULL_QREG(tmp)) return tmp; if (addrp) *addrp = tmp; } result = gen_ldst(s, opsize, tmp, val, what); /* ??? This is not exception safe. The instruction may still fault after this point. */ if (what == EA_STORE || !addrp) { reg = AREG(insn, 0); tcg_gen_mov_i32(reg, tmp); } } return result; case 5: /* Indirect displacement. */ case 6: /* Indirect index + displacement. */ return gen_ea_once(env, s, insn, opsize, val, addrp, what); case 7: /* Other */ switch (insn & 7) { case 0: /* Absolute short. */ case 1: /* Absolute long. */ case 2: /* pc displacement */ case 3: /* pc index+displacement. */ return gen_ea_once(env, s, insn, opsize, val, addrp, what); case 4: /* Immediate. */ /* Sign extend values for consistency. 
*/ switch (opsize) { case OS_BYTE: if (what == EA_LOADS) { offset = cpu_ldsb_code(env, s->pc + 1); } else { offset = cpu_ldub_code(env, s->pc + 1); } s->pc += 2; break; case OS_WORD: if (what == EA_LOADS) { offset = cpu_ldsw_code(env, s->pc); } else { offset = cpu_lduw_code(env, s->pc); } s->pc += 2; break; case OS_LONG: offset = read_im32(env, s); break; default: qemu_assert(0, \"Bad immediate operand\"); } return tcg_const_i32(offset); default: return NULL_QREG; } } /* Should never happen. */ return NULL_QREG; }"} {"target": 1, "idx": 22970, "func": "static av_cold void nvenc_setup_rate_control(AVCodecContext *avctx) { NvencContext *ctx = avctx->priv_data; if (avctx->bit_rate > 0) { ctx->encode_config.rcParams.averageBitRate = avctx->bit_rate; } else if (ctx->encode_config.rcParams.averageBitRate > 0) { ctx->encode_config.rcParams.maxBitRate = ctx->encode_config.rcParams.averageBitRate; } if (avctx->rc_max_rate > 0) ctx->encode_config.rcParams.maxBitRate = avctx->rc_max_rate; if (ctx->rc < 0) { if (ctx->flags & NVENC_ONE_PASS) ctx->twopass = 0; if (ctx->flags & NVENC_TWO_PASSES) ctx->twopass = 1; if (ctx->twopass < 0) ctx->twopass = (ctx->flags & NVENC_LOWLATENCY) != 0; if (ctx->cbr) { if (ctx->twopass) { ctx->rc = NV_ENC_PARAMS_RC_2_PASS_QUALITY; } else { ctx->rc = NV_ENC_PARAMS_RC_CBR; } } else if (avctx->global_quality > 0) { ctx->rc = NV_ENC_PARAMS_RC_CONSTQP; } else if (ctx->twopass) { ctx->rc = NV_ENC_PARAMS_RC_2_PASS_VBR; } else if (avctx->qmin >= 0 && avctx->qmax >= 0) { ctx->rc = NV_ENC_PARAMS_RC_VBR_MINQP; } } if (ctx->flags & NVENC_LOSSLESS) { set_lossless(avctx); } else if (ctx->rc > 0) { nvenc_override_rate_control(avctx); } else { ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_VBR; set_vbr(avctx); } if (avctx->rc_buffer_size > 0) { ctx->encode_config.rcParams.vbvBufferSize = avctx->rc_buffer_size; } else if (ctx->encode_config.rcParams.averageBitRate > 0) { ctx->encode_config.rcParams.vbvBufferSize = 2 * ctx->encode_config.rcParams.averageBitRate; } }"} {"target": 1, "idx": 22976, "func": "static av_cold int vp3_decode_end(AVCodecContext *avctx) { Vp3DecodeContext *s = avctx->priv_data; int i; if (avctx->is_copy && !s->current_frame.data[0]) return 0; av_free(s->superblock_coding); av_free(s->all_fragments); av_free(s->coded_fragment_list[0]); av_free(s->dct_tokens_base); av_free(s->superblock_fragments); av_free(s->macroblock_coding); av_free(s->motion_val[0]); av_free(s->motion_val[1]); av_free(s->edge_emu_buffer); if (avctx->is_copy) return 0; for (i = 0; i < 16; i++) { free_vlc(&s->dc_vlc[i]); free_vlc(&s->ac_vlc_1[i]); free_vlc(&s->ac_vlc_2[i]); free_vlc(&s->ac_vlc_3[i]); free_vlc(&s->ac_vlc_4[i]); } free_vlc(&s->superblock_run_length_vlc); free_vlc(&s->fragment_run_length_vlc); free_vlc(&s->mode_code_vlc); free_vlc(&s->motion_vector_vlc); /* release all frames */ if (s->golden_frame.data[0]) ff_thread_release_buffer(avctx, &s->golden_frame); if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY) ff_thread_release_buffer(avctx, &s->last_frame); /* no need to release the current_frame since it will always be pointing * to the same frame as either the golden or last frame */ return 0; }"} {"target": 1, "idx": 22980, "func": "static void vnc_disconnect_finish(VncState *vs) { vnc_qmp_event(vs, QEVENT_VNC_DISCONNECTED); buffer_free(&vs->input); buffer_free(&vs->output); qobject_decref(vs->info); #ifdef CONFIG_VNC_TLS vnc_tls_client_cleanup(vs); #endif /* CONFIG_VNC_TLS */ #ifdef CONFIG_VNC_SASL vnc_sasl_client_cleanup(vs); #endif /* 
CONFIG_VNC_SASL */ audio_del(vs); QTAILQ_REMOVE(&vs->vd->clients, vs, next); if (QTAILQ_EMPTY(&vs->vd->clients)) { dcl->idle = 1; } qemu_remove_mouse_mode_change_notifier(&vs->mouse_mode_notifier); vnc_remove_timer(vs->vd); if (vs->vd->lock_key_sync) qemu_remove_led_event_handler(vs->led); qemu_free(vs); }"} {"target": 0, "idx": 22994, "func": "void qemu_opts_print(QemuOpts *opts) { QemuOpt *opt; QemuOptDesc *desc = opts->list->desc; if (desc[0].name == NULL) { QTAILQ_FOREACH(opt, &opts->head, next) { printf(\"%s=\\\"%s\\\" \", opt->name, opt->str); } return; } for (; desc && desc->name; desc++) { const char *value; QemuOpt *opt = qemu_opt_find(opts, desc->name); value = opt ? opt->str : desc->def_value_str; if (!value) { continue; } if (desc->type == QEMU_OPT_STRING) { printf(\"%s='%s' \", desc->name, value); } else if ((desc->type == QEMU_OPT_SIZE || desc->type == QEMU_OPT_NUMBER) && opt) { printf(\"%s=%\" PRId64 \" \", desc->name, opt->value.uint); } else { printf(\"%s=%s \", desc->name, value); } } }"} {"target": 0, "idx": 23016, "func": "static uint32_t nvic_readl(nvic_state *s, uint32_t offset) { ARMCPU *cpu; uint32_t val; int irq; switch (offset) { case 4: /* Interrupt Control Type. */ return (s->num_irq / 32) - 1; case 0x10: /* SysTick Control and Status. */ val = s->systick.control; s->systick.control &= ~SYSTICK_COUNTFLAG; return val; case 0x14: /* SysTick Reload Value. */ return s->systick.reload; case 0x18: /* SysTick Current Value. */ { int64_t t; if ((s->systick.control & SYSTICK_ENABLE) == 0) return 0; t = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); if (t >= s->systick.tick) return 0; val = ((s->systick.tick - (t + 1)) / systick_scale(s)) + 1; /* The interrupt in triggered when the timer reaches zero. However the counter is not reloaded until the next clock tick. This is a hack to return zero during the first tick. */ if (val > s->systick.reload) val = 0; return val; } case 0x1c: /* SysTick Calibration Value. */ return 10000; case 0xd00: /* CPUID Base. */ cpu = ARM_CPU(current_cpu); return cpu->env.cp15.c0_cpuid; case 0xd04: /* Interrupt Control State. */ /* VECTACTIVE */ val = s->gic.running_irq[0]; if (val == 1023) { val = 0; } else if (val >= 32) { val -= 16; } /* RETTOBASE */ if (s->gic.running_irq[0] == 1023 || s->gic.last_active[s->gic.running_irq[0]][0] == 1023) { val |= (1 << 11); } /* VECTPENDING */ if (s->gic.current_pending[0] != 1023) val |= (s->gic.current_pending[0] << 12); /* ISRPENDING */ for (irq = 32; irq < s->num_irq; irq++) { if (s->gic.irq_state[irq].pending) { val |= (1 << 22); break; } } /* PENDSTSET */ if (s->gic.irq_state[ARMV7M_EXCP_SYSTICK].pending) val |= (1 << 26); /* PENDSVSET */ if (s->gic.irq_state[ARMV7M_EXCP_PENDSV].pending) val |= (1 << 28); /* NMIPENDSET */ if (s->gic.irq_state[ARMV7M_EXCP_NMI].pending) val |= (1 << 31); return val; case 0xd08: /* Vector Table Offset. */ cpu = ARM_CPU(current_cpu); return cpu->env.v7m.vecbase; case 0xd0c: /* Application Interrupt/Reset Control. */ return 0xfa05000; case 0xd10: /* System Control. */ /* TODO: Implement SLEEPONEXIT. */ return 0; case 0xd14: /* Configuration Control. */ /* TODO: Implement Configuration Control bits. */ return 0; case 0xd24: /* System Handler Status. 
*/ val = 0; if (s->gic.irq_state[ARMV7M_EXCP_MEM].active) val |= (1 << 0); if (s->gic.irq_state[ARMV7M_EXCP_BUS].active) val |= (1 << 1); if (s->gic.irq_state[ARMV7M_EXCP_USAGE].active) val |= (1 << 3); if (s->gic.irq_state[ARMV7M_EXCP_SVC].active) val |= (1 << 7); if (s->gic.irq_state[ARMV7M_EXCP_DEBUG].active) val |= (1 << 8); if (s->gic.irq_state[ARMV7M_EXCP_PENDSV].active) val |= (1 << 10); if (s->gic.irq_state[ARMV7M_EXCP_SYSTICK].active) val |= (1 << 11); if (s->gic.irq_state[ARMV7M_EXCP_USAGE].pending) val |= (1 << 12); if (s->gic.irq_state[ARMV7M_EXCP_MEM].pending) val |= (1 << 13); if (s->gic.irq_state[ARMV7M_EXCP_BUS].pending) val |= (1 << 14); if (s->gic.irq_state[ARMV7M_EXCP_SVC].pending) val |= (1 << 15); if (s->gic.irq_state[ARMV7M_EXCP_MEM].enabled) val |= (1 << 16); if (s->gic.irq_state[ARMV7M_EXCP_BUS].enabled) val |= (1 << 17); if (s->gic.irq_state[ARMV7M_EXCP_USAGE].enabled) val |= (1 << 18); return val; case 0xd28: /* Configurable Fault Status. */ /* TODO: Implement Fault Status. */ qemu_log_mask(LOG_UNIMP, \"Configurable Fault Status unimplemented\\n\"); return 0; case 0xd2c: /* Hard Fault Status. */ case 0xd30: /* Debug Fault Status. */ case 0xd34: /* Mem Manage Address. */ case 0xd38: /* Bus Fault Address. */ case 0xd3c: /* Aux Fault Status. */ /* TODO: Implement fault status registers. */ qemu_log_mask(LOG_UNIMP, \"Fault status registers unimplemented\\n\"); return 0; case 0xd40: /* PFR0. */ return 0x00000030; case 0xd44: /* PRF1. */ return 0x00000200; case 0xd48: /* DFR0. */ return 0x00100000; case 0xd4c: /* AFR0. */ return 0x00000000; case 0xd50: /* MMFR0. */ return 0x00000030; case 0xd54: /* MMFR1. */ return 0x00000000; case 0xd58: /* MMFR2. */ return 0x00000000; case 0xd5c: /* MMFR3. */ return 0x00000000; case 0xd60: /* ISAR0. */ return 0x01141110; case 0xd64: /* ISAR1. */ return 0x02111000; case 0xd68: /* ISAR2. */ return 0x21112231; case 0xd6c: /* ISAR3. */ return 0x01111110; case 0xd70: /* ISAR4. */ return 0x01310102; /* TODO: Implement debug registers. 
*/ default: qemu_log_mask(LOG_GUEST_ERROR, \"NVIC: Bad read offset 0x%x\\n\", offset); return 0; } }"} {"target": 0, "idx": 23049, "func": "static int gxf_write_header(AVFormatContext *s) { AVIOContext *pb = s->pb; GXFContext *gxf = s->priv_data; GXFStreamContext *vsc = NULL; uint8_t tracks[255] = {0}; int i, media_info = 0; if (!pb->seekable) { av_log(s, AV_LOG_ERROR, \"gxf muxer does not support streamed output, patch welcome\"); return -1; } gxf->flags |= 0x00080000; /* material is simple clip */ for (i = 0; i < s->nb_streams; ++i) { AVStream *st = s->streams[i]; GXFStreamContext *sc = av_mallocz(sizeof(*sc)); if (!sc) return AVERROR(ENOMEM); st->priv_data = sc; sc->media_type = ff_codec_get_tag(gxf_media_types, st->codecpar->codec_id); if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { if (st->codecpar->codec_id != AV_CODEC_ID_PCM_S16LE) { av_log(s, AV_LOG_ERROR, \"only 16 BIT PCM LE allowed for now\\n\"); return -1; } if (st->codecpar->sample_rate != 48000) { av_log(s, AV_LOG_ERROR, \"only 48000hz sampling rate is allowed\\n\"); return -1; } if (st->codecpar->channels != 1) { av_log(s, AV_LOG_ERROR, \"only mono tracks are allowed\\n\"); return -1; } sc->track_type = 2; sc->sample_rate = st->codecpar->sample_rate; avpriv_set_pts_info(st, 64, 1, sc->sample_rate); sc->sample_size = 16; sc->frame_rate_index = -2; sc->lines_index = -2; sc->fields = -2; gxf->audio_tracks++; gxf->flags |= 0x04000000; /* audio is 16 bit pcm */ media_info = 'A'; } else if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { if (i != 0) { av_log(s, AV_LOG_ERROR, \"video stream must be the first track\\n\"); return -1; } /* FIXME check from time_base ? */ if (st->codecpar->height == 480 || st->codecpar->height == 512) { /* NTSC or NTSC+VBI */ sc->frame_rate_index = 5; sc->sample_rate = 60; gxf->flags |= 0x00000080; gxf->time_base = (AVRational){ 1001, 60000 }; } else if (st->codecpar->height == 576 || st->codecpar->height == 608) { /* PAL or PAL+VBI */ sc->frame_rate_index = 6; sc->media_type++; sc->sample_rate = 50; gxf->flags |= 0x00000040; gxf->time_base = (AVRational){ 1, 50 }; } else { av_log(s, AV_LOG_ERROR, \"unsupported video resolution, \" \"gxf muxer only accepts PAL or NTSC resolutions currently\\n\"); return -1; } avpriv_set_pts_info(st, 64, gxf->time_base.num, gxf->time_base.den); if (gxf_find_lines_index(st) < 0) sc->lines_index = -1; sc->sample_size = st->codecpar->bit_rate; sc->fields = 2; /* interlaced */ vsc = sc; switch (st->codecpar->codec_id) { case AV_CODEC_ID_MJPEG: sc->track_type = 1; gxf->flags |= 0x00004000; media_info = 'J'; break; case AV_CODEC_ID_MPEG1VIDEO: sc->track_type = 9; gxf->mpeg_tracks++; media_info = 'L'; break; case AV_CODEC_ID_MPEG2VIDEO: sc->first_gop_closed = -1; sc->track_type = 4; gxf->mpeg_tracks++; gxf->flags |= 0x00008000; media_info = 'M'; break; case AV_CODEC_ID_DVVIDEO: if (st->codecpar->format == AV_PIX_FMT_YUV422P) { sc->media_type += 2; sc->track_type = 6; gxf->flags |= 0x00002000; media_info = 'E'; } else { sc->track_type = 5; gxf->flags |= 0x00001000; media_info = 'D'; } break; default: av_log(s, AV_LOG_ERROR, \"video codec not supported\\n\"); return -1; } } /* FIXME first 10 audio tracks are 0 to 9 next 22 are A to V */ sc->media_info = media_info<<8 | ('0'+tracks[media_info]++); sc->order = s->nb_streams - st->index; } if (ff_audio_interleave_init(s, GXF_samples_per_frame, (AVRational){ 1, 48000 }) < 0) return -1; gxf_init_timecode_track(&gxf->timecode_track, vsc); gxf->flags |= 0x200000; // time code track is non-drop frame gxf_write_map_packet(s, 0); 
gxf_write_flt_packet(s); gxf_write_umf_packet(s); gxf->packet_count = 3; avio_flush(pb); return 0; }"} {"target": 0, "idx": 23050, "func": "static void qxl_render_update_area_unlocked(PCIQXLDevice *qxl) { VGACommonState *vga = &qxl->vga; int i; if (qxl->guest_primary.resized) { qxl->guest_primary.resized = 0; qxl->guest_primary.data = memory_region_get_ram_ptr(&qxl->vga.vram); qxl_set_rect_to_surface(qxl, &qxl->dirty[0]); qxl->num_dirty_rects = 1; trace_qxl_render_guest_primary_resized( qxl->guest_primary.surface.width, qxl->guest_primary.surface.height, qxl->guest_primary.qxl_stride, qxl->guest_primary.bytes_pp, qxl->guest_primary.bits_pp); if (qxl->guest_primary.qxl_stride > 0) { qemu_free_displaysurface(vga->ds); qemu_create_displaysurface_from(qxl->guest_primary.surface.width, qxl->guest_primary.surface.height, qxl->guest_primary.bits_pp, qxl->guest_primary.abs_stride, qxl->guest_primary.data); } else { qemu_resize_displaysurface(vga->ds, qxl->guest_primary.surface.width, qxl->guest_primary.surface.height); } dpy_gfx_resize(vga->ds); } for (i = 0; i < qxl->num_dirty_rects; i++) { if (qemu_spice_rect_is_empty(qxl->dirty+i)) { break; } qxl_blit(qxl, qxl->dirty+i); dpy_gfx_update(vga->ds, qxl->dirty[i].left, qxl->dirty[i].top, qxl->dirty[i].right - qxl->dirty[i].left, qxl->dirty[i].bottom - qxl->dirty[i].top); } qxl->num_dirty_rects = 0; }"} {"target": 0, "idx": 23052, "func": "static void page_init(void) { /* NOTE: we can always suppose that qemu_host_page_size >= TARGET_PAGE_SIZE */ #ifdef _WIN32 { SYSTEM_INFO system_info; GetSystemInfo(&system_info); qemu_real_host_page_size = system_info.dwPageSize; } #else qemu_real_host_page_size = getpagesize(); #endif if (qemu_host_page_size == 0) { qemu_host_page_size = qemu_real_host_page_size; } if (qemu_host_page_size < TARGET_PAGE_SIZE) { qemu_host_page_size = TARGET_PAGE_SIZE; } qemu_host_page_mask = ~(qemu_host_page_size - 1); #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) { #ifdef HAVE_KINFO_GETVMMAP struct kinfo_vmentry *freep; int i, cnt; freep = kinfo_getvmmap(getpid(), &cnt); if (freep) { mmap_lock(); for (i = 0; i < cnt; i++) { unsigned long startaddr, endaddr; startaddr = freep[i].kve_start; endaddr = freep[i].kve_end; if (h2g_valid(startaddr)) { startaddr = h2g(startaddr) & TARGET_PAGE_MASK; if (h2g_valid(endaddr)) { endaddr = h2g(endaddr); page_set_flags(startaddr, endaddr, PAGE_RESERVED); } else { #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS endaddr = ~0ul; page_set_flags(startaddr, endaddr, PAGE_RESERVED); #endif } } } free(freep); mmap_unlock(); } #else FILE *f; last_brk = (unsigned long)sbrk(0); f = fopen(\"/compat/linux/proc/self/maps\", \"r\"); if (f) { mmap_lock(); do { unsigned long startaddr, endaddr; int n; n = fscanf(f, \"%lx-%lx %*[^\\n]\\n\", &startaddr, &endaddr); if (n == 2 && h2g_valid(startaddr)) { startaddr = h2g(startaddr) & TARGET_PAGE_MASK; if (h2g_valid(endaddr)) { endaddr = h2g(endaddr); } else { endaddr = ~0ul; } page_set_flags(startaddr, endaddr, PAGE_RESERVED); } } while (!feof(f)); fclose(f); mmap_unlock(); } #endif } #endif }"} {"target": 0, "idx": 23066, "func": "static SocketAddress *tcp_build_address(const char *host_port, Error **errp) { InetSocketAddress *iaddr = g_new(InetSocketAddress, 1); SocketAddress *saddr; if (inet_parse(iaddr, host_port, errp)) { qapi_free_InetSocketAddress(iaddr); return NULL; } saddr = g_new0(SocketAddress, 1); saddr->type = SOCKET_ADDRESS_KIND_INET; saddr->u.inet.data = iaddr; return saddr; }"} {"target": 0, "idx": 23074, "func": "static void 
nbd_close(BlockDriverState *bs) { BDRVNBDState *s = bs->opaque; qemu_opts_del(s->socket_opts); nbd_client_session_close(&s->client); }"} {"target": 0, "idx": 23076, "func": "static void reset(DeviceState *d) { sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d); sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); trace_spapr_drc_reset(spapr_drc_index(drc)); g_free(drc->ccs); drc->ccs = NULL; /* immediately upon reset we can safely assume DRCs whose devices * are pending removal can be safely removed, and that they will * subsequently be left in an ISOLATED state. move the DRC to this * state in these cases (which will in turn complete any pending * device removals) */ if (drc->awaiting_release) { drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_ISOLATED); /* generally this should also finalize the removal, but if the device * hasn't yet been configured we normally defer removal under the * assumption that this transition is taking place as part of device * configuration. so check if we're still waiting after this, and * force removal if we are */ if (drc->awaiting_release) { spapr_drc_detach(drc, DEVICE(drc->dev), NULL); } /* non-PCI devices may be awaiting a transition to UNUSABLE */ if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI && drc->awaiting_release) { drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_UNUSABLE); } } }"} {"target": 0, "idx": 23088, "func": "int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr, target_ulong len, int type) { struct kvm_sw_breakpoint *bp; CPUState *env; int err; if (type == GDB_BREAKPOINT_SW) { bp = kvm_find_sw_breakpoint(current_env, addr); if (!bp) return -ENOENT; if (bp->use_count > 1) { bp->use_count--; return 0; } err = kvm_arch_remove_sw_breakpoint(current_env, bp); if (err) return err; QTAILQ_REMOVE(¤t_env->kvm_state->kvm_sw_breakpoints, bp, entry); qemu_free(bp); } else { err = kvm_arch_remove_hw_breakpoint(addr, len, type); if (err) return err; } for (env = first_cpu; env != NULL; env = env->next_cpu) { err = kvm_update_guest_debug(env, 0); if (err) return err; } return 0; }"} {"target": 0, "idx": 23104, "func": "void helper_fcmp_gt_DT(CPUSH4State *env, float64 t0, float64 t1) { int relation; set_float_exception_flags(0, &env->fp_status); relation = float64_compare(t0, t1, &env->fp_status); if (unlikely(relation == float_relation_unordered)) { update_fpscr(env, GETPC()); } else { env->sr_t = (relation == float_relation_greater); } }"} {"target": 0, "idx": 23105, "func": "static int img_info(int argc, char **argv) { int c; OutputFormat output_format = OFORMAT_HUMAN; const char *filename, *fmt, *output; BlockDriverState *bs; ImageInfo *info; fmt = NULL; output = NULL; for(;;) { int option_index = 0; static const struct option long_options[] = { {\"help\", no_argument, 0, 'h'}, {\"format\", required_argument, 0, 'f'}, {\"output\", required_argument, 0, OPTION_OUTPUT}, {0, 0, 0, 0} }; c = getopt_long(argc, argv, \"f:h\", long_options, &option_index); if (c == -1) { break; } switch(c) { case '?': case 'h': help(); break; case 'f': fmt = optarg; break; case OPTION_OUTPUT: output = optarg; break; } } if (optind >= argc) { help(); } filename = argv[optind++]; if (output && !strcmp(output, \"json\")) { output_format = OFORMAT_JSON; } else if (output && !strcmp(output, \"human\")) { output_format = OFORMAT_HUMAN; } else if (output) { error_report(\"--output must be used with human or json as argument.\"); return 1; } bs = bdrv_new_open(filename, fmt, BDRV_O_FLAGS | BDRV_O_NO_BACKING); if (!bs) { return 1; } info 
= g_new0(ImageInfo, 1); collect_image_info(bs, info, filename, fmt); switch (output_format) { case OFORMAT_HUMAN: dump_human_image_info(info); dump_snapshots(bs); break; case OFORMAT_JSON: collect_snapshots(bs, info); dump_json_image_info(info); break; } qapi_free_ImageInfo(info); bdrv_delete(bs); return 0; }"} {"target": 0, "idx": 23108, "func": "int qio_dns_resolver_lookup_sync(QIODNSResolver *resolver, SocketAddressLegacy *addr, size_t *naddrs, SocketAddressLegacy ***addrs, Error **errp) { switch (addr->type) { case SOCKET_ADDRESS_LEGACY_KIND_INET: return qio_dns_resolver_lookup_sync_inet(resolver, addr, naddrs, addrs, errp); case SOCKET_ADDRESS_LEGACY_KIND_UNIX: case SOCKET_ADDRESS_LEGACY_KIND_VSOCK: case SOCKET_ADDRESS_LEGACY_KIND_FD: return qio_dns_resolver_lookup_sync_nop(resolver, addr, naddrs, addrs, errp); default: abort(); } }"} {"target": 0, "idx": 23116, "func": "static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) { BDRVQcowState *s = bs->opaque; int index_in_cluster; int ret = 0, n; uint64_t cluster_offset; struct iovec hd_iov; QEMUIOVector hd_qiov; uint8_t *buf; void *orig_buf; Error *err = NULL; if (qiov->niov > 1) { buf = orig_buf = qemu_try_blockalign(bs, qiov->size); if (buf == NULL) { return -ENOMEM; } } else { orig_buf = NULL; buf = (uint8_t *)qiov->iov->iov_base; } qemu_co_mutex_lock(&s->lock); while (nb_sectors != 0) { /* prepare next request */ cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0); index_in_cluster = sector_num & (s->cluster_sectors - 1); n = s->cluster_sectors - index_in_cluster; if (n > nb_sectors) { n = nb_sectors; } if (!cluster_offset) { if (bs->backing) { /* read from the base image */ hd_iov.iov_base = (void *)buf; hd_iov.iov_len = n * 512; qemu_iovec_init_external(&hd_qiov, &hd_iov, 1); qemu_co_mutex_unlock(&s->lock); ret = bdrv_co_readv(bs->backing, sector_num, n, &hd_qiov); qemu_co_mutex_lock(&s->lock); if (ret < 0) { goto fail; } } else { /* Note: in this case, no need to wait */ memset(buf, 0, 512 * n); } } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) { /* add AIO support for compressed blocks ? 
*/ if (decompress_cluster(bs, cluster_offset) < 0) { goto fail; } memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n); } else { if ((cluster_offset & 511) != 0) { goto fail; } hd_iov.iov_base = (void *)buf; hd_iov.iov_len = n * 512; qemu_iovec_init_external(&hd_qiov, &hd_iov, 1); qemu_co_mutex_unlock(&s->lock); ret = bdrv_co_readv(bs->file, (cluster_offset >> 9) + index_in_cluster, n, &hd_qiov); qemu_co_mutex_lock(&s->lock); if (ret < 0) { break; } if (bs->encrypted) { assert(s->cipher); if (encrypt_sectors(s, sector_num, buf, n, false, &err) < 0) { goto fail; } } } ret = 0; nb_sectors -= n; sector_num += n; buf += n * 512; } done: qemu_co_mutex_unlock(&s->lock); if (qiov->niov > 1) { qemu_iovec_from_buf(qiov, 0, orig_buf, qiov->size); qemu_vfree(orig_buf); } return ret; fail: error_free(err); ret = -EIO; goto done; }"} {"target": 0, "idx": 23123, "func": "static int mpeg4_decode_gop_header(MpegEncContext *s, GetBitContext *gb) { int hours, minutes, seconds; if (!show_bits(gb, 23)) { av_log(s->avctx, AV_LOG_WARNING, \"GOP header invalid\\n\"); return -1; } hours = get_bits(gb, 5); minutes = get_bits(gb, 6); skip_bits1(gb); seconds = get_bits(gb, 6); s->time_base = seconds + 60*(minutes + 60*hours); skip_bits1(gb); skip_bits1(gb); return 0; }"} {"target": 0, "idx": 23136, "func": "static int ftp_restart(FTPContext *s, int64_t pos) { char command[CONTROL_BUFFER_SIZE]; const int rest_codes[] = {350, 0}; snprintf(command, sizeof(command), \"REST %\"PRId64\"\\r\\n\", pos); if (!ftp_send_command(s, command, rest_codes, NULL)) return AVERROR(EIO); return 0; }"} {"target": 0, "idx": 23140, "func": "static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, unsigned int bytes, QEMUIOVector *qiov) { BlockDriverState *bs = child->bs; /* Perform I/O through a temporary buffer so that users who scribble over * their read buffer while the operation is in progress do not end up * modifying the image file. This is critical for zero-copy guest I/O * where anything might happen inside guest memory. */ void *bounce_buffer; BlockDriver *drv = bs->drv; struct iovec iov; QEMUIOVector bounce_qiov; int64_t cluster_offset; unsigned int cluster_bytes; size_t skip_bytes; int ret; /* FIXME We cannot require callers to have write permissions when all they * are doing is a read request. If we did things right, write permissions * would be obtained anyway, but internally by the copy-on-read code. As * long as it is implemented here rather than in a separate filter driver, * the copy-on-read code doesn't have its own BdrvChild, however, for which * it could request permissions. Therefore we have to bypass the permission * system for the moment. */ // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); /* Cover entire cluster so no additional backing file I/O is required when * allocating cluster in the image file. 
*/ bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes); trace_bdrv_co_do_copy_on_readv(bs, offset, bytes, cluster_offset, cluster_bytes); iov.iov_len = cluster_bytes; iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len); if (bounce_buffer == NULL) { ret = -ENOMEM; goto err; } qemu_iovec_init_external(&bounce_qiov, &iov, 1); ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes, &bounce_qiov, 0); if (ret < 0) { goto err; } bdrv_debug_event(bs, BLKDBG_COR_WRITE); if (drv->bdrv_co_pwrite_zeroes && buffer_is_zero(bounce_buffer, iov.iov_len)) { /* FIXME: Should we (perhaps conditionally) be setting * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy * that still correctly reads as zero? */ ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0); } else { /* This does not change the data on the disk, it is not necessary * to flush even in cache=writethrough mode. */ ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes, &bounce_qiov, 0); } if (ret < 0) { /* It might be okay to ignore write errors for guest requests. If this * is a deliberate copy-on-read then we don't want to ignore the error. * Simply report it in all cases. */ goto err; } skip_bytes = offset - cluster_offset; qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes); err: qemu_vfree(bounce_buffer); return ret; }"} {"target": 0, "idx": 23145, "func": "int kvm_ioctl(KVMState *s, int type, ...) { int ret; void *arg; va_list ap; va_start(ap, type); arg = va_arg(ap, void *); va_end(ap); ret = ioctl(s->fd, type, arg); if (ret == -1) ret = -errno; return ret; }"} {"target": 0, "idx": 23155, "func": "static uint64_t exynos4210_mct_read(void *opaque, target_phys_addr_t offset, unsigned size) { Exynos4210MCTState *s = (Exynos4210MCTState *)opaque; int index; int shift; uint64_t count; uint32_t value; int lt_i; switch (offset) { case MCT_CFG: value = s->reg_mct_cfg; break; case G_CNT_L: case G_CNT_U: shift = 8 * (offset & 0x4); count = exynos4210_gfrc_get_count(&s->g_timer); value = UINT32_MAX & (count >> shift); DPRINTF(\"read FRC=0x%llx\\n\", count); break; case G_CNT_WSTAT: value = s->g_timer.reg.cnt_wstat; break; case G_COMP_L(0): case G_COMP_L(1): case G_COMP_L(2): case G_COMP_L(3): case G_COMP_U(0): case G_COMP_U(1): case G_COMP_U(2): case G_COMP_U(3): index = GET_G_COMP_IDX(offset); shift = 8 * (offset & 0x4); value = UINT32_MAX & (s->g_timer.reg.comp[index] >> shift); break; case G_TCON: value = s->g_timer.reg.tcon; break; case G_INT_CSTAT: value = s->g_timer.reg.int_cstat; break; case G_INT_ENB: value = s->g_timer.reg.int_enb; break; break; case G_WSTAT: value = s->g_timer.reg.wstat; break; case G_COMP0_ADD_INCR: case G_COMP1_ADD_INCR: case G_COMP2_ADD_INCR: case G_COMP3_ADD_INCR: value = s->g_timer.reg.comp_add_incr[GET_G_COMP_ADD_INCR_IDX(offset)]; break; /* Local timers */ case L0_TCNTB: case L0_ICNTB: case L0_FRCNTB: case L1_TCNTB: case L1_ICNTB: case L1_FRCNTB: lt_i = GET_L_TIMER_IDX(offset); index = GET_L_TIMER_CNT_REG_IDX(offset, lt_i); value = s->l_timer[lt_i].reg.cnt[index]; break; case L0_TCNTO: case L1_TCNTO: lt_i = GET_L_TIMER_IDX(offset); value = exynos4210_ltick_cnt_get_cnto(&s->l_timer[lt_i].tick_timer); DPRINTF(\"local timer[%d] read TCNTO %x\\n\", lt_i, value); break; case L0_ICNTO: case L1_ICNTO: lt_i = GET_L_TIMER_IDX(offset); value = exynos4210_ltick_int_get_cnto(&s->l_timer[lt_i].tick_timer); DPRINTF(\"local timer[%d] read ICNTO %x\\n\", lt_i, value); break; case L0_FRCNTO: case L1_FRCNTO: lt_i = GET_L_TIMER_IDX(offset); value = 
exynos4210_lfrc_get_count(&s->l_timer[lt_i]); break; case L0_TCON: case L1_TCON: lt_i = ((offset & 0xF00) - L0_TCNTB) / 0x100; value = s->l_timer[lt_i].reg.tcon; break; case L0_INT_CSTAT: case L1_INT_CSTAT: lt_i = ((offset & 0xF00) - L0_TCNTB) / 0x100; value = s->l_timer[lt_i].reg.int_cstat; break; case L0_INT_ENB: case L1_INT_ENB: lt_i = ((offset & 0xF00) - L0_TCNTB) / 0x100; value = s->l_timer[lt_i].reg.int_enb; break; case L0_WSTAT: case L1_WSTAT: lt_i = ((offset & 0xF00) - L0_TCNTB) / 0x100; value = s->l_timer[lt_i].reg.wstat; break; default: hw_error(\"exynos4210.mct: bad read offset \" TARGET_FMT_plx \"\\n\", offset); break; } return value; }"} {"target": 0, "idx": 23159, "func": "theora_gptopts(AVFormatContext *ctx, int idx, uint64_t gp, int64_t *dts) { struct ogg *ogg = ctx->priv_data; struct ogg_stream *os = ogg->streams + idx; struct theora_params *thp = os->private; uint64_t iframe = gp >> thp->gpshift; uint64_t pframe = gp & thp->gpmask; if (thp->version < 0x030201) iframe++; if(!pframe) os->pflags |= AV_PKT_FLAG_KEY; if (dts) *dts = iframe + pframe; return iframe + pframe; }"} {"target": 0, "idx": 23161, "func": "static void av_build_index_raw(AVFormatContext *s) { AVPacket pkt1, *pkt = &pkt1; int ret; AVStream *st; st = s->streams[0]; av_read_frame_flush(s); url_fseek(&s->pb, s->data_offset, SEEK_SET); for(;;) { ret = av_read_frame(s, pkt); if (ret < 0) break; if (pkt->stream_index == 0 && st->parser && (pkt->flags & PKT_FLAG_KEY)) { add_index_entry(st, st->parser->frame_offset, pkt->dts, AVINDEX_KEYFRAME); } av_free_packet(pkt); } }"} {"target": 0, "idx": 23163, "func": "static FWCfgState *bochs_bios_init(void) { FWCfgState *fw_cfg; uint8_t *smbios_tables, *smbios_anchor; size_t smbios_tables_len, smbios_anchor_len; uint64_t *numa_fw_cfg; int i, j; unsigned int apic_id_limit = pc_apic_id_limit(max_cpus); fw_cfg = fw_cfg_init_io(BIOS_CFG_IOPORT); /* FW_CFG_MAX_CPUS is a bit confusing/problematic on x86: * * SeaBIOS needs FW_CFG_MAX_CPUS for CPU hotplug, but the CPU hotplug * QEMU<->SeaBIOS interface is not based on the \"CPU index\", but on the APIC * ID of hotplugged CPUs[1]. This means that FW_CFG_MAX_CPUS is not the * \"maximum number of CPUs\", but the \"limit to the APIC ID values SeaBIOS * may see\". * * So, this means we must not use max_cpus, here, but the maximum possible * APIC ID value, plus one. 
* * [1] The only kind of \"CPU identifier\" used between SeaBIOS and QEMU is * the APIC ID, not the \"CPU index\" */ fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)apic_id_limit); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_ACPI_TABLES, acpi_tables, acpi_tables_len); fw_cfg_add_i32(fw_cfg, FW_CFG_IRQ0_OVERRIDE, kvm_allows_irq0_override()); smbios_tables = smbios_get_table_legacy(&smbios_tables_len); if (smbios_tables) { fw_cfg_add_bytes(fw_cfg, FW_CFG_SMBIOS_ENTRIES, smbios_tables, smbios_tables_len); } smbios_get_tables(&smbios_tables, &smbios_tables_len, &smbios_anchor, &smbios_anchor_len); if (smbios_anchor) { fw_cfg_add_file(fw_cfg, \"etc/smbios/smbios-tables\", smbios_tables, smbios_tables_len); fw_cfg_add_file(fw_cfg, \"etc/smbios/smbios-anchor\", smbios_anchor, smbios_anchor_len); } fw_cfg_add_bytes(fw_cfg, FW_CFG_E820_TABLE, &e820_reserve, sizeof(e820_reserve)); fw_cfg_add_file(fw_cfg, \"etc/e820\", e820_table, sizeof(struct e820_entry) * e820_entries); fw_cfg_add_bytes(fw_cfg, FW_CFG_HPET, &hpet_cfg, sizeof(hpet_cfg)); /* allocate memory for the NUMA channel: one (64bit) word for the number * of nodes, one word for each VCPU->node and one word for each node to * hold the amount of memory. */ numa_fw_cfg = g_new0(uint64_t, 1 + apic_id_limit + nb_numa_nodes); numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes); for (i = 0; i < max_cpus; i++) { unsigned int apic_id = x86_cpu_apic_id_from_index(i); assert(apic_id < apic_id_limit); for (j = 0; j < nb_numa_nodes; j++) { if (test_bit(i, numa_info[j].node_cpu)) { numa_fw_cfg[apic_id + 1] = cpu_to_le64(j); break; } } } for (i = 0; i < nb_numa_nodes; i++) { numa_fw_cfg[apic_id_limit + 1 + i] = cpu_to_le64(numa_info[i].node_mem); } fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, numa_fw_cfg, (1 + apic_id_limit + nb_numa_nodes) * sizeof(*numa_fw_cfg)); return fw_cfg; }"} {"target": 0, "idx": 23174, "func": "int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, target_ulong *raddr, int *flags) { int r = -1; uint8_t *sk; *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC; vaddr &= TARGET_PAGE_MASK; if (!(env->psw.mask & PSW_MASK_DAT)) { *raddr = vaddr; r = 0; goto out; } switch (asc) { case PSW_ASC_PRIMARY: case PSW_ASC_HOME: r = mmu_translate_asc(env, vaddr, asc, raddr, flags, rw); break; case PSW_ASC_SECONDARY: /* * Instruction: Primary * Data: Secondary */ if (rw == 2) { r = mmu_translate_asc(env, vaddr, PSW_ASC_PRIMARY, raddr, flags, rw); *flags &= ~(PAGE_READ | PAGE_WRITE); } else { r = mmu_translate_asc(env, vaddr, PSW_ASC_SECONDARY, raddr, flags, rw); *flags &= ~(PAGE_EXEC); } break; case PSW_ASC_ACCREG: default: hw_error(\"guest switched to unknown asc mode\\n\"); break; } out: /* Convert real address -> absolute address */ *raddr = mmu_real2abs(env, *raddr); if (*raddr <= ram_size) { sk = &env->storage_keys[*raddr / TARGET_PAGE_SIZE]; if (*flags & PAGE_READ) { *sk |= SK_R; } if (*flags & PAGE_WRITE) { *sk |= SK_C; } } return r; }"} {"target": 0, "idx": 23184, "func": "static inline int decode_ac_coeffs(GetBitContext *gb, int16_t *out, int blocks_per_slice, int plane_size_factor, const uint8_t *scan) { int pos, block_mask, run, level, sign, run_cb_index, lev_cb_index; int max_coeffs, bits_left; /* set initial prediction values */ run = 4; level = 2; max_coeffs = blocks_per_slice << 6; block_mask = blocks_per_slice - 1; for (pos = blocks_per_slice - 1; pos < max_coeffs;) { run_cb_index = ff_prores_run_to_cb_index[FFMIN(run, 15)]; lev_cb_index = 
ff_prores_lev_to_cb_index[FFMIN(level, 9)]; bits_left = get_bits_left(gb); if (bits_left <= 0 || (bits_left <= 8 && !show_bits(gb, bits_left))) return 0; run = decode_vlc_codeword(gb, ff_prores_ac_codebook[run_cb_index]); if (run < 0) return AVERROR_INVALIDDATA; bits_left = get_bits_left(gb); if (bits_left <= 0 || (bits_left <= 8 && !show_bits(gb, bits_left))) return AVERROR_INVALIDDATA; level = decode_vlc_codeword(gb, ff_prores_ac_codebook[lev_cb_index]) + 1; if (level < 0) return AVERROR_INVALIDDATA; pos += run + 1; if (pos >= max_coeffs) break; sign = get_sbits(gb, 1); out[((pos & block_mask) << 6) + scan[pos >> plane_size_factor]] = (level ^ sign) - sign; } return 0; }"} {"target": 1, "idx": 23186, "func": "static int local_opendir(FsContext *ctx, V9fsPath *fs_path, V9fsFidOpenState *fs) { int dirfd; DIR *stream; dirfd = local_opendir_nofollow(ctx, fs_path->data); if (dirfd == -1) { return -1; } stream = fdopendir(dirfd); if (!stream) { return -1; } fs->dir.stream = stream; return 0; }"} {"target": 1, "idx": 23193, "func": "static int mpeg_mux_init(AVFormatContext *ctx) { MpegMuxContext *s = ctx->priv_data; int bitrate, i, mpa_id, mpv_id, ac3_id; AVStream *st; StreamInfo *stream; s->packet_number = 0; s->is_vcd = (ctx->oformat == &mpeg1vcd_mux); s->is_mpeg2 = (ctx->oformat == &mpeg2vob_mux); if (s->is_vcd) s->packet_size = 2324; /* VCD packet size */ else s->packet_size = 2048; /* startcode(4) + length(2) + flags(1) */ s->packet_data_max_size = s->packet_size - 7; if (s->is_mpeg2) s->packet_data_max_size -= 2; s->audio_bound = 0; s->video_bound = 0; mpa_id = AUDIO_ID; ac3_id = 0x80; mpv_id = VIDEO_ID; s->scr_stream_index = -1; for(i=0;inb_streams;i++) { st = ctx->streams[i]; stream = av_mallocz(sizeof(StreamInfo)); if (!stream) goto fail; st->priv_data = stream; switch(st->codec.codec_type) { case CODEC_TYPE_AUDIO: if (st->codec.codec_id == CODEC_ID_AC3) stream->id = ac3_id++; else stream->id = mpa_id++; stream->max_buffer_size = 4 * 1024; s->audio_bound++; break; case CODEC_TYPE_VIDEO: /* by default, video is used for the SCR computation */ if (s->scr_stream_index == -1) s->scr_stream_index = i; stream->id = mpv_id++; stream->max_buffer_size = 46 * 1024; s->video_bound++; break; default: av_abort(); } } /* if no SCR, use first stream (audio) */ if (s->scr_stream_index == -1) s->scr_stream_index = 0; /* we increase slightly the bitrate to take into account the headers. XXX: compute it exactly */ bitrate = 2000; for(i=0;inb_streams;i++) { st = ctx->streams[i]; bitrate += st->codec.bit_rate; } s->mux_rate = (bitrate + (8 * 50) - 1) / (8 * 50); if (s->is_vcd || s->is_mpeg2) /* every packet */ s->pack_header_freq = 1; else /* every 2 seconds */ s->pack_header_freq = 2 * bitrate / s->packet_size / 8; /* the above seems to make pack_header_freq zero sometimes */ if (s->pack_header_freq == 0) s->pack_header_freq = 1; if (s->is_mpeg2) /* every 200 packets. Need to look at the spec. 
*/ s->system_header_freq = s->pack_header_freq * 40; else if (s->is_vcd) /* every 40 packets, this is my invention */ s->system_header_freq = s->pack_header_freq * 40; else s->system_header_freq = s->pack_header_freq * 5; for(i=0;inb_streams;i++) { stream = ctx->streams[i]->priv_data; stream->buffer_ptr = 0; stream->packet_number = 0; stream->start_pts = AV_NOPTS_VALUE; stream->start_dts = AV_NOPTS_VALUE; } s->last_scr = 0; return 0; fail: for(i=0;inb_streams;i++) { av_free(ctx->streams[i]->priv_data); } return -ENOMEM; }"} {"target": 1, "idx": 23206, "func": "static int mxf_read_header(AVFormatContext *s) { MXFContext *mxf = s->priv_data; KLVPacket klv; int64_t essence_offset = 0; int ret; mxf->last_forward_tell = INT64_MAX; mxf->edit_units_per_packet = 1; if (!mxf_read_sync(s->pb, mxf_header_partition_pack_key, 14)) { av_log(s, AV_LOG_ERROR, \"could not find header partition pack key\\n\"); return AVERROR_INVALIDDATA; } avio_seek(s->pb, -14, SEEK_CUR); mxf->fc = s; mxf->run_in = avio_tell(s->pb); while (!url_feof(s->pb)) { const MXFMetadataReadTableEntry *metadata; if (klv_read_packet(&klv, s->pb) < 0) { /* EOF - seek to previous partition or stop */ if(mxf_parse_handle_partition_or_eof(mxf) <= 0) break; else continue; } PRINT_KEY(s, \"read header\", klv.key); av_dlog(s, \"size %\"PRIu64\" offset %#\"PRIx64\"\\n\", klv.length, klv.offset); if (IS_KLV_KEY(klv.key, mxf_encrypted_triplet_key) || IS_KLV_KEY(klv.key, mxf_essence_element_key) || IS_KLV_KEY(klv.key, mxf_avid_essence_element_key) || IS_KLV_KEY(klv.key, mxf_system_item_key)) { if (!mxf->current_partition) { av_log(mxf->fc, AV_LOG_ERROR, \"found essence prior to first PartitionPack\\n\"); return AVERROR_INVALIDDATA; } if (!mxf->current_partition->essence_offset) { /* for OP1a we compute essence_offset * for OPAtom we point essence_offset after the KL (usually op1a_essence_offset + 20 or 25) * TODO: for OP1a we could eliminate this entire if statement, always stopping parsing at op1a_essence_offset * for OPAtom we still need the actual essence_offset though (the KL's length can vary) */ int64_t op1a_essence_offset = round_to_kag(mxf->current_partition->this_partition + mxf->current_partition->pack_length, mxf->current_partition->kag_size) + round_to_kag(mxf->current_partition->header_byte_count, mxf->current_partition->kag_size) + round_to_kag(mxf->current_partition->index_byte_count, mxf->current_partition->kag_size); if (mxf->op == OPAtom) { /* point essence_offset to the actual data * OPAtom has all the essence in one big KLV */ mxf->current_partition->essence_offset = avio_tell(s->pb); mxf->current_partition->essence_length = klv.length; } else { /* NOTE: op1a_essence_offset may be less than to klv.offset (C0023S01.mxf) */ mxf->current_partition->essence_offset = op1a_essence_offset; } } if (!essence_offset) essence_offset = klv.offset; /* seek to footer, previous partition or stop */ if (mxf_parse_handle_essence(mxf) <= 0) break; continue; } else if (!memcmp(klv.key, mxf_header_partition_pack_key, 13) && klv.key[13] >= 2 && klv.key[13] <= 4 && mxf->current_partition) { /* next partition pack - keep going, seek to previous partition or stop */ if(mxf_parse_handle_partition_or_eof(mxf) <= 0) break; else if (mxf->parsing_backward) continue; /* we're still parsing forward. 
proceed to parsing this partition pack */ } for (metadata = mxf_metadata_read_table; metadata->read; metadata++) { if (IS_KLV_KEY(klv.key, metadata->key)) { int res; if (klv.key[5] == 0x53) { res = mxf_read_local_tags(mxf, &klv, metadata->read, metadata->ctx_size, metadata->type); } else { uint64_t next = avio_tell(s->pb) + klv.length; res = metadata->read(mxf, s->pb, 0, klv.length, klv.key, klv.offset); /* only seek forward, else this can loop for a long time */ if (avio_tell(s->pb) > next) { av_log(s, AV_LOG_ERROR, \"read past end of KLV @ %#\"PRIx64\"\\n\", klv.offset); return AVERROR_INVALIDDATA; } avio_seek(s->pb, next, SEEK_SET); } if (res < 0) { av_log(s, AV_LOG_ERROR, \"error reading header metadata\\n\"); return res; } break; } } if (!metadata->read) avio_skip(s->pb, klv.length); } /* FIXME avoid seek */ if (!essence_offset) { av_log(s, AV_LOG_ERROR, \"no essence\\n\"); return AVERROR_INVALIDDATA; } avio_seek(s->pb, essence_offset, SEEK_SET); mxf_compute_essence_containers(mxf); /* we need to do this before computing the index tables * to be able to fill in zero IndexDurations with st->duration */ if ((ret = mxf_parse_structural_metadata(mxf)) < 0) return ret; if ((ret = mxf_compute_index_tables(mxf)) < 0) return ret; if (mxf->nb_index_tables > 1) { /* TODO: look up which IndexSID to use via EssenceContainerData */ av_log(mxf->fc, AV_LOG_INFO, \"got %i index tables - only the first one (IndexSID %i) will be used\\n\", mxf->nb_index_tables, mxf->index_tables[0].index_sid); } else if (mxf->nb_index_tables == 0 && mxf->op == OPAtom) { av_log(mxf->fc, AV_LOG_ERROR, \"cannot demux OPAtom without an index\\n\"); return AVERROR_INVALIDDATA; } mxf_handle_small_eubc(s); return 0; }"} {"target": 1, "idx": 23213, "func": "get_net_error_message(gint error) { HMODULE module = NULL; gchar *retval = NULL; wchar_t *msg = NULL; int flags; size_t nchars; flags = FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM; if (error >= NERR_BASE && error <= MAX_NERR) { module = LoadLibraryExW(L\"netmsg.dll\", NULL, LOAD_LIBRARY_AS_DATAFILE); if (module != NULL) { flags |= FORMAT_MESSAGE_FROM_HMODULE; } } FormatMessageW(flags, module, error, 0, (LPWSTR)&msg, 0, NULL); if (msg != NULL) { nchars = wcslen(msg); if (nchars > 2 && msg[nchars - 1] == L'\\n' && msg[nchars - 2] == L'\\r') { msg[nchars - 2] = L'\\0'; } retval = g_utf16_to_utf8(msg, -1, NULL, NULL, NULL); LocalFree(msg); } if (module != NULL) { FreeLibrary(module); } return retval; }"} {"target": 0, "idx": 23237, "func": "static void kqemu_record_flush(void) { PCRecord *r, *r_next; int h; for(h = 0; h < PC_REC_HASH_SIZE; h++) { for(r = pc_rec_hash[h]; r != NULL; r = r_next) { r_next = r->next; free(r); } pc_rec_hash[h] = NULL; } nb_pc_records = 0; }"} {"target": 0, "idx": 23244, "func": "static void iscsi_allocationmap_set(IscsiLun *iscsilun, int64_t sector_num, int nb_sectors) { int64_t cluster_num, nb_clusters; if (iscsilun->allocationmap == NULL) { return; } cluster_num = sector_num / iscsilun->cluster_sectors; nb_clusters = DIV_ROUND_UP(sector_num + nb_sectors, iscsilun->cluster_sectors) - cluster_num; bitmap_set(iscsilun->allocationmap, cluster_num, nb_clusters); }"} {"target": 0, "idx": 23245, "func": "int cpu_exec(CPUArchState *env) { CPUState *cpu = ENV_GET_CPU(env); CPUClass *cc = CPU_GET_CLASS(cpu); #ifdef TARGET_I386 X86CPU *x86_cpu = X86_CPU(cpu); #endif int ret, interrupt_request; TranslationBlock *tb; uint8_t *tc_ptr; uintptr_t next_tb; SyncClocks sc; /* This must be volatile so it is not trashed 
by longjmp() */ volatile bool have_tb_lock = false; if (cpu->halted) { if (!cpu_has_work(cpu)) { return EXCP_HALTED; } cpu->halted = 0; } current_cpu = cpu; /* As long as current_cpu is null, up to the assignment just above, * requests by other threads to exit the execution loop are expected to * be issued using the exit_request global. We must make sure that our * evaluation of the global value is performed past the current_cpu * value transition point, which requires a memory barrier as well as * an instruction scheduling constraint on modern architectures. */ smp_mb(); if (unlikely(exit_request)) { cpu->exit_request = 1; } cc->cpu_exec_enter(cpu); cpu->exception_index = -1; /* Calculate difference between guest clock and host clock. * This delay includes the delay of the last cycle, so * what we have to do is sleep until it is 0. As for the * advance/delay we gain here, we try to fix it next time. */ init_delay_params(&sc, cpu); /* prepare setjmp context for exception handling */ for(;;) { if (sigsetjmp(cpu->jmp_env, 0) == 0) { /* if an exception is pending, we execute it here */ if (cpu->exception_index >= 0) { if (cpu->exception_index >= EXCP_INTERRUPT) { /* exit request from the cpu execution loop */ ret = cpu->exception_index; if (ret == EXCP_DEBUG) { cpu_handle_debug_exception(env); } break; } else { #if defined(CONFIG_USER_ONLY) /* if user mode only, we simulate a fake exception which will be handled outside the cpu execution loop */ #if defined(TARGET_I386) cc->do_interrupt(cpu); #endif ret = cpu->exception_index; break; #else cc->do_interrupt(cpu); cpu->exception_index = -1; #endif } } next_tb = 0; /* force lookup of first TB */ for(;;) { interrupt_request = cpu->interrupt_request; if (unlikely(interrupt_request)) { if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { /* Mask out external interrupts for this step. */ interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; } if (interrupt_request & CPU_INTERRUPT_DEBUG) { cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; cpu->exception_index = EXCP_DEBUG; cpu_loop_exit(cpu); } if (interrupt_request & CPU_INTERRUPT_HALT) { cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; cpu->halted = 1; cpu->exception_index = EXCP_HLT; cpu_loop_exit(cpu); } #if defined(TARGET_I386) if (interrupt_request & CPU_INTERRUPT_INIT) { cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0); do_cpu_init(x86_cpu); cpu->exception_index = EXCP_HALTED; cpu_loop_exit(cpu); } #else if (interrupt_request & CPU_INTERRUPT_RESET) { cpu_reset(cpu); } #endif /* The target hook has 3 exit conditions: False when the interrupt isn't processed, True when it is, and we should restart on a new TB, and via longjmp via cpu_loop_exit. */ if (cc->cpu_exec_interrupt(cpu, interrupt_request)) { next_tb = 0; } /* Don't use the cached interrupt_request value, do_interrupt may have updated the EXITTB flag. 
*/ if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) { cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; /* ensure that no TB jump will be modified as the program flow was changed */ next_tb = 0; } } if (unlikely(cpu->exit_request)) { cpu->exit_request = 0; cpu->exception_index = EXCP_INTERRUPT; cpu_loop_exit(cpu); } spin_lock(&tcg_ctx.tb_ctx.tb_lock); have_tb_lock = true; tb = tb_find_fast(env); /* Note: we do it here to avoid a gcc bug on Mac OS X when doing it in tb_find_slow */ if (tcg_ctx.tb_ctx.tb_invalidated_flag) { /* as some TB could have been invalidated because of memory exceptions while generating the code, we must recompute the hash index here */ next_tb = 0; tcg_ctx.tb_ctx.tb_invalidated_flag = 0; } if (qemu_loglevel_mask(CPU_LOG_EXEC)) { qemu_log(\"Trace %p [\" TARGET_FMT_lx \"] %s\\n\", tb->tc_ptr, tb->pc, lookup_symbol(tb->pc)); } /* see if we can patch the calling TB. When the TB spans two pages, we cannot safely do a direct jump. */ if (next_tb != 0 && tb->page_addr[1] == -1) { tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK), next_tb & TB_EXIT_MASK, tb); } have_tb_lock = false; spin_unlock(&tcg_ctx.tb_ctx.tb_lock); /* cpu_interrupt might be called while translating the TB, but before it is linked into a potentially infinite loop and becomes env->current_tb. Avoid starting execution if there is a pending interrupt. */ cpu->current_tb = tb; barrier(); if (likely(!cpu->exit_request)) { trace_exec_tb(tb, tb->pc); tc_ptr = tb->tc_ptr; /* execute the generated code */ next_tb = cpu_tb_exec(cpu, tc_ptr); switch (next_tb & TB_EXIT_MASK) { case TB_EXIT_REQUESTED: /* Something asked us to stop executing * chained TBs; just continue round the main * loop. Whatever requested the exit will also * have set something else (eg exit_request or * interrupt_request) which we will handle * next time around the loop. */ tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK); next_tb = 0; break; case TB_EXIT_ICOUNT_EXPIRED: { /* Instruction counter expired. */ int insns_left; tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK); insns_left = cpu->icount_decr.u32; if (cpu->icount_extra && insns_left >= 0) { /* Refill decrementer and continue execution. */ cpu->icount_extra += insns_left; if (cpu->icount_extra > 0xffff) { insns_left = 0xffff; } else { insns_left = cpu->icount_extra; } cpu->icount_extra -= insns_left; cpu->icount_decr.u16.low = insns_left; } else { if (insns_left > 0) { /* Execute remaining instructions. */ cpu_exec_nocache(env, insns_left, tb); align_clocks(&sc, cpu); } cpu->exception_index = EXCP_INTERRUPT; next_tb = 0; cpu_loop_exit(cpu); } break; } default: break; } } cpu->current_tb = NULL; /* Try to align the host and virtual clocks if the guest is in advance */ align_clocks(&sc, cpu); /* reset soft MMU for next block (it can currently only be set by a memory fault) */ } /* for(;;) */ } else { /* Reload env after longjmp - the compiler may have smashed all * local variables as longjmp is marked 'noreturn'. 
*/ cpu = current_cpu; env = cpu->env_ptr; cc = CPU_GET_CLASS(cpu); #ifdef TARGET_I386 x86_cpu = X86_CPU(cpu); #endif if (have_tb_lock) { spin_unlock(&tcg_ctx.tb_ctx.tb_lock); have_tb_lock = false; } } } /* for(;;) */ cc->cpu_exec_exit(cpu); /* fail safe : never use current_cpu outside cpu_exec() */ current_cpu = NULL; return ret; }"} {"target": 0, "idx": 23247, "func": "static void test_qemu_strtosz_metric(void) { const char *str = \"12345k\"; char *endptr = NULL; int64_t res; res = qemu_strtosz_metric(str, &endptr); g_assert_cmpint(res, ==, 12345000); g_assert(endptr == str + 6); }"} {"target": 0, "idx": 23250, "func": "static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, long width, long height, long lumStride, long chromStride, long dstStride) { RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1); }"} {"target": 1, "idx": 23256, "func": "int ff_h263_decode_mb(MpegEncContext *s, int16_t block[6][64]) { int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant; int16_t *mot_val; const int xy= s->mb_x + s->mb_y * s->mb_stride; int cbpb = 0, pb_mv_count = 0; av_assert2(!s->h263_pred); if (s->pict_type == AV_PICTURE_TYPE_P) { do{ if (get_bits1(&s->gb)) { /* skip mb */ s->mb_intra = 0; for(i=0;i<6;i++) s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mb_skipped = !(s->obmc | s->loop_filter); goto end; cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); if (cbpc < 0){ av_log(s->avctx, AV_LOG_ERROR, \"cbpc damaged at %d %d\\n\", s->mb_x, s->mb_y); }while(cbpc == 20); s->bdsp.clear_blocks(s->block[0]); dquant = cbpc & 8; s->mb_intra = ((cbpc & 4) != 0); if (s->mb_intra) goto intra; if(s->pb_frame && get_bits1(&s->gb)) pb_mv_count = h263_get_modb(&s->gb, s->pb_frame, &cbpb); cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if(s->alt_inter_vlc==0 || (cbpc & 3)!=3) cbpy ^= 0xF; cbp = (cbpc & 3) | (cbpy << 2); if (dquant) { h263_decode_dquant(s); s->mv_dir = MV_DIR_FORWARD; if ((cbpc & 16) == 0) { s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; /* 16x16 motion prediction */ s->mv_type = MV_TYPE_16X16; ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); if (s->umvplus) mx = h263p_decode_umotion(s, pred_x); else mx = ff_h263_decode_motion(s, pred_x, 1); if (mx >= 0xffff) if (s->umvplus) my = h263p_decode_umotion(s, pred_y); else my = ff_h263_decode_motion(s, pred_y, 1); if (my >= 0xffff) s->mv[0][0][0] = mx; s->mv[0][0][1] = my; if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1) skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ } else { s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0; s->mv_type = MV_TYPE_8X8; for(i=0;i<4;i++) { mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y); if (s->umvplus) mx = h263p_decode_umotion(s, pred_x); else mx = ff_h263_decode_motion(s, pred_x, 1); if (mx >= 0xffff) if (s->umvplus) my = h263p_decode_umotion(s, pred_y); else my = ff_h263_decode_motion(s, pred_y, 1); if (my >= 0xffff) s->mv[0][i][0] = mx; s->mv[0][i][1] = my; if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1) skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ mot_val[0] = mx; mot_val[1] = my; } else if(s->pict_type==AV_PICTURE_TYPE_B) { int mb_type; const int stride= s->b8_stride; int16_t *mot_val0 = s->current_picture.motion_val[0][2 * (s->mb_x + 
s->mb_y * stride)]; int16_t *mot_val1 = s->current_picture.motion_val[1][2 * (s->mb_x + s->mb_y * stride)]; // const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride; //FIXME ugly mot_val0[0 ]= mot_val0[2 ]= mot_val0[0+2*stride]= mot_val0[2+2*stride]= mot_val0[1 ]= mot_val0[3 ]= mot_val0[1+2*stride]= mot_val0[3+2*stride]= mot_val1[0 ]= mot_val1[2 ]= mot_val1[0+2*stride]= mot_val1[2+2*stride]= mot_val1[1 ]= mot_val1[3 ]= mot_val1[1+2*stride]= mot_val1[3+2*stride]= 0; do{ mb_type= get_vlc2(&s->gb, h263_mbtype_b_vlc.table, H263_MBTYPE_B_VLC_BITS, 2); if (mb_type < 0){ av_log(s->avctx, AV_LOG_ERROR, \"b mb_type damaged at %d %d\\n\", s->mb_x, s->mb_y); mb_type= h263_mb_type_b_map[ mb_type ]; }while(!mb_type); s->mb_intra = IS_INTRA(mb_type); if(HAS_CBP(mb_type)){ s->bdsp.clear_blocks(s->block[0]); cbpc = get_vlc2(&s->gb, cbpc_b_vlc.table, CBPC_B_VLC_BITS, 1); if(s->mb_intra){ dquant = IS_QUANT(mb_type); goto intra; cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if (cbpy < 0){ av_log(s->avctx, AV_LOG_ERROR, \"b cbpy damaged at %d %d\\n\", s->mb_x, s->mb_y); if(s->alt_inter_vlc==0 || (cbpc & 3)!=3) cbpy ^= 0xF; cbp = (cbpc & 3) | (cbpy << 2); }else cbp=0; av_assert2(!s->mb_intra); if(IS_QUANT(mb_type)){ h263_decode_dquant(s); if(IS_DIRECT(mb_type)){ s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; mb_type |= set_direct_mv(s); }else{ s->mv_dir = 0; s->mv_type= MV_TYPE_16X16; //FIXME UMV if(USES_LIST(mb_type, 0)){ int16_t *mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); s->mv_dir = MV_DIR_FORWARD; if (s->umvplus) mx = h263p_decode_umotion(s, pred_x); else mx = ff_h263_decode_motion(s, pred_x, 1); if (mx >= 0xffff) if (s->umvplus) my = h263p_decode_umotion(s, pred_y); else my = ff_h263_decode_motion(s, pred_y, 1); if (my >= 0xffff) if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1) skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ s->mv[0][0][0] = mx; s->mv[0][0][1] = my; mot_val[0 ]= mot_val[2 ]= mot_val[0+2*stride]= mot_val[2+2*stride]= mx; mot_val[1 ]= mot_val[3 ]= mot_val[1+2*stride]= mot_val[3+2*stride]= my; if(USES_LIST(mb_type, 1)){ int16_t *mot_val= ff_h263_pred_motion(s, 0, 1, &pred_x, &pred_y); s->mv_dir |= MV_DIR_BACKWARD; if (s->umvplus) mx = h263p_decode_umotion(s, pred_x); else mx = ff_h263_decode_motion(s, pred_x, 1); if (mx >= 0xffff) if (s->umvplus) my = h263p_decode_umotion(s, pred_y); else my = ff_h263_decode_motion(s, pred_y, 1); if (my >= 0xffff) if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1) skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ s->mv[1][0][0] = mx; s->mv[1][0][1] = my; mot_val[0 ]= mot_val[2 ]= mot_val[0+2*stride]= mot_val[2+2*stride]= mx; mot_val[1 ]= mot_val[3 ]= mot_val[1+2*stride]= mot_val[3+2*stride]= my; s->current_picture.mb_type[xy] = mb_type; } else { /* I-Frame */ do{ cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); if (cbpc < 0){ av_log(s->avctx, AV_LOG_ERROR, \"I cbpc damaged at %d %d\\n\", s->mb_x, s->mb_y); }while(cbpc == 8); s->bdsp.clear_blocks(s->block[0]); dquant = cbpc & 4; s->mb_intra = 1; intra: s->current_picture.mb_type[xy] = MB_TYPE_INTRA; if (s->h263_aic) { s->ac_pred = get_bits1(&s->gb); if(s->ac_pred){ s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED; s->h263_aic_dir = get_bits1(&s->gb); }else s->ac_pred = 0; if(s->pb_frame && get_bits1(&s->gb)) pb_mv_count = h263_get_modb(&s->gb, s->pb_frame, &cbpb); cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if(cbpy<0){ av_log(s->avctx, AV_LOG_ERROR, \"I cbpy damaged 
at %d %d\\n\", s->mb_x, s->mb_y); cbp = (cbpc & 3) | (cbpy << 2); if (dquant) { h263_decode_dquant(s); pb_mv_count += !!s->pb_frame; while(pb_mv_count--){ ff_h263_decode_motion(s, 0, 1); ff_h263_decode_motion(s, 0, 1); /* decode each block */ for (i = 0; i < 6; i++) { if (h263_decode_block(s, block[i], i, cbp&32) < 0) return -1; cbp+=cbp; if(s->pb_frame && h263_skip_b_part(s, cbpb) < 0) return -1; if(s->obmc && !s->mb_intra){ if(s->pict_type == AV_PICTURE_TYPE_P && s->mb_x+1mb_width && s->mb_num_left != 1) preview_obmc(s); end: /* per-MB end of slice check */ { int v= show_bits(&s->gb, 16); if (get_bits_left(&s->gb) < 16) { v >>= 16 - get_bits_left(&s->gb); if(v==0) return SLICE_END; return SLICE_OK;"} {"target": 0, "idx": 23259, "func": "static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h) { MOVQ_BFE(mm6); __asm__ volatile( \"lea (%3, %3), %%\"REG_a\" \\n\\t\" \".p2align 3 \\n\\t\" \"1: \\n\\t\" \"movq (%1), %%mm0 \\n\\t\" \"movq 1(%1), %%mm1 \\n\\t\" \"movq (%1, %3), %%mm2 \\n\\t\" \"movq 1(%1, %3), %%mm3 \\n\\t\" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) \"movq %%mm4, (%2) \\n\\t\" \"movq %%mm5, (%2, %3) \\n\\t\" \"movq 8(%1), %%mm0 \\n\\t\" \"movq 9(%1), %%mm1 \\n\\t\" \"movq 8(%1, %3), %%mm2 \\n\\t\" \"movq 9(%1, %3), %%mm3 \\n\\t\" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) \"movq %%mm4, 8(%2) \\n\\t\" \"movq %%mm5, 8(%2, %3) \\n\\t\" \"add %%\"REG_a\", %1 \\n\\t\" \"add %%\"REG_a\", %2 \\n\\t\" \"movq (%1), %%mm0 \\n\\t\" \"movq 1(%1), %%mm1 \\n\\t\" \"movq (%1, %3), %%mm2 \\n\\t\" \"movq 1(%1, %3), %%mm3 \\n\\t\" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) \"movq %%mm4, (%2) \\n\\t\" \"movq %%mm5, (%2, %3) \\n\\t\" \"movq 8(%1), %%mm0 \\n\\t\" \"movq 9(%1), %%mm1 \\n\\t\" \"movq 8(%1, %3), %%mm2 \\n\\t\" \"movq 9(%1, %3), %%mm3 \\n\\t\" PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) \"movq %%mm4, 8(%2) \\n\\t\" \"movq %%mm5, 8(%2, %3) \\n\\t\" \"add %%\"REG_a\", %1 \\n\\t\" \"add %%\"REG_a\", %2 \\n\\t\" \"subl $4, %0 \\n\\t\" \"jnz 1b \\n\\t\" :\"+g\"(h), \"+S\"(pixels), \"+D\"(block) :\"r\"((x86_reg)line_size) :REG_a, \"memory\"); }"} {"target": 1, "idx": 23274, "func": "int register_savevm(const char *idstr, int instance_id, int version_id, SaveStateHandler *save_state, LoadStateHandler *load_state, void *opaque) { SaveStateEntry *se, **pse; se = qemu_malloc(sizeof(SaveStateEntry)); if (!se) return -1; pstrcpy(se->idstr, sizeof(se->idstr), idstr); se->instance_id = (instance_id == -1) ? 
0 : instance_id; se->version_id = version_id; se->save_state = save_state; se->load_state = load_state; se->opaque = opaque; se->next = NULL; /* add at the end of list */ pse = &first_se; while (*pse != NULL) { if (instance_id == -1 && strcmp(se->idstr, (*pse)->idstr) == 0 && se->instance_id <= (*pse)->instance_id) se->instance_id = (*pse)->instance_id + 1; pse = &(*pse)->next; } *pse = se; return 0; }"} {"target": 1, "idx": 23277, "func": "static void arm_mptimer_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = arm_mptimer_realize; dc->vmsd = &vmstate_arm_mptimer; dc->reset = arm_mptimer_reset; dc->no_user = 1; dc->props = arm_mptimer_properties; }"} {"target": 1, "idx": 23299, "func": "static int decode_mb_cavlc(H264Context *h){ MpegEncContext * const s = &h->s; const int mb_xy= s->mb_x + s->mb_y*s->mb_stride; int partition_count; unsigned int mb_type, cbp; int dct8x8_allowed= h->pps.transform_8x8_mode; s->dsp.clear_blocks(h->mb); //FIXME avoid if already clear (move after skip handlong? tprintf(s->avctx, \"pic:%d mb:%d/%d\\n\", h->frame_num, s->mb_x, s->mb_y); cbp = 0; /* avoid warning. FIXME: find a solution without slowing down the code */ if(h->slice_type != I_TYPE && h->slice_type != SI_TYPE){ if(s->mb_skip_run==-1) s->mb_skip_run= get_ue_golomb(&s->gb); if (s->mb_skip_run--) { if(FRAME_MBAFF && (s->mb_y&1) == 0){ if(s->mb_skip_run==0) h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb); else predict_field_decoding_flag(h); } decode_mb_skip(h); return 0; } } if(FRAME_MBAFF){ if( (s->mb_y&1) == 0 ) h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb); }else h->mb_field_decoding_flag= (s->picture_structure!=PICT_FRAME); h->prev_mb_skipped= 0; mb_type= get_ue_golomb(&s->gb); if(h->slice_type == B_TYPE){ if(mb_type < 23){ partition_count= b_mb_type_info[mb_type].partition_count; mb_type= b_mb_type_info[mb_type].type; }else{ mb_type -= 23; goto decode_intra_mb; } }else if(h->slice_type == P_TYPE /*|| h->slice_type == SP_TYPE */){ if(mb_type < 5){ partition_count= p_mb_type_info[mb_type].partition_count; mb_type= p_mb_type_info[mb_type].type; }else{ mb_type -= 5; goto decode_intra_mb; } }else{ assert(h->slice_type == I_TYPE); decode_intra_mb: if(mb_type > 25){ av_log(h->s.avctx, AV_LOG_ERROR, \"mb_type %d in %c slice too large at %d %d\\n\", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y); return -1; } partition_count=0; cbp= i_mb_type_info[mb_type].cbp; h->intra16x16_pred_mode= i_mb_type_info[mb_type].pred_mode; mb_type= i_mb_type_info[mb_type].type; } if(MB_FIELD) mb_type |= MB_TYPE_INTERLACED; h->slice_table[ mb_xy ]= h->slice_num; if(IS_INTRA_PCM(mb_type)){ unsigned int x, y; // We assume these blocks are very rare so we do not optimize it. align_get_bits(&s->gb); // The pixels are stored in the same order as levels in h->mb array. 
for(y=0; y<16; y++){ const int index= 4*(y&3) + 32*((y>>2)&1) + 128*(y>>3); for(x=0; x<16; x++){ tprintf(s->avctx, \"LUMA ICPM LEVEL (%3d)\\n\", show_bits(&s->gb, 8)); h->mb[index + (x&3) + 16*((x>>2)&1) + 64*(x>>3)]= get_bits(&s->gb, 8); } } for(y=0; y<8; y++){ const int index= 256 + 4*(y&3) + 32*(y>>2); for(x=0; x<8; x++){ tprintf(s->avctx, \"CHROMA U ICPM LEVEL (%3d)\\n\", show_bits(&s->gb, 8)); h->mb[index + (x&3) + 16*(x>>2)]= get_bits(&s->gb, 8); } } for(y=0; y<8; y++){ const int index= 256 + 64 + 4*(y&3) + 32*(y>>2); for(x=0; x<8; x++){ tprintf(s->avctx, \"CHROMA V ICPM LEVEL (%3d)\\n\", show_bits(&s->gb, 8)); h->mb[index + (x&3) + 16*(x>>2)]= get_bits(&s->gb, 8); } } // In deblocking, the quantizer is 0 s->current_picture.qscale_table[mb_xy]= 0; h->chroma_qp = get_chroma_qp(h->pps.chroma_qp_index_offset, 0); // All coeffs are present memset(h->non_zero_count[mb_xy], 16, 16); s->current_picture.mb_type[mb_xy]= mb_type; return 0; } if(MB_MBAFF){ h->ref_count[0] <<= 1; h->ref_count[1] <<= 1; } fill_caches(h, mb_type, 0); //mb_pred if(IS_INTRA(mb_type)){ int pred_mode; // init_top_left_availability(h); if(IS_INTRA4x4(mb_type)){ int i; int di = 1; if(dct8x8_allowed && get_bits1(&s->gb)){ mb_type |= MB_TYPE_8x8DCT; di = 4; } // fill_intra4x4_pred_table(h); for(i=0; i<16; i+=di){ int mode= pred_intra_mode(h, i); if(!get_bits1(&s->gb)){ const int rem_mode= get_bits(&s->gb, 3); mode = rem_mode + (rem_mode >= mode); } if(di==4) fill_rectangle( &h->intra4x4_pred_mode_cache[ scan8[i] ], 2, 2, 8, mode, 1 ); else h->intra4x4_pred_mode_cache[ scan8[i] ] = mode; } write_back_intra_pred_mode(h); if( check_intra4x4_pred_mode(h) < 0) return -1; }else{ h->intra16x16_pred_mode= check_intra_pred_mode(h, h->intra16x16_pred_mode); if(h->intra16x16_pred_mode < 0) return -1; } pred_mode= check_intra_pred_mode(h, get_ue_golomb(&s->gb)); if(pred_mode < 0) return -1; h->chroma_pred_mode= pred_mode; }else if(partition_count==4){ int i, j, sub_partition_count[4], list, ref[2][4]; if(h->slice_type == B_TYPE){ for(i=0; i<4; i++){ h->sub_mb_type[i]= get_ue_golomb(&s->gb); if(h->sub_mb_type[i] >=13){ av_log(h->s.avctx, AV_LOG_ERROR, \"B sub_mb_type %u out of range at %d %d\\n\", h->sub_mb_type[i], s->mb_x, s->mb_y); return -1; } sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count; h->sub_mb_type[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].type; } if( IS_DIRECT(h->sub_mb_type[0]) || IS_DIRECT(h->sub_mb_type[1]) || IS_DIRECT(h->sub_mb_type[2]) || IS_DIRECT(h->sub_mb_type[3])) { pred_direct_motion(h, &mb_type); h->ref_cache[0][scan8[4]] = h->ref_cache[1][scan8[4]] = h->ref_cache[0][scan8[12]] = h->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE; } }else{ assert(h->slice_type == P_TYPE || h->slice_type == SP_TYPE); //FIXME SP correct ? for(i=0; i<4; i++){ h->sub_mb_type[i]= get_ue_golomb(&s->gb); if(h->sub_mb_type[i] >=4){ av_log(h->s.avctx, AV_LOG_ERROR, \"P sub_mb_type %u out of range at %d %d\\n\", h->sub_mb_type[i], s->mb_x, s->mb_y); return -1; } sub_partition_count[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count; h->sub_mb_type[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].type; } } for(list=0; listlist_count; list++){ int ref_count= IS_REF0(mb_type) ? 1 : h->ref_count[list]; for(i=0; i<4; i++){ if(IS_DIRECT(h->sub_mb_type[i])) continue; if(IS_DIR(h->sub_mb_type[i], 0, list)){ unsigned int tmp = get_te0_golomb(&s->gb, ref_count); //FIXME init to 0 before and skip? 
if(tmp>=ref_count){ av_log(h->s.avctx, AV_LOG_ERROR, \"ref %u overflow\\n\", tmp); return -1; } ref[list][i]= tmp; }else{ //FIXME ref[list][i] = -1; } } } if(dct8x8_allowed) dct8x8_allowed = get_dct8x8_allowed(h); for(list=0; listlist_count; list++){ for(i=0; i<4; i++){ if(IS_DIRECT(h->sub_mb_type[i])) { h->ref_cache[list][ scan8[4*i] ] = h->ref_cache[list][ scan8[4*i]+1 ]; continue; } h->ref_cache[list][ scan8[4*i] ]=h->ref_cache[list][ scan8[4*i]+1 ]= h->ref_cache[list][ scan8[4*i]+8 ]=h->ref_cache[list][ scan8[4*i]+9 ]= ref[list][i]; if(IS_DIR(h->sub_mb_type[i], 0, list)){ const int sub_mb_type= h->sub_mb_type[i]; const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 2 : 1; for(j=0; jmv_cache[list][ scan8[index] ]; pred_motion(h, index, block_width, list, h->ref_cache[list][ scan8[index] ], &mx, &my); mx += get_se_golomb(&s->gb); my += get_se_golomb(&s->gb); tprintf(s->avctx, \"final mv:%d %d\\n\", mx, my); if(IS_SUB_8X8(sub_mb_type)){ mv_cache[ 1 ][0]= mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx; mv_cache[ 1 ][1]= mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my; }else if(IS_SUB_8X4(sub_mb_type)){ mv_cache[ 1 ][0]= mx; mv_cache[ 1 ][1]= my; }else if(IS_SUB_4X8(sub_mb_type)){ mv_cache[ 8 ][0]= mx; mv_cache[ 8 ][1]= my; } mv_cache[ 0 ][0]= mx; mv_cache[ 0 ][1]= my; } }else{ uint32_t *p= (uint32_t *)&h->mv_cache[list][ scan8[4*i] ][0]; p[0] = p[1]= p[8] = p[9]= 0; } } } }else if(IS_DIRECT(mb_type)){ pred_direct_motion(h, &mb_type); dct8x8_allowed &= h->sps.direct_8x8_inference_flag; }else{ int list, mx, my, i; //FIXME we should set ref_idx_l? to 0 if we use that later ... if(IS_16X16(mb_type)){ for(list=0; listlist_count; list++){ unsigned int val; if(IS_DIR(mb_type, 0, list)){ val= get_te0_golomb(&s->gb, h->ref_count[list]); if(val >= h->ref_count[list]){ av_log(h->s.avctx, AV_LOG_ERROR, \"ref %u overflow\\n\", val); return -1; } }else val= LIST_NOT_USED&0xFF; fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, val, 1); } for(list=0; listlist_count; list++){ unsigned int val; if(IS_DIR(mb_type, 0, list)){ pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mx, &my); mx += get_se_golomb(&s->gb); my += get_se_golomb(&s->gb); tprintf(s->avctx, \"final mv:%d %d\\n\", mx, my); val= pack16to32(mx,my); }else val=0; fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, val, 4); } } else if(IS_16X8(mb_type)){ for(list=0; listlist_count; list++){ for(i=0; i<2; i++){ unsigned int val; if(IS_DIR(mb_type, i, list)){ val= get_te0_golomb(&s->gb, h->ref_count[list]); if(val >= h->ref_count[list]){ av_log(h->s.avctx, AV_LOG_ERROR, \"ref %u overflow\\n\", val); return -1; } }else val= LIST_NOT_USED&0xFF; fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 1); } } for(list=0; listlist_count; list++){ for(i=0; i<2; i++){ unsigned int val; if(IS_DIR(mb_type, i, list)){ pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mx, &my); mx += get_se_golomb(&s->gb); my += get_se_golomb(&s->gb); tprintf(s->avctx, \"final mv:%d %d\\n\", mx, my); val= pack16to32(mx,my); }else val=0; fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 4); } } }else{ assert(IS_8X16(mb_type)); for(list=0; listlist_count; list++){ for(i=0; i<2; i++){ unsigned int val; if(IS_DIR(mb_type, i, list)){ //FIXME optimize val= get_te0_golomb(&s->gb, h->ref_count[list]); if(val >= h->ref_count[list]){ av_log(h->s.avctx, AV_LOG_ERROR, \"ref %u overflow\\n\", val); return -1; } }else val= LIST_NOT_USED&0xFF; fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 1); } } 
for(list=0; list<h->list_count; list++){ for(i=0; i<2; i++){ unsigned int val; if(IS_DIR(mb_type, i, list)){ pred_8x16_motion(h, i*4, list, h->ref_cache[list][ scan8[0] + 2*i ], &mx, &my); mx += get_se_golomb(&s->gb); my += get_se_golomb(&s->gb); tprintf(s->avctx, \"final mv:%d %d\\n\", mx, my); val= pack16to32(mx,my); }else val=0; fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 4); } } } } if(IS_INTER(mb_type)) write_back_motion(h, mb_type); if(!IS_INTRA16x16(mb_type)){ cbp= get_ue_golomb(&s->gb); if(cbp > 47){ av_log(h->s.avctx, AV_LOG_ERROR, \"cbp too large (%u) at %d %d\\n\", cbp, s->mb_x, s->mb_y); return -1; } if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp[cbp]; else cbp= golomb_to_inter_cbp[cbp]; } h->cbp = cbp; if(dct8x8_allowed && (cbp&15) && !IS_INTRA(mb_type)){ if(get_bits1(&s->gb)) mb_type |= MB_TYPE_8x8DCT; } s->current_picture.mb_type[mb_xy]= mb_type; if(cbp || IS_INTRA16x16(mb_type)){ int i8x8, i4x4, chroma_idx; int chroma_qp, dquant; GetBitContext *gb= IS_INTRA(mb_type) ? h->intra_gb_ptr : h->inter_gb_ptr; const uint8_t *scan, *scan8x8, *dc_scan; // fill_non_zero_count_cache(h); if(IS_INTERLACED(mb_type)){ scan8x8= s->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0; scan= s->qscale ? h->field_scan : h->field_scan_q0; dc_scan= luma_dc_field_scan; }else{ scan8x8= s->qscale ? h->zigzag_scan8x8_cavlc : h->zigzag_scan8x8_cavlc_q0; scan= s->qscale ? h->zigzag_scan : h->zigzag_scan_q0; dc_scan= luma_dc_zigzag_scan; } dquant= get_se_golomb(&s->gb); if( dquant > 25 || dquant < -26 ){ av_log(h->s.avctx, AV_LOG_ERROR, \"dquant out of range (%d) at %d %d\\n\", dquant, s->mb_x, s->mb_y); return -1; } s->qscale += dquant; if(((unsigned)s->qscale) > 51){ if(s->qscale<0) s->qscale+= 52; else s->qscale-= 52; } h->chroma_qp= chroma_qp= get_chroma_qp(h->pps.chroma_qp_index_offset, s->qscale); if(IS_INTRA16x16(mb_type)){ if( decode_residual(h, h->intra_gb_ptr, h->mb, LUMA_DC_BLOCK_INDEX, dc_scan, h->dequant4_coeff[0][s->qscale], 16) < 0){ return -1; //FIXME continue if partitioned and other return -1 too } assert((cbp&15) == 0 || (cbp&15) == 15); if(cbp&15){ for(i8x8=0; i8x8<4; i8x8++){ for(i4x4=0; i4x4<4; i4x4++){ const int index= i4x4 + 4*i8x8; if( decode_residual(h, h->intra_gb_ptr, h->mb + 16*index, index, scan + 1, h->dequant4_coeff[0][s->qscale], 15) < 0 ){ return -1; } } } }else{ fill_rectangle(&h->non_zero_count_cache[scan8[0]], 4, 4, 8, 0, 1); } }else{ for(i8x8=0; i8x8<4; i8x8++){ if(cbp & (1<<i8x8)){ if(IS_8x8DCT(mb_type)){ DCTELEM *buf = &h->mb[64*i8x8]; uint8_t *nnz; for(i4x4=0; i4x4<4; i4x4++){ if( decode_residual(h, gb, buf, i4x4+4*i8x8, scan8x8+16*i4x4, h->dequant8_coeff[IS_INTRA( mb_type ) ? 0:1][s->qscale], 16) <0 ) return -1; } nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ]; nnz[0] += nnz[1] + nnz[8] + nnz[9]; }else{ for(i4x4=0; i4x4<4; i4x4++){ const int index= i4x4 + 4*i8x8; if( decode_residual(h, gb, h->mb + 16*index, index, scan, h->dequant4_coeff[IS_INTRA( mb_type ) ? 0:3][s->qscale], 16) <0 ){ return -1; } } } }else{ uint8_t * const nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ]; nnz[0] = nnz[1] = nnz[8] = nnz[9] = 0; } } } if(cbp&0x30){ for(chroma_idx=0; chroma_idx<2; chroma_idx++) if( decode_residual(h, gb, h->mb + 256 + 16*4*chroma_idx, CHROMA_DC_BLOCK_INDEX, chroma_dc_scan, NULL, 4) < 0){ return -1; } } if(cbp&0x20){ for(chroma_idx=0; chroma_idx<2; chroma_idx++){ const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 
0:3)][chroma_qp]; for(i4x4=0; i4x4<4; i4x4++){ const int index= 16 + 4*chroma_idx + i4x4; if( decode_residual(h, gb, h->mb + 16*index, index, scan + 1, qmul, 15) < 0){ return -1; } } } }else{ uint8_t * const nnz= &h->non_zero_count_cache[0]; nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] = nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0; } }else{ uint8_t * const nnz= &h->non_zero_count_cache[0]; fill_rectangle(&nnz[scan8[0]], 4, 4, 8, 0, 1); nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] = nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0; } s->current_picture.qscale_table[mb_xy]= s->qscale; write_back_non_zero_count(h); if(MB_MBAFF){ h->ref_count[0] >>= 1; h->ref_count[1] >>= 1; } return 0; }"} {"target": 1, "idx": 23307, "func": "void pci_default_write_config(PCIDevice *d, uint32_t address, uint32_t val, int len) { int can_write, i; uint32_t end, addr; if (len == 4 && ((address >= 0x10 && address < 0x10 + 4 * 6) || (address >= 0x30 && address < 0x34))) { PCIIORegion *r; int reg; if ( address >= 0x30 ) { reg = PCI_ROM_SLOT; }else{ reg = (address - 0x10) >> 2; } r = &d->io_regions[reg]; if (r->size == 0) goto default_config; /* compute the stored value */ if (reg == PCI_ROM_SLOT) { /* keep ROM enable bit */ val &= (~(r->size - 1)) | 1; } else { val &= ~(r->size - 1); val |= r->type; } *(uint32_t *)(d->config + address) = cpu_to_le32(val); pci_update_mappings(d); return; } default_config: /* not efficient, but simple */ addr = address; for(i = 0; i < len; i++) { /* default read/write accesses */ switch(d->config[0x0e]) { case 0x00: case 0x80: switch(addr) { case 0x00: case 0x01: case 0x02: case 0x03: case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0e: case 0x10 ... 0x27: /* base */ case 0x30 ... 0x33: /* rom */ case 0x3d: can_write = 0; break; default: can_write = 1; break; } break; default: case 0x01: switch(addr) { case 0x00: case 0x01: case 0x02: case 0x03: case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0e: case 0x38 ... 
0x3b: /* rom */ case 0x3d: can_write = 0; break; default: can_write = 1; break; } break; } if (can_write) { d->config[addr] = val; } addr++; val >>= 8; } end = address + len; if (end > PCI_COMMAND && address < (PCI_COMMAND + 2)) { /* if the command register is modified, we must modify the mappings */ pci_update_mappings(d); } }"} {"target": 1, "idx": 23309, "func": "static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length) { int v, i; if (s->color_type == PNG_COLOR_TYPE_PALETTE) { if (length > 256 || !(s->state & PNG_PLTE)) return AVERROR_INVALIDDATA; for (i = 0; i < length; i++) { v = bytestream2_get_byte(&s->gb); s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24); } } else if (s->color_type == PNG_COLOR_TYPE_GRAY || s->color_type == PNG_COLOR_TYPE_RGB) { if ((s->color_type == PNG_COLOR_TYPE_GRAY && length != 2) || (s->color_type == PNG_COLOR_TYPE_RGB && length != 6)) return AVERROR_INVALIDDATA; for (i = 0; i < length / 2; i++) { /* only use the least significant bits */ v = av_mod_uintp2(bytestream2_get_be16(&s->gb), s->bit_depth); if (s->bit_depth > 8) AV_WB16(&s->transparent_color_be[2 * i], v); else s->transparent_color_be[i] = v; } } else { return AVERROR_INVALIDDATA; } bytestream2_skip(&s->gb, 4); /* crc */ s->has_trns = 1; return 0; }"} {"target": 1, "idx": 23323, "func": "static inline int wv_get_value_integer(WavpackFrameContext *s, uint32_t *crc, int S) { unsigned bit; if (s->extra_bits) { S <<= s->extra_bits; if (s->got_extra_bits && get_bits_left(&s->gb_extra_bits) >= s->extra_bits) { S |= get_bits_long(&s->gb_extra_bits, s->extra_bits); *crc = *crc * 9 + (S & 0xffff) * 3 + ((unsigned)S >> 16); } } bit = (S & s->and) | s->or; bit = ((S + bit) << s->shift) - bit; if (s->hybrid) bit = av_clip(bit, s->hybrid_minclip, s->hybrid_maxclip); return bit << s->post_shift; }"} {"target": 1, "idx": 23339, "func": "target_ulong helper_ldl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { uint64_t tmp; tmp = do_lbu(env, arg2, mem_idx); arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56); if (GET_LMASK64(arg2) <= 6) { tmp = do_lbu(env, GET_OFFSET(arg2, 1), mem_idx); arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48); } if (GET_LMASK64(arg2) <= 5) { tmp = do_lbu(env, GET_OFFSET(arg2, 2), mem_idx); arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40); } if (GET_LMASK64(arg2) <= 4) { tmp = do_lbu(env, GET_OFFSET(arg2, 3), mem_idx); arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32); } if (GET_LMASK64(arg2) <= 3) { tmp = do_lbu(env, GET_OFFSET(arg2, 4), mem_idx); arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24); } if (GET_LMASK64(arg2) <= 2) { tmp = do_lbu(env, GET_OFFSET(arg2, 5), mem_idx); arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16); } if (GET_LMASK64(arg2) <= 1) { tmp = do_lbu(env, GET_OFFSET(arg2, 6), mem_idx); arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8); } if (GET_LMASK64(arg2) == 0) { tmp = do_lbu(env, GET_OFFSET(arg2, 7), mem_idx); arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp; } return arg1; }"} {"target": 0, "idx": 23344, "func": "static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra) { int i; for (i = 0; i < 64; i++) { int j = s->dsp.idct_permutation[ff_zigzag_direct[i]]; int v = get_bits(&s->gb, 8); if (v == 0) { av_log(s->avctx, AV_LOG_ERROR, \"matrix damaged\\n\"); return -1; } if (intra && i == 0 && v != 8) { av_log(s->avctx, AV_LOG_ERROR, \"intra matrix specifies invalid DC quantizer %d, ignoring\\n\", v); v = 8; // needed by pink.mpg / issue1046 } matrix0[j] = v; if 
(matrix1) matrix1[j] = v; } return 0; }"} {"target": 1, "idx": 23354, "func": "static void child_handler(int sig) { int status; while (waitpid(-1, &status, WNOHANG) > 0) /* NOTHING */; }"} {"target": 0, "idx": 23361, "func": "static int copy_from(IpvideoContext *s, AVFrame *src, AVFrame *dst, int delta_x, int delta_y) { int current_offset = s->pixel_ptr - dst->data[0]; int motion_offset = current_offset + delta_y * dst->linesize[0] + delta_x * (1 + s->is_16bpp); if (motion_offset < 0) { av_log(s->avctx, AV_LOG_ERROR, \" Interplay video: motion offset < 0 (%d)\\n\", motion_offset); return AVERROR_INVALIDDATA; } else if (motion_offset > s->upper_motion_limit_offset) { av_log(s->avctx, AV_LOG_ERROR, \" Interplay video: motion offset above limit (%d >= %d)\\n\", motion_offset, s->upper_motion_limit_offset); return AVERROR_INVALIDDATA; } if (src->data[0] == NULL) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid decode type, corrupted header?\\n\"); return AVERROR(EINVAL); } s->hdsp.put_pixels_tab[!s->is_16bpp][0](s->pixel_ptr, src->data[0] + motion_offset, dst->linesize[0], 8); return 0; }"} {"target": 0, "idx": 23371, "func": "static void imx_epit_reset(DeviceState *dev) { IMXEPITState *s = IMX_EPIT(dev); /* * Soft reset doesn't touch some bits; hard reset clears them */ s->cr &= (CR_EN|CR_ENMOD|CR_STOPEN|CR_DOZEN|CR_WAITEN|CR_DBGEN); s->sr = 0; s->lr = TIMER_MAX; s->cmp = 0; s->cnt = 0; /* stop both timers */ ptimer_stop(s->timer_cmp); ptimer_stop(s->timer_reload); /* compute new frequency */ imx_epit_set_freq(s); /* init both timers to TIMER_MAX */ ptimer_set_limit(s->timer_cmp, TIMER_MAX, 1); ptimer_set_limit(s->timer_reload, TIMER_MAX, 1); if (s->freq && (s->cr & CR_EN)) { /* if the timer is still enabled, restart it */ ptimer_run(s->timer_reload, 0); } }"} {"target": 0, "idx": 23374, "func": "static void quit_timers(void) { alarm_timer->stop(alarm_timer); alarm_timer = NULL; }"} {"target": 1, "idx": 23386, "func": "static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, int64_t align, QEMUIOVector *qiov, int flags) { BlockDriverState *bs = child->bs; BlockDriver *drv = bs->drv; bool waited; int ret; int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); uint64_t bytes_remaining = bytes; int max_transfer; if (bdrv_has_readonly_bitmaps(bs)) { return -EPERM; assert(is_power_of_2(align)); assert((offset & (align - 1)) == 0); assert((bytes & (align - 1)) == 0); assert(!qiov || bytes == qiov->size); assert((bs->open_flags & BDRV_O_NO_IO) == 0); assert(!(flags & ~BDRV_REQ_MASK)); max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), align); waited = wait_serialising_requests(req); assert(!waited || !req->serialising); assert(req->overlap_offset <= offset); assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); assert(child->perm & BLK_PERM_WRITE); assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE); ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && qemu_iovec_is_zero(qiov)) { flags |= BDRV_REQ_ZERO_WRITE; if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { flags |= BDRV_REQ_MAY_UNMAP; if (ret < 0) { /* Do nothing, write notifier decided to fail this request */ } else if (flags & BDRV_REQ_ZERO_WRITE) { bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO); ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, 
flags); } else if (flags & BDRV_REQ_WRITE_COMPRESSED) { ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov); } else if (bytes <= max_transfer) { bdrv_debug_event(bs, BLKDBG_PWRITEV); ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags); } else { bdrv_debug_event(bs, BLKDBG_PWRITEV); while (bytes_remaining) { int num = MIN(bytes_remaining, max_transfer); QEMUIOVector local_qiov; int local_flags = flags; assert(num); if (num < bytes_remaining && (flags & BDRV_REQ_FUA) && !(bs->supported_write_flags & BDRV_REQ_FUA)) { /* If FUA is going to be emulated by flush, we only * need to flush on the last iteration */ local_flags &= ~BDRV_REQ_FUA; qemu_iovec_init(&local_qiov, qiov->niov); qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num); ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, num, &local_qiov, local_flags); qemu_iovec_destroy(&local_qiov); if (ret < 0) { break; bytes_remaining -= num; bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); atomic_inc(&bs->write_gen); bdrv_set_dirty(bs, offset, bytes); stat64_max(&bs->wr_highest_offset, offset + bytes); if (ret >= 0) { bs->total_sectors = MAX(bs->total_sectors, end_sector); ret = 0; return ret;"} {"target": 0, "idx": 23393, "func": "static BlockDriverAIOCB *raw_aio_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { RawAIOCB *acb; /* * If O_DIRECT is used and the buffer is not aligned fall back * to synchronous IO. */ BDRVRawState *s = bs->opaque; if (unlikely(s->aligned_buf != NULL && ((uintptr_t) buf % 512))) { QEMUBH *bh; acb = qemu_aio_get(bs, cb, opaque); acb->ret = raw_pread(bs, 512 * sector_num, buf, 512 * nb_sectors); bh = qemu_bh_new(raw_aio_em_cb, acb); qemu_bh_schedule(bh); return &acb->common; } acb = raw_aio_setup(bs, sector_num, buf, nb_sectors, cb, opaque); if (!acb) return NULL; if (aio_read(&acb->aiocb) < 0) { qemu_aio_release(acb); return NULL; } return &acb->common; }"} {"target": 0, "idx": 23405, "func": "static int coroutine_fn bdrv_co_do_readv(BdrvChild *child, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, BdrvRequestFlags flags) { if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { return -EINVAL; } return bdrv_co_preadv(child->bs, sector_num << BDRV_SECTOR_BITS, nb_sectors << BDRV_SECTOR_BITS, qiov, flags); }"} {"target": 0, "idx": 23411, "func": "static void sigchld_handler(int signal) { qemu_bh_schedule(sigchld_bh); }"} {"target": 0, "idx": 23425, "func": "static PESContext *add_pes_stream(MpegTSContext *ts, int pid, int pcr_pid, int stream_type) { MpegTSFilter *tss; PESContext *pes; /* if no pid found, then add a pid context */ pes = av_mallocz(sizeof(PESContext)); if (!pes) return 0; pes->ts = ts; pes->stream = ts->stream; pes->pid = pid; pes->pcr_pid = pcr_pid; pes->stream_type = stream_type; pes->state = MPEGTS_SKIP; pes->pts = AV_NOPTS_VALUE; pes->dts = AV_NOPTS_VALUE; tss = mpegts_open_pes_filter(ts, pid, mpegts_push_data, pes); if (!tss) { av_free(pes); return 0; } return pes; }"} {"target": 1, "idx": 23429, "func": "void mpeg_motion_internal(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, op_pixels_func (*pix_op)[4], int motion_x, int motion_y, int h, int is_mpeg12, int mb_y) { uint8_t *ptr_y, *ptr_cb, *ptr_cr; int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos; emuedge_linesize_type uvlinesize, linesize; #if 0 if(s->quarter_sample) { motion_x>>=1; motion_y>>=1; } 
#endif v_edge_pos = s->v_edge_pos >> field_based; linesize = s->current_picture.f.linesize[0] << field_based; uvlinesize = s->current_picture.f.linesize[1] << field_based; dxy = ((motion_y & 1) << 1) | (motion_x & 1); src_x = s->mb_x* 16 + (motion_x >> 1); src_y =( mb_y<<(4-field_based)) + (motion_y >> 1); if (!is_mpeg12 && s->out_format == FMT_H263) { if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){ mx = (motion_x>>1)|(motion_x&1); my = motion_y >>1; uvdxy = ((my & 1) << 1) | (mx & 1); uvsrc_x = s->mb_x* 8 + (mx >> 1); uvsrc_y =( mb_y<<(3-field_based))+ (my >> 1); }else{ uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1); uvsrc_x = src_x>>1; uvsrc_y = src_y>>1; } }else if(!is_mpeg12 && s->out_format == FMT_H261){//even chroma mv's are full pel in H261 mx = motion_x / 4; my = motion_y / 4; uvdxy = 0; uvsrc_x = s->mb_x*8 + mx; uvsrc_y = mb_y*8 + my; } else { if(s->chroma_y_shift){ mx = motion_x / 2; my = motion_y / 2; uvdxy = ((my & 1) << 1) | (mx & 1); uvsrc_x = s->mb_x* 8 + (mx >> 1); uvsrc_y =( mb_y<<(3-field_based))+ (my >> 1); } else { if(s->chroma_x_shift){ //Chroma422 mx = motion_x / 2; uvdxy = ((motion_y & 1) << 1) | (mx & 1); uvsrc_x = s->mb_x* 8 + (mx >> 1); uvsrc_y = src_y; } else { //Chroma444 uvdxy = dxy; uvsrc_x = src_x; uvsrc_y = src_y; } } } ptr_y = ref_picture[0] + src_y * linesize + src_x; ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x; ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x; if( (unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x&1) - 16, 0) || (unsigned)src_y > FFMAX( v_edge_pos - (motion_y&1) - h , 0)){ if(is_mpeg12 || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MPEG1VIDEO){ av_log(s->avctx,AV_LOG_DEBUG, \"MPEG motion vector out of boundary (%d %d)\\n\", src_x, src_y); return; } s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based, src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos); ptr_y = s->edge_emu_buffer; if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize; s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based, uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1); s->vdsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based, uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr_cb= uvbuf; ptr_cr= uvbuf+16; } } if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data dest_y += s->linesize; dest_cb+= s->uvlinesize; dest_cr+= s->uvlinesize; } if(field_select){ ptr_y += s->linesize; ptr_cb+= s->uvlinesize; ptr_cr+= s->uvlinesize; } pix_op[0][dxy](dest_y, ptr_y, linesize, h); if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ pix_op[s->chroma_x_shift][uvdxy] (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift); pix_op[s->chroma_x_shift][uvdxy] (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift); } if(!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) && s->out_format == FMT_H261){ ff_h261_loop_filter(s); } }"} {"target": 0, "idx": 23455, "func": "static void s390_virtio_irq(S390CPU *cpu, int config_change, uint64_t token) { if (kvm_enabled()) { kvm_s390_virtio_irq(cpu, config_change, token); } else { cpu_inject_ext(cpu, VIRTIO_EXT_CODE, config_change, token); } }"} {"target": 0, "idx": 23465, "func": "static int vble_unpack(VBLEContext *ctx, GetBitContext *gb) { int i; static const uint8_t LUT[256] = { 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, 
6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, }; /* Read all the lengths in first */ for (i = 0; i < ctx->size; i++) { /* At most we need to read 9 bits total to get indices up to 8 */ int val = show_bits(gb, 8); // read reverse unary if (val) { val = LUT[val]; skip_bits(gb, val + 1); ctx->len[i] = val; } else { skip_bits(gb, 8); if (!get_bits1(gb)) return -1; ctx->len[i] = 8; } } /* For any values that have length 0 */ memset(ctx->val, 0, ctx->size); for (i = 0; i < ctx->size; i++) { /* Check we have enough bits left */ if (get_bits_left(gb) < ctx->len[i]) return -1; /* get_bits can't take a length of 0 */ if (ctx->len[i]) ctx->val[i] = (1 << ctx->len[i]) + get_bits(gb, ctx->len[i]) - 1; } return 0; }"} {"target": 0, "idx": 23470, "func": "int ffurl_alloc(URLContext **puc, const char *filename, int flags, const AVIOInterruptCB *int_cb) { URLProtocol *up = NULL; char proto_str[128], proto_nested[128], *ptr; size_t proto_len = strspn(filename, URL_SCHEME_CHARS); if (filename[proto_len] != ':' || is_dos_path(filename)) strcpy(proto_str, \"file\"); else av_strlcpy(proto_str, filename, FFMIN(proto_len + 1, sizeof(proto_str))); av_strlcpy(proto_nested, proto_str, sizeof(proto_nested)); if ((ptr = strchr(proto_nested, '+'))) *ptr = '\\0'; while (up = ffurl_protocol_next(up)) { if (!strcmp(proto_str, up->name)) return url_alloc_for_protocol(puc, up, filename, flags, int_cb); if (up->flags & URL_PROTOCOL_FLAG_NESTED_SCHEME && !strcmp(proto_nested, up->name)) return url_alloc_for_protocol(puc, up, filename, flags, int_cb); } *puc = NULL; return AVERROR_PROTOCOL_NOT_FOUND; }"} {"target": 0, "idx": 23491, "func": "BlockDriverState *bdrv_next(BlockDriverState *bs) { if (!bs) { return QTAILQ_FIRST(&bdrv_states); } return QTAILQ_NEXT(bs, device_list); }"} {"target": 0, "idx": 23495, "func": "void mcf_uart_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { mcf_uart_state *s = (mcf_uart_state *)opaque; switch (addr & 0x3f) { case 0x00: s->mr[s->current_mr] = val; s->current_mr = 1; break; case 0x04: /* CSR is ignored. */ break; case 0x08: /* Command Register. */ mcf_do_command(s, val); break; case 0x0c: /* Transmit Buffer. */ s->sr &= ~MCF_UART_TxEMP; s->tb = val; mcf_uart_do_tx(s); break; case 0x10: /* ACR is ignored. */ break; case 0x14: s->imr = val; break; default: break; } mcf_uart_update(s); }"} {"target": 0, "idx": 23504, "func": "MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) { ARMCPU *cpu; uint32_t switched_level; if (kvm_irqchip_in_kernel()) { /* * We only need to sync timer states with user-space interrupt * controllers, so return early and save cycles if we don't. 
*/ return MEMTXATTRS_UNSPECIFIED; } cpu = ARM_CPU(cs); /* Synchronize our shadowed in-kernel device irq lines with the kvm ones */ if (run->s.regs.device_irq_level != cpu->device_irq_level) { switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level; qemu_mutex_lock_iothread(); if (switched_level & KVM_ARM_DEV_EL1_VTIMER) { qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT], !!(run->s.regs.device_irq_level & KVM_ARM_DEV_EL1_VTIMER)); switched_level &= ~KVM_ARM_DEV_EL1_VTIMER; } if (switched_level & KVM_ARM_DEV_EL1_PTIMER) { qemu_set_irq(cpu->gt_timer_outputs[GTIMER_PHYS], !!(run->s.regs.device_irq_level & KVM_ARM_DEV_EL1_PTIMER)); switched_level &= ~KVM_ARM_DEV_EL1_PTIMER; } /* XXX PMU IRQ is missing */ if (switched_level) { qemu_log_mask(LOG_UNIMP, \"%s: unhandled in-kernel device IRQ %x\\n\", __func__, switched_level); } /* We also mark unknown levels as processed to not waste cycles */ cpu->device_irq_level = run->s.regs.device_irq_level; qemu_mutex_unlock_iothread(); } return MEMTXATTRS_UNSPECIFIED; }"} {"target": 0, "idx": 23508, "func": "static int avi_write_trailer(AVFormatContext *s) { AVIContext *avi = s->priv_data; AVIOContext *pb = s->pb; int res = 0; int i, j, n, nb_frames; int64_t file_size; if (pb->seekable) { if (avi->riff_id == 1) { ff_end_tag(pb, avi->movi_list); res = avi_write_idx1(s); ff_end_tag(pb, avi->riff_start); } else { avi_write_ix(s); ff_end_tag(pb, avi->movi_list); ff_end_tag(pb, avi->riff_start); file_size = avio_tell(pb); avio_seek(pb, avi->odml_list - 8, SEEK_SET); ffio_wfourcc(pb, \"LIST\"); /* Making this AVI OpenDML one */ avio_skip(pb, 16); for (n = nb_frames = 0; n < s->nb_streams; n++) { AVCodecParameters *par = s->streams[n]->codecpar; AVIStream *avist = s->streams[n]->priv_data; if (par->codec_type == AVMEDIA_TYPE_VIDEO) { if (nb_frames < avist->packet_count) nb_frames = avist->packet_count; } else { if (par->codec_id == AV_CODEC_ID_MP2 || par->codec_id == AV_CODEC_ID_MP3) nb_frames += avist->packet_count; } } avio_wl32(pb, nb_frames); avio_seek(pb, file_size, SEEK_SET); avi_write_counters(s, avi->riff_id); } } for (i = 0; i < s->nb_streams; i++) { AVIStream *avist = s->streams[i]->priv_data; for (j = 0; j < avist->indexes.ents_allocated / AVI_INDEX_CLUSTER_SIZE; j++) av_free(avist->indexes.cluster[j]); av_freep(&avist->indexes.cluster); avist->indexes.ents_allocated = avist->indexes.entry = 0; } return res; }"} {"target": 0, "idx": 23516, "func": "static uint64_t omap_mpui_io_read(void *opaque, target_phys_addr_t addr, unsigned size) { if (size != 2) { return omap_badwidth_read16(opaque, addr); } if (addr == OMAP_MPUI_BASE) /* CMR */ return 0xfe4d; OMAP_BAD_REG(addr); return 0; }"} {"target": 0, "idx": 23540, "func": "static void nam_writeb (void *opaque, uint32_t addr, uint32_t val) { PCIAC97LinkState *d = opaque; AC97LinkState *s = &d->ac97; dolog (\"U nam writeb %#x <- %#x\\n\", addr, val); s->cas = 0; }"} {"target": 0, "idx": 23555, "func": "static void s390_flic_common_realize(DeviceState *dev, Error **errp) { S390FLICState *fs = S390_FLIC_COMMON(dev); uint32_t max_batch = fs->adapter_routes_max_batch; if (max_batch > ADAPTER_ROUTES_MAX_GSI) { error_setg(errp, \"flic property adapter_routes_max_batch too big\" \" (%d > %d)\", max_batch, ADAPTER_ROUTES_MAX_GSI); } fs->ais_supported = true; }"} {"target": 0, "idx": 23560, "func": "static int dxva2_get_buffer(AVCodecContext *s, AVFrame *frame, int flags) { InputStream *ist = s->opaque; DXVA2Context *ctx = ist->hwaccel_ctx; return av_hwframe_get_buffer(ctx->hw_frames_ctx, frame, 0); }"} 
{"target": 0, "idx": 23576, "func": "void ff_h264_idct8_add_c(uint8_t *dst, DCTELEM *block, int stride){ int i; uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; block[0] += 32; for( i = 0; i < 8; i++ ) { const int a0 = block[0+i*8] + block[4+i*8]; const int a2 = block[0+i*8] - block[4+i*8]; const int a4 = (block[2+i*8]>>1) - block[6+i*8]; const int a6 = (block[6+i*8]>>1) + block[2+i*8]; const int b0 = a0 + a6; const int b2 = a2 + a4; const int b4 = a2 - a4; const int b6 = a0 - a6; const int a1 = -block[3+i*8] + block[5+i*8] - block[7+i*8] - (block[7+i*8]>>1); const int a3 = block[1+i*8] + block[7+i*8] - block[3+i*8] - (block[3+i*8]>>1); const int a5 = -block[1+i*8] + block[7+i*8] + block[5+i*8] + (block[5+i*8]>>1); const int a7 = block[3+i*8] + block[5+i*8] + block[1+i*8] + (block[1+i*8]>>1); const int b1 = (a7>>2) + a1; const int b3 = a3 + (a5>>2); const int b5 = (a3>>2) - a5; const int b7 = a7 - (a1>>2); block[0+i*8] = b0 + b7; block[7+i*8] = b0 - b7; block[1+i*8] = b2 + b5; block[6+i*8] = b2 - b5; block[2+i*8] = b4 + b3; block[5+i*8] = b4 - b3; block[3+i*8] = b6 + b1; block[4+i*8] = b6 - b1; } for( i = 0; i < 8; i++ ) { const int a0 = block[i+0*8] + block[i+4*8]; const int a2 = block[i+0*8] - block[i+4*8]; const int a4 = (block[i+2*8]>>1) - block[i+6*8]; const int a6 = (block[i+6*8]>>1) + block[i+2*8]; const int b0 = a0 + a6; const int b2 = a2 + a4; const int b4 = a2 - a4; const int b6 = a0 - a6; const int a1 = -block[i+3*8] + block[i+5*8] - block[i+7*8] - (block[i+7*8]>>1); const int a3 = block[i+1*8] + block[i+7*8] - block[i+3*8] - (block[i+3*8]>>1); const int a5 = -block[i+1*8] + block[i+7*8] + block[i+5*8] + (block[i+5*8]>>1); const int a7 = block[i+3*8] + block[i+5*8] + block[i+1*8] + (block[i+1*8]>>1); const int b1 = (a7>>2) + a1; const int b3 = a3 + (a5>>2); const int b5 = (a3>>2) - a5; const int b7 = a7 - (a1>>2); dst[i + 0*stride] = cm[ dst[i + 0*stride] + ((b0 + b7) >> 6) ]; dst[i + 1*stride] = cm[ dst[i + 1*stride] + ((b2 + b5) >> 6) ]; dst[i + 2*stride] = cm[ dst[i + 2*stride] + ((b4 + b3) >> 6) ]; dst[i + 3*stride] = cm[ dst[i + 3*stride] + ((b6 + b1) >> 6) ]; dst[i + 4*stride] = cm[ dst[i + 4*stride] + ((b6 - b1) >> 6) ]; dst[i + 5*stride] = cm[ dst[i + 5*stride] + ((b4 - b3) >> 6) ]; dst[i + 6*stride] = cm[ dst[i + 6*stride] + ((b2 - b5) >> 6) ]; dst[i + 7*stride] = cm[ dst[i + 7*stride] + ((b0 - b7) >> 6) ]; } }"} {"target": 0, "idx": 23587, "func": "void do_load_dcr (void) { target_ulong val; if (unlikely(env->dcr_env == NULL)) { if (loglevel != 0) { fprintf(logfile, \"No DCR environment\\n\"); } do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_INVAL); } else if (unlikely(ppc_dcr_read(env->dcr_env, T0, &val) != 0)) { if (loglevel != 0) { fprintf(logfile, \"DCR read error %d %03x\\n\", (int)T0, (int)T0); } do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_PRIV_REG); } else { T0 = val; } }"} {"target": 0, "idx": 23622, "func": "void tlb_set_page(CPUArchState *env, target_ulong vaddr, target_phys_addr_t paddr, int prot, int mmu_idx, target_ulong size) { MemoryRegionSection *section; unsigned int index; target_ulong address; target_ulong code_address; uintptr_t addend; CPUTLBEntry *te; target_phys_addr_t iotlb; assert(size >= TARGET_PAGE_SIZE); if (size != TARGET_PAGE_SIZE) { tlb_add_large_page(env, vaddr, size); } section = phys_page_find(paddr >> TARGET_PAGE_BITS); #if defined(DEBUG_TLB) printf(\"tlb_set_page: vaddr=\" TARGET_FMT_lx \" paddr=0x\" TARGET_FMT_plx \" prot=%x idx=%d pd=0x%08lx\\n\", vaddr, paddr, prot, mmu_idx, pd); #endif address = vaddr; if 
(!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { /* IO memory case (romd handled later) */ address |= TLB_MMIO; } if (memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr)) { addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + memory_region_section_addr(section, paddr); } else { addend = 0; } code_address = address; iotlb = memory_region_section_get_iotlb(env, section, vaddr, paddr, prot, &address); index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); env->iotlb[mmu_idx][index] = iotlb - vaddr; te = &env->tlb_table[mmu_idx][index]; te->addend = addend - vaddr; if (prot & PAGE_READ) { te->addr_read = address; } else { te->addr_read = -1; } if (prot & PAGE_EXEC) { te->addr_code = code_address; } else { te->addr_code = -1; } if (prot & PAGE_WRITE) { if ((memory_region_is_ram(section->mr) && section->readonly) || memory_region_is_romd(section->mr)) { /* Write access calls the I/O callback. */ te->addr_write = address | TLB_MMIO; } else if (memory_region_is_ram(section->mr) && !cpu_physical_memory_is_dirty( section->mr->ram_addr + memory_region_section_addr(section, paddr))) { te->addr_write = address | TLB_NOTDIRTY; } else { te->addr_write = address; } } else { te->addr_write = -1; } }"} {"target": 1, "idx": 23626, "func": "static void rtl8139_io_writeb(void *opaque, uint8_t addr, uint32_t val) { RTL8139State *s = opaque; addr &= 0xff; switch (addr) { case MAC0 ... MAC0+5: s->phys[addr - MAC0] = val; break; case MAC0+6 ... MAC0+7: /* reserved */ break; case MAR0 ... MAR0+7: s->mult[addr - MAR0] = val; break; case ChipCmd: rtl8139_ChipCmd_write(s, val); break; case Cfg9346: rtl8139_Cfg9346_write(s, val); break; case TxConfig: /* windows driver sometimes writes using byte-lenth call */ rtl8139_TxConfig_writeb(s, val); break; case Config0: rtl8139_Config0_write(s, val); break; case Config1: rtl8139_Config1_write(s, val); break; case Config3: rtl8139_Config3_write(s, val); break; case Config4: rtl8139_Config4_write(s, val); break; case Config5: rtl8139_Config5_write(s, val); break; case MediaStatus: /* ignore */ DPRINTF(\"not implemented write(b) to MediaStatus val=0x%02x\\n\", val); break; case HltClk: DPRINTF(\"HltClk write val=0x%08x\\n\", val); if (val == 'R') { s->clock_enabled = 1; } else if (val == 'H') { s->clock_enabled = 0; } break; case TxThresh: DPRINTF(\"C+ TxThresh write(b) val=0x%02x\\n\", val); s->TxThresh = val; break; case TxPoll: DPRINTF(\"C+ TxPoll write(b) val=0x%02x\\n\", val); if (val & (1 << 7)) { DPRINTF(\"C+ TxPoll high priority transmission (not \" \"implemented)\\n\"); //rtl8139_cplus_transmit(s); } if (val & (1 << 6)) { DPRINTF(\"C+ TxPoll normal priority transmission\\n\"); rtl8139_cplus_transmit(s); } break; default: DPRINTF(\"not implemented write(b) addr=0x%x val=0x%02x\\n\", addr, val); break; } }"} {"target": 1, "idx": 23627, "func": "static void rv40_h_weak_loop_filter(uint8_t *src, const int stride, const int filter_p1, const int filter_q1, const int alpha, const int beta, const int lim_p0q0, const int lim_q1, const int lim_p1) { rv40_weak_loop_filter(src, stride, 1, filter_p1, filter_q1, alpha, beta, lim_p0q0, lim_q1, lim_p1); }"} {"target": 1, "idx": 23636, "func": "static int decode_subframe(WmallDecodeCtx *s) { int offset = s->samples_per_frame; int subframe_len = s->samples_per_frame; int total_samples = s->samples_per_frame * s->num_channels; int i, j, rawpcm_tile, padding_zeroes, res; s->subframe_offset = get_bits_count(&s->gb); /* reset channel context and find the next block offset and size 
== the next block of the channel with the smallest number of decoded samples */ for (i = 0; i < s->num_channels; i++) { if (offset > s->channel[i].decoded_samples) { offset = s->channel[i].decoded_samples; subframe_len = s->channel[i].subframe_len[s->channel[i].cur_subframe]; } } /* get a list of all channels that contain the estimated block */ s->channels_for_cur_subframe = 0; for (i = 0; i < s->num_channels; i++) { const int cur_subframe = s->channel[i].cur_subframe; /* subtract already processed samples */ total_samples -= s->channel[i].decoded_samples; /* and count if there are multiple subframes that match our profile */ if (offset == s->channel[i].decoded_samples && subframe_len == s->channel[i].subframe_len[cur_subframe]) { total_samples -= s->channel[i].subframe_len[cur_subframe]; s->channel[i].decoded_samples += s->channel[i].subframe_len[cur_subframe]; s->channel_indexes_for_cur_subframe[s->channels_for_cur_subframe] = i; ++s->channels_for_cur_subframe; } } /* check if the frame will be complete after processing the estimated block */ if (!total_samples) s->parsed_all_subframes = 1; s->seekable_tile = get_bits1(&s->gb); if (s->seekable_tile) { clear_codec_buffers(s); s->do_arith_coding = get_bits1(&s->gb); if (s->do_arith_coding) { avpriv_request_sample(s->avctx, \"Arithmetic coding\"); return AVERROR_PATCHWELCOME; } s->do_ac_filter = get_bits1(&s->gb); s->do_inter_ch_decorr = get_bits1(&s->gb); s->do_mclms = get_bits1(&s->gb); if (s->do_ac_filter) decode_ac_filter(s); if (s->do_mclms) decode_mclms(s); if ((res = decode_cdlms(s)) < 0) return res; s->movave_scaling = get_bits(&s->gb, 3); s->quant_stepsize = get_bits(&s->gb, 8) + 1; reset_codec(s); } else if (!s->cdlms[0][0].order) { av_log(s->avctx, AV_LOG_DEBUG, \"Waiting for seekable tile\\n\"); s->frame.nb_samples = 0; return -1; } rawpcm_tile = get_bits1(&s->gb); for (i = 0; i < s->num_channels; i++) s->is_channel_coded[i] = 1; if (!rawpcm_tile) { for (i = 0; i < s->num_channels; i++) s->is_channel_coded[i] = get_bits1(&s->gb); if (s->bV3RTM) { // LPC s->do_lpc = get_bits1(&s->gb); if (s->do_lpc) { decode_lpc(s); avpriv_request_sample(s->avctx, \"Expect wrong output since \" \"inverse LPC filter\"); } } else s->do_lpc = 0; } if (get_bits1(&s->gb)) padding_zeroes = get_bits(&s->gb, 5); else padding_zeroes = 0; if (rawpcm_tile) { int bits = s->bits_per_sample - padding_zeroes; if (bits <= 0) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid number of padding bits in raw PCM tile\\n\"); return AVERROR_INVALIDDATA; } av_dlog(s->avctx, \"RAWPCM %d bits per sample. 
\" \"total %d bits, remain=%d\\n\", bits, bits * s->num_channels * subframe_len, get_bits_count(&s->gb)); for (i = 0; i < s->num_channels; i++) for (j = 0; j < subframe_len; j++) s->channel_coeffs[i][j] = get_sbits(&s->gb, bits); } else { for (i = 0; i < s->num_channels; i++) if (s->is_channel_coded[i]) { decode_channel_residues(s, i, subframe_len); if (s->seekable_tile) use_high_update_speed(s, i); else use_normal_update_speed(s, i); revert_cdlms(s, i, 0, subframe_len); } else { memset(s->channel_residues[i], 0, sizeof(**s->channel_residues) * subframe_len); } } if (s->do_mclms) revert_mclms(s, subframe_len); if (s->do_inter_ch_decorr) revert_inter_ch_decorr(s, subframe_len); if (s->do_ac_filter) revert_acfilter(s, subframe_len); /* Dequantize */ if (s->quant_stepsize != 1) for (i = 0; i < s->num_channels; i++) for (j = 0; j < subframe_len; j++) s->channel_residues[i][j] *= s->quant_stepsize; /* Write to proper output buffer depending on bit-depth */ for (i = 0; i < s->channels_for_cur_subframe; i++) { int c = s->channel_indexes_for_cur_subframe[i]; int subframe_len = s->channel[c].subframe_len[s->channel[c].cur_subframe]; for (j = 0; j < subframe_len; j++) { if (s->bits_per_sample == 16) { *s->samples_16[c]++ = (int16_t) s->channel_residues[c][j] << padding_zeroes; } else { *s->samples_32[c]++ = s->channel_residues[c][j] << padding_zeroes; } } } /* handled one subframe */ for (i = 0; i < s->channels_for_cur_subframe; i++) { int c = s->channel_indexes_for_cur_subframe[i]; if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) { av_log(s->avctx, AV_LOG_ERROR, \"broken subframe\\n\"); return AVERROR_INVALIDDATA; } ++s->channel[c].cur_subframe; } return 0; }"} {"target": 1, "idx": 23643, "func": "static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *bguf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y) { const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster __asm__ volatile( \"mov %%\"REG_b\", \"ESP_OFFSET\"(%5) \\n\\t\" \"mov %4, %%\"REG_b\" \\n\\t\" \"push %%\"REG_BP\" \\n\\t\" YSCALEYUV2RGB1(%%REGBP, %5) \"pxor %%mm7, %%mm7 \\n\\t\" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP \"paddusb \"BLUE_DITHER\"(%5), %%mm2 \\n\\t\" \"paddusb \"GREEN_DITHER\"(%5), %%mm4 \\n\\t\" \"paddusb \"RED_DITHER\"(%5), %%mm5 \\n\\t\" #endif WRITERGB16(%%REGb, 8280(%5), %%REGBP) \"pop %%\"REG_BP\" \\n\\t\" \"mov \"ESP_OFFSET\"(%5), %%\"REG_b\" \\n\\t\" :: \"c\" (buf0), \"d\" (buf1), \"S\" (ubuf0), \"D\" (ubuf1), \"m\" (dest), \"a\" (&c->redDither) ); } else { __asm__ volatile( \"mov %%\"REG_b\", \"ESP_OFFSET\"(%5) \\n\\t\" \"mov %4, %%\"REG_b\" \\n\\t\" \"push %%\"REG_BP\" \\n\\t\" YSCALEYUV2RGB1b(%%REGBP, %5) \"pxor %%mm7, %%mm7 \\n\\t\" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP \"paddusb \"BLUE_DITHER\"(%5), %%mm2 \\n\\t\" \"paddusb \"GREEN_DITHER\"(%5), %%mm4 \\n\\t\" \"paddusb \"RED_DITHER\"(%5), %%mm5 \\n\\t\" #endif WRITERGB16(%%REGb, 8280(%5), %%REGBP) \"pop %%\"REG_BP\" \\n\\t\" \"mov \"ESP_OFFSET\"(%5), %%\"REG_b\" \\n\\t\" :: \"c\" (buf0), \"d\" (buf1), \"S\" (ubuf0), \"D\" (ubuf1), \"m\" (dest), \"a\" (&c->redDither) ); } }"} {"target": 1, "idx": 23656, "func": "void hmp_memchar_write(Monitor *mon, const QDict *qdict) { uint32_t size; const char *chardev = qdict_get_str(qdict, \"device\"); const char *data = qdict_get_str(qdict, 
\"data\"); Error *errp = NULL; size = strlen(data); qmp_memchar_write(chardev, size, data, false, 0, &errp); hmp_handle_error(mon, &errp); }"} {"target": 0, "idx": 23668, "func": "static int do_bit_allocation(AC3DecodeContext *ctx, int flags) { ac3_audio_block *ab = &ctx->audio_block; int i, snroffst = 0; if (!flags) /* bit allocation is not required */ return 0; if (ab->flags & AC3_AB_SNROFFSTE) { /* check whether snroffsts are zero */ snroffst += ab->csnroffst; if (ab->flags & AC3_AB_CPLINU) snroffst += ab->cplfsnroffst; for (i = 0; i < ctx->bsi.nfchans; i++) snroffst += ab->fsnroffst[i]; if (ctx->bsi.flags & AC3_BSI_LFEON) snroffst += ab->lfefsnroffst; if (!snroffst) { memset(ab->cplbap, 0, sizeof (ab->cplbap)); for (i = 0; i < ctx->bsi.nfchans; i++) memset(ab->bap[i], 0, sizeof (ab->bap[i])); memset(ab->lfebap, 0, sizeof (ab->lfebap)); return 0; } } /* perform bit allocation */ if ((ab->flags & AC3_AB_CPLINU) && (flags & 64)) if (_do_bit_allocation(ctx, 5)) return -1; for (i = 0; i < ctx->bsi.nfchans; i++) if (flags & (1 << i)) if (_do_bit_allocation(ctx, i)) return -1; if ((ctx->bsi.flags & AC3_BSI_LFEON) && (flags & 32)) if (_do_bit_allocation(ctx, 6)) return -1; return 0; }"} {"target": 0, "idx": 23680, "func": "void qmp_drive_backup(const char *device, const char *target, bool has_format, const char *format, enum MirrorSyncMode sync, bool has_mode, enum NewImageMode mode, bool has_speed, int64_t speed, bool has_on_source_error, BlockdevOnError on_source_error, bool has_on_target_error, BlockdevOnError on_target_error, Error **errp) { BlockDriverState *bs; BlockDriverState *target_bs; BlockDriverState *source = NULL; BlockDriver *drv = NULL; Error *local_err = NULL; int flags; int64_t size; int ret; if (!has_speed) { speed = 0; } if (!has_on_source_error) { on_source_error = BLOCKDEV_ON_ERROR_REPORT; } if (!has_on_target_error) { on_target_error = BLOCKDEV_ON_ERROR_REPORT; } if (!has_mode) { mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; } bs = bdrv_find(device); if (!bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } if (!bdrv_is_inserted(bs)) { error_set(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); return; } if (!has_format) { format = mode == NEW_IMAGE_MODE_EXISTING ? NULL : bs->drv->format_name; } if (format) { drv = bdrv_find_format(format); if (!drv) { error_set(errp, QERR_INVALID_BLOCK_FORMAT, format); return; } } if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) { return; } flags = bs->open_flags | BDRV_O_RDWR; /* See if we have a backing HD we can use to create our new image * on top of. 
*/ if (sync == MIRROR_SYNC_MODE_TOP) { source = bs->backing_hd; if (!source) { sync = MIRROR_SYNC_MODE_FULL; } } if (sync == MIRROR_SYNC_MODE_NONE) { source = bs; } size = bdrv_getlength(bs); if (size < 0) { error_setg_errno(errp, -size, \"bdrv_getlength failed\"); return; } if (mode != NEW_IMAGE_MODE_EXISTING) { assert(format && drv); if (source) { bdrv_img_create(target, format, source->filename, source->drv->format_name, NULL, size, flags, &local_err, false); } else { bdrv_img_create(target, format, NULL, NULL, NULL, size, flags, &local_err, false); } } if (local_err) { error_propagate(errp, local_err); return; } target_bs = NULL; ret = bdrv_open(&target_bs, target, NULL, NULL, flags, drv, &local_err); if (ret < 0) { error_propagate(errp, local_err); return; } backup_start(bs, target_bs, speed, sync, on_source_error, on_target_error, block_job_cb, bs, &local_err); if (local_err != NULL) { bdrv_unref(target_bs); error_propagate(errp, local_err); return; } }"} {"target": 0, "idx": 23688, "func": "static void malta_fpga_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { MaltaFPGAState *s = opaque; uint32_t saddr; saddr = (addr & 0xfffff); switch (saddr) { /* SWITCH Register */ case 0x00200: break; /* JMPRS Register */ case 0x00210: break; /* LEDBAR Register */ case 0x00408: s->leds = val & 0xff; malta_fpga_update_display(s); break; /* ASCIIWORD Register */ case 0x00410: snprintf(s->display_text, 9, \"%08X\", (uint32_t)val); malta_fpga_update_display(s); break; /* ASCIIPOS0 to ASCIIPOS7 Registers */ case 0x00418: case 0x00420: case 0x00428: case 0x00430: case 0x00438: case 0x00440: case 0x00448: case 0x00450: s->display_text[(saddr - 0x00418) >> 3] = (char) val; malta_fpga_update_display(s); break; /* SOFTRES Register */ case 0x00500: if (val == 0x42) qemu_system_reset_request (); break; /* BRKRES Register */ case 0x00508: s->brk = val & 0xff; break; /* UART Registers are handled directly by the serial device */ /* GPOUT Register */ case 0x00a00: s->gpout = val & 0xff; break; /* I2COE Register */ case 0x00b08: s->i2coe = val & 0x03; break; /* I2COUT Register */ case 0x00b10: eeprom24c0x_write(val & 0x02, val & 0x01); s->i2cout = val; break; /* I2CSEL Register */ case 0x00b18: s->i2csel = val & 0x01; break; default: #if 0 printf (\"malta_fpga_write: Bad register offset 0x\" TARGET_FMT_lx \"\\n\", addr); #endif break; } }"} {"target": 0, "idx": 23692, "func": "static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd, ALSChannelData **cd, int *reverted, unsigned int offset, int c) { ALSChannelData *ch = cd[c]; unsigned int dep = 0; unsigned int channels = ctx->avctx->channels; if (reverted[c]) return 0; reverted[c] = 1; while (dep < channels && !ch[dep].stop_flag) { revert_channel_correlation(ctx, bd, cd, reverted, offset, ch[dep].master_channel); dep++; } if (dep == channels) { av_log(ctx->avctx, AV_LOG_WARNING, \"Invalid channel correlation!\\n\"); return AVERROR_INVALIDDATA; } bd->const_block = ctx->const_block + c; bd->shift_lsbs = ctx->shift_lsbs + c; bd->opt_order = ctx->opt_order + c; bd->store_prev_samples = ctx->store_prev_samples + c; bd->use_ltp = ctx->use_ltp + c; bd->ltp_lag = ctx->ltp_lag + c; bd->ltp_gain = ctx->ltp_gain[c]; bd->lpc_cof = ctx->lpc_cof[c]; bd->quant_cof = ctx->quant_cof[c]; bd->raw_samples = ctx->raw_samples[c] + offset; dep = 0; while (!ch[dep].stop_flag) { unsigned int smp; unsigned int begin = 1; unsigned int end = bd->block_length - 1; int64_t y; int32_t *master = ctx->raw_samples[ch[dep].master_channel] + offset; if 
(ch[dep].time_diff_flag) { int t = ch[dep].time_diff_index; if (ch[dep].time_diff_sign) { t = -t; begin -= t; } else { end -= t; } for (smp = begin; smp < end; smp++) { y = (1 << 6) + MUL64(ch[dep].weighting[0], master[smp - 1 ]) + MUL64(ch[dep].weighting[1], master[smp ]) + MUL64(ch[dep].weighting[2], master[smp + 1 ]) + MUL64(ch[dep].weighting[3], master[smp - 1 + t]) + MUL64(ch[dep].weighting[4], master[smp + t]) + MUL64(ch[dep].weighting[5], master[smp + 1 + t]); bd->raw_samples[smp] += y >> 7; } } else { for (smp = begin; smp < end; smp++) { y = (1 << 6) + MUL64(ch[dep].weighting[0], master[smp - 1]) + MUL64(ch[dep].weighting[1], master[smp ]) + MUL64(ch[dep].weighting[2], master[smp + 1]); bd->raw_samples[smp] += y >> 7; } } dep++; } return 0; }"} {"target": 0, "idx": 23696, "func": "build_hpet(GArray *table_data, GArray *linker) { Acpi20Hpet *hpet; hpet = acpi_data_push(table_data, sizeof(*hpet)); /* Note timer_block_id value must be kept in sync with value advertised by * emulated hpet */ hpet->timer_block_id = cpu_to_le32(0x8086a201); hpet->addr.address = cpu_to_le64(HPET_BASE); build_header(linker, table_data, (void *)hpet, \"HPET\", sizeof(*hpet), 1, NULL); }"} {"target": 0, "idx": 23704, "func": "static inline void downmix_3f_1r_to_mono(float *samples) { int i; for (i = 0; i < 256; i++) { samples[i] += (samples[i + 256] + samples[i + 512] + samples[i + 768]); samples[i + 256] = samples[i + 512] = samples[i + 768] = 0; } }"} {"target": 0, "idx": 23714, "func": "static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q) { const AVPixFmtDescriptor *desc; mfxSession session = NULL; int iopattern = 0; mfxVideoParam param = { { 0 } }; int frame_width = avctx->coded_width; int frame_height = avctx->coded_height; int ret; desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt); if (!desc) return AVERROR_BUG; if (!q->async_fifo) { q->async_fifo = av_fifo_alloc((1 + q->async_depth) * (sizeof(mfxSyncPoint*) + sizeof(QSVFrame*))); if (!q->async_fifo) return AVERROR(ENOMEM); } if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) { AVQSVContext *user_ctx = avctx->hwaccel_context; session = user_ctx->session; iopattern = user_ctx->iopattern; q->ext_buffers = user_ctx->ext_buffers; q->nb_ext_buffers = user_ctx->nb_ext_buffers; } if (avctx->hw_frames_ctx) { AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx; if (!iopattern) { if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME) iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY; else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET) iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY; } frame_width = frames_hwctx->surfaces[0].Info.Width; frame_height = frames_hwctx->surfaces[0].Info.Height; } if (!iopattern) iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY; q->iopattern = iopattern; ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"Error initializing an MFX session\\n\"); return ret; } ret = ff_qsv_codec_id_to_mfx(avctx->codec_id); if (ret < 0) return ret; param.mfx.CodecId = ret; param.mfx.CodecProfile = avctx->profile; param.mfx.CodecLevel = avctx->level; param.mfx.FrameInfo.BitDepthLuma = desc->comp[0].depth; param.mfx.FrameInfo.BitDepthChroma = desc->comp[0].depth; param.mfx.FrameInfo.Shift = desc->comp[0].depth > 8; param.mfx.FrameInfo.FourCC = q->fourcc; param.mfx.FrameInfo.Width = frame_width; param.mfx.FrameInfo.Height = frame_height; param.mfx.FrameInfo.ChromaFormat = 
MFX_CHROMAFORMAT_YUV420; param.IOPattern = q->iopattern; param.AsyncDepth = q->async_depth; param.ExtParam = q->ext_buffers; param.NumExtParam = q->nb_ext_buffers; ret = MFXVideoDECODE_Init(q->session, ¶m); if (ret < 0) return ff_qsv_print_error(avctx, ret, \"Error initializing the MFX video decoder\"); q->frame_info = param.mfx.FrameInfo; return 0; }"} {"target": 0, "idx": 23744, "func": "static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy, VirtIOPCIRegion *region, struct virtio_pci_cap *cap) { memory_region_add_subregion(&proxy->modern_bar, region->offset, ®ion->mr); cap->cfg_type = region->type; cap->offset = cpu_to_le32(region->offset); cap->length = cpu_to_le32(memory_region_size(®ion->mr)); virtio_pci_add_mem_cap(proxy, cap); }"} {"target": 0, "idx": 23747, "func": "static void mixer_reset (AC97LinkState *s) { uint8_t active[LAST_INDEX]; dolog (\"mixer_reset\\n\"); memset (s->mixer_data, 0, sizeof (s->mixer_data)); memset (active, 0, sizeof (active)); mixer_store (s, AC97_Reset , 0x0000); /* 6940 */ mixer_store (s, AC97_Master_Volume_Mono_Mute , 0x8000); mixer_store (s, AC97_PC_BEEP_Volume_Mute , 0x0000); mixer_store (s, AC97_Phone_Volume_Mute , 0x8008); mixer_store (s, AC97_Mic_Volume_Mute , 0x8008); mixer_store (s, AC97_CD_Volume_Mute , 0x8808); mixer_store (s, AC97_Aux_Volume_Mute , 0x8808); mixer_store (s, AC97_Record_Gain_Mic_Mute , 0x8000); mixer_store (s, AC97_General_Purpose , 0x0000); mixer_store (s, AC97_3D_Control , 0x0000); mixer_store (s, AC97_Powerdown_Ctrl_Stat , 0x000f); /* * Sigmatel 9700 (STAC9700) */ mixer_store (s, AC97_Vendor_ID1 , 0x8384); mixer_store (s, AC97_Vendor_ID2 , 0x7600); /* 7608 */ mixer_store (s, AC97_Extended_Audio_ID , 0x0809); mixer_store (s, AC97_Extended_Audio_Ctrl_Stat, 0x0009); mixer_store (s, AC97_PCM_Front_DAC_Rate , 0xbb80); mixer_store (s, AC97_PCM_Surround_DAC_Rate , 0xbb80); mixer_store (s, AC97_PCM_LFE_DAC_Rate , 0xbb80); mixer_store (s, AC97_PCM_LR_ADC_Rate , 0xbb80); mixer_store (s, AC97_MIC_ADC_Rate , 0xbb80); record_select (s, 0); set_volume (s, AC97_Master_Volume_Mute, 0x8000); set_volume (s, AC97_PCM_Out_Volume_Mute, 0x8808); set_volume (s, AC97_Line_In_Volume_Mute, 0x8808); reset_voices (s, active); }"} {"target": 0, "idx": 23751, "func": "int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count) { Coroutine *co; DiscardCo rwco = { .bs = bs, .offset = offset, .count = count, .ret = NOT_DONE, }; if (qemu_in_coroutine()) { /* Fast-path if already in coroutine context */ bdrv_pdiscard_co_entry(&rwco); } else { AioContext *aio_context = bdrv_get_aio_context(bs); co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco); qemu_coroutine_enter(co); while (rwco.ret == NOT_DONE) { aio_poll(aio_context, true); } } return rwco.ret; }"} {"target": 0, "idx": 23758, "func": "void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val) { uintptr_t ra = GETPC(); switch (addr & 3) { case 3: cpu_stb_data_ra(env, addr, val, ra); break; case 2: cpu_stw_data_ra(env, addr, val, ra); break; case 1: /* The 3 byte store must appear atomic. 
*/ if (parallel_cpus) { atomic_store_3(env, addr, val, 0x00ffffffu, ra); } else { cpu_stb_data_ra(env, addr, val >> 16, ra); cpu_stw_data_ra(env, addr + 1, val, ra); } break; default: cpu_stl_data_ra(env, addr, val, ra); break; } }"} {"target": 1, "idx": 23762, "func": "int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar) { int64_t scaled_dim; if (!sar.den) return AVERROR(EINVAL); if (!sar.num || sar.num == sar.den) return 0; if (sar.num < sar.den) scaled_dim = av_rescale_rnd(w, sar.num, sar.den, AV_ROUND_ZERO); else scaled_dim = av_rescale_rnd(h, sar.den, sar.num, AV_ROUND_ZERO); if (scaled_dim > 0) return 0; return AVERROR(EINVAL); }"} {"target": 1, "idx": 23769, "func": "static void decode_gray_bitstream(HYuvContext *s, int count) { int i; OPEN_READER(re, &s->gb); count /= 2; if (count >= (get_bits_left(&s->gb)) / (32 * 2)) { for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) { READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0); } } else { for (i = 0; i < count; i++) { READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0); } } CLOSE_READER(re, &s->gb); }"} {"target": 1, "idx": 23771, "func": "static void memory_region_destructor_alias(MemoryRegion *mr) { memory_region_unref(mr->alias); }"} {"target": 1, "idx": 23779, "func": "int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx, int mmu_idx) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; ppc_slb_t *slb; hwaddr pte_offset; ppc_hash_pte64_t pte; int pp_prot, amr_prot, prot; uint64_t new_pte1; const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; hwaddr raddr; assert((rwx == 0) || (rwx == 1) || (rwx == 2)); /* 1. Handle real mode accesses */ if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { /* Translation is off */ /* In real mode the top 4 effective address bits are ignored */ raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, TARGET_PAGE_SIZE); return 0; } /* 2. Translation is on, so look up the SLB */ slb = slb_lookup(cpu, eaddr); if (!slb) { if (rwx == 2) { cs->exception_index = POWERPC_EXCP_ISEG; env->error_code = 0; } else { cs->exception_index = POWERPC_EXCP_DSEG; env->error_code = 0; env->spr[SPR_DAR] = eaddr; } return 1; } /* 3. Check for segment level no-execute violation */ if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) { cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x10000000; return 1; } /* 4. Locate the PTE in the hash table */ pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte); if (pte_offset == -1) { if (rwx == 2) { cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x40000000; } else { cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = eaddr; if (rwx == 1) { env->spr[SPR_DSISR] = 0x42000000; } else { env->spr[SPR_DSISR] = 0x40000000; } } return 1; } qemu_log_mask(CPU_LOG_MMU, \"found PTE at offset %08\" HWADDR_PRIx \"\\n\", pte_offset); /* 5. 
Check access permissions */ pp_prot = ppc_hash64_pte_prot(cpu, slb, pte); amr_prot = ppc_hash64_amr_prot(cpu, pte); prot = pp_prot & amr_prot; if ((need_prot[rwx] & ~prot) != 0) { /* Access right violation */ qemu_log_mask(CPU_LOG_MMU, \"PTE access rejected\\n\"); if (rwx == 2) { cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x08000000; } else { target_ulong dsisr = 0; cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = eaddr; if (need_prot[rwx] & ~pp_prot) { dsisr |= 0x08000000; } if (rwx == 1) { dsisr |= 0x02000000; } if (need_prot[rwx] & ~amr_prot) { dsisr |= 0x00200000; } env->spr[SPR_DSISR] = dsisr; } return 1; } qemu_log_mask(CPU_LOG_MMU, \"PTE access granted !\\n\"); /* 6. Update PTE referenced and changed bits if necessary */ new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */ if (rwx == 1) { new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */ } else { /* Treat the page as read-only for now, so that a later write * will pass through this function again to set the C bit */ prot &= ~PAGE_WRITE; } if (new_pte1 != pte.pte1) { ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64, pte.pte0, new_pte1); } /* 7. Determine the real address from the PTE */ raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, slb->sps->page_shift, eaddr); tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, prot, mmu_idx, TARGET_PAGE_SIZE); return 0; }"} {"target": 1, "idx": 23786, "func": "static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; PCXContext * const s = avctx->priv_data; AVFrame *picture = data; AVFrame * const p = &s->picture; int compressed, xmin, ymin, xmax, ymax; unsigned int w, h, bits_per_pixel, bytes_per_line, nplanes, stride, y, x, bytes_per_scanline; uint8_t *ptr; uint8_t const *bufstart = buf; uint8_t *scanline; int ret = -1; if (buf[0] != 0x0a || buf[1] > 5) { av_log(avctx, AV_LOG_ERROR, \"this is not PCX encoded data\\n\"); return AVERROR_INVALIDDATA; } compressed = buf[2]; xmin = AV_RL16(buf+ 4); ymin = AV_RL16(buf+ 6); xmax = AV_RL16(buf+ 8); ymax = AV_RL16(buf+10); if (xmax < xmin || ymax < ymin) { av_log(avctx, AV_LOG_ERROR, \"invalid image dimensions\\n\"); return AVERROR_INVALIDDATA; } w = xmax - xmin + 1; h = ymax - ymin + 1; bits_per_pixel = buf[3]; bytes_per_line = AV_RL16(buf+66); nplanes = buf[65]; bytes_per_scanline = nplanes * bytes_per_line; if (bytes_per_scanline < w * bits_per_pixel * nplanes / 8) { av_log(avctx, AV_LOG_ERROR, \"PCX data is corrupted\\n\"); return AVERROR_INVALIDDATA; } switch ((nplanes<<8) + bits_per_pixel) { case 0x0308: avctx->pix_fmt = AV_PIX_FMT_RGB24; break; case 0x0108: case 0x0104: case 0x0102: case 0x0101: case 0x0401: case 0x0301: case 0x0201: avctx->pix_fmt = AV_PIX_FMT_PAL8; break; default: av_log(avctx, AV_LOG_ERROR, \"invalid PCX file\\n\"); return AVERROR_INVALIDDATA; } buf += 128; if (p->data[0]) avctx->release_buffer(avctx, p); if (av_image_check_size(w, h, 0, avctx)) return AVERROR_INVALIDDATA; if (w != avctx->width || h != avctx->height) avcodec_set_dimensions(avctx, w, h); if ((ret = avctx->get_buffer(avctx, p)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; } p->pict_type = AV_PICTURE_TYPE_I; ptr = p->data[0]; stride = p->linesize[0]; scanline = av_malloc(bytes_per_scanline); if (!scanline) return AVERROR(ENOMEM); if (nplanes == 3 && bits_per_pixel == 8) { for (y=0; y> (x&7), v = 0; for (i=nplanes - 1; i>=0; i--) { v <<= 1; v += 
!!(scanline[i*bytes_per_line + (x>>3)] & m); } ptr[x] = v; } ptr += stride; } } if (nplanes == 1 && bits_per_pixel == 8) { pcx_palette(&buf, (uint32_t *) p->data[1], 256); } else if (bits_per_pixel * nplanes == 1) { AV_WN32A(p->data[1] , 0xFF000000); AV_WN32A(p->data[1]+4, 0xFFFFFFFF); } else if (bits_per_pixel < 8) { const uint8_t *palette = bufstart+16; pcx_palette(&palette, (uint32_t *) p->data[1], 16); } *picture = s->picture; *data_size = sizeof(AVFrame); ret = buf - bufstart; end: av_free(scanline); return ret; }"} {"target": 1, "idx": 23809, "func": "QemuOpts *qemu_opts_from_qdict(QemuOptsList *list, const QDict *qdict) { QemuOpts *opts; opts = qemu_opts_create(list, qdict_get_try_str(qdict, \"id\"), 1); if (opts == NULL) return NULL; qdict_iter(qdict, qemu_opts_from_qdict_1, opts); return opts; }"} {"target": 0, "idx": 23816, "func": "static void conditional_branch(DBDMA_channel *ch) { dbdma_cmd *current = &ch->current; uint16_t br; uint16_t sel_mask, sel_value; uint32_t status; int cond; DBDMA_DPRINTF(\"conditional_branch\\n\"); /* check if we must branch */ br = le16_to_cpu(current->command) & BR_MASK; switch(br) { case BR_NEVER: /* don't branch */ next(ch); return; case BR_ALWAYS: /* always branch */ branch(ch); return; } status = be32_to_cpu(ch->regs[DBDMA_STATUS]) & DEVSTAT; sel_mask = (be32_to_cpu(ch->regs[DBDMA_BRANCH_SEL]) >> 16) & 0x0f; sel_value = be32_to_cpu(ch->regs[DBDMA_BRANCH_SEL]) & 0x0f; cond = (status & sel_mask) == (sel_value & sel_mask); switch(br) { case BR_IFSET: /* branch if condition bit is 1 */ if (cond) branch(ch); else next(ch); return; case BR_IFCLR: /* branch if condition bit is 0 */ if (!cond) branch(ch); else next(ch); return; } }"} {"target": 0, "idx": 23854, "func": "static void dequantization_int(int x, int y, Jpeg2000Cblk *cblk, Jpeg2000Component *comp, Jpeg2000T1Context *t1, Jpeg2000Band *band) { int i, j; int w = cblk->coord[0][1] - cblk->coord[0][0]; for (j = 0; j < (cblk->coord[1][1] - cblk->coord[1][0]); ++j) { int32_t *datap = &comp->i_data[(comp->coord[0][1] - comp->coord[0][0]) * (y + j) + x]; int *src = t1->data[j]; if (band->i_stepsize == 16384) { for (i = 0; i < w; ++i) datap[i] = src[i] / 2; } else { // This should be VERY uncommon for (i = 0; i < w; ++i) datap[i] = (src[i] * (int64_t)band->i_stepsize) / 32768; } } }"} {"target": 0, "idx": 23855, "func": "static void gdb_accept(void) { GDBState *s; struct sockaddr_in sockaddr; socklen_t len; int fd; for(;;) { len = sizeof(sockaddr); fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len); if (fd < 0 && errno != EINTR) { perror(\"accept\"); return; } else if (fd >= 0) { #ifndef _WIN32 fcntl(fd, F_SETFD, FD_CLOEXEC); #endif break; } } /* set short latency */ socket_set_nodelay(fd); s = g_malloc0(sizeof(GDBState)); s->c_cpu = first_cpu; s->g_cpu = first_cpu; s->fd = fd; gdb_has_xml = false; gdbserver_state = s; fcntl(fd, F_SETFL, O_NONBLOCK); }"} {"target": 0, "idx": 23870, "func": "static int vaapi_encode_h265_init_sequence_params(AVCodecContext *avctx) { VAAPIEncodeContext *ctx = avctx->priv_data; VAEncSequenceParameterBufferHEVC *vseq = ctx->codec_sequence_params; VAEncPictureParameterBufferHEVC *vpic = ctx->codec_picture_params; VAAPIEncodeH265Context *priv = ctx->priv_data; VAAPIEncodeH265MiscSequenceParams *mseq = &priv->misc_sequence_params; int i; { // general_profile_space == 0. vseq->general_profile_idc = 1; // Main profile (ctx->codec_profile?) 
vseq->general_tier_flag = 0; vseq->general_level_idc = avctx->level * 3; vseq->intra_period = 0; vseq->intra_idr_period = 0; vseq->ip_period = 0; vseq->pic_width_in_luma_samples = ctx->aligned_width; vseq->pic_height_in_luma_samples = ctx->aligned_height; vseq->seq_fields.bits.chroma_format_idc = 1; // 4:2:0. vseq->seq_fields.bits.separate_colour_plane_flag = 0; vseq->seq_fields.bits.bit_depth_luma_minus8 = 0; // 8-bit luma. vseq->seq_fields.bits.bit_depth_chroma_minus8 = 0; // 8-bit chroma. // Other misc flags all zero. // These have to come from the capabilities of the encoder. We have // no way to query it, so just hardcode ones which worked for me... // CTB size from 8x8 to 32x32. vseq->log2_min_luma_coding_block_size_minus3 = 0; vseq->log2_diff_max_min_luma_coding_block_size = 2; // Transform size from 4x4 to 32x32. vseq->log2_min_transform_block_size_minus2 = 0; vseq->log2_diff_max_min_transform_block_size = 3; // Full transform hierarchy allowed (2-5). vseq->max_transform_hierarchy_depth_inter = 3; vseq->max_transform_hierarchy_depth_intra = 3; vseq->vui_parameters_present_flag = 0; vseq->bits_per_second = avctx->bit_rate; if (avctx->framerate.num > 0 && avctx->framerate.den > 0) { vseq->vui_num_units_in_tick = avctx->framerate.num; vseq->vui_time_scale = avctx->framerate.den; } else { vseq->vui_num_units_in_tick = avctx->time_base.num; vseq->vui_time_scale = avctx->time_base.den; } vseq->intra_period = ctx->p_per_i * (ctx->b_per_p + 1); vseq->intra_idr_period = vseq->intra_period; vseq->ip_period = ctx->b_per_p + 1; } { vpic->decoded_curr_pic.picture_id = VA_INVALID_ID; vpic->decoded_curr_pic.flags = VA_PICTURE_HEVC_INVALID; for (i = 0; i < FF_ARRAY_ELEMS(vpic->reference_frames); i++) { vpic->reference_frames[i].picture_id = VA_INVALID_ID; vpic->reference_frames[i].flags = VA_PICTURE_HEVC_INVALID; } vpic->collocated_ref_pic_index = 0xff; vpic->last_picture = 0; vpic->pic_init_qp = priv->fixed_qp_idr; vpic->diff_cu_qp_delta_depth = 0; vpic->pps_cb_qp_offset = 0; vpic->pps_cr_qp_offset = 0; // tiles_enabled_flag == 0, so ignore num_tile_(rows|columns)_minus1. vpic->log2_parallel_merge_level_minus2 = 0; // No limit on size. vpic->ctu_max_bitsize_allowed = 0; vpic->num_ref_idx_l0_default_active_minus1 = 0; vpic->num_ref_idx_l1_default_active_minus1 = 0; vpic->slice_pic_parameter_set_id = 0; vpic->pic_fields.bits.screen_content_flag = 0; vpic->pic_fields.bits.enable_gpu_weighted_prediction = 0; // Per-CU QP changes are required for non-constant-QP modes. 
vpic->pic_fields.bits.cu_qp_delta_enabled_flag = ctx->va_rc_mode != VA_RC_CQP; } { mseq->video_parameter_set_id = 5; mseq->seq_parameter_set_id = 5; mseq->vps_max_layers_minus1 = 0; mseq->vps_max_sub_layers_minus1 = 0; mseq->vps_temporal_id_nesting_flag = 1; mseq->sps_max_sub_layers_minus1 = 0; mseq->sps_temporal_id_nesting_flag = 1; for (i = 0; i < 32; i++) { mseq->general_profile_compatibility_flag[i] = (i == vseq->general_profile_idc); } mseq->general_progressive_source_flag = 1; mseq->general_interlaced_source_flag = 0; mseq->general_non_packed_constraint_flag = 0; mseq->general_frame_only_constraint_flag = 1; mseq->general_inbld_flag = 0; mseq->log2_max_pic_order_cnt_lsb_minus4 = 8; mseq->vps_sub_layer_ordering_info_present_flag = 0; mseq->vps_max_dec_pic_buffering_minus1[0] = 1; mseq->vps_max_num_reorder_pics[0] = ctx->b_per_p; mseq->vps_max_latency_increase_plus1[0] = 0; mseq->sps_sub_layer_ordering_info_present_flag = 0; mseq->sps_max_dec_pic_buffering_minus1[0] = 1; mseq->sps_max_num_reorder_pics[0] = ctx->b_per_p; mseq->sps_max_latency_increase_plus1[0] = 0; mseq->vps_timing_info_present_flag = 1; mseq->vps_num_units_in_tick = avctx->time_base.num; mseq->vps_time_scale = avctx->time_base.den; mseq->vps_poc_proportional_to_timing_flag = 1; mseq->vps_num_ticks_poc_diff_minus1 = 0; if (ctx->input_width != ctx->aligned_width || ctx->input_height != ctx->aligned_height) { mseq->conformance_window_flag = 1; mseq->conf_win_left_offset = 0; mseq->conf_win_right_offset = (ctx->aligned_width - ctx->input_width) / 2; mseq->conf_win_top_offset = 0; mseq->conf_win_bottom_offset = (ctx->aligned_height - ctx->input_height) / 2; } else { mseq->conformance_window_flag = 0; } mseq->num_short_term_ref_pic_sets = 0; // STRPSs should ideally be here rather than repeated in each slice. mseq->vui_parameters_present_flag = 1; if (avctx->sample_aspect_ratio.num != 0) { mseq->aspect_ratio_info_present_flag = 1; if (avctx->sample_aspect_ratio.num == avctx->sample_aspect_ratio.den) { mseq->aspect_ratio_idc = 1; } else { mseq->aspect_ratio_idc = 255; // Extended SAR. mseq->sar_width = avctx->sample_aspect_ratio.num; mseq->sar_height = avctx->sample_aspect_ratio.den; } } if (1) { // Should this be conditional on some of these being set? mseq->video_signal_type_present_flag = 1; mseq->video_format = 5; // Unspecified. mseq->video_full_range_flag = 0; mseq->colour_description_present_flag = 1; mseq->colour_primaries = avctx->color_primaries; mseq->transfer_characteristics = avctx->color_trc; mseq->matrix_coeffs = avctx->colorspace; } } return 0; }"} {"target": 0, "idx": 23872, "func": "static void rdft_calc_c(RDFTContext *s, FFTSample *data) { int i, i1, i2; FFTComplex ev, od; const int n = 1 << s->nbits; const float k1 = 0.5; const float k2 = 0.5 - s->inverse; const FFTSample *tcos = s->tcos; const FFTSample *tsin = s->tsin; if (!s->inverse) { s->fft.fft_permute(&s->fft, (FFTComplex*)data); s->fft.fft_calc(&s->fft, (FFTComplex*)data); } /* i=0 is a special case because of packing, the DC term is real, so we are going to throw the N/2 term (also real) in with it. 
*/ ev.re = data[0]; data[0] = ev.re+data[1]; data[1] = ev.re-data[1]; for (i = 1; i < (n>>2); i++) { i1 = 2*i; i2 = n-i1; /* Separate even and odd FFTs */ ev.re = k1*(data[i1 ]+data[i2 ]); od.im = -k2*(data[i1 ]-data[i2 ]); ev.im = k1*(data[i1+1]-data[i2+1]); od.re = k2*(data[i1+1]+data[i2+1]); /* Apply twiddle factors to the odd FFT and add to the even FFT */ data[i1 ] = ev.re + od.re*tcos[i] - od.im*tsin[i]; data[i1+1] = ev.im + od.im*tcos[i] + od.re*tsin[i]; data[i2 ] = ev.re - od.re*tcos[i] + od.im*tsin[i]; data[i2+1] = -ev.im + od.im*tcos[i] + od.re*tsin[i]; } data[2*i+1]=s->sign_convention*data[2*i+1]; if (s->inverse) { data[0] *= k1; data[1] *= k1; s->fft.fft_permute(&s->fft, (FFTComplex*)data); s->fft.fft_calc(&s->fft, (FFTComplex*)data); } }"} {"target": 0, "idx": 23877, "func": "static av_cold int g726_init(AVCodecContext * avctx) { AVG726Context* c = (AVG726Context*)avctx->priv_data; unsigned int index= (avctx->bit_rate + avctx->sample_rate/2) / avctx->sample_rate - 2; if ( (avctx->bit_rate != 16000 && avctx->bit_rate != 24000 && avctx->bit_rate != 32000 && avctx->bit_rate != 40000)) { av_log(avctx, AV_LOG_ERROR, \"G726: unsupported audio format\\n\"); return -1; } if (avctx->sample_rate != 8000 && avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL) { av_log(avctx, AV_LOG_ERROR, \"G726: unsupported audio format\\n\"); return -1; } if(avctx->channels != 1){ av_log(avctx, AV_LOG_ERROR, \"Only mono is supported\\n\"); return -1; } if(index>3){ av_log(avctx, AV_LOG_ERROR, \"Unsupported number of bits %d\\n\", index+2); return -1; } g726_reset(&c->c, index); c->code_size = c->c.tbls->bits; c->bit_buffer = 0; c->bits_left = 0; avctx->coded_frame = avcodec_alloc_frame(); if (!avctx->coded_frame) return AVERROR(ENOMEM); avctx->coded_frame->key_frame = 1; return 0; }"} {"target": 1, "idx": 23888, "func": "unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset) { const unsigned long *p = addr + BITOP_WORD(offset); unsigned long result = offset & ~(BITS_PER_LONG-1); unsigned long tmp; if (offset >= size) { return size; } size -= result; offset %= BITS_PER_LONG; if (offset) { tmp = *(p++); tmp |= ~0UL >> (BITS_PER_LONG - offset); if (size < BITS_PER_LONG) { goto found_first; } if (~tmp) { goto found_middle; } size -= BITS_PER_LONG; result += BITS_PER_LONG; } while (size & ~(BITS_PER_LONG-1)) { if (~(tmp = *(p++))) { goto found_middle; } result += BITS_PER_LONG; size -= BITS_PER_LONG; } if (!size) { return result; } tmp = *p; found_first: tmp |= ~0UL << size; if (tmp == ~0UL) { /* Are any bits zero? */ return result + size; /* Nope. 
*/ } found_middle: return result + ffz(tmp); }"} {"target": 1, "idx": 23895, "func": "void visit_end_implicit_struct(Visitor *v, Error **errp) { assert(!error_is_set(errp)); if (v->end_implicit_struct) { v->end_implicit_struct(v, errp); } }"} {"target": 1, "idx": 23899, "func": "static int nbd_reject_length(NBDClient *client, bool fatal, Error **errp) { int ret; assert(client->optlen); if (nbd_drop(client->ioc, client->optlen, errp) < 0) { return -EIO; } ret = nbd_negotiate_send_rep_err(client, NBD_REP_ERR_INVALID, errp, \"option '%s' should have zero length\", nbd_opt_lookup(client->opt)); if (fatal && !ret) { error_setg(errp, \"option '%s' should have zero length\", nbd_opt_lookup(client->opt)); return -EINVAL; } return ret; }"} {"target": 1, "idx": 23912, "func": "static int kvmppc_read_host_property(const char *node_path, const char *prop, void *val, size_t len) { char *path; FILE *f; int ret; int pathlen; pathlen = snprintf(NULL, 0, \"%s/%s/%s\", PROC_DEVTREE_PATH, node_path, prop) + 1; path = qemu_malloc(pathlen); if (path == NULL) { ret = -ENOMEM; goto out; } snprintf(path, pathlen, \"%s/%s/%s\", PROC_DEVTREE_PATH, node_path, prop); f = fopen(path, \"rb\"); if (f == NULL) { ret = errno; goto free; } len = fread(val, len, 1, f); if (len != 1) { ret = ferror(f); goto close; } close: fclose(f); free: free(path); out: return ret; }"} {"target": 1, "idx": 23916, "func": "static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOpIdx oi, bool is_64) { TCGMemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); TCGReg addrz, param; tcg_insn_unit *func; tcg_insn_unit *label_ptr; addrz = tcg_out_tlb_load(s, addr, memi, memop, offsetof(CPUTLBEntry, addr_read)); /* The fast path is exactly one insn. Thus we can perform the entire TLB Hit in the (annulled) delay slot of the branch over the TLB Miss case. */ /* beq,a,pt %[xi]cc, label0 */ label_ptr = s->code_ptr; tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); /* delay slot */ tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); /* TLB Miss. */ param = TCG_REG_O1; if (!SPARC64 && TARGET_LONG_BITS == 64) { /* Skip the high-part; we'll perform the extract in the trampoline. */ param++; } tcg_out_mov(s, TCG_TYPE_REG, param++, addr); /* We use the helpers to extend SB and SW data, leaving the case of SL needing explicit extending below. */ if ((memop & MO_SSIZE) == MO_SL) { func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)]; } else { func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)]; } tcg_debug_assert(func != NULL); tcg_out_call_nodelay(s, func); /* delay slot */ tcg_out_movi(s, TCG_TYPE_I32, param, oi); /* Recall that all of the helpers return 64-bit results. Which complicates things for sparcv8plus. */ if (SPARC64) { /* We let the helper sign-extend SB and SW, but leave SL for here. */ if (is_64 && (memop & MO_SSIZE) == MO_SL) { tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA); } else { tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0); } } else { if ((memop & MO_SIZE) == MO_64) { tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX); tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL); tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR); } else if (is_64) { /* Re-extend from 32-bit rather than reassembling when we know the high register must be an extension. */ tcg_out_arithi(s, data, TCG_REG_O1, 0, memop & MO_SIGN ? 
SHIFT_SRA : SHIFT_SRL); } else { tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1); } } *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); #else if (SPARC64 && TARGET_LONG_BITS == 32) { tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); addr = TCG_REG_T1; } tcg_out_ldst_rr(s, data, addr, (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0), qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); #endif /* CONFIG_SOFTMMU */ }"} {"target": 1, "idx": 23918, "func": "opts_visitor_cleanup(OptsVisitor *ov) { if (ov->unprocessed_opts != NULL) { g_hash_table_destroy(ov->unprocessed_opts); } g_free(ov->fake_id_opt); memset(ov, '\\0', sizeof *ov); }"} {"target": 0, "idx": 23922, "func": "static int usb_host_open(USBHostDevice *s, libusb_device *dev) { USBDevice *udev = USB_DEVICE(s); int bus_num = libusb_get_bus_number(dev); int addr = libusb_get_device_address(dev); int rc; trace_usb_host_open_started(bus_num, addr); if (s->dh != NULL) { goto fail; } rc = libusb_open(dev, &s->dh); if (rc != 0) { goto fail; } s->dev = dev; s->bus_num = bus_num; s->addr = addr; usb_host_detach_kernel(s); libusb_get_device_descriptor(dev, &s->ddesc); usb_host_get_port(s->dev, s->port, sizeof(s->port)); usb_ep_init(udev); usb_host_ep_update(s); udev->speed = speed_map[libusb_get_device_speed(dev)]; usb_host_speed_compat(s); if (s->ddesc.iProduct) { libusb_get_string_descriptor_ascii(s->dh, s->ddesc.iProduct, (unsigned char *)udev->product_desc, sizeof(udev->product_desc)); } else { snprintf(udev->product_desc, sizeof(udev->product_desc), \"host:%d.%d\", bus_num, addr); } rc = usb_device_attach(udev); if (rc) { goto fail; } trace_usb_host_open_success(bus_num, addr); return 0; fail: trace_usb_host_open_failure(bus_num, addr); if (s->dh != NULL) { libusb_close(s->dh); s->dh = NULL; s->dev = NULL; } return -1; }"} {"target": 0, "idx": 23924, "func": "int64_t xbzrle_cache_resize(int64_t new_size, Error **errp) { PageCache *new_cache; int64_t ret; /* Check for truncation */ if (new_size != (size_t)new_size) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, \"cache size\", \"exceeding address space\"); return -1; } /* Cache should not be larger than guest ram size */ if (new_size > ram_bytes_total()) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, \"cache size\", \"exceeds guest ram size\"); return -1; } XBZRLE_cache_lock(); if (XBZRLE.cache != NULL) { if (pow2floor(new_size) == migrate_xbzrle_cache_size()) { goto out_new_size; } new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp); if (!new_cache) { ret = -1; goto out; } cache_fini(XBZRLE.cache); XBZRLE.cache = new_cache; } out_new_size: ret = pow2floor(new_size); out: XBZRLE_cache_unlock(); return ret; }"} {"target": 0, "idx": 23933, "func": "static void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUAlphaState *env) { abi_ulong frame_addr, r26; struct target_rt_sigframe *frame; int i, err = 0; frame_addr = get_sigframe(ka, env, sizeof(*frame)); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { goto give_sigsegv; } err |= copy_siginfo_to_user(&frame->info, info); __put_user(0, &frame->uc.tuc_flags); __put_user(0, &frame->uc.tuc_link); __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); __put_user(sas_ss_flags(env->ir[IR_SP]), &frame->uc.tuc_stack.ss_flags); __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); err |= setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); for (i = 0; i < TARGET_NSIG_WORDS; 
++i) { __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); } if (ka->sa_restorer) { r26 = ka->sa_restorer; } else { __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, &frame->retcode[1]); __put_user(INSN_CALLSYS, &frame->retcode[2]); /* imb(); */ r26 = frame_addr; } if (err) { give_sigsegv: if (sig == TARGET_SIGSEGV) { ka->_sa_handler = TARGET_SIG_DFL; } force_sig(TARGET_SIGSEGV); } env->ir[IR_RA] = r26; env->ir[IR_PV] = env->pc = ka->_sa_handler; env->ir[IR_A0] = sig; env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); env->ir[IR_SP] = frame_addr; }"} {"target": 0, "idx": 23938, "func": "static void imx_gpt_reset(DeviceState *dev) { IMXGPTState *s = IMX_GPT(dev); /* stop timer */ ptimer_stop(s->timer); /* * Soft reset doesn't touch some bits; hard reset clears them */ s->cr &= ~(GPT_CR_EN|GPT_CR_ENMOD|GPT_CR_STOPEN|GPT_CR_DOZEN| GPT_CR_WAITEN|GPT_CR_DBGEN); s->sr = 0; s->pr = 0; s->ir = 0; s->cnt = 0; s->ocr1 = TIMER_MAX; s->ocr2 = TIMER_MAX; s->ocr3 = TIMER_MAX; s->icr1 = 0; s->icr2 = 0; s->next_timeout = TIMER_MAX; s->next_int = 0; /* compute new freq */ imx_gpt_set_freq(s); /* reset the limit to TIMER_MAX */ ptimer_set_limit(s->timer, TIMER_MAX, 1); /* if the timer is still enabled, restart it */ if (s->freq && (s->cr & GPT_CR_EN)) { ptimer_run(s->timer, 1); } }"} {"target": 1, "idx": 23942, "func": "static int dynticks_start_timer(struct qemu_alarm_timer *t) { struct sigevent ev; timer_t host_timer; struct sigaction act; sigfillset(&act.sa_mask); act.sa_flags = 0; act.sa_handler = host_alarm_handler; sigaction(SIGALRM, &act, NULL); ev.sigev_value.sival_int = 0; ev.sigev_notify = SIGEV_SIGNAL; ev.sigev_signo = SIGALRM; if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) { perror(\"timer_create\"); /* disable dynticks */ fprintf(stderr, \"Dynamic Ticks disabled\\n\"); return -1; } t->priv = (void *)(long)host_timer; return 0; }"} {"target": 1, "idx": 23947, "func": "static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh, uint32_t fid, uint64_t faddr, uint32_t e) { SeiContainer *sei_cont = g_malloc0(sizeof(SeiContainer)); S390pciState *s = S390_PCI_HOST_BRIDGE( object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL)); if (!s) { return; } sei_cont->fh = fh; sei_cont->fid = fid; sei_cont->cc = cc; sei_cont->pec = pec; sei_cont->faddr = faddr; sei_cont->e = e; QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link); css_generate_css_crws(0); }"} {"target": 1, "idx": 23956, "func": "void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, CPSRWriteType write_type) { uint32_t changed_daif; if (mask & CPSR_NZCV) { env->ZF = (~val) & CPSR_Z; env->NF = val; env->CF = (val >> 29) & 1; env->VF = (val << 3) & 0x80000000; } if (mask & CPSR_Q) env->QF = ((val & CPSR_Q) != 0); if (mask & CPSR_T) env->thumb = ((val & CPSR_T) != 0); if (mask & CPSR_IT_0_1) { env->condexec_bits &= ~3; env->condexec_bits |= (val >> 25) & 3; } if (mask & CPSR_IT_2_7) { env->condexec_bits &= 3; env->condexec_bits |= (val >> 8) & 0xfc; } if (mask & CPSR_GE) { env->GE = (val >> 16) & 0xf; } /* In a V7 implementation that includes the security extensions but does * not include Virtualization Extensions the SCR.FW and SCR.AW bits control * whether non-secure software is allowed to change the CPSR_F and CPSR_A * bits respectively. * * In a V8 implementation, it is permitted for privileged software to * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 
*/ if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && arm_feature(env, ARM_FEATURE_EL3) && !arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure(env)) { changed_daif = (env->daif ^ val) & mask; if (changed_daif & CPSR_A) { /* Check to see if we are allowed to change the masking of async * abort exceptions from a non-secure state. */ if (!(env->cp15.scr_el3 & SCR_AW)) { qemu_log_mask(LOG_GUEST_ERROR, \"Ignoring attempt to switch CPSR_A flag from \" \"non-secure world with SCR.AW bit clear\\n\"); mask &= ~CPSR_A; } } if (changed_daif & CPSR_F) { /* Check to see if we are allowed to change the masking of FIQ * exceptions from a non-secure state. */ if (!(env->cp15.scr_el3 & SCR_FW)) { qemu_log_mask(LOG_GUEST_ERROR, \"Ignoring attempt to switch CPSR_F flag from \" \"non-secure world with SCR.FW bit clear\\n\"); mask &= ~CPSR_F; } /* Check whether non-maskable FIQ (NMFI) support is enabled. * If this bit is set software is not allowed to mask * FIQs, but is allowed to set CPSR_F to 0. */ if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && (val & CPSR_F)) { qemu_log_mask(LOG_GUEST_ERROR, \"Ignoring attempt to enable CPSR_F flag \" \"(non-maskable FIQ [NMFI] support enabled)\\n\"); mask &= ~CPSR_F; } } } env->daif &= ~(CPSR_AIF & mask); env->daif |= val & CPSR_AIF & mask; if (write_type != CPSRWriteRaw && (env->uncached_cpsr & CPSR_M) != CPSR_USER && ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { if (bad_mode_switch(env, val & CPSR_M)) { /* Attempt to switch to an invalid mode: this is UNPREDICTABLE. * We choose to ignore the attempt and leave the CPSR M field * untouched. */ mask &= ~CPSR_M; } else { switch_mode(env, val & CPSR_M); } } mask &= ~CACHED_CPSR_BITS; env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); }"} {"target": 1, "idx": 23965, "func": "static void vfio_probe_nvidia_bar0_quirk(VFIOPCIDevice *vdev, int nr) { VFIOQuirk *quirk; VFIOConfigMirrorQuirk *mirror; if (!vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) || !vfio_is_vga(vdev) || nr != 0) { return; } quirk = g_malloc0(sizeof(*quirk)); mirror = quirk->data = g_malloc0(sizeof(*mirror)); mirror->mem = quirk->mem = g_malloc0(sizeof(MemoryRegion)); quirk->nr_mem = 1; mirror->vdev = vdev; mirror->offset = 0x88000; mirror->bar = nr; memory_region_init_io(mirror->mem, OBJECT(vdev), &vfio_nvidia_mirror_quirk, mirror, \"vfio-nvidia-bar0-88000-mirror-quirk\", PCIE_CONFIG_SPACE_SIZE); memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem, mirror->offset, mirror->mem, 1); QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); /* The 0x1800 offset mirror only seems to get used by legacy VGA */ if (vdev->has_vga) { quirk = g_malloc0(sizeof(*quirk)); mirror = quirk->data = g_malloc0(sizeof(*mirror)); mirror->mem = quirk->mem = g_malloc0(sizeof(MemoryRegion)); quirk->nr_mem = 1; mirror->vdev = vdev; mirror->offset = 0x1800; mirror->bar = nr; memory_region_init_io(mirror->mem, OBJECT(vdev), &vfio_nvidia_mirror_quirk, mirror, \"vfio-nvidia-bar0-1800-mirror-quirk\", PCI_CONFIG_SPACE_SIZE); memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem, mirror->offset, mirror->mem, 1); QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); } trace_vfio_quirk_nvidia_bar0_probe(vdev->vbasedev.name); }"} {"target": 1, "idx": 24009, "func": "static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, TCGv arg1, TCGv arg2, int sub) { TCGv t0 = tcg_temp_new(); tcg_gen_xor_tl(cpu_ov, arg0, arg1); tcg_gen_xor_tl(t0, arg1, arg2); if (sub) { tcg_gen_and_tl(cpu_ov, cpu_ov, t0); } else { 
tcg_gen_andc_tl(cpu_ov, cpu_ov, t0); } tcg_temp_free(t0); if (NARROW_MODE(ctx)) { tcg_gen_ext32s_tl(cpu_ov, cpu_ov); } tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1); tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); }"} {"target": 1, "idx": 24018, "func": "static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { H264Context *h = avctx->priv_data; MpegEncContext *s = &h->s; AVFrame *pict = data; int buf_index; s->flags= avctx->flags; s->flags2= avctx->flags2; /* no supplementary picture */ if (buf_size == 0) { return 0; } if(s->flags&CODEC_FLAG_TRUNCATED){ int next= find_frame_end(h, buf, buf_size); if( ff_combine_frame(&s->parse_context, next, &buf, &buf_size) < 0 ) return buf_size; //printf(\"next:%d buf_size:%d last_index:%d\\n\", next, buf_size, s->parse_context.last_index); } if(h->is_avc && !h->got_avcC) { int i, cnt, nalsize; unsigned char *p = avctx->extradata; if(avctx->extradata_size < 7) { av_log(avctx, AV_LOG_ERROR, \"avcC too short\\n\"); return -1; } if(*p != 1) { av_log(avctx, AV_LOG_ERROR, \"Unknown avcC version %d\\n\", *p); return -1; } /* sps and pps in the avcC always have length coded with 2 bytes, so put a fake nal_length_size = 2 while parsing them */ h->nal_length_size = 2; // Decode sps from avcC cnt = *(p+5) & 0x1f; // Number of sps p += 6; for (i = 0; i < cnt; i++) { nalsize = BE_16(p) + 2; if(decode_nal_units(h, p, nalsize) < 0) { av_log(avctx, AV_LOG_ERROR, \"Decoding sps %d from avcC failed\\n\", i); return -1; } p += nalsize; } // Decode pps from avcC cnt = *(p++); // Number of pps for (i = 0; i < cnt; i++) { nalsize = BE_16(p) + 2; if(decode_nal_units(h, p, nalsize) != nalsize) { av_log(avctx, AV_LOG_ERROR, \"Decoding pps %d from avcC failed\\n\", i); return -1; } p += nalsize; } // Now store right nal length size, that will be use to parse all other nals h->nal_length_size = ((*(((char*)(avctx->extradata))+4))&0x03)+1; // Do not reparse avcC h->got_avcC = 1; } if(!h->is_avc && s->avctx->extradata_size && s->picture_number==0){ if(decode_nal_units(h, s->avctx->extradata, s->avctx->extradata_size) < 0) return -1; } buf_index=decode_nal_units(h, buf, buf_size); if(buf_index < 0) return -1; //FIXME do something with unavailable reference frames // if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_index, buf_size); if(!s->current_picture_ptr){ av_log(h->s.avctx, AV_LOG_DEBUG, \"error, NO frame\\n\"); return -1; } { Picture *out = s->current_picture_ptr; #if 0 //decode order *data_size = sizeof(AVFrame); #else /* Sort B-frames into display order */ Picture *cur = s->current_picture_ptr; Picture *prev = h->delayed_output_pic; int out_idx = 0; int pics = 0; int out_of_order; int cross_idr = 0; int dropped_frame = 0; int i; if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames < h->sps.num_reorder_frames){ s->avctx->has_b_frames = h->sps.num_reorder_frames; s->low_delay = 0; } while(h->delayed_pic[pics]) pics++; h->delayed_pic[pics++] = cur; if(cur->reference == 0) cur->reference = 1; for(i=0; h->delayed_pic[i]; i++) if(h->delayed_pic[i]->key_frame || h->delayed_pic[i]->poc==0) cross_idr = 1; out = h->delayed_pic[0]; for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame; i++) if(h->delayed_pic[i]->poc < out->poc){ out = h->delayed_pic[i]; out_idx = i; } out_of_order = !cross_idr && prev && out->poc < prev->poc; if(prev && pics <= s->avctx->has_b_frames) out = prev; else if((out_of_order && pics-1 == s->avctx->has_b_frames) || (s->low_delay && ((!cross_idr && prev && out->poc > prev->poc + 2) || 
cur->pict_type == B_TYPE))) { s->low_delay = 0; s->avctx->has_b_frames++; out = prev; } else if(out_of_order) out = prev; if(out_of_order || pics > s->avctx->has_b_frames){ dropped_frame = (out != h->delayed_pic[out_idx]); for(i=out_idx; h->delayed_pic[i]; i++) h->delayed_pic[i] = h->delayed_pic[i+1]; } if(prev == out && !dropped_frame) *data_size = 0; else *data_size = sizeof(AVFrame); if(prev && prev != out && prev->reference == 1) prev->reference = 0; h->delayed_output_pic = out; #endif *pict= *(AVFrame*)out; } assert(pict->data[0]); ff_print_debug_info(s, pict); //printf(\"out %d\\n\", (int)pict->data[0]); #if 0 //? /* Return the Picture timestamp as the frame number */ /* we substract 1 because it is added on utils.c */ avctx->frame_number = s->picture_number - 1; #endif return get_consumed_bytes(s, buf_index, buf_size); }"} {"target": 0, "idx": 24044, "func": "static void cpu_common_reset(CPUState *cpu) { CPUClass *cc = CPU_GET_CLASS(cpu); if (qemu_loglevel_mask(CPU_LOG_RESET)) { qemu_log(\"CPU Reset (CPU %d)\\n\", cpu->cpu_index); log_cpu_state(cpu, cc->reset_dump_flags); } cpu->interrupt_request = 0; cpu->halted = 0; cpu->mem_io_pc = 0; cpu->mem_io_vaddr = 0; cpu->icount_extra = 0; cpu->icount_decr.u32 = 0; cpu->can_do_io = 1; cpu->exception_index = -1; cpu->crash_occurred = false; if (tcg_enabled()) { cpu_tb_jmp_cache_clear(cpu); #ifdef CONFIG_SOFTMMU tlb_flush(cpu, 0); #endif } }"} {"target": 0, "idx": 24047, "func": "int qemu_opts_foreach(QemuOptsList *list, qemu_opts_loopfunc func, void *opaque, int abort_on_failure) { QemuOpts *opts; int rc = 0; TAILQ_FOREACH(opts, &list->head, next) { rc = func(opts, opaque); if (abort_on_failure && rc != 0) break; } return rc; }"} {"target": 0, "idx": 24050, "func": "static void subpage_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned len) { subpage_t *mmio = opaque; unsigned int idx = SUBPAGE_IDX(addr); MemoryRegionSection *section; #if defined(DEBUG_SUBPAGE) printf(\"%s: subpage %p len %d addr \" TARGET_FMT_plx \" idx %d value %\"PRIx64\"\\n\", __func__, mmio, len, addr, idx, value); #endif section = &phys_sections[mmio->sub_section[idx]]; addr += mmio->base; addr -= section->offset_within_address_space; addr += section->offset_within_region; io_mem_write(section->mr, addr, value, len); }"} {"target": 0, "idx": 24072, "func": "static int nvenc_find_free_reg_resource(AVCodecContext *avctx) { NvencContext *ctx = avctx->priv_data; NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs; NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs; int i; if (ctx->nb_registered_frames == FF_ARRAY_ELEMS(ctx->registered_frames)) { for (i = 0; i < ctx->nb_registered_frames; i++) { if (!ctx->registered_frames[i].mapped) { if (ctx->registered_frames[i].regptr) { p_nvenc->nvEncUnregisterResource(ctx->nvencoder, ctx->registered_frames[i].regptr); ctx->registered_frames[i].regptr = NULL; } return i; } } } else { return ctx->nb_registered_frames++; } av_log(avctx, AV_LOG_ERROR, \"Too many registered CUDA frames\\n\"); return AVERROR(ENOMEM); }"} {"target": 1, "idx": 24075, "func": "void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) { unsigned int i; /* Discard jump cache entries for any tb which might potentially overlap the flushed page. 
*/ i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE); memset(&cpu->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); i = tb_jmp_cache_hash_page(addr); memset(&cpu->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); }"} {"target": 0, "idx": 24084, "func": "DeviceState *bdrv_get_attached(BlockDriverState *bs) { return bs->peer; }"} {"target": 0, "idx": 24085, "func": "static void qapi_dealloc_push(QapiDeallocVisitor *qov, void *value) { StackEntry *e = g_malloc0(sizeof(*e)); e->value = value; /* see if we're just pushing a list head tracker */ if (value == NULL) { e->is_list_head = true; } QTAILQ_INSERT_HEAD(&qov->stack, e, node); }"} {"target": 0, "idx": 24087, "func": "void *vnc_zlib_zalloc(void *x, unsigned items, unsigned size) { void *p; size *= items; size = (size + ZALLOC_ALIGNMENT - 1) & ~(ZALLOC_ALIGNMENT - 1); p = qemu_mallocz(size); return (p); }"} {"target": 0, "idx": 24090, "func": "static int send_png_rect(VncState *vs, int x, int y, int w, int h, VncPalette *palette) { png_byte color_type; png_structp png_ptr; png_infop info_ptr; png_colorp png_palette = NULL; pixman_image_t *linebuf; int level = tight_png_conf[vs->tight.compression].png_zlib_level; int filters = tight_png_conf[vs->tight.compression].png_filters; uint8_t *buf; int dy; png_ptr = png_create_write_struct_2(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL, NULL, vnc_png_malloc, vnc_png_free); if (png_ptr == NULL) return -1; info_ptr = png_create_info_struct(png_ptr); if (info_ptr == NULL) { png_destroy_write_struct(&png_ptr, NULL); return -1; } png_set_write_fn(png_ptr, (void *) vs, png_write_data, png_flush_data); png_set_compression_level(png_ptr, level); png_set_filter(png_ptr, PNG_FILTER_TYPE_DEFAULT, filters); if (palette) { color_type = PNG_COLOR_TYPE_PALETTE; } else { color_type = PNG_COLOR_TYPE_RGB; } png_set_IHDR(png_ptr, info_ptr, w, h, 8, color_type, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT); if (color_type == PNG_COLOR_TYPE_PALETTE) { struct palette_cb_priv priv; png_palette = png_malloc(png_ptr, sizeof(*png_palette) * palette_size(palette)); priv.vs = vs; priv.png_palette = png_palette; palette_iter(palette, write_png_palette, &priv); png_set_PLTE(png_ptr, info_ptr, png_palette, palette_size(palette)); if (vs->client_pf.bytes_per_pixel == 4) { tight_encode_indexed_rect32(vs->tight.tight.buffer, w * h, palette); } else { tight_encode_indexed_rect16(vs->tight.tight.buffer, w * h, palette); } } png_write_info(png_ptr, info_ptr); buffer_reserve(&vs->tight.png, 2048); linebuf = qemu_pixman_linebuf_create(PIXMAN_BE_r8g8b8, w); buf = (uint8_t *)pixman_image_get_data(linebuf); for (dy = 0; dy < h; dy++) { if (color_type == PNG_COLOR_TYPE_PALETTE) { memcpy(buf, vs->tight.tight.buffer + (dy * w), w); } else { qemu_pixman_linebuf_fill(linebuf, vs->vd->server, w, dy); } png_write_row(png_ptr, buf); } qemu_pixman_image_unref(linebuf); png_write_end(png_ptr, NULL); if (color_type == PNG_COLOR_TYPE_PALETTE) { png_free(png_ptr, png_palette); } png_destroy_write_struct(&png_ptr, &info_ptr); vnc_write_u8(vs, VNC_TIGHT_PNG << 4); tight_send_compact_size(vs, vs->tight.png.offset); vnc_write(vs, vs->tight.png.buffer, vs->tight.png.offset); buffer_reset(&vs->tight.png); return 1; }"} {"target": 0, "idx": 24100, "func": "void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io, FWCfgState *fw_cfg, Object *owner) { memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state, \"nvdimm-acpi-io\", NVDIMM_ACPI_IO_LEN); memory_region_add_subregion(io, 
NVDIMM_ACPI_IO_BASE, &state->io_mr); state->dsm_mem = g_array_new(false, true /* clear */, 1); acpi_data_push(state->dsm_mem, TARGET_PAGE_SIZE); fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data, state->dsm_mem->len); }"} {"target": 0, "idx": 24103, "func": "static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix, bool *rebuild, void **refcount_table, int64_t *nb_clusters) { BDRVQcow2State *s = bs->opaque; int64_t i; QCowSnapshot *sn; int ret; if (!*refcount_table) { int64_t old_size = 0; ret = realloc_refcount_array(s, refcount_table, &old_size, *nb_clusters); if (ret < 0) { res->check_errors++; return ret; } } /* header */ ret = inc_refcounts(bs, res, refcount_table, nb_clusters, 0, s->cluster_size); if (ret < 0) { return ret; } /* current L1 table */ ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO); if (ret < 0) { return ret; } /* snapshots */ for (i = 0; i < s->nb_snapshots; i++) { sn = s->snapshots + i; ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, sn->l1_table_offset, sn->l1_size, 0); if (ret < 0) { return ret; } } ret = inc_refcounts(bs, res, refcount_table, nb_clusters, s->snapshots_offset, s->snapshots_size); if (ret < 0) { return ret; } /* refcount data */ ret = inc_refcounts(bs, res, refcount_table, nb_clusters, s->refcount_table_offset, s->refcount_table_size * sizeof(uint64_t)); if (ret < 0) { return ret; } /* encryption */ if (s->crypto_header.length) { ret = inc_refcounts(bs, res, refcount_table, nb_clusters, s->crypto_header.offset, s->crypto_header.length); if (ret < 0) { return ret; } } return check_refblocks(bs, res, fix, rebuild, refcount_table, nb_clusters); }"} {"target": 1, "idx": 24123, "func": "static void ehci_trace_qtd(EHCIQueue *q, target_phys_addr_t addr, EHCIqtd *qtd) { trace_usb_ehci_qtd(q, addr, qtd->next, qtd->altnext, get_field(qtd->token, QTD_TOKEN_TBYTES), get_field(qtd->token, QTD_TOKEN_CPAGE), get_field(qtd->token, QTD_TOKEN_CERR), get_field(qtd->token, QTD_TOKEN_PID), (bool)(qtd->token & QTD_TOKEN_IOC), (bool)(qtd->token & QTD_TOKEN_ACTIVE), (bool)(qtd->token & QTD_TOKEN_HALT), (bool)(qtd->token & QTD_TOKEN_BABBLE), (bool)(qtd->token & QTD_TOKEN_XACTERR)); }"} {"target": 1, "idx": 24141, "func": "static void megasas_scsi_uninit(PCIDevice *d) { MegasasState *s = MEGASAS(d); if (megasas_use_msix(s)) { msix_uninit(d, &s->mmio_io, &s->mmio_io); } if (megasas_use_msi(s)) { msi_uninit(d); } }"} {"target": 0, "idx": 24148, "func": "static int jpeg2000_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { Jpeg2000DecoderContext *s = avctx->priv_data; ThreadFrame frame = { .f = data }; AVFrame *picture = data; int tileno, ret; s->avctx = avctx; s->buf = s->buf_start = avpkt->data; s->buf_end = s->buf_start + avpkt->size; s->curtileno = 0; // TODO: only one tile in DCI JP2K. 
to implement for more tiles // reduction factor, i.e number of resolution levels to skip s->reduction_factor = s->lowres; ff_jpeg2000_init_tier1_luts(); if (s->buf_end - s->buf < 2) return AVERROR(EINVAL); // check if the image is in jp2 format if ((AV_RB32(s->buf) == 12) && (AV_RB32(s->buf + 4) == JP2_SIG_TYPE) && (AV_RB32(s->buf + 8) == JP2_SIG_VALUE)) { if (!jp2_find_codestream(s)) { av_log(avctx, AV_LOG_ERROR, \"couldn't find jpeg2k codestream atom\\n\"); return -1; } } if (bytestream_get_be16(&s->buf) != JPEG2000_SOC) { av_log(avctx, AV_LOG_ERROR, \"SOC marker not present\\n\"); return -1; } if (ret = jpeg2000_read_main_headers(s)) goto end; /* get picture buffer */ if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, \"ff_thread_get_buffer() failed.\\n\"); goto end; } picture->pict_type = AV_PICTURE_TYPE_I; picture->key_frame = 1; if (ret = jpeg2000_read_bitstream_packets(s)) goto end; for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++) if (ret = jpeg2000_decode_tile(s, s->tile + tileno, picture)) goto end; *got_frame = 1; end: jpeg2000_dec_cleanup(s); return ret ? ret : s->buf - s->buf_start; }"} {"target": 0, "idx": 24162, "func": "static void vfio_platform_eoi(VFIODevice *vbasedev) { VFIOINTp *intp; VFIOPlatformDevice *vdev = container_of(vbasedev, VFIOPlatformDevice, vbasedev); qemu_mutex_lock(&vdev->intp_mutex); QLIST_FOREACH(intp, &vdev->intp_list, next) { if (intp->state == VFIO_IRQ_ACTIVE) { trace_vfio_platform_eoi(intp->pin, event_notifier_get_fd(&intp->interrupt)); intp->state = VFIO_IRQ_INACTIVE; /* deassert the virtual IRQ */ qemu_set_irq(intp->qemuirq, 0); if (intp->flags & VFIO_IRQ_INFO_AUTOMASKED) { /* unmasks the physical level-sensitive IRQ */ vfio_unmask_single_irqindex(vbasedev, intp->pin); } /* a single IRQ can be active at a time */ break; } } /* in case there are pending IRQs, handle the first one */ if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) { intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue); vfio_intp_inject_pending_lockheld(intp); QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext); } qemu_mutex_unlock(&vdev->intp_mutex); }"} {"target": 0, "idx": 24164, "func": "static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv val) { TCGv tmp; tmp = new_tmp(); tcg_gen_trunc_i64_i32(tmp, val); store_reg(s, rlow, tmp); tmp = new_tmp(); tcg_gen_shri_i64(val, val, 32); tcg_gen_trunc_i64_i32(tmp, val); store_reg(s, rhigh, tmp); }"} {"target": 0, "idx": 24179, "func": "static void vararg_number(void) { QObject *obj; QInt *qint; QFloat *qfloat; int value = 0x2342; int64_t value64 = 0x2342342343LL; double valuef = 2.323423423; obj = qobject_from_jsonf(\"%d\", value); g_assert(obj != NULL); g_assert(qobject_type(obj) == QTYPE_QINT); qint = qobject_to_qint(obj); g_assert(qint_get_int(qint) == value); QDECREF(qint); obj = qobject_from_jsonf(\"%\" PRId64, value64); g_assert(obj != NULL); g_assert(qobject_type(obj) == QTYPE_QINT); qint = qobject_to_qint(obj); g_assert(qint_get_int(qint) == value64); QDECREF(qint); obj = qobject_from_jsonf(\"%f\", valuef); g_assert(obj != NULL); g_assert(qobject_type(obj) == QTYPE_QFLOAT); qfloat = qobject_to_qfloat(obj); g_assert(qfloat_get_double(qfloat) == valuef); QDECREF(qfloat); }"} {"target": 0, "idx": 24200, "func": "static int ohci_service_ed_list(OHCIState *ohci, uint32_t head, int completion) { struct ohci_ed ed; uint32_t next_ed; uint32_t cur; int active; active = 0; if (head == 0) return 0; for (cur = head; cur; cur = next_ed) { if (ohci_read_ed(ohci, cur, &ed)) { 
trace_usb_ohci_ed_read_error(cur); ohci_die(ohci); return 0; } next_ed = ed.next & OHCI_DPTR_MASK; if ((ed.head & OHCI_ED_H) || (ed.flags & OHCI_ED_K)) { uint32_t addr; /* Cancel pending packets for ED that have been paused. */ addr = ed.head & OHCI_DPTR_MASK; if (ohci->async_td && addr == ohci->async_td) { usb_cancel_packet(&ohci->usb_packet); ohci->async_td = 0; usb_device_ep_stopped(ohci->usb_packet.ep->dev, ohci->usb_packet.ep); } continue; } while ((ed.head & OHCI_DPTR_MASK) != ed.tail) { trace_usb_ohci_ed_pkt(cur, (ed.head & OHCI_ED_H) != 0, (ed.head & OHCI_ED_C) != 0, ed.head & OHCI_DPTR_MASK, ed.tail & OHCI_DPTR_MASK, ed.next & OHCI_DPTR_MASK); trace_usb_ohci_ed_pkt_flags( OHCI_BM(ed.flags, ED_FA), OHCI_BM(ed.flags, ED_EN), OHCI_BM(ed.flags, ED_D), (ed.flags & OHCI_ED_S)!= 0, (ed.flags & OHCI_ED_K) != 0, (ed.flags & OHCI_ED_F) != 0, OHCI_BM(ed.flags, ED_MPS)); active = 1; if ((ed.flags & OHCI_ED_F) == 0) { if (ohci_service_td(ohci, &ed)) break; } else { /* Handle isochronous endpoints */ if (ohci_service_iso_td(ohci, &ed, completion)) break; } } if (ohci_put_ed(ohci, cur, &ed)) { ohci_die(ohci); return 0; } } return active; }"} {"target": 0, "idx": 24201, "func": "void s390_pci_iommu_disable(S390PCIBusDevice *pbdev) { memory_region_del_subregion(&pbdev->mr, &pbdev->iommu_mr); object_unparent(OBJECT(&pbdev->iommu_mr)); pbdev->iommu_enabled = false; }"} {"target": 0, "idx": 24205, "func": "int ffurl_connect(URLContext *uc, AVDictionary **options) { int err = uc->prot->url_open2 ? uc->prot->url_open2(uc, uc->filename, uc->flags, options) : uc->prot->url_open(uc, uc->filename, uc->flags); if (err) return err; uc->is_connected = 1; /* We must be careful here as ffurl_seek() could be slow, * for example for http */ if ((uc->flags & AVIO_FLAG_WRITE) || !strcmp(uc->prot->name, \"file\")) if (!uc->is_streamed && ffurl_seek(uc, 0, SEEK_SET) < 0) uc->is_streamed = 1; return 0; }"} {"target": 0, "idx": 24206, "func": "uint8_t sd_read_data(SDState *sd) { /* TODO: Append CRCs */ uint8_t ret; int io_len; if (!sd->bdrv || !bdrv_is_inserted(sd->bdrv) || !sd->enable) return 0x00; if (sd->state != sd_sendingdata_state) { fprintf(stderr, \"sd_read_data: not in Sending-Data state\\n\"); return 0x00; } if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION)) return 0x00; io_len = (sd->ocr & (1 << 30)) ? 
512 : sd->blk_len; switch (sd->current_cmd) { case 6: /* CMD6: SWITCH_FUNCTION */ ret = sd->data[sd->data_offset ++]; if (sd->data_offset >= 64) sd->state = sd_transfer_state; break; case 9: /* CMD9: SEND_CSD */ case 10: /* CMD10: SEND_CID */ ret = sd->data[sd->data_offset ++]; if (sd->data_offset >= 16) sd->state = sd_transfer_state; break; case 11: /* CMD11: READ_DAT_UNTIL_STOP */ if (sd->data_offset == 0) BLK_READ_BLOCK(sd->data_start, io_len); ret = sd->data[sd->data_offset ++]; if (sd->data_offset >= io_len) { sd->data_start += io_len; sd->data_offset = 0; if (sd->data_start + io_len > sd->size) { sd->card_status |= ADDRESS_ERROR; break; } } break; case 13: /* ACMD13: SD_STATUS */ ret = sd->sd_status[sd->data_offset ++]; if (sd->data_offset >= sizeof(sd->sd_status)) sd->state = sd_transfer_state; break; case 17: /* CMD17: READ_SINGLE_BLOCK */ if (sd->data_offset == 0) BLK_READ_BLOCK(sd->data_start, io_len); ret = sd->data[sd->data_offset ++]; if (sd->data_offset >= io_len) sd->state = sd_transfer_state; break; case 18: /* CMD18: READ_MULTIPLE_BLOCK */ if (sd->data_offset == 0) BLK_READ_BLOCK(sd->data_start, io_len); ret = sd->data[sd->data_offset ++]; if (sd->data_offset >= io_len) { sd->data_start += io_len; sd->data_offset = 0; if (sd->data_start + io_len > sd->size) { sd->card_status |= ADDRESS_ERROR; break; } } break; case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */ ret = sd->data[sd->data_offset ++]; if (sd->data_offset >= 4) sd->state = sd_transfer_state; break; case 30: /* CMD30: SEND_WRITE_PROT */ ret = sd->data[sd->data_offset ++]; if (sd->data_offset >= 4) sd->state = sd_transfer_state; break; case 51: /* ACMD51: SEND_SCR */ ret = sd->scr[sd->data_offset ++]; if (sd->data_offset >= sizeof(sd->scr)) sd->state = sd_transfer_state; break; case 56: /* CMD56: GEN_CMD */ if (sd->data_offset == 0) APP_READ_BLOCK(sd->data_start, sd->blk_len); ret = sd->data[sd->data_offset ++]; if (sd->data_offset >= sd->blk_len) sd->state = sd_transfer_state; break; default: fprintf(stderr, \"sd_read_data: unknown command\\n\"); return 0x00; } return ret; }"} {"target": 0, "idx": 24212, "func": "static void test_visitor_out_int(TestOutputVisitorData *data, const void *unused) { int64_t value = -42; QObject *obj; visit_type_int(data->ov, NULL, &value, &error_abort); obj = visitor_get(data); g_assert(qobject_type(obj) == QTYPE_QINT); g_assert_cmpint(qint_get_int(qobject_to_qint(obj)), ==, value); }"} {"target": 0, "idx": 24213, "func": "void net_slirp_hostfwd_remove(Monitor *mon, const char *src_str) { struct in_addr host_addr = { .s_addr = INADDR_ANY }; int host_port; char buf[256] = \"\"; const char *p = src_str; int is_udp = 0; int n; if (!slirp_inited) { monitor_printf(mon, \"user mode network stack not in use\\n\"); return; } if (!src_str || !src_str[0]) goto fail_syntax; get_str_sep(buf, sizeof(buf), &p, ':'); if (!strcmp(buf, \"tcp\") || buf[0] == '\\0') { is_udp = 0; } else if (!strcmp(buf, \"udp\")) { is_udp = 1; } else { goto fail_syntax; } if (get_str_sep(buf, sizeof(buf), &p, ':') < 0) { goto fail_syntax; } if (buf[0] != '\\0' && !inet_aton(buf, &host_addr)) { goto fail_syntax; } host_port = atoi(p); n = slirp_remove_hostfwd(is_udp, host_addr, host_port); monitor_printf(mon, \"removed %d host forwarding rules for %s\\n\", n, src_str); return; fail_syntax: monitor_printf(mon, \"invalid format\\n\"); }"} {"target": 1, "idx": 24222, "func": "static int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num, int remaining_sectors, QEMUIOVector *qiov) { BDRVQcowState *s = bs->opaque; int 
index_in_cluster, n1; int ret; int cur_nr_sectors; /* number of sectors in current iteration */ uint64_t cluster_offset = 0; uint64_t bytes_done = 0; QEMUIOVector hd_qiov; uint8_t *cluster_data = NULL; qemu_iovec_init(&hd_qiov, qiov->niov); qemu_co_mutex_lock(&s->lock); while (remaining_sectors != 0) { /* prepare next request */ cur_nr_sectors = remaining_sectors; if (s->crypt_method) { cur_nr_sectors = MIN(cur_nr_sectors, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors); } ret = qcow2_get_cluster_offset(bs, sector_num << 9, &cur_nr_sectors, &cluster_offset); if (ret < 0) { goto fail; } index_in_cluster = sector_num & (s->cluster_sectors - 1); qemu_iovec_reset(&hd_qiov); qemu_iovec_copy(&hd_qiov, qiov, bytes_done, cur_nr_sectors * 512); if (!cluster_offset) { if (bs->backing_hd) { /* read from the base image */ n1 = qcow2_backing_read1(bs->backing_hd, &hd_qiov, sector_num, cur_nr_sectors); if (n1 > 0) { BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); qemu_co_mutex_unlock(&s->lock); ret = bdrv_co_readv(bs->backing_hd, sector_num, n1, &hd_qiov); qemu_co_mutex_lock(&s->lock); if (ret < 0) { goto fail; } } } else { /* Note: in this case, no need to wait */ qemu_iovec_memset(&hd_qiov, 0, 512 * cur_nr_sectors); } } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) { /* add AIO support for compressed blocks ? */ ret = qcow2_decompress_cluster(bs, cluster_offset); if (ret < 0) { goto fail; } qemu_iovec_from_buffer(&hd_qiov, s->cluster_cache + index_in_cluster * 512, 512 * cur_nr_sectors); } else { if ((cluster_offset & 511) != 0) { ret = -EIO; goto fail; } if (s->crypt_method) { /* * For encrypted images, read everything into a temporary * contiguous buffer on which the AES functions can work. */ if (!cluster_data) { cluster_data = g_malloc0(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); } assert(cur_nr_sectors <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors); qemu_iovec_reset(&hd_qiov); qemu_iovec_add(&hd_qiov, cluster_data, 512 * cur_nr_sectors); } BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); qemu_co_mutex_unlock(&s->lock); ret = bdrv_co_readv(bs->file, (cluster_offset >> 9) + index_in_cluster, cur_nr_sectors, &hd_qiov); qemu_co_mutex_lock(&s->lock); if (ret < 0) { goto fail; } if (s->crypt_method) { qcow2_encrypt_sectors(s, sector_num, cluster_data, cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key); qemu_iovec_reset(&hd_qiov); qemu_iovec_copy(&hd_qiov, qiov, bytes_done, cur_nr_sectors * 512); qemu_iovec_from_buffer(&hd_qiov, cluster_data, 512 * cur_nr_sectors); } } remaining_sectors -= cur_nr_sectors; sector_num += cur_nr_sectors; bytes_done += cur_nr_sectors * 512; } ret = 0; fail: qemu_co_mutex_unlock(&s->lock); qemu_iovec_destroy(&hd_qiov); return ret; }"} {"target": 1, "idx": 24230, "func": "static int read_kuki_chunk(AVFormatContext *s, int64_t size) { AVIOContext *pb = s->pb; AVStream *st = s->streams[0]; if (size < 0 || size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE) return -1; if (st->codec->codec_id == AV_CODEC_ID_AAC) { /* The magic cookie format for AAC is an mp4 esds atom. The lavc AAC decoder requires the data from the codec specific description as extradata input. 
*/ int strt, skip; MOVAtom atom; strt = avio_tell(pb); ff_mov_read_esds(s, pb, atom); skip = size - (avio_tell(pb) - strt); if (skip < 0 || !st->codec->extradata || st->codec->codec_id != AV_CODEC_ID_AAC) { av_log(s, AV_LOG_ERROR, \"invalid AAC magic cookie\\n\"); return AVERROR_INVALIDDATA; } avio_skip(pb, skip); } else if (st->codec->codec_id == AV_CODEC_ID_ALAC) { #define ALAC_PREAMBLE 12 #define ALAC_HEADER 36 #define ALAC_NEW_KUKI 24 uint8_t preamble[12]; if (size < ALAC_NEW_KUKI) { av_log(s, AV_LOG_ERROR, \"invalid ALAC magic cookie\\n\"); avio_skip(pb, size); return AVERROR_INVALIDDATA; } avio_read(pb, preamble, ALAC_PREAMBLE); st->codec->extradata = av_mallocz(ALAC_HEADER + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); /* For the old style cookie, we skip 12 bytes, then read 36 bytes. * The new style cookie only contains the last 24 bytes of what was * 36 bytes in the old style cookie, so we fabricate the first 12 bytes * in that case to maintain compatibility. */ if (!memcmp(&preamble[4], \"frmaalac\", 8)) { if (size < ALAC_PREAMBLE + ALAC_HEADER) { av_log(s, AV_LOG_ERROR, \"invalid ALAC magic cookie\\n\"); av_freep(&st->codec->extradata); return AVERROR_INVALIDDATA; } avio_read(pb, st->codec->extradata, ALAC_HEADER); avio_skip(pb, size - ALAC_PREAMBLE - ALAC_HEADER); } else { AV_WB32(st->codec->extradata, 36); memcpy(&st->codec->extradata[4], \"alac\", 4); AV_WB32(&st->codec->extradata[8], 0); memcpy(&st->codec->extradata[12], preamble, 12); avio_read(pb, &st->codec->extradata[24], ALAC_NEW_KUKI - 12); avio_skip(pb, size - ALAC_NEW_KUKI); } st->codec->extradata_size = ALAC_HEADER; } else { st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); avio_read(pb, st->codec->extradata, size); st->codec->extradata_size = size; } return 0; }"} {"target": 1, "idx": 24231, "func": "static void *qemu_fopen_rdma(RDMAContext *rdma, const char *mode) { QEMUFileRDMA *r = g_malloc0(sizeof(QEMUFileRDMA)); if (qemu_file_mode_is_not_valid(mode)) { return NULL; } r->rdma = rdma; if (mode[0] == 'w') { r->file = qemu_fopen_ops(r, &rdma_write_ops); } else { r->file = qemu_fopen_ops(r, &rdma_read_ops); } return r->file; }"} {"target": 1, "idx": 24244, "func": "static int decode_codestream(J2kDecoderContext *s) { J2kCodingStyle *codsty = s->codsty; J2kQuantStyle *qntsty = s->qntsty; uint8_t *properties = s->properties; for (;;){ int marker, len, ret = 0; const uint8_t *oldbuf; if (s->buf_end - s->buf < 2){ av_log(s->avctx, AV_LOG_ERROR, \"Missing EOC\\n\"); break; } marker = bytestream_get_be16(&s->buf); if(s->avctx->debug & FF_DEBUG_STARTCODE) av_log(s->avctx, AV_LOG_DEBUG, \"marker 0x%.4X at pos 0x%tx\\n\", marker, s->buf - s->buf_start - 4); oldbuf = s->buf; if (marker == J2K_SOD){ J2kTile *tile = s->tile + s->curtileno; if (ret = init_tile(s, s->curtileno)) return ret; if (ret = decode_packets(s, tile)) return ret; continue; } if (marker == J2K_EOC) break; if (s->buf_end - s->buf < 2) return AVERROR(EINVAL); len = bytestream_get_be16(&s->buf); switch(marker){ case J2K_SIZ: ret = get_siz(s); break; case J2K_COC: ret = get_coc(s, codsty, properties); break; case J2K_COD: ret = get_cod(s, codsty, properties); break; case J2K_QCC: ret = get_qcc(s, len, qntsty, properties); break; case J2K_QCD: ret = get_qcd(s, len, qntsty, properties); break; case J2K_SOT: if (!(ret = get_sot(s))){ codsty = s->tile[s->curtileno].codsty; qntsty = s->tile[s->curtileno].qntsty; properties = 
s->tile[s->curtileno].properties; } break; case J2K_COM: // the comment is ignored s->buf += len - 2; break; default: av_log(s->avctx, AV_LOG_ERROR, \"unsupported marker 0x%.4X at pos 0x%tx\\n\", marker, s->buf - s->buf_start - 4); s->buf += len - 2; break; } if (s->buf - oldbuf != len || ret){ av_log(s->avctx, AV_LOG_ERROR, \"error during processing marker segment %.4x\\n\", marker); return ret ? ret : -1; } } return 0; }"} {"target": 0, "idx": 24269, "func": "static av_cold int ass_decode_init(AVCodecContext *avctx) { avctx->subtitle_header = av_malloc(avctx->extradata_size); if (!avctx->extradata) return AVERROR(ENOMEM); memcpy(avctx->subtitle_header, avctx->extradata, avctx->extradata_size); avctx->subtitle_header_size = avctx->extradata_size; return 0; }"} {"target": 1, "idx": 24280, "func": "int v9fs_device_realize_common(V9fsState *s, Error **errp) { int i, len; struct stat stat; FsDriverEntry *fse; V9fsPath path; int rc = 1; /* initialize pdu allocator */ QLIST_INIT(&s->free_list); QLIST_INIT(&s->active_list); for (i = 0; i < (MAX_REQ - 1); i++) { QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next); s->pdus[i].s = s; s->pdus[i].idx = i; } v9fs_path_init(&path); fse = get_fsdev_fsentry(s->fsconf.fsdev_id); if (!fse) { /* We don't have a fsdev identified by fsdev_id */ error_setg(errp, \"9pfs device couldn't find fsdev with the \" \"id = %s\", s->fsconf.fsdev_id ? s->fsconf.fsdev_id : \"NULL\"); goto out; } if (!s->fsconf.tag) { /* we haven't specified a mount_tag */ error_setg(errp, \"fsdev with id %s needs mount_tag arguments\", s->fsconf.fsdev_id); goto out; } s->ctx.export_flags = fse->export_flags; s->ctx.fs_root = g_strdup(fse->path); s->ctx.exops.get_st_gen = NULL; len = strlen(s->fsconf.tag); if (len > MAX_TAG_LEN - 1) { error_setg(errp, \"mount tag '%s' (%d bytes) is longer than \" \"maximum (%d bytes)\", s->fsconf.tag, len, MAX_TAG_LEN - 1); goto out; } s->tag = g_strdup(s->fsconf.tag); s->ctx.uid = -1; s->ops = fse->ops; s->fid_list = NULL; qemu_co_rwlock_init(&s->rename_lock); if (s->ops->init(&s->ctx) < 0) { error_setg(errp, \"9pfs Failed to initialize fs-driver with id:%s\" \" and export path:%s\", s->fsconf.fsdev_id, s->ctx.fs_root); goto out; } /* * Check details of export path, We need to use fs driver * call back to do that. Since we are in the init path, we don't * use co-routines here. 
*/ if (s->ops->name_to_path(&s->ctx, NULL, \"/\", &path) < 0) { error_setg(errp, \"error in converting name to path %s\", strerror(errno)); goto out; } if (s->ops->lstat(&s->ctx, &path, &stat)) { error_setg(errp, \"share path %s does not exist\", fse->path); goto out; } else if (!S_ISDIR(stat.st_mode)) { error_setg(errp, \"share path %s is not a directory\", fse->path); goto out; } v9fs_path_free(&path); rc = 0; out: if (rc) { if (s->ops->cleanup && s->ctx.private) { s->ops->cleanup(&s->ctx); } g_free(s->tag); g_free(s->ctx.fs_root); v9fs_path_free(&path); } return rc; }"} {"target": 1, "idx": 24283, "func": "static int wsd_read_header(AVFormatContext *s) { AVIOContext *pb = s->pb; AVStream *st; int version; uint32_t text_offset, data_offset, channel_assign; char playback_time[AV_TIMECODE_STR_SIZE]; st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); avio_skip(pb, 8); version = avio_r8(pb); av_log(s, AV_LOG_DEBUG, \"version: %i.%i\\n\", version >> 4, version & 0xF); avio_skip(pb, 11); if (version < 0x10) { text_offset = 0x80; data_offset = 0x800; avio_skip(pb, 8); } else { text_offset = avio_rb32(pb); data_offset = avio_rb32(pb); } avio_skip(pb, 4); av_timecode_make_smpte_tc_string(playback_time, avio_rb32(pb), 0); av_dict_set(&s->metadata, \"playback_time\", playback_time, 0); st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_id = s->iformat->raw_codec_id; st->codecpar->sample_rate = avio_rb32(pb) / 8; avio_skip(pb, 4); st->codecpar->channels = avio_r8(pb) & 0xF; st->codecpar->bit_rate = st->codecpar->channels * st->codecpar->sample_rate * 8LL; if (!st->codecpar->channels) return AVERROR_INVALIDDATA; avio_skip(pb, 3); channel_assign = avio_rb32(pb); if (!(channel_assign & 1)) { int i; for (i = 1; i < 32; i++) if (channel_assign & (1 << i)) st->codecpar->channel_layout |= wsd_to_av_channel_layoyt(s, i); } avio_skip(pb, 16); if (avio_rb32(pb)) avpriv_request_sample(s, \"emphasis\"); if (avio_seek(pb, text_offset, SEEK_SET) >= 0) { get_metadata(s, \"title\", 128); get_metadata(s, \"composer\", 128); get_metadata(s, \"song_writer\", 128); get_metadata(s, \"artist\", 128); get_metadata(s, \"album\", 128); get_metadata(s, \"genre\", 32); get_metadata(s, \"date\", 32); get_metadata(s, \"location\", 32); get_metadata(s, \"comment\", 512); get_metadata(s, \"user\", 512); } return avio_seek(pb, data_offset, SEEK_SET); }"} {"target": 0, "idx": 24289, "func": "x11grab_read_header(AVFormatContext *s1) { struct x11grab *x11grab = s1->priv_data; Display *dpy; AVStream *st = NULL; enum AVPixelFormat input_pixfmt; XImage *image; int x_off = 0; int y_off = 0; int screen; int use_shm; char *dpyname, *offset; int ret = 0; Colormap color_map; XColor color[256]; int i; dpyname = av_strdup(s1->filename); if (!dpyname) goto out; offset = strchr(dpyname, '+'); if (offset) { sscanf(offset, \"%d,%d\", &x_off, &y_off); if (strstr(offset, \"nomouse\")) { av_log(s1, AV_LOG_WARNING, \"'nomouse' specification in argument is deprecated: \" \"use 'draw_mouse' option with value 0 instead\\n\"); x11grab->draw_mouse = 0; } *offset= 0; } av_log(s1, AV_LOG_INFO, \"device: %s -> display: %s x: %d y: %d width: %d height: %d\\n\", s1->filename, dpyname, x_off, y_off, x11grab->width, x11grab->height); dpy = XOpenDisplay(dpyname); av_freep(&dpyname); if(!dpy) { av_log(s1, AV_LOG_ERROR, \"Could not open X display.\\n\"); ret = AVERROR(EIO); goto out; } st = avformat_new_stream(s1, NULL); if (!st) { ret = AVERROR(ENOMEM); goto out; } avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */ 
screen = DefaultScreen(dpy); if (x11grab->follow_mouse) { int screen_w, screen_h; Window w; screen_w = DisplayWidth(dpy, screen); screen_h = DisplayHeight(dpy, screen); XQueryPointer(dpy, RootWindow(dpy, screen), &w, &w, &x_off, &y_off, &ret, &ret, &ret); x_off -= x11grab->width / 2; y_off -= x11grab->height / 2; x_off = FFMIN(FFMAX(x_off, 0), screen_w - x11grab->width); y_off = FFMIN(FFMAX(y_off, 0), screen_h - x11grab->height); av_log(s1, AV_LOG_INFO, \"followmouse is enabled, resetting grabbing region to x: %d y: %d\\n\", x_off, y_off); } use_shm = XShmQueryExtension(dpy); av_log(s1, AV_LOG_INFO, \"shared memory extension%s found\\n\", use_shm ? \"\" : \" not\"); if(use_shm) { int scr = XDefaultScreen(dpy); image = XShmCreateImage(dpy, DefaultVisual(dpy, scr), DefaultDepth(dpy, scr), ZPixmap, NULL, &x11grab->shminfo, x11grab->width, x11grab->height); x11grab->shminfo.shmid = shmget(IPC_PRIVATE, image->bytes_per_line * image->height, IPC_CREAT|0777); if (x11grab->shminfo.shmid == -1) { av_log(s1, AV_LOG_ERROR, \"Fatal: Can't get shared memory!\\n\"); ret = AVERROR(ENOMEM); goto out; } x11grab->shminfo.shmaddr = image->data = shmat(x11grab->shminfo.shmid, 0, 0); x11grab->shminfo.readOnly = False; if (!XShmAttach(dpy, &x11grab->shminfo)) { av_log(s1, AV_LOG_ERROR, \"Fatal: Failed to attach shared memory!\\n\"); /* needs some better error subroutine :) */ ret = AVERROR(EIO); goto out; } } else { image = XGetImage(dpy, RootWindow(dpy, screen), x_off,y_off, x11grab->width, x11grab->height, AllPlanes, ZPixmap); } switch (image->bits_per_pixel) { case 8: av_log (s1, AV_LOG_DEBUG, \"8 bit palette\\n\"); input_pixfmt = AV_PIX_FMT_PAL8; color_map = DefaultColormap(dpy, screen); for (i = 0; i < 256; ++i) color[i].pixel = i; XQueryColors(dpy, color_map, color, 256); for (i = 0; i < 256; ++i) x11grab->palette[i] = (color[i].red & 0xFF00) << 8 | (color[i].green & 0xFF00) | (color[i].blue & 0xFF00) >> 8; x11grab->palette_changed = 1; break; case 16: if ( image->red_mask == 0xf800 && image->green_mask == 0x07e0 && image->blue_mask == 0x001f ) { av_log (s1, AV_LOG_DEBUG, \"16 bit RGB565\\n\"); input_pixfmt = AV_PIX_FMT_RGB565; } else if (image->red_mask == 0x7c00 && image->green_mask == 0x03e0 && image->blue_mask == 0x001f ) { av_log(s1, AV_LOG_DEBUG, \"16 bit RGB555\\n\"); input_pixfmt = AV_PIX_FMT_RGB555; } else { av_log(s1, AV_LOG_ERROR, \"RGB ordering at image depth %i not supported ... aborting\\n\", image->bits_per_pixel); av_log(s1, AV_LOG_ERROR, \"color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\\n\", image->red_mask, image->green_mask, image->blue_mask); ret = AVERROR_PATCHWELCOME; goto out; } break; case 24: if ( image->red_mask == 0xff0000 && image->green_mask == 0x00ff00 && image->blue_mask == 0x0000ff ) { input_pixfmt = AV_PIX_FMT_BGR24; } else if ( image->red_mask == 0x0000ff && image->green_mask == 0x00ff00 && image->blue_mask == 0xff0000 ) { input_pixfmt = AV_PIX_FMT_RGB24; } else { av_log(s1, AV_LOG_ERROR,\"rgb ordering at image depth %i not supported ... aborting\\n\", image->bits_per_pixel); av_log(s1, AV_LOG_ERROR, \"color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\\n\", image->red_mask, image->green_mask, image->blue_mask); ret = AVERROR_PATCHWELCOME; goto out; } break; case 32: input_pixfmt = AV_PIX_FMT_0RGB32; break; default: av_log(s1, AV_LOG_ERROR, \"image depth %i not supported ... 
aborting\\n\", image->bits_per_pixel); ret = AVERROR_PATCHWELCOME; goto out; } x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel/8; x11grab->dpy = dpy; x11grab->time_base = av_inv_q(x11grab->framerate); x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base); x11grab->x_off = x_off; x11grab->y_off = y_off; x11grab->image = image; x11grab->use_shm = use_shm; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = AV_CODEC_ID_RAWVIDEO; st->codec->width = x11grab->width; st->codec->height = x11grab->height; st->codec->pix_fmt = input_pixfmt; st->codec->time_base = x11grab->time_base; st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(x11grab->time_base) * 8; out: av_free(dpyname); return ret; }"} {"target": 0, "idx": 24291, "func": "static void vp8_h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim) { int i; for (i = 0; i < 16; i++) if (simple_limit(dst+i*stride, 1, flim)) filter_common(dst+i*stride, 1, 1); }"} {"target": 1, "idx": 24292, "func": "void bdrv_drain(BlockDriverState *bs) { bool busy = true; bdrv_drain_recurse(bs); while (busy) { /* Keep iterating */ bdrv_flush_io_queue(bs); busy = bdrv_requests_pending(bs); busy |= aio_poll(bdrv_get_aio_context(bs), busy);"} {"target": 1, "idx": 24294, "func": "static void main_loop(void) { bool nonblocking; int last_io __attribute__ ((unused)) = 0; #ifdef CONFIG_PROFILER int64_t ti; #endif int r; qemu_main_loop_start(); for (;;) { #ifdef CONFIG_IOTHREAD nonblocking = !kvm_enabled() && last_io > 0; #else nonblocking = cpu_exec_all(); if (vm_request_pending()) { nonblocking = true; } #endif #ifdef CONFIG_PROFILER ti = profile_getclock(); #endif last_io = main_loop_wait(nonblocking); #ifdef CONFIG_PROFILER dev_time += profile_getclock() - ti; #endif if (qemu_debug_requested()) { vm_stop(VMSTOP_DEBUG); } if (qemu_shutdown_requested()) { qemu_kill_report(); monitor_protocol_event(QEVENT_SHUTDOWN, NULL); if (no_shutdown) { vm_stop(VMSTOP_SHUTDOWN); } else break; } if (qemu_reset_requested()) { pause_all_vcpus(); cpu_synchronize_all_states(); qemu_system_reset(VMRESET_REPORT); resume_all_vcpus(); } if (qemu_powerdown_requested()) { monitor_protocol_event(QEVENT_POWERDOWN, NULL); qemu_irq_raise(qemu_system_powerdown); } if ((r = qemu_vmstop_requested())) { vm_stop(r); } } bdrv_close_all(); pause_all_vcpus(); }"} {"target": 0, "idx": 24311, "func": "static void bootp_reply(const struct bootp_t *bp) { BOOTPClient *bc = NULL; struct mbuf *m; struct bootp_t *rbp; struct sockaddr_in saddr, daddr; struct in_addr dns_addr; const struct in_addr *preq_addr; int dhcp_msg_type, val; uint8_t *q; /* extract exact DHCP msg type */ dhcp_decode(bp, &dhcp_msg_type, &preq_addr); dprintf(\"bootp packet op=%d msgtype=%d\", bp->bp_op, dhcp_msg_type); if (preq_addr) dprintf(\" req_addr=%08x\\n\", ntohl(preq_addr->s_addr)); else dprintf(\"\\n\"); if (dhcp_msg_type == 0) dhcp_msg_type = DHCPREQUEST; /* Force reply for old BOOTP clients */ if (dhcp_msg_type != DHCPDISCOVER && dhcp_msg_type != DHCPREQUEST) return; /* XXX: this is a hack to get the client mac address */ memcpy(client_ethaddr, bp->bp_hwaddr, 6); if ((m = m_get()) == NULL) return; m->m_data += IF_MAXLINKHDR; rbp = (struct bootp_t *)m->m_data; m->m_data += sizeof(struct udpiphdr); memset(rbp, 0, sizeof(struct bootp_t)); if (dhcp_msg_type == DHCPDISCOVER) { if (preq_addr) { bc = request_addr(preq_addr, client_ethaddr); if (bc) { daddr.sin_addr = *preq_addr; } } if (!bc) { new_addr: bc = get_new_addr(&daddr.sin_addr); if (!bc) { dprintf(\"no address 
left\\n\"); return; } } memcpy(bc->macaddr, client_ethaddr, 6); } else if (preq_addr) { bc = request_addr(preq_addr, client_ethaddr); if (bc) { daddr.sin_addr = *preq_addr; memcpy(bc->macaddr, client_ethaddr, 6); } else { daddr.sin_addr.s_addr = 0; } } else { bc = find_addr(&daddr.sin_addr, bp->bp_hwaddr); if (!bc) { /* if never assigned, behaves as if it was already assigned (windows fix because it remembers its address) */ goto new_addr; } } saddr.sin_addr.s_addr = htonl(ntohl(special_addr.s_addr) | CTL_ALIAS); saddr.sin_port = htons(BOOTP_SERVER); daddr.sin_port = htons(BOOTP_CLIENT); rbp->bp_op = BOOTP_REPLY; rbp->bp_xid = bp->bp_xid; rbp->bp_htype = 1; rbp->bp_hlen = 6; memcpy(rbp->bp_hwaddr, bp->bp_hwaddr, 6); rbp->bp_yiaddr = daddr.sin_addr; /* Client IP address */ rbp->bp_siaddr = saddr.sin_addr; /* Server IP address */ q = rbp->bp_vend; memcpy(q, rfc1533_cookie, 4); q += 4; if (bc) { dprintf(\"%s addr=%08x\\n\", (dhcp_msg_type == DHCPDISCOVER) ? \"offered\" : \"ack'ed\", ntohl(daddr.sin_addr.s_addr)); if (dhcp_msg_type == DHCPDISCOVER) { *q++ = RFC2132_MSG_TYPE; *q++ = 1; *q++ = DHCPOFFER; } else /* DHCPREQUEST */ { *q++ = RFC2132_MSG_TYPE; *q++ = 1; *q++ = DHCPACK; } if (bootp_filename) snprintf((char *)rbp->bp_file, sizeof(rbp->bp_file), \"%s\", bootp_filename); *q++ = RFC2132_SRV_ID; *q++ = 4; memcpy(q, &saddr.sin_addr, 4); q += 4; *q++ = RFC1533_NETMASK; *q++ = 4; *q++ = 0xff; *q++ = 0xff; *q++ = 0xff; *q++ = 0x00; if (!slirp_restrict) { *q++ = RFC1533_GATEWAY; *q++ = 4; memcpy(q, &saddr.sin_addr, 4); q += 4; *q++ = RFC1533_DNS; *q++ = 4; dns_addr.s_addr = htonl(ntohl(special_addr.s_addr) | CTL_DNS); memcpy(q, &dns_addr, 4); q += 4; } *q++ = RFC2132_LEASE_TIME; *q++ = 4; val = htonl(LEASE_TIME); memcpy(q, &val, 4); q += 4; if (*slirp_hostname) { val = strlen(slirp_hostname); *q++ = RFC1533_HOSTNAME; *q++ = val; memcpy(q, slirp_hostname, val); q += val; } } else { static const char nak_msg[] = \"requested address not available\"; dprintf(\"nak'ed addr=%08x\\n\", ntohl(preq_addr->s_addr)); *q++ = RFC2132_MSG_TYPE; *q++ = 1; *q++ = DHCPNAK; *q++ = RFC2132_MESSAGE; *q++ = sizeof(nak_msg) - 1; memcpy(q, nak_msg, sizeof(nak_msg) - 1); q += sizeof(nak_msg) - 1; } *q++ = RFC1533_END; daddr.sin_addr.s_addr = 0xffffffffu; m->m_len = sizeof(struct bootp_t) - sizeof(struct ip) - sizeof(struct udphdr); udp_output2(NULL, m, &saddr, &daddr, IPTOS_LOWDELAY); }"} {"target": 0, "idx": 24312, "func": "static void print_features(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...), uint32_t features, const char *prefix) { unsigned int i; for (i = 0; i < ARRAY_SIZE(feature_name); i++) if (feature_name[i] && (features & (1 << i))) { if (prefix) (*cpu_fprintf)(f, \"%s\", prefix); (*cpu_fprintf)(f, \"%s \", feature_name[i]); } }"} {"target": 0, "idx": 24315, "func": "int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size, int *gen_code_size_ptr, uint8_t *pc_start, int flags) { DisasContext dc1, *dc = &dc1; uint8_t *gen_code_end, *pc_ptr; long ret; #ifdef DEBUG_DISAS struct disassemble_info disasm_info; #endif dc->code32 = (flags >> GEN_FLAG_CODE32_SHIFT) & 1; dc->addseg = (flags >> GEN_FLAG_ADDSEG_SHIFT) & 1; dc->f_st = (flags >> GEN_FLAG_ST_SHIFT) & 7; dc->cc_op = CC_OP_DYNAMIC; gen_code_ptr = gen_code_buf; gen_code_end = gen_code_buf + max_code_size - 4096; gen_start(); dc->is_jmp = 0; pc_ptr = pc_start; do { ret = disas_insn(dc, pc_ptr); if (ret == -1) error(\"unknown instruction at PC=0x%x B=%02x %02x\", pc_ptr, pc_ptr[0], pc_ptr[1]); pc_ptr = (void *)ret; } while (!dc->is_jmp && 
gen_code_ptr < gen_code_end); /* we must store the eflags state if it is not already done */ if (dc->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(dc->cc_op); if (dc->is_jmp != 1) { /* we add an additionnal jmp to update the simulated PC */ gen_op_jmp_im(ret); } gen_end(); *gen_code_size_ptr = gen_code_ptr - gen_code_buf; #ifdef DEBUG_DISAS if (loglevel) { uint8_t *pc; int count; INIT_DISASSEMBLE_INFO(disasm_info, logfile, fprintf); #if 0 disasm_info.flavour = bfd_get_flavour (abfd); disasm_info.arch = bfd_get_arch (abfd); disasm_info.mach = bfd_get_mach (abfd); #endif #ifdef WORDS_BIGENDIAN disasm_info.endian = BFD_ENDIAN_BIG; #else disasm_info.endian = BFD_ENDIAN_LITTLE; #endif fprintf(logfile, \"IN:\\n\"); disasm_info.buffer = pc_start; disasm_info.buffer_vma = (unsigned long)pc_start; disasm_info.buffer_length = pc_ptr - pc_start; pc = pc_start; while (pc < pc_ptr) { fprintf(logfile, \"0x%08lx: \", (long)pc); count = print_insn_i386((unsigned long)pc, &disasm_info); fprintf(logfile, \"\\n\"); pc += count; } fprintf(logfile, \"\\n\"); pc = gen_code_buf; disasm_info.buffer = pc; disasm_info.buffer_vma = (unsigned long)pc; disasm_info.buffer_length = *gen_code_size_ptr; fprintf(logfile, \"OUT: [size=%d]\\n\", *gen_code_size_ptr); while (pc < gen_code_ptr) { fprintf(logfile, \"0x%08lx: \", (long)pc); count = print_insn_i386((unsigned long)pc, &disasm_info); fprintf(logfile, \"\\n\"); pc += count; } fprintf(logfile, \"\\n\"); } #endif return 0; }"} {"target": 1, "idx": 24328, "func": "static const char *qobject_input_get_keyval(QObjectInputVisitor *qiv, const char *name, Error **errp) { QObject *qobj; QString *qstr; qobj = qobject_input_get_object(qiv, name, true, errp); if (!qobj) { return NULL; } qstr = qobject_to_qstring(qobj); if (!qstr) { error_setg(errp, QERR_INVALID_PARAMETER_TYPE, full_name(qiv, name), \"string\"); return NULL; } return qstring_get_str(qstr); }"} {"target": 1, "idx": 24331, "func": "static void qio_channel_socket_finalize(Object *obj) { QIOChannelSocket *ioc = QIO_CHANNEL_SOCKET(obj); if (ioc->fd != -1) { #ifdef WIN32 WSAEventSelect(ioc->fd, NULL, 0); #endif closesocket(ioc->fd); ioc->fd = -1;"} {"target": 1, "idx": 24351, "func": "bool ftrace_init(void) { char mount_point[PATH_MAX]; char path[PATH_MAX]; int debugfs_found; int trace_fd = -1; debugfs_found = find_mount(mount_point, \"debugfs\"); if (debugfs_found) { snprintf(path, PATH_MAX, \"%s/tracing/tracing_on\", mount_point); trace_fd = open(path, O_WRONLY); if (trace_fd < 0) { if (errno == EACCES) { trace_marker_fd = open(\"/dev/null\", O_WRONLY); if (trace_marker_fd != -1) { return true; } } perror(\"Could not open ftrace 'tracing_on' file\"); return false; } else { if (write(trace_fd, \"1\", 1) < 0) { perror(\"Could not write to 'tracing_on' file\"); close(trace_fd); return false; } close(trace_fd); } snprintf(path, PATH_MAX, \"%s/tracing/trace_marker\", mount_point); trace_marker_fd = open(path, O_WRONLY); if (trace_marker_fd < 0) { perror(\"Could not open ftrace 'trace_marker' file\"); return false; } } else { fprintf(stderr, \"debugfs is not mounted\\n\"); return false; } return true; }"} {"target": 0, "idx": 24356, "func": "yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes) { const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0]; int i; int A1 = 0xffff<<14, A2= 0xffff<<14; if (uvalpha < 2048) { for (i = 0; i < ((dstW + 1) >> 1); i++) { 
int Y1 = (buf0[i * 2] ) >> 2; int Y2 = (buf0[i * 2 + 1]) >> 2; int U = (ubuf0[i] + (-128 << 11)) >> 2; int V = (vbuf0[i] + (-128 << 11)) >> 2; int R, G, B; Y1 -= c->yuv2rgb_y_offset; Y2 -= c->yuv2rgb_y_offset; Y1 *= c->yuv2rgb_y_coeff; Y2 *= c->yuv2rgb_y_coeff; Y1 += 1 << 13; Y2 += 1 << 13; if (hasAlpha) { A1 = abuf0[i * 2 ] << 11; A2 = abuf0[i * 2 + 1] << 11; A1 += 1 << 13; A2 += 1 << 13; } R = V * c->yuv2rgb_v2r_coeff; G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); if (eightbytes) { output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14); output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14); output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14); output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14); output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14); dest += 8; } else { output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); dest += 6; } } } else { const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1]; int A1 = 0xffff<<14, A2 = 0xffff<<14; for (i = 0; i < ((dstW + 1) >> 1); i++) { int Y1 = (buf0[i * 2] ) >> 2; int Y2 = (buf0[i * 2 + 1]) >> 2; int U = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3; int V = (vbuf0[i] + vbuf1[i] + (-128 << 12)) >> 3; int R, G, B; Y1 -= c->yuv2rgb_y_offset; Y2 -= c->yuv2rgb_y_offset; Y1 *= c->yuv2rgb_y_coeff; Y2 *= c->yuv2rgb_y_coeff; Y1 += 1 << 13; Y2 += 1 << 13; if (hasAlpha) { A1 = abuf0[i * 2 ] << 11; A2 = abuf0[i * 2 + 1] << 11; A1 += 1 << 13; A2 += 1 << 13; } R = V * c->yuv2rgb_v2r_coeff; G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); if (eightbytes) { output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14); output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14); output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14); output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14); output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14); dest += 8; } else { output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); dest += 6; } } } }"} {"target": 1, "idx": 24364, "func": "void visit_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp) { if (!error_is_set(errp)) { v->type_int(v, obj, name, errp); } }"} {"target": 0, "idx": 24368, "func": "static void avc_luma_mid_8w_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4; v16i8 mask0, mask1, mask2; v8i16 hz_out0, hz_out1, hz_out2, hz_out3; v8i16 hz_out4, hz_out5, hz_out6, hz_out7, hz_out8; v8i16 dst0, dst1, dst2, dst3; v16u8 out0, out1; LD_SB3(&luma_mask_arr[0], 16, mask0, mask1, mask2); LD_SB5(src, src_stride, src0, src1, src2, src3, src4); XORI_B5_128_SB(src0, src1, src2, src3, src4); src += (5 * src_stride); hz_out0 = AVC_HORZ_FILTER_SH(src0, src0, mask0, mask1, mask2); hz_out1 = AVC_HORZ_FILTER_SH(src1, src1, mask0, mask1, mask2); hz_out2 = AVC_HORZ_FILTER_SH(src2, src2, mask0, mask1, mask2); hz_out3 = 
AVC_HORZ_FILTER_SH(src3, src3, mask0, mask1, mask2); hz_out4 = AVC_HORZ_FILTER_SH(src4, src4, mask0, mask1, mask2); for (loop_cnt = (height >> 2); loop_cnt--;) { LD_SB4(src, src_stride, src0, src1, src2, src3); XORI_B4_128_SB(src0, src1, src2, src3); src += (4 * src_stride); hz_out5 = AVC_HORZ_FILTER_SH(src0, src0, mask0, mask1, mask2); hz_out6 = AVC_HORZ_FILTER_SH(src1, src1, mask0, mask1, mask2); hz_out7 = AVC_HORZ_FILTER_SH(src2, src2, mask0, mask1, mask2); hz_out8 = AVC_HORZ_FILTER_SH(src3, src3, mask0, mask1, mask2); dst0 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5); dst1 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6); dst2 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out2, hz_out3, hz_out4, hz_out5, hz_out6, hz_out7); dst3 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out3, hz_out4, hz_out5, hz_out6, hz_out7, hz_out8); out0 = PCKEV_XORI128_UB(dst0, dst1); out1 = PCKEV_XORI128_UB(dst2, dst3); ST8x4_UB(out0, out1, dst, dst_stride); dst += (4 * dst_stride); hz_out3 = hz_out7; hz_out1 = hz_out5; hz_out5 = hz_out4; hz_out4 = hz_out8; hz_out2 = hz_out6; hz_out0 = hz_out5; } }"} {"target": 0, "idx": 24371, "func": "static av_cold int pam_encode_close(AVCodecContext *avctx) { av_frame_free(&avctx->coded_frame); return 0; }"} {"target": 0, "idx": 24375, "func": "static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avccontext) { OggVorbisContext *context = avccontext->priv_data; double cfreq; if (avccontext->flags & CODEC_FLAG_QSCALE) { /* variable bitrate */ if (vorbis_encode_setup_vbr(vi, avccontext->channels, avccontext->sample_rate, avccontext->global_quality / (float)FF_QP2LAMBDA / 10.0)) return -1; } else { int minrate = avccontext->rc_min_rate > 0 ? avccontext->rc_min_rate : -1; int maxrate = avccontext->rc_min_rate > 0 ? 
avccontext->rc_max_rate : -1; /* constant bitrate */ if (vorbis_encode_setup_managed(vi, avccontext->channels, avccontext->sample_rate, minrate, avccontext->bit_rate, maxrate)) return -1; /* variable bitrate by estimate, disable slow rate management */ if (minrate == -1 && maxrate == -1) if (vorbis_encode_ctl(vi, OV_ECTL_RATEMANAGE2_SET, NULL)) return -1; } /* cutoff frequency */ if (avccontext->cutoff > 0) { cfreq = avccontext->cutoff / 1000.0; if (vorbis_encode_ctl(vi, OV_ECTL_LOWPASS_SET, &cfreq)) return -1; } if (context->iblock) { vorbis_encode_ctl(vi, OV_ECTL_IBLOCK_SET, &context->iblock); } return vorbis_encode_setup_init(vi); }"} {"target": 1, "idx": 24400, "func": "static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { NellyMoserEncodeContext *s = avctx->priv_data; int ret; if (s->last_frame) return 0; memcpy(s->buf, s->buf + NELLY_SAMPLES, NELLY_BUF_LEN * sizeof(*s->buf)); if (frame) { memcpy(s->buf + NELLY_BUF_LEN, frame->data[0], frame->nb_samples * sizeof(*s->buf)); if (frame->nb_samples < NELLY_SAMPLES) { memset(s->buf + NELLY_BUF_LEN + avctx->frame_size, 0, (NELLY_SAMPLES - frame->nb_samples) * sizeof(*s->buf)); if (frame->nb_samples >= NELLY_BUF_LEN) s->last_frame = 1; } if ((ret = ff_af_queue_add(&s->afq, frame) < 0)) return ret; } else { memset(s->buf + NELLY_BUF_LEN, 0, NELLY_SAMPLES * sizeof(*s->buf)); s->last_frame = 1; } if ((ret = ff_alloc_packet(avpkt, NELLY_BLOCK_LEN))) { av_log(avctx, AV_LOG_ERROR, \"Error getting output packet\\n\"); return ret; } encode_block(s, avpkt->data, avpkt->size); /* Get the next frame pts/duration */ ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts, &avpkt->duration); *got_packet_ptr = 1; return 0; }"} {"target": 1, "idx": 24424, "func": "static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce, float bias) { IndividualChannelStream *ics = &sce->ics; float *in = sce->coeffs; float *out = sce->ret; float *saved = sce->saved; const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024; const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128; float *buf = ac->buf_mdct; float *temp = ac->temp; int i; // imdct if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) { if (ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) av_log(ac->avctx, AV_LOG_WARNING, \"Transition from an ONLY_LONG or LONG_STOP to an EIGHT_SHORT sequence detected. \" \"If you heard an audible artifact, please submit the sample to the FFmpeg developers.\\n\"); for (i = 0; i < 1024; i += 128) ff_imdct_half(&ac->mdct_small, buf + i, in + i); } else ff_imdct_half(&ac->mdct, buf, in); /* window overlapping * NOTE: To simplify the overlapping code, all 'meaningless' short to long * and long to short transitions are considered to be short to short * transitions. This leaves just two cases (long to long and short to short) * with a little special sauce for EIGHT_SHORT_SEQUENCE. 
*/ if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) && (ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) { ac->dsp.vector_fmul_window( out, saved, buf, lwindow_prev, bias, 512); } else { for (i = 0; i < 448; i++) out[i] = saved[i] + bias; if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) { ac->dsp.vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, bias, 64); ac->dsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, bias, 64); ac->dsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, bias, 64); ac->dsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, bias, 64); ac->dsp.vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, bias, 64); memcpy( out + 448 + 4*128, temp, 64 * sizeof(float)); } else { ac->dsp.vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, bias, 64); for (i = 576; i < 1024; i++) out[i] = buf[i-512] + bias; } } // buffer update if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) { for (i = 0; i < 64; i++) saved[i] = temp[64 + i] - bias; ac->dsp.vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 0, 64); ac->dsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 0, 64); ac->dsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 0, 64); memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float)); } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) { memcpy( saved, buf + 512, 448 * sizeof(float)); memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float)); } else { // LONG_STOP or ONLY_LONG memcpy( saved, buf + 512, 512 * sizeof(float)); } }"} {"target": 0, "idx": 24426, "func": "void ff_dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx) { const int high_bit_depth = avctx->bits_per_raw_sample > 8; if (!high_bit_depth) { c->put_pixels_tab[0][0] = put_pixels16_axp_asm; c->put_pixels_tab[0][1] = put_pixels16_x2_axp; c->put_pixels_tab[0][2] = put_pixels16_y2_axp; c->put_pixels_tab[0][3] = put_pixels16_xy2_axp; c->put_no_rnd_pixels_tab[0][0] = put_pixels16_axp_asm; c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_axp; c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_axp; c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_axp; c->avg_pixels_tab[0][0] = avg_pixels16_axp; c->avg_pixels_tab[0][1] = avg_pixels16_x2_axp; c->avg_pixels_tab[0][2] = avg_pixels16_y2_axp; c->avg_pixels_tab[0][3] = avg_pixels16_xy2_axp; c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_axp; c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_axp; c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_axp; c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_axp; c->put_pixels_tab[1][0] = put_pixels_axp_asm; c->put_pixels_tab[1][1] = put_pixels_x2_axp; c->put_pixels_tab[1][2] = put_pixels_y2_axp; c->put_pixels_tab[1][3] = put_pixels_xy2_axp; c->put_no_rnd_pixels_tab[1][0] = put_pixels_axp_asm; c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels_x2_axp; c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels_y2_axp; c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels_xy2_axp; c->avg_pixels_tab[1][0] = avg_pixels_axp; c->avg_pixels_tab[1][1] = avg_pixels_x2_axp; c->avg_pixels_tab[1][2] = avg_pixels_y2_axp; c->avg_pixels_tab[1][3] = avg_pixels_xy2_axp; c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels_axp; c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels_x2_axp; 
c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels_y2_axp; c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels_xy2_axp; c->clear_blocks = clear_blocks_axp; } /* amask clears all bits that correspond to present features. */ if (amask(AMASK_MVI) == 0) { c->put_pixels_clamped = put_pixels_clamped_mvi_asm; c->add_pixels_clamped = add_pixels_clamped_mvi_asm; if (!high_bit_depth) c->get_pixels = get_pixels_mvi; c->diff_pixels = diff_pixels_mvi; c->sad[0] = pix_abs16x16_mvi_asm; c->sad[1] = pix_abs8x8_mvi; c->pix_abs[0][0] = pix_abs16x16_mvi_asm; c->pix_abs[1][0] = pix_abs8x8_mvi; c->pix_abs[0][1] = pix_abs16x16_x2_mvi; c->pix_abs[0][2] = pix_abs16x16_y2_mvi; c->pix_abs[0][3] = pix_abs16x16_xy2_mvi; } put_pixels_clamped_axp_p = c->put_pixels_clamped; add_pixels_clamped_axp_p = c->add_pixels_clamped; if (avctx->bits_per_raw_sample <= 8 && (avctx->idct_algo == FF_IDCT_AUTO || avctx->idct_algo == FF_IDCT_SIMPLEALPHA)) { c->idct_put = ff_simple_idct_put_axp; c->idct_add = ff_simple_idct_add_axp; c->idct = ff_simple_idct_axp; } }"} {"target": 0, "idx": 24433, "func": "static char *check_nan_suffix(char *s) { char *start = s; if (*s++ != '(') return start; while ((*s >= 'a' && *s <= 'z') || (*s >= 'A' && *s <= 'Z') || (*s >= '0' && *s <= '9') || *s == '_') s++; return *s == ')' ? s + 1 : start; }"} {"target": 1, "idx": 24472, "func": "static void diff_bytes_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){ long i; #if !HAVE_FAST_UNALIGNED if((long)src2 & (sizeof(long)-1)){ for(i=0; i+7data; int buf_size = avpkt->size; DPXContext *const s = avctx->priv_data; AVFrame *picture = data; AVFrame *const p = &s->picture; uint8_t *ptr[AV_NUM_DATA_POINTERS]; unsigned int offset; int magic_num, endian; int x, y, i, ret; int w, h, bits_per_color, descriptor, elements, packing, total_size; unsigned int rgbBuffer = 0; int n_datum = 0; if (avpkt->size <= 1634) { av_log(avctx, AV_LOG_ERROR, \"Packet too small for DPX header\\n\"); return AVERROR_INVALIDDATA; } magic_num = AV_RB32(buf); buf += 4; /* Check if the files \"magic number\" is \"SDPX\" which means it uses * big-endian or XPDS which is for little-endian files */ if (magic_num == AV_RL32(\"SDPX\")) { endian = 0; } else if (magic_num == AV_RB32(\"SDPX\")) { endian = 1; } else { av_log(avctx, AV_LOG_ERROR, \"DPX marker not found\\n\"); return AVERROR_INVALIDDATA; } offset = read32(&buf, endian); if (avpkt->size <= offset) { av_log(avctx, AV_LOG_ERROR, \"Invalid data start offset\\n\"); return AVERROR_INVALIDDATA; } // Need to end in 0x304 offset from start of file buf = avpkt->data + 0x304; w = read32(&buf, endian); h = read32(&buf, endian); if ((ret = av_image_check_size(w, h, 0, avctx)) < 0) return ret; if (w != avctx->width || h != avctx->height) avcodec_set_dimensions(avctx, w, h); // Need to end in 0x320 to read the descriptor buf += 20; descriptor = buf[0]; // Need to end in 0x323 to read the bits per color buf += 3; avctx->bits_per_raw_sample = bits_per_color = buf[0]; buf++; packing = *((uint16_t*)buf); buf += 824; avctx->sample_aspect_ratio.num = read32(&buf, endian); avctx->sample_aspect_ratio.den = read32(&buf, endian); if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den, avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 0x10000); else avctx->sample_aspect_ratio = (AVRational){ 0, 1 }; switch (descriptor) { case 51: // RGBA elements = 4; break; case 50: // RGB elements = 3; break; default: av_log(avctx, AV_LOG_ERROR, \"Unsupported 
descriptor %d\\n\", descriptor); return AVERROR_INVALIDDATA; } switch (bits_per_color) { case 8: if (elements == 4) { avctx->pix_fmt = AV_PIX_FMT_RGBA; } else { avctx->pix_fmt = AV_PIX_FMT_RGB24; } total_size = avctx->width * avctx->height * elements; break; case 10: if (!packing) { av_log(avctx, AV_LOG_ERROR, \"Packing to 32bit required\\n\"); return -1; } avctx->pix_fmt = AV_PIX_FMT_GBRP10; total_size = (avctx->width * avctx->height * elements + 2) / 3 * 4; break; case 12: if (!packing) { av_log(avctx, AV_LOG_ERROR, \"Packing to 16bit required\\n\"); return -1; } if (endian) { avctx->pix_fmt = AV_PIX_FMT_GBRP12BE; } else { avctx->pix_fmt = AV_PIX_FMT_GBRP12LE; } total_size = 2 * avctx->width * avctx->height * elements; break; case 16: if (endian) { avctx->pix_fmt = elements == 4 ? AV_PIX_FMT_RGBA64BE : AV_PIX_FMT_RGB48BE; } else { avctx->pix_fmt = elements == 4 ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGB48LE; } total_size = 2 * avctx->width * avctx->height * elements; break; default: av_log(avctx, AV_LOG_ERROR, \"Unsupported color depth : %d\\n\", bits_per_color); return AVERROR_INVALIDDATA; } if (s->picture.data[0]) avctx->release_buffer(avctx, &s->picture); if ((ret = ff_get_buffer(avctx, p)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; } // Move pointer to offset from start of file buf = avpkt->data + offset; for (i=0; idata[i]; if (total_size > avpkt->size) { av_log(avctx, AV_LOG_ERROR, \"Overread buffer. Invalid header?\\n\"); return AVERROR_INVALIDDATA; } switch (bits_per_color) { case 10: for (x = 0; x < avctx->height; x++) { uint16_t *dst[3] = {(uint16_t*)ptr[0], (uint16_t*)ptr[1], (uint16_t*)ptr[2]}; for (y = 0; y < avctx->width; y++) { *dst[2]++ = read10in32(&buf, &rgbBuffer, &n_datum, endian); *dst[0]++ = read10in32(&buf, &rgbBuffer, &n_datum, endian); *dst[1]++ = read10in32(&buf, &rgbBuffer, &n_datum, endian); // For 10 bit, ignore alpha if (elements == 4) read10in32(&buf, &rgbBuffer, &n_datum, endian); } for (i = 0; i < 3; i++) ptr[i] += p->linesize[i]; } break; case 12: for (x = 0; x < avctx->height; x++) { uint16_t *dst[3] = {(uint16_t*)ptr[0], (uint16_t*)ptr[1], (uint16_t*)ptr[2]}; for (y = 0; y < avctx->width; y++) { *dst[2] = *((uint16_t*)buf); *dst[2] = (*dst[2] >> 4) | (*dst[2] << 12); dst[2]++; buf += 2; *dst[0] = *((uint16_t*)buf); *dst[0] = (*dst[0] >> 4) | (*dst[0] << 12); dst[0]++; buf += 2; *dst[1] = *((uint16_t*)buf); *dst[1] = (*dst[1] >> 4) | (*dst[1] << 12); dst[1]++; buf += 2; // For 12 bit, ignore alpha if (elements == 4) buf += 2; } for (i = 0; i < 3; i++) ptr[i] += p->linesize[i]; } break; case 16: elements *= 2; case 8: for (x = 0; x < avctx->height; x++) { memcpy(ptr[0], buf, elements*avctx->width); ptr[0] += p->linesize[0]; buf += elements*avctx->width; } break; } *picture = s->picture; *got_frame = 1; return buf_size; }"} {"target": 1, "idx": 24483, "func": "static void gen_pool16c_insn(DisasContext *ctx, int *is_branch) { int rd = mmreg((ctx->opcode >> 3) & 0x7); int rs = mmreg(ctx->opcode & 0x7); int opc; switch (((ctx->opcode) >> 4) & 0x3f) { case NOT16 + 0: case NOT16 + 1: case NOT16 + 2: case NOT16 + 3: gen_logic(ctx, OPC_NOR, rd, rs, 0); break; case XOR16 + 0: case XOR16 + 1: case XOR16 + 2: case XOR16 + 3: gen_logic(ctx, OPC_XOR, rd, rd, rs); break; case AND16 + 0: case AND16 + 1: case AND16 + 2: case AND16 + 3: gen_logic(ctx, OPC_AND, rd, rd, rs); break; case OR16 + 0: case OR16 + 1: case OR16 + 2: case OR16 + 3: gen_logic(ctx, OPC_OR, rd, rd, rs); break; case LWM16 + 0: case LWM16 + 1: case LWM16 + 2: case LWM16 + 
3: { static const int lwm_convert[] = { 0x11, 0x12, 0x13, 0x14 }; int offset = ZIMM(ctx->opcode, 0, 4); gen_ldst_multiple(ctx, LWM32, lwm_convert[(ctx->opcode >> 4) & 0x3], 29, offset << 2); } break; case SWM16 + 0: case SWM16 + 1: case SWM16 + 2: case SWM16 + 3: { static const int swm_convert[] = { 0x11, 0x12, 0x13, 0x14 }; int offset = ZIMM(ctx->opcode, 0, 4); gen_ldst_multiple(ctx, SWM32, swm_convert[(ctx->opcode >> 4) & 0x3], 29, offset << 2); } break; case JR16 + 0: case JR16 + 1: { int reg = ctx->opcode & 0x1f; gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0); } *is_branch = 1; break; case JRC16 + 0: case JRC16 + 1: { int reg = ctx->opcode & 0x1f; gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0); /* Let normal delay slot handling in our caller take us to the branch target. */ } break; case JALR16 + 0: case JALR16 + 1: opc = OPC_JALR; goto do_jalr; case JALR16S + 0: case JALR16S + 1: opc = OPC_JALRS; do_jalr: { int reg = ctx->opcode & 0x1f; gen_compute_branch(ctx, opc, 2, reg, 31, 0); } *is_branch = 1; break; case MFHI16 + 0: case MFHI16 + 1: gen_HILO(ctx, OPC_MFHI, 0, uMIPS_RS5(ctx->opcode)); break; case MFLO16 + 0: case MFLO16 + 1: gen_HILO(ctx, OPC_MFLO, 0, uMIPS_RS5(ctx->opcode)); break; case BREAK16: generate_exception(ctx, EXCP_BREAK); break; case SDBBP16: /* XXX: not clear which exception should be raised * when in debug mode... */ check_insn(ctx, ISA_MIPS32); if (!(ctx->hflags & MIPS_HFLAG_DM)) { generate_exception(ctx, EXCP_DBp); } else { generate_exception(ctx, EXCP_DBp); } break; case JRADDIUSP + 0: case JRADDIUSP + 1: { int imm = ZIMM(ctx->opcode, 0, 5); gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0); gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2); /* Let normal delay slot handling in our caller take us to the branch target. */ } break; default: generate_exception(ctx, EXCP_RI); break; } }"} {"target": 1, "idx": 24484, "func": "int ff_filter_frame(AVFilterLink *link, AVFrame *frame) { int (*filter_frame)(AVFilterLink *, AVFrame *); AVFilterPad *dst = link->dstpad; AVFrame *out; FF_DPRINTF_START(NULL, filter_frame); ff_dlog_link(NULL, link, 1); if (!(filter_frame = dst->filter_frame)) filter_frame = default_filter_frame; /* copy the frame if needed */ if (dst->needs_writable && !av_frame_is_writable(frame)) { av_log(link->dst, AV_LOG_DEBUG, \"Copying data in avfilter.\\n\"); switch (link->type) { case AVMEDIA_TYPE_VIDEO: out = ff_get_video_buffer(link, link->w, link->h); break; case AVMEDIA_TYPE_AUDIO: out = ff_get_audio_buffer(link, frame->nb_samples); break; default: return AVERROR(EINVAL); } if (!out) { av_frame_free(&frame); return AVERROR(ENOMEM); } av_frame_copy_props(out, frame); switch (link->type) { case AVMEDIA_TYPE_VIDEO: av_image_copy(out->data, out->linesize, frame->data, frame->linesize, frame->format, frame->width, frame->height); break; case AVMEDIA_TYPE_AUDIO: av_samples_copy(out->extended_data, frame->extended_data, 0, 0, frame->nb_samples, av_get_channel_layout_nb_channels(frame->channel_layout), frame->format); break; default: return AVERROR(EINVAL); } av_frame_free(&frame); } else out = frame; return filter_frame(link, out); }"} {"target": 1, "idx": 24485, "func": "static ssize_t vnc_tls_push(gnutls_transport_ptr_t transport, const void *data, size_t len) { VncState *vs = (VncState *)transport; int ret; retry: ret = send(vs->csock, data, len, 0); if (ret < 0) { if (errno == EINTR) goto retry; return -1; } return ret; }"} {"target": 1, "idx": 24486, "func": "static void property_set_enum(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { 
EnumProperty *prop = opaque; int value; visit_type_enum(v, &value, prop->strings, NULL, name, errp); prop->set(obj, value, errp); }"} {"target": 0, "idx": 24488, "func": "void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) { assert(!FRAME_MBAFF(h)); if(!h->h264dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff) { ff_h264_filter_mb(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize); return; } #if CONFIG_SMALL h264_filter_mb_fast_internal(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, h->pixel_shift); #else if(h->pixel_shift){ h264_filter_mb_fast_internal(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, 1); }else{ h264_filter_mb_fast_internal(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, 0); } #endif }"} {"target": 0, "idx": 24501, "func": "void qmp_eject(const char *device, bool has_force, bool force, Error **errp) { Error *local_err = NULL; int rc; if (!has_force) { force = false; } rc = do_open_tray(device, force, &local_err); if (local_err) { error_propagate(errp, local_err); return; } if (rc == EINPROGRESS) { error_setg(errp, \"Device '%s' is locked and force was not specified, \" \"wait for tray to open and try again\", device); return; } qmp_x_blockdev_remove_medium(device, errp); }"} {"target": 0, "idx": 24508, "func": "int check_prot(int prot, int rw, int access_type) { int ret; if (access_type == ACCESS_CODE) { if (prot & PAGE_EXEC) { ret = 0; } else { ret = -2; } } else if (rw) { if (prot & PAGE_WRITE) { ret = 0; } else { ret = -2; } } else { if (prot & PAGE_READ) { ret = 0; } else { ret = -2; } } return ret; }"} {"target": 1, "idx": 24540, "func": "static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, int64_t offset_in_cluster, QEMUIOVector *qiov, uint64_t qiov_offset, uint64_t n_bytes, uint64_t offset) { int ret; VmdkGrainMarker *data = NULL; uLongf buf_len; QEMUIOVector local_qiov; struct iovec iov; int64_t write_offset; int64_t write_end_sector; if (extent->compressed) { void *compressed_data; if (!extent->has_marker) { ret = -EINVAL; goto out; } buf_len = (extent->cluster_sectors << 9) * 2; data = g_malloc(buf_len + sizeof(VmdkGrainMarker)); compressed_data = g_malloc(n_bytes); qemu_iovec_to_buf(qiov, qiov_offset, compressed_data, n_bytes); ret = compress(data->data, &buf_len, compressed_data, n_bytes); g_free(compressed_data); if (ret != Z_OK || buf_len == 0) { ret = -EINVAL; goto out; } data->lba = offset >> BDRV_SECTOR_BITS; data->size = buf_len; n_bytes = buf_len + sizeof(VmdkGrainMarker); iov = (struct iovec) { .iov_base = data, .iov_len = n_bytes, }; qemu_iovec_init_external(&local_qiov, &iov, 1); } else { qemu_iovec_init(&local_qiov, qiov->niov); qemu_iovec_concat(&local_qiov, qiov, qiov_offset, n_bytes); } write_offset = cluster_offset + offset_in_cluster, ret = bdrv_co_pwritev(extent->file, write_offset, n_bytes, &local_qiov, 0); write_end_sector = DIV_ROUND_UP(write_offset + n_bytes, BDRV_SECTOR_SIZE); if (extent->compressed) { extent->next_cluster_sector = write_end_sector; } else { extent->next_cluster_sector = MAX(extent->next_cluster_sector, write_end_sector); } if (ret < 0) { goto out; } ret = 0; out: g_free(data); if (!extent->compressed) { qemu_iovec_destroy(&local_qiov); } return ret; }"} {"target": 1, "idx": 24542, "func": "static int http_server(void) { int server_fd, ret, rtsp_server_fd, delay, delay1; struct pollfd 
poll_table[HTTP_MAX_CONNECTIONS + 2], *poll_entry; HTTPContext *c, *c_next; server_fd = socket_open_listen(&my_http_addr); if (server_fd < 0) return -1; rtsp_server_fd = socket_open_listen(&my_rtsp_addr); if (rtsp_server_fd < 0) return -1; http_log(\"ffserver started.\\n\"); start_children(first_feed); first_http_ctx = NULL; nb_connections = 0; first_http_ctx = NULL; start_multicast(); for(;;) { poll_entry = poll_table; poll_entry->fd = server_fd; poll_entry->events = POLLIN; poll_entry++; poll_entry->fd = rtsp_server_fd; poll_entry->events = POLLIN; poll_entry++; /* wait for events on each HTTP handle */ c = first_http_ctx; delay = 1000; while (c != NULL) { int fd; fd = c->fd; switch(c->state) { case HTTPSTATE_SEND_HEADER: case RTSPSTATE_SEND_REPLY: case RTSPSTATE_SEND_PACKET: c->poll_entry = poll_entry; poll_entry->fd = fd; poll_entry->events = POLLOUT; poll_entry++; break; case HTTPSTATE_SEND_DATA_HEADER: case HTTPSTATE_SEND_DATA: case HTTPSTATE_SEND_DATA_TRAILER: if (!c->is_packetized) { /* for TCP, we output as much as we can (may need to put a limit) */ c->poll_entry = poll_entry; poll_entry->fd = fd; poll_entry->events = POLLOUT; poll_entry++; } else { /* not strictly correct, but currently cannot add more than one fd in poll entry */ delay = 0; } break; case HTTPSTATE_WAIT_REQUEST: case HTTPSTATE_RECEIVE_DATA: case HTTPSTATE_WAIT_FEED: case RTSPSTATE_WAIT_REQUEST: /* need to catch errors */ c->poll_entry = poll_entry; poll_entry->fd = fd; poll_entry->events = POLLIN;/* Maybe this will work */ poll_entry++; break; case HTTPSTATE_WAIT: c->poll_entry = NULL; delay1 = compute_send_delay(c); if (delay1 < delay) delay = delay1; break; case HTTPSTATE_WAIT_SHORT: c->poll_entry = NULL; delay1 = 10; /* one tick wait XXX: 10 ms assumed */ if (delay1 < delay) delay = delay1; break; default: c->poll_entry = NULL; break; } c = c->next; } /* wait for an event on one connection. We poll at least every second to handle timeouts */ do { ret = poll(poll_table, poll_entry - poll_table, delay); } while (ret == -1); cur_time = gettime_ms(); if (need_to_start_children) { need_to_start_children = 0; start_children(first_feed); } /* now handle the events */ for(c = first_http_ctx; c != NULL; c = c_next) { c_next = c->next; if (handle_connection(c) < 0) { /* close and free the connection */ log_connection(c); close_connection(c); } } poll_entry = poll_table; /* new HTTP connection request ? */ if (poll_entry->revents & POLLIN) { new_connection(server_fd, 0); } poll_entry++; /* new RTSP connection request ? 
*/ if (poll_entry->revents & POLLIN) { new_connection(rtsp_server_fd, 1); } } }"} {"target": 0, "idx": 24555, "func": "static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame, const uint8_t *buf, int buf_size, int first_field) { static const uint8_t header_prefix[] = { 0x00, 0x00, 0x02, 0x80, 0x01 }; static const uint8_t header_prefix444[] = { 0x00, 0x00, 0x02, 0x80, 0x02 }; int i, cid, ret; if (buf_size < 0x280) return AVERROR_INVALIDDATA; if (memcmp(buf, header_prefix, 5) && memcmp(buf, header_prefix444, 5)) { av_log(ctx->avctx, AV_LOG_ERROR, \"error in header\\n\"); return AVERROR_INVALIDDATA; } if (buf[5] & 2) { /* interlaced */ ctx->cur_field = buf[5] & 1; frame->interlaced_frame = 1; frame->top_field_first = first_field ^ ctx->cur_field; av_log(ctx->avctx, AV_LOG_DEBUG, \"interlaced %d, cur field %d\\n\", buf[5] & 3, ctx->cur_field); } ctx->height = AV_RB16(buf + 0x18); ctx->width = AV_RB16(buf + 0x1a); av_dlog(ctx->avctx, \"width %d, height %d\\n\", ctx->width, ctx->height); ctx->is_444 = 0; if (buf[0x4] == 0x2) { ctx->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; ctx->avctx->bits_per_raw_sample = 10; if (ctx->bit_depth != 10) { ff_blockdsp_init(&ctx->bdsp, ctx->avctx); ff_idctdsp_init(&ctx->idsp, ctx->avctx); ctx->bit_depth = 10; ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; } ctx->is_444 = 1; } else if (buf[0x21] & 0x40) { ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; ctx->avctx->bits_per_raw_sample = 10; if (ctx->bit_depth != 10) { ff_blockdsp_init(&ctx->bdsp, ctx->avctx); ff_idctdsp_init(&ctx->idsp, ctx->avctx); ctx->bit_depth = 10; ctx->decode_dct_block = dnxhd_decode_dct_block_10; } } else { ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P; ctx->avctx->bits_per_raw_sample = 8; if (ctx->bit_depth != 8) { ff_blockdsp_init(&ctx->bdsp, ctx->avctx); ff_idctdsp_init(&ctx->idsp, ctx->avctx); ctx->bit_depth = 8; ctx->decode_dct_block = dnxhd_decode_dct_block_8; } } cid = AV_RB32(buf + 0x28); av_dlog(ctx->avctx, \"compression id %d\\n\", cid); if ((ret = dnxhd_init_vlc(ctx, cid)) < 0) return ret; if (buf_size < ctx->cid_table->coding_unit_size) { av_log(ctx->avctx, AV_LOG_ERROR, \"incorrect frame size\\n\"); return AVERROR_INVALIDDATA; } ctx->mb_width = ctx->width >> 4; ctx->mb_height = buf[0x16d]; av_dlog(ctx->avctx, \"mb width %d, mb height %d\\n\", ctx->mb_width, ctx->mb_height); if ((ctx->height + 15) >> 4 == ctx->mb_height && frame->interlaced_frame) ctx->height <<= 1; if (ctx->mb_height > 68 || (ctx->mb_height << frame->interlaced_frame) > (ctx->height + 15) >> 4) { av_log(ctx->avctx, AV_LOG_ERROR, \"mb height too big: %d\\n\", ctx->mb_height); return AVERROR_INVALIDDATA; } for (i = 0; i < ctx->mb_height; i++) { ctx->mb_scan_index[i] = AV_RB32(buf + 0x170 + (i << 2)); av_dlog(ctx->avctx, \"mb scan index %d\\n\", ctx->mb_scan_index[i]); if (buf_size < ctx->mb_scan_index[i] + 0x280LL) { av_log(ctx->avctx, AV_LOG_ERROR, \"invalid mb scan index\\n\"); return AVERROR_INVALIDDATA; } } return 0; }"} {"target": 1, "idx": 24559, "func": "static int get_monitor_def(target_long *pval, const char *name) { const MonitorDef *md; void *ptr; for(md = monitor_defs; md->name != NULL; md++) { if (compare_cmd(name, md->name)) { if (md->get_value) { *pval = md->get_value(md, md->offset); } else { CPUState *env = mon_get_cpu(); if (!env) return -2; ptr = (uint8_t *)env + md->offset; switch(md->type) { case MD_I32: *pval = *(int32_t *)ptr; break; case MD_TLONG: *pval = *(target_long *)ptr; break; default: *pval = 0; break; } } return 0; } } return -1; }"} {"target": 1, "idx": 24578, "func": "static 
int do_sigframe_return_v2(CPUState *env, target_ulong frame_addr, struct target_ucontext_v2 *uc) { sigset_t host_set; abi_ulong *regspace; target_to_host_sigset(&host_set, &uc->tuc_sigmask); sigprocmask(SIG_SETMASK, &host_set, NULL); if (restore_sigcontext(env, &uc->tuc_mcontext)) if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) #if 0 /* Send SIGTRAP if we're single-stepping */ if (ptrace_cancel_bpt(current)) send_sig(SIGTRAP, current, 1); #endif return 0;"} {"target": 0, "idx": 24588, "func": "FWCfgState *pc_memory_init(MemoryRegion *system_memory, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, ram_addr_t below_4g_mem_size, ram_addr_t above_4g_mem_size, MemoryRegion *rom_memory, MemoryRegion **ram_memory, PcGuestInfo *guest_info) { int linux_boot, i; MemoryRegion *ram, *option_rom_mr; MemoryRegion *ram_below_4g, *ram_above_4g; FWCfgState *fw_cfg; linux_boot = (kernel_filename != NULL); /* Allocate RAM. We allocate it as a single memory region and use * aliases to address portions of it, mostly for backwards compatibility * with older qemus that used qemu_ram_alloc(). */ ram = g_malloc(sizeof(*ram)); memory_region_init_ram(ram, NULL, \"pc.ram\", below_4g_mem_size + above_4g_mem_size); vmstate_register_ram_global(ram); *ram_memory = ram; ram_below_4g = g_malloc(sizeof(*ram_below_4g)); memory_region_init_alias(ram_below_4g, NULL, \"ram-below-4g\", ram, 0, below_4g_mem_size); memory_region_add_subregion(system_memory, 0, ram_below_4g); e820_add_entry(0, below_4g_mem_size, E820_RAM); if (above_4g_mem_size > 0) { ram_above_4g = g_malloc(sizeof(*ram_above_4g)); memory_region_init_alias(ram_above_4g, NULL, \"ram-above-4g\", ram, below_4g_mem_size, above_4g_mem_size); memory_region_add_subregion(system_memory, 0x100000000ULL, ram_above_4g); e820_add_entry(0x100000000ULL, above_4g_mem_size, E820_RAM); } /* Initialize PC system firmware */ pc_system_firmware_init(rom_memory, guest_info->isapc_ram_fw); option_rom_mr = g_malloc(sizeof(*option_rom_mr)); memory_region_init_ram(option_rom_mr, NULL, \"pc.rom\", PC_ROM_SIZE); vmstate_register_ram_global(option_rom_mr); memory_region_add_subregion_overlap(rom_memory, PC_ROM_MIN_VGA, option_rom_mr, 1); fw_cfg = bochs_bios_init(); rom_set_fw(fw_cfg); if (linux_boot) { load_linux(fw_cfg, kernel_filename, initrd_filename, kernel_cmdline, below_4g_mem_size); } for (i = 0; i < nb_option_roms; i++) { rom_add_option(option_rom[i].name, option_rom[i].bootindex); } guest_info->fw_cfg = fw_cfg; return fw_cfg; }"} {"target": 0, "idx": 24603, "func": "static void press_key(VncState *vs, int keysym) { int keycode = keysym2scancode(vs->vd->kbd_layout, keysym) & SCANCODE_KEYMASK; qemu_input_event_send_key_number(vs->vd->dcl.con, keycode, true); qemu_input_event_send_key_delay(0); qemu_input_event_send_key_number(vs->vd->dcl.con, keycode, false); qemu_input_event_send_key_delay(0); }"} {"target": 0, "idx": 24606, "func": "static void s390_virtio_device_sync(VirtIOS390Device *dev) { VirtIOS390Bus *bus = DO_UPCAST(VirtIOS390Bus, bus, dev->qdev.parent_bus); ram_addr_t cur_offs; uint8_t num_vq; int i; virtio_reset(dev->vdev); /* Sync dev space */ stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_TYPE, dev->vdev->device_id); stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, s390_virtio_device_num_vq(dev)); stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_FEATURE_LEN, dev->feat_len); stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG_LEN, dev->vdev->config_len); num_vq = 
s390_virtio_device_num_vq(dev); stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, num_vq); /* Sync virtqueues */ for (i = 0; i < num_vq; i++) { ram_addr_t vq = (dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG) + (i * VIRTIO_VQCONFIG_LEN); ram_addr_t vring; vring = s390_virtio_next_ring(bus); virtio_queue_set_addr(dev->vdev, i, vring); virtio_queue_set_vector(dev->vdev, i, i); stq_phys(vq + VIRTIO_VQCONFIG_OFFS_ADDRESS, vring); stw_phys(vq + VIRTIO_VQCONFIG_OFFS_NUM, virtio_queue_get_num(dev->vdev, i)); } cur_offs = dev->dev_offs; cur_offs += VIRTIO_DEV_OFFS_CONFIG; cur_offs += num_vq * VIRTIO_VQCONFIG_LEN; /* Sync feature bitmap */ if (dev->vdev->get_features) { stl_phys(cur_offs, dev->vdev->get_features(dev->vdev)); } dev->feat_offs = cur_offs + dev->feat_len; cur_offs += dev->feat_len * 2; /* Sync config space */ if (dev->vdev->get_config) { dev->vdev->get_config(dev->vdev, dev->vdev->config); } cpu_physical_memory_rw(cur_offs, dev->vdev->config, dev->vdev->config_len, 1); cur_offs += dev->vdev->config_len; }"} {"target": 1, "idx": 24621, "func": "static void FUNCC(pred8x8_left_dc)(uint8_t *_src, int stride){ int i; int dc0, dc2; pixel4 dc0splat, dc2splat; pixel *src = (pixel*)_src; stride /= sizeof(pixel); dc0=dc2=0; for(i=0;i<4; i++){ dc0+= src[-1+i*stride]; dc2+= src[-1+(i+4)*stride]; } dc0splat = PIXEL_SPLAT_X4((dc0 + 2)>>2); dc2splat = PIXEL_SPLAT_X4((dc2 + 2)>>2); for(i=0; i<4; i++){ ((pixel4*)(src+i*stride))[0]= ((pixel4*)(src+i*stride))[1]= dc0splat; } for(i=4; i<8; i++){ ((pixel4*)(src+i*stride))[0]= ((pixel4*)(src+i*stride))[1]= dc2splat; } }"} {"target": 1, "idx": 24625, "func": "static void alloc_and_copy(uint8_t **poutbuf, int *poutbuf_size, const uint8_t *sps_pps, uint32_t sps_pps_size, const uint8_t *in, uint32_t in_size) { uint32_t offset = *poutbuf_size; uint8_t nal_header_size = offset ? 3 : 4; *poutbuf_size += sps_pps_size+in_size+nal_header_size; *poutbuf = av_realloc(*poutbuf, *poutbuf_size); if (sps_pps) memcpy(*poutbuf+offset, sps_pps, sps_pps_size); memcpy(*poutbuf+sps_pps_size+nal_header_size+offset, in, in_size); if (!offset) AV_WB32(*poutbuf+sps_pps_size, 1); else { (*poutbuf+offset)[0] = (*poutbuf+offset)[1] = 0; (*poutbuf+offset)[2] = 1; } }"} {"target": 1, "idx": 24626, "func": "void aio_set_event_notifier(AioContext *ctx, EventNotifier *e, EventNotifierHandler *io_notify, AioFlushEventNotifierHandler *io_flush) { AioHandler *node; QLIST_FOREACH(node, &ctx->aio_handlers, node) { if (node->e == e && !node->deleted) { break; } } /* Are we deleting the fd handler? */ if (!io_notify) { if (node) { g_source_remove_poll(&ctx->source, &node->pfd); /* If the lock is held, just mark the node as deleted */ if (ctx->walking_handlers) { node->deleted = 1; node->pfd.revents = 0; } else { /* Otherwise, delete it for real. We can't just mark it as * deleted because deleted nodes are only cleaned up after * releasing the walking_handlers lock. 
*/ QLIST_REMOVE(node, node); g_free(node); } } } else { if (node == NULL) { /* Alloc and insert if it's not already there */ node = g_malloc0(sizeof(AioHandler)); node->e = e; node->pfd.fd = (uintptr_t)event_notifier_get_handle(e); node->pfd.events = G_IO_IN; QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node); g_source_add_poll(&ctx->source, &node->pfd); } /* Update handler with latest information */ node->io_notify = io_notify; node->io_flush = io_flush; } aio_notify(ctx); }"} {"target": 0, "idx": 24629, "func": "int float_near_ulp(float a, float b, unsigned max_ulp) { union av_intfloat32 x, y; x.f = a; y.f = b; if (is_negative(x) != is_negative(y)) { // handle -0.0 == +0.0 return a == b; } if (abs(x.i - y.i) <= max_ulp) return 1; return 0; }"} {"target": 1, "idx": 24631, "func": "static av_cold int roq_dpcm_encode_init(AVCodecContext *avctx) { ROQDPCMContext *context = avctx->priv_data; if (avctx->channels > 2) { av_log(avctx, AV_LOG_ERROR, \"Audio must be mono or stereo\\n\"); return -1; } if (avctx->sample_rate != 22050) { av_log(avctx, AV_LOG_ERROR, \"Audio must be 22050 Hz\\n\"); return -1; } if (avctx->sample_fmt != AV_SAMPLE_FMT_S16) { av_log(avctx, AV_LOG_ERROR, \"Audio must be signed 16-bit\\n\"); return -1; } avctx->frame_size = ROQ_FIRST_FRAME_SIZE; context->lastSample[0] = context->lastSample[1] = 0; avctx->coded_frame= avcodec_alloc_frame(); return 0; }"} {"target": 1, "idx": 24648, "func": "static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset, sPAPRMachineState *spapr) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); int index = ppc_get_vcpu_dt_id(cpu); uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), 0xffffffff, 0xffffffff}; uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() : SPAPR_TIMEBASE_FREQ; uint32_t cpufreq = kvm_enabled() ? 
kvmppc_get_clockfreq() : 1000000000; uint32_t page_sizes_prop[64]; size_t page_sizes_prop_size; uint32_t vcpus_per_socket = smp_threads * smp_cores; uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)}; int compat_smt = MIN(smp_threads, ppc_compat_max_threads(cpu)); sPAPRDRConnector *drc; sPAPRDRConnectorClass *drck; int drc_index; uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ]; int i; drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index); if (drc) { drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); drc_index = drck->get_index(drc); _FDT((fdt_setprop_cell(fdt, offset, \"ibm,my-drc-index\", drc_index))); } _FDT((fdt_setprop_cell(fdt, offset, \"reg\", index))); _FDT((fdt_setprop_string(fdt, offset, \"device_type\", \"cpu\"))); _FDT((fdt_setprop_cell(fdt, offset, \"cpu-version\", env->spr[SPR_PVR]))); _FDT((fdt_setprop_cell(fdt, offset, \"d-cache-block-size\", env->dcache_line_size))); _FDT((fdt_setprop_cell(fdt, offset, \"d-cache-line-size\", env->dcache_line_size))); _FDT((fdt_setprop_cell(fdt, offset, \"i-cache-block-size\", env->icache_line_size))); _FDT((fdt_setprop_cell(fdt, offset, \"i-cache-line-size\", env->icache_line_size))); if (pcc->l1_dcache_size) { _FDT((fdt_setprop_cell(fdt, offset, \"d-cache-size\", pcc->l1_dcache_size))); } else { error_report(\"Warning: Unknown L1 dcache size for cpu\"); } if (pcc->l1_icache_size) { _FDT((fdt_setprop_cell(fdt, offset, \"i-cache-size\", pcc->l1_icache_size))); } else { error_report(\"Warning: Unknown L1 icache size for cpu\"); } _FDT((fdt_setprop_cell(fdt, offset, \"timebase-frequency\", tbfreq))); _FDT((fdt_setprop_cell(fdt, offset, \"clock-frequency\", cpufreq))); _FDT((fdt_setprop_cell(fdt, offset, \"slb-size\", env->slb_nr))); _FDT((fdt_setprop_cell(fdt, offset, \"ibm,slb-size\", env->slb_nr))); _FDT((fdt_setprop_string(fdt, offset, \"status\", \"okay\"))); _FDT((fdt_setprop(fdt, offset, \"64-bit\", NULL, 0))); if (env->spr_cb[SPR_PURR].oea_read) { _FDT((fdt_setprop(fdt, offset, \"ibm,purr\", NULL, 0))); } if (env->mmu_model & POWERPC_MMU_1TSEG) { _FDT((fdt_setprop(fdt, offset, \"ibm,processor-segment-sizes\", segs, sizeof(segs)))); } /* Advertise VMX/VSX (vector extensions) if available * 0 / no property == no vector extensions * 1 == VMX / Altivec available * 2 == VSX available */ if (env->insns_flags & PPC_ALTIVEC) { uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 
2 : 1; _FDT((fdt_setprop_cell(fdt, offset, \"ibm,vmx\", vmx))); } /* Advertise DFP (Decimal Floating Point) if available * 0 / no property == no DFP * 1 == DFP available */ if (env->insns_flags2 & PPC2_DFP) { _FDT((fdt_setprop_cell(fdt, offset, \"ibm,dfp\", 1))); } page_sizes_prop_size = ppc_create_page_sizes_prop(env, page_sizes_prop, sizeof(page_sizes_prop)); if (page_sizes_prop_size) { _FDT((fdt_setprop(fdt, offset, \"ibm,segment-page-sizes\", page_sizes_prop, page_sizes_prop_size))); } spapr_populate_pa_features(env, fdt, offset); _FDT((fdt_setprop_cell(fdt, offset, \"ibm,chip-id\", cs->cpu_index / vcpus_per_socket))); _FDT((fdt_setprop(fdt, offset, \"ibm,pft-size\", pft_size_prop, sizeof(pft_size_prop)))); _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cs)); _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt)); if (pcc->radix_page_info) { for (i = 0; i < pcc->radix_page_info->count; i++) { radix_AP_encodings[i] = cpu_to_be32(pcc->radix_page_info->entries[i]); } _FDT((fdt_setprop(fdt, offset, \"ibm,processor-radix-AP-encodings\", radix_AP_encodings, pcc->radix_page_info->count * sizeof(radix_AP_encodings[0])))); } }"} {"target": 1, "idx": 24655, "func": "static void test_function(const TestStruct test_sample) { int ret, i; void **output_data = NULL; AVAudioFifo *afifo = av_audio_fifo_alloc(test_sample.format, test_sample.nb_ch, test_sample.nb_samples_pch); if (!afifo) { ERROR(\"ERROR: av_audio_fifo_alloc returned NULL!\"); } ret = write_samples_to_audio_fifo(afifo, test_sample, test_sample.nb_samples_pch, 0); if (ret < 0){ ERROR(\"ERROR: av_audio_fifo_write failed!\"); } printf(\"written: %d\\n\", ret); ret = write_samples_to_audio_fifo(afifo, test_sample, test_sample.nb_samples_pch, 0); if (ret < 0){ ERROR(\"ERROR: av_audio_fifo_write failed!\"); } printf(\"written: %d\\n\", ret); printf(\"remaining samples in audio_fifo: %d\\n\\n\", av_audio_fifo_size(afifo)); ret = read_samples_from_audio_fifo(afifo, &output_data, test_sample.nb_samples_pch); if (ret < 0){ ERROR(\"ERROR: av_audio_fifo_read failed!\"); } printf(\"read: %d\\n\", ret); print_audio_bytes(&test_sample, output_data, ret); printf(\"remaining samples in audio_fifo: %d\\n\\n\", av_audio_fifo_size(afifo)); /* test av_audio_fifo_peek */ ret = av_audio_fifo_peek(afifo, output_data, afifo->nb_samples); if (ret < 0){ ERROR(\"ERROR: av_audio_fifo_peek failed!\"); } printf(\"peek:\\n\"); print_audio_bytes(&test_sample, output_data, ret); printf(\"\\n\"); /* test av_audio_fifo_peek_at */ printf(\"peek_at:\\n\"); for (i = 0; i < afifo->nb_samples; ++i){ ret = av_audio_fifo_peek_at(afifo, output_data, 1, i); if (ret < 0){ ERROR(\"ERROR: av_audio_fifo_peek_at failed!\"); } printf(\"%d:\\n\", i); print_audio_bytes(&test_sample, output_data, ret); } printf(\"\\n\"); /* test av_audio_fifo_drain */ ret = av_audio_fifo_drain(afifo, afifo->nb_samples); if (ret < 0){ ERROR(\"ERROR: av_audio_fifo_drain failed!\"); } if (afifo->nb_samples){ ERROR(\"drain failed to flush all samples in audio_fifo!\"); } /* deallocate */ for (i = 0; i < afifo->nb_buffers; ++i){ av_freep(&output_data[i]); } av_freep(&output_data); av_audio_fifo_free(afifo); }"} {"target": 1, "idx": 24668, "func": "static void ff_wmv2_idct_put_c(uint8_t *dest, int line_size, DCTELEM *block) { ff_wmv2_idct_c(block); put_pixels_clamped_c(block, dest, line_size); }"} {"target": 0, "idx": 24677, "func": "static void decode_interframe_v4(AVCodecContext *avctx, uint8_t *src, uint32_t size) { Hnm4VideoContext *hnm = avctx->priv_data; GetByteContext gb; uint32_t writeoffset = 0, count, 
left, offset; uint8_t tag, previous, backline, backward, swap; bytestream2_init(&gb, src, size); while (bytestream2_tell(&gb) < size) { count = bytestream2_peek_byte(&gb) & 0x1F; if (count == 0) { tag = bytestream2_get_byte(&gb) & 0xE0; tag = tag >> 5; if (tag == 0) { hnm->current[writeoffset++] = bytestream2_get_byte(&gb); hnm->current[writeoffset++] = bytestream2_get_byte(&gb); } else if (tag == 1) { writeoffset += bytestream2_get_byte(&gb) * 2; } else if (tag == 2) { count = bytestream2_get_le16(&gb); count *= 2; writeoffset += count; } else if (tag == 3) { count = bytestream2_get_byte(&gb) * 2; while (count > 0) { hnm->current[writeoffset++] = bytestream2_peek_byte(&gb); count--; } bytestream2_skip(&gb, 1); } else { break; } } else { previous = bytestream2_peek_byte(&gb) & 0x20; backline = bytestream2_peek_byte(&gb) & 0x40; backward = bytestream2_peek_byte(&gb) & 0x80; bytestream2_skip(&gb, 1); swap = bytestream2_peek_byte(&gb) & 0x01; offset = bytestream2_get_le16(&gb); offset = (offset >> 1) & 0x7FFF; offset = writeoffset + (offset * 2) - 0x8000; left = count; if (!backward && offset + count >= hnm->width * hnm->height) { av_log(avctx, AV_LOG_ERROR, \"Attempting to read out of bounds\"); break; } else if (backward && offset >= hnm->width * hnm->height) { av_log(avctx, AV_LOG_ERROR, \"Attempting to read out of bounds\"); break; } else if (writeoffset + count >= hnm->width * hnm->height) { av_log(avctx, AV_LOG_ERROR, \"Attempting to write out of bounds\"); break; } if (previous) { while (left > 0) { if (backline) { hnm->current[writeoffset++] = hnm->previous[offset - (2 * hnm->width) + 1]; hnm->current[writeoffset++] = hnm->previous[offset++]; offset++; } else { hnm->current[writeoffset++] = hnm->previous[offset++]; hnm->current[writeoffset++] = hnm->previous[offset++]; } if (backward) offset -= 4; left--; } } else { while (left > 0) { if (backline) { hnm->current[writeoffset++] = hnm->current[offset - (2 * hnm->width) + 1]; hnm->current[writeoffset++] = hnm->current[offset++]; offset++; } else { hnm->current[writeoffset++] = hnm->current[offset++]; hnm->current[writeoffset++] = hnm->current[offset++]; } if (backward) offset -= 4; left--; } } if (swap) { left = count; writeoffset -= count * 2; while (left > 0) { swap = hnm->current[writeoffset]; hnm->current[writeoffset] = hnm->current[writeoffset + 1]; hnm->current[writeoffset + 1] = swap; left--; writeoffset += 2; } } } } }"} {"target": 0, "idx": 24720, "func": "static void setup_frame(int sig, struct emulated_sigaction *ka, target_sigset_t *set, CPUX86State *env) { struct sigframe *frame; int err = 0; frame = get_sigframe(ka, env, sizeof(*frame)); #if 0 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; #endif err |= __put_user((/*current->exec_domain && current->exec_domain->signal_invmap && sig < 32 ? current->exec_domain->signal_invmap[sig] : */ sig), &frame->sig); if (err) goto give_sigsegv; setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0]); if (err) goto give_sigsegv; if (TARGET_NSIG_WORDS > 1) { err |= __copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask)); } if (err) goto give_sigsegv; /* Set up to return from userspace. If provided, use a stub already in userspace. 
*/ if (ka->sa.sa_flags & TARGET_SA_RESTORER) { err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); } else { err |= __put_user(frame->retcode, &frame->pretcode); /* This is popl %eax ; movl $,%eax ; int $0x80 */ err |= __put_user(0xb858, (short *)(frame->retcode+0)); err |= __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2)); err |= __put_user(0x80cd, (short *)(frame->retcode+6)); } if (err) goto give_sigsegv; /* Set up registers for signal handler */ env->regs[R_ESP] = (unsigned long) frame; env->eip = (unsigned long) ka->sa._sa_handler; cpu_x86_load_seg(env, R_DS, __USER_DS); cpu_x86_load_seg(env, R_ES, __USER_DS); cpu_x86_load_seg(env, R_SS, __USER_DS); cpu_x86_load_seg(env, R_CS, __USER_CS); env->eflags &= ~TF_MASK; return; give_sigsegv: if (sig == TARGET_SIGSEGV) ka->sa._sa_handler = TARGET_SIG_DFL; force_sig(TARGET_SIGSEGV /* , current */); }"} {"target": 1, "idx": 24730, "func": "static void write_target_close(BlockDriverState *bs) { BDRVVVFATState* s = *((BDRVVVFATState**) bs->opaque); bdrv_delete(s->qcow); free(s->qcow_filename); }"} {"target": 1, "idx": 24732, "func": "static int aiff_read_packet(AVFormatContext *s, AVPacket *pkt) { AVStream *st = s->streams[0]; AIFFInputContext *aiff = s->priv_data; int64_t max_size; int res, size; /* calculate size of remaining data */ max_size = aiff->data_end - avio_tell(s->pb); if (max_size <= 0) return AVERROR_EOF; /* Now for that packet */ switch (st->codecpar->codec_id) { case AV_CODEC_ID_ADPCM_IMA_QT: case AV_CODEC_ID_GSM: case AV_CODEC_ID_QDM2: case AV_CODEC_ID_QCELP: size = st->codecpar->block_align; break; default: size = st->codecpar->block_align ? (MAX_SIZE / st->codecpar->block_align) * st->codecpar->block_align : MAX_SIZE; size = FFMIN(max_size, size); res = av_get_packet(s->pb, pkt, size); if (res < 0) return res; if (size >= st->codecpar->block_align) pkt->flags &= ~AV_PKT_FLAG_CORRUPT; /* Only one stream in an AIFF file */ pkt->stream_index = 0; pkt->duration = (res / st->codecpar->block_align) * aiff->block_duration; return 0;"} {"target": 0, "idx": 24742, "func": "uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3) { CPU_DoubleU farg1, farg2, farg3; farg1.ll = arg1; farg2.ll = arg2; farg3.ll = arg3; if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d) || float64_is_signaling_nan(farg3.d))) { /* sNaN operation */ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { /* Multiplication of zero by infinity */ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ); } else { #if USE_PRECISE_EMULATION #ifdef FLOAT128 /* This is the way the PowerPC specification defines it */ float128 ft0_128, ft1_128; ft0_128 = float64_to_float128(farg1.d, &env->fp_status); ft1_128 = float64_to_float128(farg2.d, &env->fp_status); ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) && float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) { /* Magnitude subtraction of infinities */ farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI); } else { ft1_128 = float64_to_float128(farg3.d, &env->fp_status); ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status); farg1.d = float128_to_float64(ft0_128, &env->fp_status); } #else /* This is OK on x86 hosts */ farg1.d = (farg1.d * farg2.d) - farg3.d; #endif #else farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status); 
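/* note (editorial, inferred from the surrounding code): this non-precise fallback performs the multiply and the subtract as two separately rounded softfloat operations, unlike the precise-emulation branch above which uses a float128 intermediate when available */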
farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status); #endif if (likely(!float64_is_nan(farg1.d))) farg1.d = float64_chs(farg1.d); } return farg1.ll; }"} {"target": 0, "idx": 24744, "func": "static void dec_modu(DisasContext *dc) { int l1; LOG_DIS(\"modu r%d, r%d, %d\\n\", dc->r2, dc->r0, dc->r1); if (!(dc->features & LM32_FEATURE_DIVIDE)) { qemu_log_mask(LOG_GUEST_ERROR, \"hardware divider is not available\\n\"); t_gen_illegal_insn(dc); return; } l1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1); tcg_gen_movi_tl(cpu_pc, dc->pc); t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO); gen_set_label(l1); tcg_gen_remu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); }"} {"target": 0, "idx": 24748, "func": "static void omap_l4_io_writeb(void *opaque, target_phys_addr_t addr, uint32_t value) { unsigned int i = (addr - OMAP2_L4_BASE) >> TARGET_PAGE_BITS; return omap_l4_io_writeb_fn[i](omap_l4_io_opaque[i], addr, value); }"} {"target": 0, "idx": 24754, "func": "static void v9fs_create_post_lstat(V9fsState *s, V9fsCreateState *vs, int err) { if (err == 0 || errno != ENOENT) { err = -errno; goto out; } if (vs->perm & P9_STAT_MODE_DIR) { err = v9fs_do_mkdir(s, vs); v9fs_create_post_mkdir(s, vs, err); } else if (vs->perm & P9_STAT_MODE_SYMLINK) { err = v9fs_do_symlink(s, vs); v9fs_create_post_perms(s, vs, err); } else if (vs->perm & P9_STAT_MODE_LINK) { int32_t nfid = atoi(vs->extension.data); V9fsFidState *nfidp = lookup_fid(s, nfid); if (nfidp == NULL) { err = -errno; v9fs_post_create(s, vs, err); } err = v9fs_do_link(s, &nfidp->path, &vs->fullname); v9fs_create_post_perms(s, vs, err); } else if (vs->perm & P9_STAT_MODE_DEVICE) { char ctype; uint32_t major, minor; mode_t nmode = 0; if (sscanf(vs->extension.data, \"%c %u %u\", &ctype, &major, &minor) != 3) { err = -errno; v9fs_post_create(s, vs, err); } switch (ctype) { case 'c': nmode = S_IFCHR; break; case 'b': nmode = S_IFBLK; break; default: err = -EIO; v9fs_post_create(s, vs, err); } nmode |= vs->perm & 0777; err = v9fs_do_mknod(s, vs, nmode, makedev(major, minor)); v9fs_create_post_perms(s, vs, err); } else if (vs->perm & P9_STAT_MODE_NAMED_PIPE) { err = v9fs_do_mknod(s, vs, S_IFIFO | (vs->perm & 0777), 0); v9fs_post_create(s, vs, err); } else if (vs->perm & P9_STAT_MODE_SOCKET) { err = v9fs_do_mksock(s, &vs->fullname); v9fs_create_post_mksock(s, vs, err); } else { vs->fidp->fd = v9fs_do_open2(s, vs); v9fs_create_post_open2(s, vs, err); } return; out: v9fs_post_create(s, vs, err); }"} {"target": 0, "idx": 24756, "func": "void pal_init (CPUState *env) { }"} {"target": 0, "idx": 24759, "func": "target_ulong spapr_rtas_call(sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { if ((token >= TOKEN_BASE) && ((token - TOKEN_BASE) < TOKEN_MAX)) { struct rtas_call *call = rtas_table + (token - TOKEN_BASE); if (call->fn) { call->fn(spapr, token, nargs, args, nret, rets); return H_SUCCESS; } } /* HACK: Some Linux early debug code uses RTAS display-character, * but assumes the token value is 0xa (which it is on some real * machines) without looking it up in the device tree. 
This * special case makes this work */ if (token == 0xa) { rtas_display_character(spapr, 0xa, nargs, args, nret, rets); return H_SUCCESS; } hcall_dprintf(\"Unknown RTAS token 0x%x\\n\", token); rtas_st(rets, 0, -3); return H_PARAMETER; }"} {"target": 0, "idx": 24761, "func": "static void apply_tns_filter(float *out, float *in, int order, int direction, float *tns_coefs, int ltp_used, int w, int filt, int start_i, int len) { int i, j, inc, start = start_i; float tmp[TNS_MAX_ORDER+1]; if (direction) { inc = -1; start = (start + len) - 1; } else { inc = 1; } if (!ltp_used) { /* AR filter */ for (i = 0; i < len; i++, start += inc) out[i] = in[start]; for (j = 1; j <= FFMIN(i, order); j++) out[i] += tns_coefs[j]*in[start - j*inc]; } else { /* MA filter */ for (i = 0; i < len; i++, start += inc) { tmp[0] = out[i] = in[start]; for (j = 1; j <= FFMIN(i, order); j++) out[i] += tmp[j]*tns_coefs[j]; for (j = order; j > 0; j--) tmp[j] = tmp[j - 1]; } } }"} {"target": 0, "idx": 24768, "func": "static int mkv_write_ass_blocks(AVFormatContext *s, AVIOContext *pb, AVPacket *pkt) { MatroskaMuxContext *mkv = s->priv_data; int i, layer = 0, max_duration = 0, size, line_size, data_size = pkt->size; uint8_t *start, *end, *data = pkt->data; ebml_master blockgroup; char buffer[2048]; while (data_size) { int duration = ass_get_duration(data); max_duration = FFMAX(duration, max_duration); end = memchr(data, '\\n', data_size); size = line_size = end ? end - data + 1 : data_size; size -= end ? (end[-1] == '\\r') + 1 : 0; start = data; for (i = 0; i < 3; i++, start++) if (!(start = memchr(start, ',', size - (start - data)))) return max_duration; size -= start - data; sscanf(data, \"Dialogue: %d,\", &layer); i = snprintf(buffer, sizeof(buffer), \"%\" PRId64 \",%d,\", s->streams[pkt->stream_index]->nb_frames, layer); size = FFMIN(i + size, sizeof(buffer)); memcpy(buffer + i, start, size - i); av_log(s, AV_LOG_DEBUG, \"Writing block at offset %\" PRIu64 \", size %d, \" \"pts %\" PRId64 \", duration %d\\n\", avio_tell(pb), size, pkt->pts, duration); blockgroup = start_ebml_master(pb, MATROSKA_ID_BLOCKGROUP, mkv_blockgroup_size(size)); put_ebml_id(pb, MATROSKA_ID_BLOCK); put_ebml_num(pb, size + 4, 0); // this assumes stream_index is less than 126 avio_w8(pb, 0x80 | (pkt->stream_index + 1)); avio_wb16(pb, pkt->pts - mkv->cluster_pts); avio_w8(pb, 0); avio_write(pb, buffer, size); put_ebml_uint(pb, MATROSKA_ID_BLOCKDURATION, duration); end_ebml_master(pb, blockgroup); data += line_size; data_size -= line_size; } return max_duration; }"} {"target": 1, "idx": 24771, "func": "static int ram_block_enable_notify(const char *block_name, void *host_addr, ram_addr_t offset, ram_addr_t length, void *opaque) { MigrationIncomingState *mis = opaque; struct uffdio_register reg_struct; reg_struct.range.start = (uintptr_t)host_addr; reg_struct.range.len = length; reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING; /* Now tell our userfault_fd that it's responsible for this area */ if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, ®_struct)) { error_report(\"%s userfault register: %s\", __func__, strerror(errno)); return 0;"} {"target": 0, "idx": 24787, "func": "static void draw_bar_yuv(AVFrame *out, const float *h, const float *rcp_h, const ColorFloat *c, int bar_h) { int x, y, yh, w = out->width; float mul, ht, rcp_bar_h = 1.0f / bar_h; uint8_t *vy = out->data[0], *vu = out->data[1], *vv = out->data[2]; uint8_t *lpy, *lpu, *lpv; int lsy = out->linesize[0], lsu = out->linesize[1], lsv = out->linesize[2]; int fmt = out->format; for (y = 0; y < 
bar_h; y += 2) { yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y; ht = (bar_h - y) * rcp_bar_h; lpy = vy + y * lsy; lpu = vu + yh * lsu; lpv = vv + yh * lsv; for (x = 0; x < w; x += 2) { if (h[x] <= ht) { *lpy++ = 16; *lpu++ = 128; *lpv++ = 128; } else { mul = (h[x] - ht) * rcp_h[x]; *lpy++ = mul * c[x].yuv.y + (1.0f - mul) * 16.0f + 0.5f; *lpu++ = mul * c[x].yuv.u + (1.0f - mul) * 128.0f + 0.5f; *lpv++ = mul * c[x].yuv.v + (1.0f - mul) * 128.0f + 0.5f; } /* u and v are skipped on yuv422p and yuv420p */ if (fmt == AV_PIX_FMT_YUV444P) { if (h[x+1] <= ht) { *lpy++ = 16; *lpu++ = 128; *lpv++ = 128; } else { mul = (h[x+1] - ht) * rcp_h[x+1]; *lpy++ = mul * c[x+1].yuv.y + (1.0f - mul) * 16.0f + 0.5f; *lpu++ = mul * c[x+1].yuv.u + (1.0f - mul) * 128.0f + 0.5f; *lpv++ = mul * c[x+1].yuv.v + (1.0f - mul) * 128.0f + 0.5f; } } else { if (h[x+1] <= ht) { *lpy++ = 16; } else { mul = (h[x+1] - ht) * rcp_h[x+1]; *lpy++ = mul * c[x+1].yuv.y + (1.0f - mul) * 16.0f + 0.5f; } } } ht = (bar_h - (y+1)) * rcp_bar_h; lpy = vy + (y+1) * lsy; lpu = vu + (y+1) * lsu; lpv = vv + (y+1) * lsv; for (x = 0; x < w; x += 2) { /* u and v are skipped on yuv420p */ if (fmt != AV_PIX_FMT_YUV420P) { if (h[x] <= ht) { *lpy++ = 16; *lpu++ = 128; *lpv++ = 128; } else { mul = (h[x] - ht) * rcp_h[x]; *lpy++ = mul * c[x].yuv.y + (1.0f - mul) * 16.0f + 0.5f; *lpu++ = mul * c[x].yuv.u + (1.0f - mul) * 128.0f + 0.5f; *lpv++ = mul * c[x].yuv.v + (1.0f - mul) * 128.0f + 0.5f; } } else { if (h[x] <= ht) { *lpy++ = 16; } else { mul = (h[x] - ht) * rcp_h[x]; *lpy++ = mul * c[x].yuv.y + (1.0f - mul) * 16.0f + 0.5f; } } /* u and v are skipped on yuv422p and yuv420p */ if (out->format == AV_PIX_FMT_YUV444P) { if (h[x+1] <= ht) { *lpy++ = 16; *lpu++ = 128; *lpv++ = 128; } else { mul = (h[x+1] - ht) * rcp_h[x+1]; *lpy++ = mul * c[x+1].yuv.y + (1.0f - mul) * 16.0f + 0.5f; *lpu++ = mul * c[x+1].yuv.u + (1.0f - mul) * 128.0f + 0.5f; *lpv++ = mul * c[x+1].yuv.v + (1.0f - mul) * 128.0f + 0.5f; } } else { if (h[x+1] <= ht) { *lpy++ = 16; } else { mul = (h[x+1] - ht) * rcp_h[x+1]; *lpy++ = mul * c[x+1].yuv.y + (1.0f - mul) * 16.0f + 0.5f; } } } } }"} {"target": 1, "idx": 24794, "func": "static int qemu_chr_open_win_file_out(QemuOpts *opts, CharDriverState **_chr) { const char *file_out = qemu_opt_get(opts, \"path\"); HANDLE fd_out; fd_out = CreateFile(file_out, GENERIC_WRITE, FILE_SHARE_READ, NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if (fd_out == INVALID_HANDLE_VALUE) { return -EIO; } return qemu_chr_open_win_file(fd_out, _chr); }"} {"target": 1, "idx": 24818, "func": "int ff_h264_fill_default_ref_list(H264Context *h, H264SliceContext *sl) { int i, len; if (sl->slice_type_nos == AV_PICTURE_TYPE_B) { H264Picture *sorted[32]; int cur_poc, list; int lens[2]; if (FIELD_PICTURE(h)) cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD]; else cur_poc = h->cur_pic_ptr->poc; for (list = 0; list < 2; list++) { len = add_sorted(sorted, h->short_ref, h->short_ref_count, cur_poc, 1 ^ list); len += add_sorted(sorted + len, h->short_ref, h->short_ref_count, cur_poc, 0 ^ list); av_assert0(len <= 32); len = build_def_list(h->default_ref_list[list], FF_ARRAY_ELEMS(h->default_ref_list[0]), sorted, len, 0, h->picture_structure); len += build_def_list(h->default_ref_list[list] + len, FF_ARRAY_ELEMS(h->default_ref_list[0]) - len, h->long_ref, 16, 1, h->picture_structure); av_assert0(len <= 32); if (len < sl->ref_count[list]) memset(&h->default_ref_list[list][len], 0, sizeof(H264Ref) * (sl->ref_count[list] - len)); lens[list] = len; } if 
(lens[0] == lens[1] && lens[1] > 1) { for (i = 0; i < lens[0] && h->default_ref_list[0][i].parent->f.buf[0]->buffer == h->default_ref_list[1][i].parent->f.buf[0]->buffer; i++); if (i == lens[0]) { FFSWAP(H264Ref, h->default_ref_list[1][0], h->default_ref_list[1][1]); } } } else { len = build_def_list(h->default_ref_list[0], FF_ARRAY_ELEMS(h->default_ref_list[0]), h->short_ref, h->short_ref_count, 0, h->picture_structure); len += build_def_list(h->default_ref_list[0] + len, FF_ARRAY_ELEMS(h->default_ref_list[0]) - len, h-> long_ref, 16, 1, h->picture_structure); av_assert0(len <= 32); if (len < sl->ref_count[0]) memset(&h->default_ref_list[0][len], 0, sizeof(H264Ref) * (sl->ref_count[0] - len)); } #ifdef TRACE for (i = 0; i < sl->ref_count[0]; i++) { tprintf(h->avctx, \"List0: %s fn:%d 0x%p\\n\", (h->default_ref_list[0][i].parent->long_ref ? \"LT\" : \"ST\"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].parent->f.data[0]); } if (sl->slice_type_nos == AV_PICTURE_TYPE_B) { for (i = 0; i < sl->ref_count[1]; i++) { tprintf(h->avctx, \"List1: %s fn:%d 0x%p\\n\", (h->default_ref_list[1][i].parent->long_ref ? \"LT\" : \"ST\"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].parent->f.data[0]); } } #endif return 0; }"} {"target": 1, "idx": 24824, "func": "static void vtd_init(IntelIOMMUState *s) { X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); memset(s->csr, 0, DMAR_REG_SIZE); memset(s->wmask, 0, DMAR_REG_SIZE); memset(s->w1cmask, 0, DMAR_REG_SIZE); memset(s->womask, 0, DMAR_REG_SIZE); s->iommu_ops.translate = vtd_iommu_translate; s->iommu_ops.notify_started = vtd_iommu_notify_started; s->root = 0; s->root_extended = false; s->dmar_enabled = false; s->iq_head = 0; s->iq_tail = 0; s->iq = 0; s->iq_size = 0; s->qi_enabled = false; s->iq_last_desc_type = VTD_INV_DESC_NONE; s->next_frcd_reg = 0; s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW | VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS; s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO; if (x86_iommu->intr_supported) { s->ecap |= VTD_ECAP_IR | VTD_ECAP_EIM; } vtd_reset_context_cache(s); vtd_reset_iotlb(s); /* Define registers with default values and bit semantics */ vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0); vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0); vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0); vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0); vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL); vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0); vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0); vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0); vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL); /* Advanced Fault Logging not supported */ vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL); vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0); vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0); vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0); /* Treated as RsvdZ when EIM in ECAP_REG is not supported * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0); */ vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0); /* Treated as RO for implementations that PLMR and PHMR fields reported * as Clear in the CAP_REG. 
* vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0); */ vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0); vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0); vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0); vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0); vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL); vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0); vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0); vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0); /* Treadted as RsvdZ when EIM in ECAP_REG is not supported */ vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0); /* IOTLB registers */ vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0Xb003ffff00000000ULL, 0); vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0); vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL); /* Fault Recording Registers, 128-bit */ vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0); vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL); /* * Interrupt remapping registers. */ vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0); }"} {"target": 0, "idx": 24829, "func": "const uint8_t *ff_h263_find_resync_marker(const uint8_t *av_restrict p, const uint8_t *av_restrict end) { av_assert2(p < end); end-=2; p++; for(;pactive_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) | ((arg1 & 0x1) << 23); break; case 26: if (arg1 & 0x007c0000) return; env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c); break; case 28: if (arg1 & 0x007c0000) return; env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) | ((arg1 & 0x4) << 22); break; case 31: if (arg1 & 0x007c0000) return; env->active_fpu.fcr31 = arg1; break; default: return; } /* set rounding mode */ restore_rounding_mode(env); /* set flush-to-zero mode */ restore_flush_mode(env); set_float_exception_flags(0, &env->active_fpu.fp_status); if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31)) do_raise_exception(env, EXCP_FPE, GETPC()); }"} {"target": 0, "idx": 24853, "func": "static void cpu_set_irq(void *opaque, int irq, int level) { CPUState *env = opaque; if (level) { CPUIRQ_DPRINTF(\"Raise CPU IRQ %d\\n\", irq); env->halted = 0; env->pil_in |= 1 << irq; cpu_check_irqs(env); } else { CPUIRQ_DPRINTF(\"Lower CPU IRQ %d\\n\", irq); env->pil_in &= ~(1 << irq); cpu_check_irqs(env); } }"} {"target": 0, "idx": 24855, "func": "static int get_stream_blocksize(BlockDriverState *bdrv) { uint8_t cmd[6]; uint8_t buf[12]; uint8_t sensebuf[8]; sg_io_hdr_t io_header; int ret; memset(cmd, 0, sizeof(cmd)); memset(buf, 0, sizeof(buf)); cmd[0] = MODE_SENSE; cmd[4] = sizeof(buf); memset(&io_header, 0, sizeof(io_header)); io_header.interface_id = 'S'; io_header.dxfer_direction = SG_DXFER_FROM_DEV; io_header.dxfer_len = sizeof(buf); io_header.dxferp = buf; io_header.cmdp = cmd; io_header.cmd_len = sizeof(cmd); io_header.mx_sb_len = sizeof(sensebuf); io_header.sbp = sensebuf; io_header.timeout = 6000; /* XXX */ ret = bdrv_ioctl(bdrv, SG_IO, &io_header); if (ret < 0 || io_header.driver_status || io_header.host_status) { return -1; } return (buf[9] << 16) | (buf[10] << 8) | buf[11]; }"} {"target": 0, "idx": 24880, "func": "int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset, int64_t size) { int ret = qcow2_check_metadata_overlap(bs, ign, offset, size); if (ret < 0) { return ret; } else if (ret > 0) { int metadata_ol_bitnr = ffs(ret) - 1; assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR); 
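/* note (editorial, inferred from the surrounding code): a positive return value is a bitmask of overlapping metadata types; ffs() selects the lowest set bit so its name can be reported in the corruption message below */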
qcow2_signal_corruption(bs, true, offset, size, \"Preventing invalid \" \"write on metadata (overlaps with %s)\", metadata_ol_names[metadata_ol_bitnr]); return -EIO; } return 0; }"} {"target": 0, "idx": 24881, "func": "int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) { int i, ret; Picture *pic; s->mb_skipped = 0; if (!ff_thread_can_start_frame(avctx)) { av_log(avctx, AV_LOG_ERROR, \"Attempt to start a frame outside SETUP state\\n\"); return -1; } /* mark & release old frames */ if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.buf[0]) { ff_mpeg_unref_picture(s, s->last_picture_ptr); } /* release forgotten pictures */ /* if (mpeg124/h263) */ for (i = 0; i < MAX_PICTURE_COUNT; i++) { if (&s->picture[i] != s->last_picture_ptr && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference && !s->picture[i].needs_realloc) { if (!(avctx->active_thread_type & FF_THREAD_FRAME)) av_log(avctx, AV_LOG_ERROR, \"releasing zombie picture\\n\"); ff_mpeg_unref_picture(s, &s->picture[i]); } } ff_mpeg_unref_picture(s, &s->current_picture); release_unused_pictures(s); if (s->current_picture_ptr && s->current_picture_ptr->f.buf[0] == NULL) { // we already have a unused image // (maybe it was set before reading the header) pic = s->current_picture_ptr; } else { i = ff_find_unused_picture(s, 0); if (i < 0) { av_log(s->avctx, AV_LOG_ERROR, \"no frame buffer available\\n\"); return i; } pic = &s->picture[i]; } pic->reference = 0; if (!s->droppable) { if (s->pict_type != AV_PICTURE_TYPE_B) pic->reference = 3; } pic->f.coded_picture_number = s->coded_picture_number++; if (ff_alloc_picture(s, pic, 0) < 0) return -1; s->current_picture_ptr = pic; // FIXME use only the vars from current_pic s->current_picture_ptr->f.top_field_first = s->top_field_first; if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { if (s->picture_structure != PICT_FRAME) s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field; } s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence; s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME; s->current_picture_ptr->f.pict_type = s->pict_type; // if (s->flags && CODEC_FLAG_QSCALE) // s->current_picture_ptr->quality = s->new_picture_ptr->quality; s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; if ((ret = ff_mpeg_ref_picture(s, &s->current_picture, s->current_picture_ptr)) < 0) return ret; if (s->pict_type != AV_PICTURE_TYPE_B) { s->last_picture_ptr = s->next_picture_ptr; if (!s->droppable) s->next_picture_ptr = s->current_picture_ptr; } av_dlog(s->avctx, \"L%p N%p C%p L%p N%p C%p type:%d drop:%d\\n\", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr, s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL, s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL, s->current_picture_ptr ? 
s->current_picture_ptr->f.data[0] : NULL, s->pict_type, s->droppable); if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.buf[0] == NULL) && (s->pict_type != AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)) { int h_chroma_shift, v_chroma_shift; av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift); if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f.buf[0]) av_log(avctx, AV_LOG_DEBUG, \"allocating dummy last picture for B frame\\n\"); else if (s->pict_type != AV_PICTURE_TYPE_I) av_log(avctx, AV_LOG_ERROR, \"warning: first frame is no keyframe\\n\"); else if (s->picture_structure != PICT_FRAME) av_log(avctx, AV_LOG_DEBUG, \"allocate dummy last picture for field based first keyframe\\n\"); /* Allocate a dummy frame */ i = ff_find_unused_picture(s, 0); if (i < 0) { av_log(s->avctx, AV_LOG_ERROR, \"no frame buffer available\\n\"); return i; } s->last_picture_ptr = &s->picture[i]; s->last_picture_ptr->reference = 3; s->last_picture_ptr->f.key_frame = 0; s->last_picture_ptr->f.pict_type = AV_PICTURE_TYPE_P; if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) { s->last_picture_ptr = NULL; return -1; } memset(s->last_picture_ptr->f.data[0], 0x80, avctx->height * s->last_picture_ptr->f.linesize[0]); memset(s->last_picture_ptr->f.data[1], 0x80, (avctx->height >> v_chroma_shift) * s->last_picture_ptr->f.linesize[1]); memset(s->last_picture_ptr->f.data[2], 0x80, (avctx->height >> v_chroma_shift) * s->last_picture_ptr->f.linesize[2]); if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){ for(i=0; i<avctx->height; i++) memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width); } ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1); } if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.buf[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) { /* Allocate a dummy frame */ i = ff_find_unused_picture(s, 0); if (i < 0) { av_log(s->avctx, AV_LOG_ERROR, \"no frame buffer available\\n\"); return i; } s->next_picture_ptr = &s->picture[i]; s->next_picture_ptr->reference = 3; s->next_picture_ptr->f.key_frame = 0; s->next_picture_ptr->f.pict_type = AV_PICTURE_TYPE_P; if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) { s->next_picture_ptr = NULL; return -1; } ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1); } #if 0 // BUFREF-FIXME memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data)); memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data)); #endif if (s->last_picture_ptr) { ff_mpeg_unref_picture(s, &s->last_picture); if (s->last_picture_ptr->f.buf[0] && (ret = ff_mpeg_ref_picture(s, &s->last_picture, s->last_picture_ptr)) < 0) return ret; } if (s->next_picture_ptr) { ff_mpeg_unref_picture(s, &s->next_picture); if (s->next_picture_ptr->f.buf[0] && (ret = ff_mpeg_ref_picture(s, &s->next_picture, s->next_picture_ptr)) < 0) return ret; } av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.buf[0])); if (s->picture_structure!= PICT_FRAME) { int i; for (i = 0; i < 4; i++) { if (s->picture_structure == PICT_BOTTOM_FIELD) { s->current_picture.f.data[i] += s->current_picture.f.linesize[i]; } s->current_picture.f.linesize[i] *= 2; s->last_picture.f.linesize[i] *= 2; s->next_picture.f.linesize[i] *= 2; } } s->err_recognition = avctx->err_recognition; /* set dequantizer,
we can't do it during init as * it might change for mpeg4 and we can't do it in the header * decode as init is not called for mpeg4 there yet */ if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra; s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter; } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) { s->dct_unquantize_intra = s->dct_unquantize_h263_intra; s->dct_unquantize_inter = s->dct_unquantize_h263_inter; } else { s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra; s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter; } return 0; }"} {"target": 1, "idx": 24895, "func": "static void co_write_request(void *opaque) { BDRVSheepdogState *s = opaque; qemu_coroutine_enter(s->co_send, NULL); }"} {"target": 1, "idx": 24896, "func": "static int decode_frame_ilbm(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { IffContext *s = avctx->priv_data; const uint8_t *buf = avpkt->size >= 2 ? avpkt->data + AV_RB16(avpkt->data) : NULL; const int buf_size = avpkt->size >= 2 ? avpkt->size - AV_RB16(avpkt->data) : 0; const uint8_t *buf_end = buf+buf_size; int y, plane, res; if ((res = extract_header(avctx, avpkt)) < 0) return res; if (s->init) { if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, \"reget_buffer() failed\\n\"); return res; } } else if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return res; } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != PIX_FMT_GRAY8) { if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0) return res; } s->init = 1; if (avctx->codec_tag == MKTAG('A','C','B','M')) { if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) { memset(s->frame.data[0], 0, avctx->height * s->frame.linesize[0]); for (plane = 0; plane < s->bpp; plane++) { for(y = 0; y < avctx->height && buf < buf_end; y++ ) { uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ]; decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane); buf += s->planesize; } } } else if (s->ham) { // HAM to PIX_FMT_BGR32 memset(s->frame.data[0], 0, avctx->height * s->frame.linesize[0]); for(y = 0; y < avctx->height; y++) { uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]]; memset(s->ham_buf, 0, s->planesize * 8); for (plane = 0; plane < s->bpp; plane++) { const uint8_t * start = buf + (plane * avctx->height + y) * s->planesize; if (start >= buf_end) break; decodeplane8(s->ham_buf, start, FFMIN(s->planesize, buf_end - start), plane); } decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize); } } } else if (avctx->codec_tag == MKTAG('D','E','E','P')) { int raw_width = avctx->width * (av_get_bits_per_pixel(&av_pix_fmt_descriptors[avctx->pix_fmt]) >> 3); int x; for(y = 0; y < avctx->height && buf < buf_end; y++ ) { uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]]; memcpy(row, buf, FFMIN(raw_width, buf_end - buf)); buf += raw_width; if (avctx->pix_fmt == PIX_FMT_BGR32) { for(x = 0; x < avctx->width; x++) row[4 * x + 3] = row[4 * x + 3] & 0xF0 | (row[4 * x + 3] >> 4); } } } else if (avctx->codec_tag == MKTAG('I','L','B','M')) { // interleaved if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) { for(y = 0; y < avctx->height; y++ ) { uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ]; memset(row, 0, avctx->width); for (plane = 0; plane < s->bpp && buf < buf_end; plane++) { decodeplane8(row, buf, 
FFMIN(s->planesize, buf_end - buf), plane); buf += s->planesize; } } } else if (s->ham) { // HAM to PIX_FMT_BGR32 for (y = 0; y < avctx->height; y++) { uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ]; memset(s->ham_buf, 0, s->planesize * 8); for (plane = 0; plane < s->bpp && buf < buf_end; plane++) { decodeplane8(s->ham_buf, buf, FFMIN(s->planesize, buf_end - buf), plane); buf += s->planesize; } decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize); } } else { // PIX_FMT_BGR32 for(y = 0; y < avctx->height; y++ ) { uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]]; memset(row, 0, avctx->width << 2); for (plane = 0; plane < s->bpp && buf < buf_end; plane++) { decodeplane32((uint32_t *) row, buf, FFMIN(s->planesize, buf_end - buf), plane); buf += s->planesize; } } } } else if (avctx->codec_tag == MKTAG('P','B','M',' ')) { // IFF-PBM if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) { for(y = 0; y < avctx->height; y++ ) { uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]]; memcpy(row, buf, FFMIN(avctx->width, buf_end - buf)); buf += avctx->width + (avctx->width % 2); // padding if odd } } else if (s->ham) { // IFF-PBM: HAM to PIX_FMT_BGR32 for (y = 0; y < avctx->height; y++) { uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ]; memcpy(s->ham_buf, buf, FFMIN(avctx->width, buf_end - buf)); buf += avctx->width + (avctx->width & 1); // padding if odd decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize); } } else { av_log_ask_for_sample(avctx, \"unsupported bpp\\n\"); return AVERROR_INVALIDDATA; } } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; }"} {"target": 1, "idx": 24897, "func": "udp_listen(Slirp *slirp, u_int32_t haddr, u_int hport, u_int32_t laddr, u_int lport, int flags) { struct sockaddr_in addr; struct socket *so; socklen_t addrlen = sizeof(struct sockaddr_in), opt = 1; so = socreate(slirp); if (!so) { return NULL; } so->s = socket(AF_INET,SOCK_DGRAM,0); so->so_expire = curtime + SO_EXPIRE; insque(so, &slirp->udb); addr.sin_family = AF_INET; addr.sin_addr.s_addr = haddr; addr.sin_port = hport; if (bind(so->s,(struct sockaddr *)&addr, addrlen) < 0) { udp_detach(so); return NULL; } setsockopt(so->s,SOL_SOCKET,SO_REUSEADDR,(char *)&opt,sizeof(int)); getsockname(so->s,(struct sockaddr *)&addr,&addrlen); so->so_fport = addr.sin_port; if (addr.sin_addr.s_addr == 0 || addr.sin_addr.s_addr == loopback_addr.s_addr) { so->so_faddr = slirp->vhost_addr; } else { so->so_faddr = addr.sin_addr; } so->so_lport = lport; so->so_laddr.s_addr = laddr; if (flags != SS_FACCEPTONCE) so->so_expire = 0; so->so_state &= SS_PERSISTENT_MASK; so->so_state |= SS_ISFCONNECTED | flags; return so; }"} {"target": 1, "idx": 24899, "func": "static uint32_t grlib_irqmp_readl(void *opaque, target_phys_addr_t addr) { IRQMP *irqmp = opaque; IRQMPState *state; assert(irqmp != NULL); state = irqmp->state; assert(state != NULL); addr &= 0xff; /* global registers */ switch (addr) { case LEVEL_OFFSET: return state->level; case PENDING_OFFSET: return state->pending; case FORCE0_OFFSET: /* This register is an \"alias\" for the force register of CPU 0 */ return state->force[0]; case CLEAR_OFFSET: case MP_STATUS_OFFSET: /* Always read as 0 */ return 0; case BROADCAST_OFFSET: return state->broadcast; default: break; } /* mask registers */ if (addr >= MASK_OFFSET && addr < FORCE_OFFSET) { int cpu = (addr - MASK_OFFSET) / 4; assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); return state->mask[cpu]; } /* force 
registers */ if (addr >= FORCE_OFFSET && addr < EXTENDED_OFFSET) { int cpu = (addr - FORCE_OFFSET) / 4; assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); return state->force[cpu]; } /* extended (not supported) */ if (addr >= EXTENDED_OFFSET && addr < IRQMP_REG_SIZE) { int cpu = (addr - EXTENDED_OFFSET) / 4; assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); return state->extended[cpu]; } trace_grlib_irqmp_unknown_register(\"read\", addr); return 0; }"} {"target": 1, "idx": 24902, "func": "void qemu_sglist_destroy(QEMUSGList *qsg) { g_free(qsg->sg); }"} {"target": 1, "idx": 24921, "func": "static int virtio_9p_device_init(VirtIODevice *vdev) { V9fsState *s = VIRTIO_9P(vdev); int i, len; struct stat stat; FsDriverEntry *fse; V9fsPath path; virtio_init(VIRTIO_DEVICE(s), \"virtio-9p\", VIRTIO_ID_9P, sizeof(struct virtio_9p_config) + MAX_TAG_LEN); /* initialize pdu allocator */ QLIST_INIT(&s->free_list); QLIST_INIT(&s->active_list); for (i = 0; i < (MAX_REQ - 1); i++) { QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next); } s->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output); fse = get_fsdev_fsentry(s->fsconf.fsdev_id); if (!fse) { /* We don't have a fsdev identified by fsdev_id */ fprintf(stderr, \"Virtio-9p device couldn't find fsdev with the \" \"id = %s\\n\", s->fsconf.fsdev_id ? s->fsconf.fsdev_id : \"NULL\"); goto out; } if (!s->fsconf.tag) { /* we haven't specified a mount_tag */ fprintf(stderr, \"fsdev with id %s needs mount_tag arguments\\n\", s->fsconf.fsdev_id); goto out; } s->ctx.export_flags = fse->export_flags; s->ctx.fs_root = g_strdup(fse->path); s->ctx.exops.get_st_gen = NULL; len = strlen(s->fsconf.tag); if (len > MAX_TAG_LEN - 1) { fprintf(stderr, \"mount tag '%s' (%d bytes) is longer than \" \"maximum (%d bytes)\", s->fsconf.tag, len, MAX_TAG_LEN - 1); goto out; } s->tag = g_strdup(s->fsconf.tag); s->ctx.uid = -1; s->ops = fse->ops; s->config_size = sizeof(struct virtio_9p_config) + len; s->fid_list = NULL; qemu_co_rwlock_init(&s->rename_lock); if (s->ops->init(&s->ctx) < 0) { fprintf(stderr, \"Virtio-9p Failed to initialize fs-driver with id:%s\" \" and export path:%s\\n\", s->fsconf.fsdev_id, s->ctx.fs_root); goto out; } if (v9fs_init_worker_threads() < 0) { fprintf(stderr, \"worker thread initialization failed\\n\"); goto out; } /* * Check details of export path, We need to use fs driver * call back to do that. Since we are in the init path, we don't * use co-routines here. */ v9fs_path_init(&path); if (s->ops->name_to_path(&s->ctx, NULL, \"/\", &path) < 0) { fprintf(stderr, \"error in converting name to path %s\", strerror(errno)); goto out; } if (s->ops->lstat(&s->ctx, &path, &stat)) { fprintf(stderr, \"share path %s does not exist\\n\", fse->path); goto out; } else if (!S_ISDIR(stat.st_mode)) { fprintf(stderr, \"share path %s is not a directory\\n\", fse->path); goto out; } v9fs_path_free(&path); return 0; out: g_free(s->ctx.fs_root); g_free(s->tag); virtio_cleanup(vdev); v9fs_path_free(&path); return -1; }"} {"target": 0, "idx": 24933, "func": "static int v9fs_request(V9fsProxy *proxy, int type, void *response, const char *fmt, ...) 
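/* note (editorial, inferred from the body below): marshals the type-specific arguments after a proxy header, writes the request to proxy->sockfd, then receives either a passed fd, a status code, or a response payload depending on the request type */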
{ dev_t rdev; va_list ap; int size = 0; int retval = 0; uint64_t offset; ProxyHeader header = { 0, 0}; struct timespec spec[2]; int flags, mode, uid, gid; V9fsString *name, *value; V9fsString *path, *oldpath; struct iovec *iovec = NULL, *reply = NULL; qemu_mutex_lock(&proxy->mutex); if (proxy->sockfd == -1) { retval = -EIO; goto err_out; } iovec = &proxy->out_iovec; reply = &proxy->in_iovec; va_start(ap, fmt); switch (type) { case T_OPEN: path = va_arg(ap, V9fsString *); flags = va_arg(ap, int); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"sd\", path, flags); if (retval > 0) { header.size = retval; header.type = T_OPEN; } break; case T_CREATE: path = va_arg(ap, V9fsString *); flags = va_arg(ap, int); mode = va_arg(ap, int); uid = va_arg(ap, int); gid = va_arg(ap, int); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"sdddd\", path, flags, mode, uid, gid); if (retval > 0) { header.size = retval; header.type = T_CREATE; } break; case T_MKNOD: path = va_arg(ap, V9fsString *); mode = va_arg(ap, int); rdev = va_arg(ap, long int); uid = va_arg(ap, int); gid = va_arg(ap, int); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"ddsdq\", uid, gid, path, mode, rdev); if (retval > 0) { header.size = retval; header.type = T_MKNOD; } break; case T_MKDIR: path = va_arg(ap, V9fsString *); mode = va_arg(ap, int); uid = va_arg(ap, int); gid = va_arg(ap, int); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"ddsd\", uid, gid, path, mode); if (retval > 0) { header.size = retval; header.type = T_MKDIR; } break; case T_SYMLINK: oldpath = va_arg(ap, V9fsString *); path = va_arg(ap, V9fsString *); uid = va_arg(ap, int); gid = va_arg(ap, int); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"ddss\", uid, gid, oldpath, path); if (retval > 0) { header.size = retval; header.type = T_SYMLINK; } break; case T_LINK: oldpath = va_arg(ap, V9fsString *); path = va_arg(ap, V9fsString *); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"ss\", oldpath, path); if (retval > 0) { header.size = retval; header.type = T_LINK; } break; case T_LSTAT: path = va_arg(ap, V9fsString *); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"s\", path); if (retval > 0) { header.size = retval; header.type = T_LSTAT; } break; case T_READLINK: path = va_arg(ap, V9fsString *); size = va_arg(ap, int); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"sd\", path, size); if (retval > 0) { header.size = retval; header.type = T_READLINK; } break; case T_STATFS: path = va_arg(ap, V9fsString *); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"s\", path); if (retval > 0) { header.size = retval; header.type = T_STATFS; } break; case T_CHMOD: path = va_arg(ap, V9fsString *); mode = va_arg(ap, int); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"sd\", path, mode); if (retval > 0) { header.size = retval; header.type = T_CHMOD; } break; case T_CHOWN: path = va_arg(ap, V9fsString *); uid = va_arg(ap, int); gid = va_arg(ap, int); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"sdd\", path, uid, gid); if (retval > 0) { header.size = retval; header.type = T_CHOWN; } break; case T_TRUNCATE: path = va_arg(ap, V9fsString *); offset = va_arg(ap, uint64_t); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"sq\", path, offset); if (retval > 0) { header.size = retval; header.type = T_TRUNCATE; } break; case T_UTIME: path = va_arg(ap, V9fsString *); spec[0].tv_sec = va_arg(ap, long); spec[0].tv_nsec = va_arg(ap, long); spec[1].tv_sec = va_arg(ap, long); spec[1].tv_nsec = va_arg(ap, long); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"sqqqq\", path, spec[0].tv_sec, spec[1].tv_nsec, spec[1].tv_sec, 
spec[1].tv_nsec); if (retval > 0) { header.size = retval; header.type = T_UTIME; } break; case T_RENAME: oldpath = va_arg(ap, V9fsString *); path = va_arg(ap, V9fsString *); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"ss\", oldpath, path); if (retval > 0) { header.size = retval; header.type = T_RENAME; } break; case T_REMOVE: path = va_arg(ap, V9fsString *); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"s\", path); if (retval > 0) { header.size = retval; header.type = T_REMOVE; } break; case T_LGETXATTR: size = va_arg(ap, int); path = va_arg(ap, V9fsString *); name = va_arg(ap, V9fsString *); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"dss\", size, path, name); if (retval > 0) { header.size = retval; header.type = T_LGETXATTR; } break; case T_LLISTXATTR: size = va_arg(ap, int); path = va_arg(ap, V9fsString *); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"ds\", size, path); if (retval > 0) { header.size = retval; header.type = T_LLISTXATTR; } break; case T_LSETXATTR: path = va_arg(ap, V9fsString *); name = va_arg(ap, V9fsString *); value = va_arg(ap, V9fsString *); size = va_arg(ap, int); flags = va_arg(ap, int); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"sssdd\", path, name, value, size, flags); if (retval > 0) { header.size = retval; header.type = T_LSETXATTR; } break; case T_LREMOVEXATTR: path = va_arg(ap, V9fsString *); name = va_arg(ap, V9fsString *); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"ss\", path, name); if (retval > 0) { header.size = retval; header.type = T_LREMOVEXATTR; } break; case T_GETVERSION: path = va_arg(ap, V9fsString *); retval = proxy_marshal(iovec, PROXY_HDR_SZ, \"s\", path); if (retval > 0) { header.size = retval; header.type = T_GETVERSION; } break; default: error_report(\"Invalid type %d\", type); retval = -EINVAL; break; } va_end(ap); if (retval < 0) { goto err_out; } /* marshal the header details */ proxy_marshal(iovec, 0, \"dd\", header.type, header.size); header.size += PROXY_HDR_SZ; retval = qemu_write_full(proxy->sockfd, iovec->iov_base, header.size); if (retval != header.size) { goto close_error; } switch (type) { case T_OPEN: case T_CREATE: /* * A file descriptor is returned as response for * T_OPEN,T_CREATE on success */ if (v9fs_receivefd(proxy->sockfd, &retval) < 0) { goto close_error; } break; case T_MKNOD: case T_MKDIR: case T_SYMLINK: case T_LINK: case T_CHMOD: case T_CHOWN: case T_RENAME: case T_TRUNCATE: case T_UTIME: case T_REMOVE: case T_LSETXATTR: case T_LREMOVEXATTR: if (v9fs_receive_status(proxy, reply, &retval) < 0) { goto close_error; } break; case T_LSTAT: case T_READLINK: case T_STATFS: case T_GETVERSION: if (v9fs_receive_response(proxy, type, &retval, response) < 0) { goto close_error; } break; case T_LGETXATTR: case T_LLISTXATTR: if (!size) { if (v9fs_receive_status(proxy, reply, &retval) < 0) { goto close_error; } } else { if (v9fs_receive_response(proxy, type, &retval, response) < 0) { goto close_error; } } break; } err_out: qemu_mutex_unlock(&proxy->mutex); return retval; close_error: close(proxy->sockfd); proxy->sockfd = -1; qemu_mutex_unlock(&proxy->mutex); return -EIO; }"} {"target": 1, "idx": 24968, "func": "static void scsi_req_set_status(SCSIDiskReq *r, int status, int sense_code) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); r->req.status = status; scsi_disk_set_sense(s, sense_code); }"} {"target": 0, "idx": 25005, "func": "static inline void sdhci_blkgap_write(SDHCIState *s, uint8_t value) { if ((value & SDHC_STOP_AT_GAP_REQ) && (s->blkgap & SDHC_STOP_AT_GAP_REQ)) { return; } s->blkgap = value 
& SDHC_STOP_AT_GAP_REQ; if ((value & SDHC_CONTINUE_REQ) && s->stopped_state && (s->blkgap & SDHC_STOP_AT_GAP_REQ) == 0) { if (s->stopped_state == sdhc_gap_read) { s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ; SDHCI_GET_CLASS(s)->read_block_from_card(s); } else { s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_WRITE; SDHCI_GET_CLASS(s)->write_block_to_card(s); } s->stopped_state = sdhc_not_stopped; } else if (!s->stopped_state && (value & SDHC_STOP_AT_GAP_REQ)) { if (s->prnsts & SDHC_DOING_READ) { s->stopped_state = sdhc_gap_read; } else if (s->prnsts & SDHC_DOING_WRITE) { s->stopped_state = sdhc_gap_write; } } }"} {"target": 0, "idx": 25024, "func": "static void qemu_chr_parse_vc(QemuOpts *opts, ChardevBackend *backend, Error **errp) { int val; backend->vc = g_new0(ChardevVC, 1); val = qemu_opt_get_number(opts, \"width\", 0); if (val != 0) { backend->vc->has_width = true; backend->vc->width = val; } val = qemu_opt_get_number(opts, \"height\", 0); if (val != 0) { backend->vc->has_height = true; backend->vc->height = val; } val = qemu_opt_get_number(opts, \"cols\", 0); if (val != 0) { backend->vc->has_cols = true; backend->vc->cols = val; } val = qemu_opt_get_number(opts, \"rows\", 0); if (val != 0) { backend->vc->has_rows = true; backend->vc->rows = val; } }"} {"target": 1, "idx": 25066, "func": "static unsigned int find_best(struct vf_instance *vf){ int is_format_okay = vf->next->query_format(vf->next, IMGFMT_YV12); if ((is_format_okay & VFCAP_CSP_SUPPORTED_BY_HW) || (is_format_okay & VFCAP_CSP_SUPPORTED)) return IMGFMT_YV12; else return 0; }"} {"target": 1, "idx": 25075, "func": "void helper_idivl_EAX_T0(void) { int den, q, r; int64_t num; num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); den = T0; if (den == 0) { raise_exception(EXCP00_DIVZ); } #ifdef BUGGY_GCC_DIV64 r = idiv32(&q, num, den); #else q = (num / den); r = (num % den); #endif EAX = (uint32_t)q; EDX = (uint32_t)r; }"} {"target": 1, "idx": 25081, "func": "static inline void idct_col(int16_t *blk, const uint8_t *quant) { int t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, tA, tB, tC, tD, tE, tF; int t10, t11, t12, t13; int s0, s1, s2, s3, s4, s5, s6, s7; s0 = (int) blk[0 * 8] * quant[0 * 8]; s1 = (int) blk[1 * 8] * quant[1 * 8]; s2 = (int) blk[2 * 8] * quant[2 * 8]; s3 = (int) blk[3 * 8] * quant[3 * 8]; s4 = (int) blk[4 * 8] * quant[4 * 8]; s5 = (int) blk[5 * 8] * quant[5 * 8]; s6 = (int) blk[6 * 8] * quant[6 * 8]; s7 = (int) blk[7 * 8] * quant[7 * 8]; t0 = (s3 * 19266 + s5 * 12873) >> 15; t1 = (s5 * 19266 - s3 * 12873) >> 15; t2 = ((s7 * 4520 + s1 * 22725) >> 15) - t0; t3 = ((s1 * 4520 - s7 * 22725) >> 15) - t1; t4 = t0 * 2 + t2; t5 = t1 * 2 + t3; t6 = t2 - t3; t7 = t3 * 2 + t6; t8 = (t6 * 11585) >> 14; t9 = (t7 * 11585) >> 14; tA = (s2 * 8867 - s6 * 21407) >> 14; tB = (s6 * 8867 + s2 * 21407) >> 14; tC = (s0 >> 1) - (s4 >> 1); tD = (s4 >> 1) * 2 + tC; tE = tC - (tA >> 1); tF = tD - (tB >> 1); t10 = tF - t5; t11 = tE - t8; t12 = tE + (tA >> 1) * 2 - t9; t13 = tF + (tB >> 1) * 2 - t4; blk[0 * 8] = t13 + t4 * 2; blk[1 * 8] = t12 + t9 * 2; blk[2 * 8] = t11 + t8 * 2; blk[3 * 8] = t10 + t5 * 2; blk[4 * 8] = t10; blk[5 * 8] = t11; blk[6 * 8] = t12; blk[7 * 8] = t13; }"} {"target": 0, "idx": 25091, "func": "static int fourxm_probe(AVProbeData *p) { if (p->buf_size < 12) return 0; if ((AV_RL32(&p->buf[0]) != RIFF_TAG) || (AV_RL32(&p->buf[8]) != _4XMV_TAG)) return 0; return AVPROBE_SCORE_MAX; }"} {"target": 0, "idx": 25095, "func": "static void jpeg_table_header(AVCodecContext *avctx, PutBitContext *p, ScanTable 
*intra_scantable, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64], int hsample[3]) { int i, j, size; uint8_t *ptr; MpegEncContext *s = avctx->priv_data; if (avctx->codec_id != AV_CODEC_ID_LJPEG) { int matrix_count = 1 + !!memcmp(luma_intra_matrix, chroma_intra_matrix, sizeof(luma_intra_matrix[0]) * 64); if (s->force_duplicated_matrix) matrix_count = 2; /* quant matrixes */ put_marker(p, DQT); put_bits(p, 16, 2 + matrix_count * (1 + 64)); put_bits(p, 4, 0); /* 8 bit precision */ put_bits(p, 4, 0); /* table 0 */ for(i=0;i<64;i++) { j = intra_scantable->permutated[i]; put_bits(p, 8, luma_intra_matrix[j]); } if (matrix_count > 1) { put_bits(p, 4, 0); /* 8 bit precision */ put_bits(p, 4, 1); /* table 1 */ for(i=0;i<64;i++) { j = intra_scantable->permutated[i]; put_bits(p, 8, chroma_intra_matrix[j]); } } } if(avctx->active_thread_type & FF_THREAD_SLICE){ put_marker(p, DRI); put_bits(p, 16, 4); put_bits(p, 16, (avctx->width-1)/(8*hsample[0]) + 1); } /* huffman table */ put_marker(p, DHT); flush_put_bits(p); ptr = put_bits_ptr(p); put_bits(p, 16, 0); /* patched later */ size = 2; // Only MJPEG can have a variable Huffman variable. All other // formats use the default Huffman table. if (s->out_format == FMT_MJPEG && s->huffman == HUFFMAN_TABLE_OPTIMAL) { size += put_huffman_table(p, 0, 0, s->mjpeg_ctx->bits_dc_luminance, s->mjpeg_ctx->val_dc_luminance); size += put_huffman_table(p, 0, 1, s->mjpeg_ctx->bits_dc_chrominance, s->mjpeg_ctx->val_dc_chrominance); size += put_huffman_table(p, 1, 0, s->mjpeg_ctx->bits_ac_luminance, s->mjpeg_ctx->val_ac_luminance); size += put_huffman_table(p, 1, 1, s->mjpeg_ctx->bits_ac_chrominance, s->mjpeg_ctx->val_ac_chrominance); } else { size += put_huffman_table(p, 0, 0, avpriv_mjpeg_bits_dc_luminance, avpriv_mjpeg_val_dc); size += put_huffman_table(p, 0, 1, avpriv_mjpeg_bits_dc_chrominance, avpriv_mjpeg_val_dc); size += put_huffman_table(p, 1, 0, avpriv_mjpeg_bits_ac_luminance, avpriv_mjpeg_val_ac_luminance); size += put_huffman_table(p, 1, 1, avpriv_mjpeg_bits_ac_chrominance, avpriv_mjpeg_val_ac_chrominance); } AV_WB16(ptr, size); }"} {"target": 0, "idx": 25103, "func": "static void ppc_spapr_init(MachineState *machine) { sPAPRMachineState *spapr = SPAPR_MACHINE(machine); sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; PowerPCCPU *cpu; PCIHostState *phb; int i; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *rma_region; void *rma = NULL; hwaddr rma_alloc_size; hwaddr node0_size = spapr_node0_size(); uint32_t initrd_base = 0; long kernel_size = 0, initrd_size = 0; long load_limit, fw_size; bool kernel_le = false; char *filename; msi_supported = true; QLIST_INIT(&spapr->phbs); cpu_ppc_hypercall = emulate_spapr_hypercall; /* Allocate RMA if necessary */ rma_alloc_size = kvmppc_alloc_rma(&rma); if (rma_alloc_size == -1) { error_report(\"Unable to create RMA\"); exit(1); } if (rma_alloc_size && (rma_alloc_size < node0_size)) { spapr->rma_size = rma_alloc_size; } else { spapr->rma_size = node0_size; /* With KVM, we don't actually know whether KVM supports an * unbounded RMA (PR KVM) or is limited by the hash table size * (HV KVM using VRMA), so we always assume the latter * * In that case, we also limit the initial allocations for RTAS * etc... 
to 256M since we have no way to know what the VRMA size * is going to be as it depends on the size of the hash table * isn't determined yet. */ if (kvm_enabled()) { spapr->vrma_adjust = 1; spapr->rma_size = MIN(spapr->rma_size, 0x10000000); } } if (spapr->rma_size > node0_size) { error_report(\"Numa node 0 has to span the RMA (%#08\"HWADDR_PRIx\")\", spapr->rma_size); exit(1); } /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */ load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD; /* We aim for a hash table of size 1/128 the size of RAM. The * normal rule of thumb is 1/64 the size of RAM, but that's much * more than needed for the Linux guests we support. */ spapr->htab_shift = 18; /* Minimum architected size */ while (spapr->htab_shift <= 46) { if ((1ULL << (spapr->htab_shift + 7)) >= machine->maxram_size) { break; } spapr->htab_shift++; } spapr_alloc_htab(spapr); /* Set up Interrupt Controller before we create the VCPUs */ spapr->icp = xics_system_init(machine, DIV_ROUND_UP(max_cpus * kvmppc_smt_threads(), smp_threads), XICS_IRQS, &error_fatal); if (smc->dr_lmb_enabled) { spapr_validate_node_memory(machine, &error_fatal); } /* init CPUs */ if (machine->cpu_model == NULL) { machine->cpu_model = kvm_enabled() ? \"host\" : \"POWER7\"; } for (i = 0; i < smp_cpus; i++) { cpu = cpu_ppc_init(machine->cpu_model); if (cpu == NULL) { error_report(\"Unable to find PowerPC CPU definition\"); exit(1); } spapr_cpu_init(spapr, cpu, &error_fatal); } if (kvm_enabled()) { /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */ kvmppc_enable_logical_ci_hcalls(); kvmppc_enable_set_mode_hcall(); } /* allocate RAM */ memory_region_allocate_system_memory(ram, NULL, \"ppc_spapr.ram\", machine->ram_size); memory_region_add_subregion(sysmem, 0, ram); if (rma_alloc_size && rma) { rma_region = g_new(MemoryRegion, 1); memory_region_init_ram_ptr(rma_region, NULL, \"ppc_spapr.rma\", rma_alloc_size, rma); vmstate_register_ram_global(rma_region); memory_region_add_subregion(sysmem, 0, rma_region); } /* initialize hotplug memory address space */ if (machine->ram_size < machine->maxram_size) { ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size; if (machine->ram_slots > SPAPR_MAX_RAM_SLOTS) { error_report(\"Specified number of memory slots %\" PRIu64\" exceeds max supported %d\", machine->ram_slots, SPAPR_MAX_RAM_SLOTS); exit(1); } spapr->hotplug_memory.base = ROUND_UP(machine->ram_size, SPAPR_HOTPLUG_MEM_ALIGN); memory_region_init(&spapr->hotplug_memory.mr, OBJECT(spapr), \"hotplug-memory\", hotplug_mem_size); memory_region_add_subregion(sysmem, spapr->hotplug_memory.base, &spapr->hotplug_memory.mr); } if (smc->dr_lmb_enabled) { spapr_create_lmb_dr_connectors(spapr); } filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, \"spapr-rtas.bin\"); if (!filename) { error_report(\"Could not find LPAR rtas '%s'\", \"spapr-rtas.bin\"); exit(1); } spapr->rtas_size = get_image_size(filename); spapr->rtas_blob = g_malloc(spapr->rtas_size); if (load_image_size(filename, spapr->rtas_blob, spapr->rtas_size) < 0) { error_report(\"Could not load LPAR rtas '%s'\", filename); exit(1); } if (spapr->rtas_size > RTAS_MAX_SIZE) { error_report(\"RTAS too big ! 
0x%zx bytes (max is 0x%x)\", (size_t)spapr->rtas_size, RTAS_MAX_SIZE); exit(1); } g_free(filename); /* Set up EPOW events infrastructure */ spapr_events_init(spapr); /* Set up the RTC RTAS interfaces */ spapr_rtc_create(spapr); /* Set up VIO bus */ spapr->vio_bus = spapr_vio_bus_init(); for (i = 0; i < MAX_SERIAL_PORTS; i++) { if (serial_hds[i]) { spapr_vty_create(spapr->vio_bus, serial_hds[i]); } } /* We always have at least the nvram device on VIO */ spapr_create_nvram(spapr); /* Set up PCI */ spapr_pci_rtas_init(); phb = spapr_create_phb(spapr, 0); for (i = 0; i < nb_nics; i++) { NICInfo *nd = &nd_table[i]; if (!nd->model) { nd->model = g_strdup(\"ibmveth\"); } if (strcmp(nd->model, \"ibmveth\") == 0) { spapr_vlan_create(spapr->vio_bus, nd); } else { pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL); } } for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) { spapr_vscsi_create(spapr->vio_bus); } /* Graphics */ if (spapr_vga_init(phb->bus, &error_fatal)) { spapr->has_graphics = true; machine->usb |= defaults_enabled() && !machine->usb_disabled; } if (machine->usb) { if (smc->use_ohci_by_default) { pci_create_simple(phb->bus, -1, \"pci-ohci\"); } else { pci_create_simple(phb->bus, -1, \"nec-usb-xhci\"); } if (spapr->has_graphics) { USBBus *usb_bus = usb_bus_find(-1); usb_create_simple(usb_bus, \"usb-kbd\"); usb_create_simple(usb_bus, \"usb-mouse\"); } } if (spapr->rma_size < (MIN_RMA_SLOF << 20)) { error_report( \"pSeries SLOF firmware requires >= %ldM guest RMA (Real Mode Area memory)\", MIN_RMA_SLOF); exit(1); } if (kernel_filename) { uint64_t lowaddr = 0; kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE, 0); if (kernel_size == ELF_LOAD_WRONG_ENDIAN) { kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, NULL, &lowaddr, NULL, 0, PPC_ELF_MACHINE, 0); kernel_le = kernel_size > 0; } if (kernel_size < 0) { error_report(\"error loading %s: %s\", kernel_filename, load_elf_strerror(kernel_size)); exit(1); } /* load initrd */ if (initrd_filename) { /* Try to locate the initrd in the gap between the kernel * and the firmware. 
Add a bit of space just in case */ initrd_base = (KERNEL_LOAD_ADDR + kernel_size + 0x1ffff) & ~0xffff; initrd_size = load_image_targphys(initrd_filename, initrd_base, load_limit - initrd_base); if (initrd_size < 0) { error_report(\"could not load initial ram disk '%s'\", initrd_filename); exit(1); } } else { initrd_base = 0; initrd_size = 0; } } if (bios_name == NULL) { bios_name = FW_FILE_NAME; } filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (!filename) { error_report(\"Could not find LPAR firmware '%s'\", bios_name); exit(1); } fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE); if (fw_size <= 0) { error_report(\"Could not load LPAR firmware '%s'\", filename); exit(1); } g_free(filename); /* FIXME: Should register things through the MachineState's qdev * interface, this is a legacy from the sPAPREnvironment structure * which predated MachineState but had a similar function */ vmstate_register(NULL, 0, &vmstate_spapr, spapr); register_savevm_live(NULL, \"spapr/htab\", -1, 1, &savevm_htab_handlers, spapr); /* Prepare the device tree */ spapr->fdt_skel = spapr_create_fdt_skel(initrd_base, initrd_size, kernel_size, kernel_le, kernel_cmdline, spapr->check_exception_irq); assert(spapr->fdt_skel != NULL); /* used by RTAS */ QTAILQ_INIT(&spapr->ccs_list); qemu_register_reset(spapr_ccs_reset_hook, spapr); qemu_register_boot_set(spapr_boot_set, spapr); }"} {"target": 0, "idx": 25113, "func": "static void n8x0_nand_setup(struct n800_s *s) { char *otp_region; DriveInfo *dinfo; s->nand = qdev_create(NULL, \"onenand\"); qdev_prop_set_uint16(s->nand, \"manufacturer_id\", NAND_MFR_SAMSUNG); /* Either 0x40 or 0x48 are OK for the device ID */ qdev_prop_set_uint16(s->nand, \"device_id\", 0x48); qdev_prop_set_uint16(s->nand, \"version_id\", 0); qdev_prop_set_int32(s->nand, \"shift\", 1); dinfo = drive_get(IF_MTD, 0, 0); if (dinfo) { qdev_prop_set_drive_nofail(s->nand, \"drive\", blk_bs(blk_by_legacy_dinfo(dinfo))); } qdev_init_nofail(s->nand); sysbus_connect_irq(SYS_BUS_DEVICE(s->nand), 0, qdev_get_gpio_in(s->mpu->gpio, N8X0_ONENAND_GPIO)); omap_gpmc_attach(s->mpu->gpmc, N8X0_ONENAND_CS, sysbus_mmio_get_region(SYS_BUS_DEVICE(s->nand), 0)); otp_region = onenand_raw_otp(s->nand); memcpy(otp_region + 0x000, n8x0_cal_wlan_mac, sizeof(n8x0_cal_wlan_mac)); memcpy(otp_region + 0x800, n8x0_cal_bt_id, sizeof(n8x0_cal_bt_id)); /* XXX: in theory should also update the OOB for both pages */ }"} {"target": 0, "idx": 25119, "func": "void cpu_dump_state (CPUState *env, FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...), int flags) { uint32_t c0_status; int i; cpu_fprintf(f, \"pc=0x\" TARGET_FMT_lx \" HI=0x\" TARGET_FMT_lx \" LO=0x\" TARGET_FMT_lx \" ds %04x \" TARGET_FMT_lx \" %d\\n\", env->PC, env->HI, env->LO, env->hflags, env->btarget, env->bcond); for (i = 0; i < 32; i++) { if ((i & 3) == 0) cpu_fprintf(f, \"GPR%02d:\", i); cpu_fprintf(f, \" %s \" TARGET_FMT_lx, regnames[i], env->gpr[i]); if ((i & 3) == 3) cpu_fprintf(f, \"\\n\"); } c0_status = env->CP0_Status; cpu_fprintf(f, \"CP0 Status 0x%08x Cause 0x%08x EPC 0x\" TARGET_FMT_lx \"\\n\", c0_status, env->CP0_Cause, env->CP0_EPC); cpu_fprintf(f, \" Config0 0x%08x Config1 0x%08x LLAddr 0x\" TARGET_FMT_lx \"\\n\", env->CP0_Config0, env->CP0_Config1, env->CP0_LLAddr); if (c0_status & (1 << CP0St_CU1)) fpu_dump_state(env, f, cpu_fprintf, flags); #if defined(TARGET_MIPS64) && defined(MIPS_DEBUG_SIGN_EXTENSIONS) cpu_mips_check_sign_extensions(env, f, cpu_fprintf, flags); #endif }"} {"target": 1, "idx": 25130, "func": "static int 
aac_sync(uint64_t state, AACAC3ParseContext *hdr_info, int *need_next_header, int *new_frame_start) { GetBitContext bits; AACADTSHeaderInfo hdr; int size; union { uint64_t u64; uint8_t u8[8]; } tmp; tmp.u64 = av_be2ne64(state); init_get_bits(&bits, tmp.u8+8-AAC_ADTS_HEADER_SIZE, AAC_ADTS_HEADER_SIZE * 8); if ((size = avpriv_aac_parse_header(&bits, &hdr)) < 0) return 0; *need_next_header = 0; *new_frame_start = 1; hdr_info->sample_rate = hdr.sample_rate; hdr_info->channels = ff_mpeg4audio_channels[hdr.chan_config]; hdr_info->samples = hdr.samples; hdr_info->bit_rate = hdr.bit_rate; return size; }"} {"target": 1, "idx": 25134, "func": "uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size, int sign) { uint64_t ret = 0; #if defined(DEBUG_ASI) target_ulong last_addr = addr; #endif if (asi < 0x80) { helper_raise_exception(env, TT_PRIV_ACT); } helper_check_align(env, addr, size - 1); addr = asi_address_mask(env, asi, addr); switch (asi) { case 0x82: /* Primary no-fault */ case 0x8a: /* Primary no-fault LE */ if (page_check_range(addr, size, PAGE_READ) == -1) { #ifdef DEBUG_ASI dump_asi(\"read \", last_addr, asi, size, ret); #endif return 0; } /* Fall through */ case 0x80: /* Primary */ case 0x88: /* Primary LE */ { switch (size) { case 1: ret = ldub_raw(addr); break; case 2: ret = lduw_raw(addr); break; case 4: ret = ldl_raw(addr); break; default: case 8: ret = ldq_raw(addr); break; } } break; case 0x83: /* Secondary no-fault */ case 0x8b: /* Secondary no-fault LE */ if (page_check_range(addr, size, PAGE_READ) == -1) { #ifdef DEBUG_ASI dump_asi(\"read \", last_addr, asi, size, ret); #endif return 0; } /* Fall through */ case 0x81: /* Secondary */ case 0x89: /* Secondary LE */ /* XXX */ break; default: break; } /* Convert from little endian */ switch (asi) { case 0x88: /* Primary LE */ case 0x89: /* Secondary LE */ case 0x8a: /* Primary no-fault LE */ case 0x8b: /* Secondary no-fault LE */ switch (size) { case 2: ret = bswap16(ret); break; case 4: ret = bswap32(ret); break; case 8: ret = bswap64(ret); break; default: break; } default: break; } /* Convert to signed number */ if (sign) { switch (size) { case 1: ret = (int8_t) ret; break; case 2: ret = (int16_t) ret; break; case 4: ret = (int32_t) ret; break; default: break; } } #ifdef DEBUG_ASI dump_asi(\"read \", last_addr, asi, size, ret); #endif return ret; }"} {"target": 1, "idx": 25135, "func": "static av_cold int vc2_encode_init(AVCodecContext *avctx) { Plane *p; SubBand *b; int i, j, level, o, shift, ret; const AVPixFmtDescriptor *fmt = av_pix_fmt_desc_get(avctx->pix_fmt); const int depth = fmt->comp[0].depth; VC2EncContext *s = avctx->priv_data; s->picture_number = 0; /* Total allowed quantization range */ s->q_ceil = DIRAC_MAX_QUANT_INDEX; s->ver.major = 2; s->ver.minor = 0; s->profile = 3; s->level = 3; s->base_vf = -1; s->strict_compliance = 1; s->q_avg = 0; s->slice_max_bytes = 0; s->slice_min_bytes = 0; /* Mark unknown as progressive */ s->interlaced = !((avctx->field_order == AV_FIELD_UNKNOWN) || (avctx->field_order == AV_FIELD_PROGRESSIVE)); for (i = 0; i < base_video_fmts_len; i++) { const VC2BaseVideoFormat *fmt = &base_video_fmts[i]; if (avctx->pix_fmt != fmt->pix_fmt) continue; if (avctx->time_base.num != fmt->time_base.num) continue; if (avctx->time_base.den != fmt->time_base.den) continue; if (avctx->width != fmt->width) continue; if (avctx->height != fmt->height) continue; if (s->interlaced != fmt->interlaced) continue; s->base_vf = i; s->level = base_video_fmts[i].level; break; } if (s->interlaced) 
av_log(avctx, AV_LOG_WARNING, \"Interlacing enabled!\\n\"); if ((s->slice_width & (s->slice_width - 1)) || (s->slice_height & (s->slice_height - 1))) { av_log(avctx, AV_LOG_ERROR, \"Slice size is not a power of two!\\n\"); return AVERROR_UNKNOWN; } if ((s->slice_width > avctx->width) || (s->slice_height > avctx->height)) { av_log(avctx, AV_LOG_ERROR, \"Slice size is bigger than the image!\\n\"); return AVERROR_UNKNOWN; } if (s->base_vf <= 0) { if (avctx->strict_std_compliance < FF_COMPLIANCE_STRICT) { s->strict_compliance = s->base_vf = 0; av_log(avctx, AV_LOG_WARNING, \"Format does not strictly comply with VC2 specs\\n\"); } else { av_log(avctx, AV_LOG_WARNING, \"Given format does not strictly comply with \" \"the specifications, decrease strictness to use it.\\n\"); return AVERROR_UNKNOWN; } } else { av_log(avctx, AV_LOG_INFO, \"Selected base video format = %i (%s)\\n\", s->base_vf, base_video_fmts[s->base_vf].name); } /* Chroma subsampling */ ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift); if (ret) return ret; /* Bit depth and color range index */ if (depth == 8 && avctx->color_range == AVCOL_RANGE_JPEG) { s->bpp = 1; s->bpp_idx = 1; s->diff_offset = 128; } else if (depth == 8 && (avctx->color_range == AVCOL_RANGE_MPEG || avctx->color_range == AVCOL_RANGE_UNSPECIFIED)) { s->bpp = 1; s->bpp_idx = 2; s->diff_offset = 128; } else if (depth == 10) { s->bpp = 2; s->bpp_idx = 3; s->diff_offset = 512; } else { s->bpp = 2; s->bpp_idx = 4; s->diff_offset = 2048; } /* Planes initialization */ for (i = 0; i < 3; i++) { int w, h; p = &s->plane[i]; p->width = avctx->width >> (i ? s->chroma_x_shift : 0); p->height = avctx->height >> (i ? s->chroma_y_shift : 0); if (s->interlaced) p->height >>= 1; p->dwt_width = w = FFALIGN(p->width, (1 << s->wavelet_depth)); p->dwt_height = h = FFALIGN(p->height, (1 << s->wavelet_depth)); p->coef_stride = FFALIGN(p->dwt_width, 32); p->coef_buf = av_malloc(p->coef_stride*p->dwt_height*sizeof(dwtcoef)); if (!p->coef_buf) goto alloc_fail; for (level = s->wavelet_depth-1; level >= 0; level--) { w = w >> 1; h = h >> 1; for (o = 0; o < 4; o++) { b = &p->band[level][o]; b->width = w; b->height = h; b->stride = p->coef_stride; shift = (o > 1)*b->height*b->stride + (o & 1)*b->width; b->buf = p->coef_buf + shift; } } /* DWT init */ if (ff_vc2enc_init_transforms(&s->transform_args[i].t, s->plane[i].coef_stride, s->plane[i].dwt_height)) goto alloc_fail; } /* Slices */ s->num_x = s->plane[0].dwt_width/s->slice_width; s->num_y = s->plane[0].dwt_height/s->slice_height; s->slice_args = av_calloc(s->num_x*s->num_y, sizeof(SliceArgs)); if (!s->slice_args) goto alloc_fail; /* Lookup tables */ s->coef_lut_len = av_malloc(COEF_LUT_TAB*(s->q_ceil+1)*sizeof(*s->coef_lut_len)); if (!s->coef_lut_len) goto alloc_fail; s->coef_lut_val = av_malloc(COEF_LUT_TAB*(s->q_ceil+1)*sizeof(*s->coef_lut_val)); if (!s->coef_lut_val) goto alloc_fail; for (i = 0; i < s->q_ceil; i++) { uint8_t *len_lut = &s->coef_lut_len[i*COEF_LUT_TAB]; uint32_t *val_lut = &s->coef_lut_val[i*COEF_LUT_TAB]; for (j = 0; j < COEF_LUT_TAB; j++) { get_vc2_ue_uint(QUANT(j, ff_dirac_qscale_tab[i]), &len_lut[j], &val_lut[j]); if (len_lut[j] != 1) { len_lut[j] += 1; val_lut[j] <<= 1; } else { val_lut[j] = 1; } } } return 0; alloc_fail: vc2_encode_end(avctx); av_log(avctx, AV_LOG_ERROR, \"Unable to allocate memory!\\n\"); return AVERROR(ENOMEM); }"} {"target": 1, "idx": 25149, "func": "static av_cold int rv40_decode_init(AVCodecContext *avctx) { RV34DecContext *r = avctx->priv_data; 
r->rv30 = 0; ff_rv34_decode_init(avctx); if(!aic_top_vlc.bits) rv40_init_tables(); r->parse_slice_header = rv40_parse_slice_header; r->decode_intra_types = rv40_decode_intra_types; r->decode_mb_info = rv40_decode_mb_info; r->loop_filter = rv40_loop_filter; r->luma_dc_quant_i = rv40_luma_dc_quant[0]; r->luma_dc_quant_p = rv40_luma_dc_quant[1]; return 0; }"} {"target": 0, "idx": 25163, "func": "static void spr_write_dbatu_h (void *opaque, int sprn) { DisasContext *ctx = opaque; gen_op_store_dbatu((sprn - SPR_DBAT4U) / 2); RET_STOP(ctx); }"} {"target": 0, "idx": 25167, "func": "sprintf_len(char *string, const char *format, ...) #else sprintf_len(va_alist) va_dcl #endif { va_list args; #ifdef __STDC__ va_start(args, format); #else char *string; char *format; va_start(args); string = va_arg(args, char *); format = va_arg(args, char *); #endif vsprintf(string, format, args); return strlen(string); }"} {"target": 1, "idx": 25179, "func": "static int extract_extradata_h2645(AVBSFContext *ctx, AVPacket *pkt, uint8_t **data, int *size) { static const int extradata_nal_types_hevc[] = { HEVC_NAL_VPS, HEVC_NAL_SPS, HEVC_NAL_PPS, }; static const int extradata_nal_types_h264[] = { H264_NAL_SPS, H264_NAL_PPS, }; ExtractExtradataContext *s = ctx->priv_data; H2645Packet h2645_pkt = { 0 }; int extradata_size = 0; const int *extradata_nal_types; int nb_extradata_nal_types; int i, has_sps = 0, has_vps = 0, ret = 0; if (ctx->par_in->codec_id == AV_CODEC_ID_HEVC) { extradata_nal_types = extradata_nal_types_hevc; nb_extradata_nal_types = FF_ARRAY_ELEMS(extradata_nal_types_hevc); } else { extradata_nal_types = extradata_nal_types_h264; nb_extradata_nal_types = FF_ARRAY_ELEMS(extradata_nal_types_h264); } ret = ff_h2645_packet_split(&h2645_pkt, pkt->data, pkt->size, ctx, 0, 0, ctx->par_in->codec_id, 1); if (ret < 0) return ret; for (i = 0; i < h2645_pkt.nb_nals; i++) { H2645NAL *nal = &h2645_pkt.nals[i]; if (val_in_array(extradata_nal_types, nb_extradata_nal_types, nal->type)) { extradata_size += nal->raw_size + 3; if (ctx->par_in->codec_id == AV_CODEC_ID_HEVC) { if (nal->type == HEVC_NAL_SPS) has_sps = 1; if (nal->type == HEVC_NAL_VPS) has_vps = 1; } else { if (nal->type == H264_NAL_SPS) has_sps = 1; } } } if (extradata_size && ((ctx->par_in->codec_id == AV_CODEC_ID_HEVC && has_sps && has_vps) || (ctx->par_in->codec_id == AV_CODEC_ID_H264 && has_sps))) { AVBufferRef *filtered_buf; uint8_t *extradata, *filtered_data; if (s->remove) { filtered_buf = av_buffer_alloc(pkt->size + AV_INPUT_BUFFER_PADDING_SIZE); if (!filtered_buf) { ret = AVERROR(ENOMEM); goto fail; } filtered_data = filtered_buf->data; } extradata = av_malloc(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!extradata) { av_buffer_unref(&filtered_buf); ret = AVERROR(ENOMEM); goto fail; } *data = extradata; *size = extradata_size; for (i = 0; i < h2645_pkt.nb_nals; i++) { H2645NAL *nal = &h2645_pkt.nals[i]; if (val_in_array(extradata_nal_types, nb_extradata_nal_types, nal->type)) { AV_WB24(extradata, 1); // startcode memcpy(extradata + 3, nal->raw_data, nal->raw_size); extradata += 3 + nal->raw_size; } else if (s->remove) { AV_WB24(filtered_data, 1); // startcode memcpy(filtered_data + 3, nal->raw_data, nal->raw_size); filtered_data += 3 + nal->raw_size; } } if (s->remove) { av_buffer_unref(&pkt->buf); pkt->buf = filtered_buf; pkt->data = filtered_buf->data; pkt->size = filtered_data - filtered_buf->data; } } fail: ff_h2645_packet_uninit(&h2645_pkt); return ret; }"} {"target": 1, "idx": 25181, "func": "static void FUNC(put_hevc_epel_bi_w_h)(uint8_t 
*_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, int denom, int wx0, int wx1, int ox0, int ox1, intptr_t mx, intptr_t my, int width) { int x, y; pixel *src = (pixel *)_src; ptrdiff_t srcstride = _srcstride / sizeof(pixel); pixel *dst = (pixel *)_dst; ptrdiff_t dststride = _dststride / sizeof(pixel); const int8_t *filter = ff_hevc_epel_filters[mx - 1]; int shift = 14 + 1 - BIT_DEPTH; int log2Wd = denom + shift - 1; ox0 = ox0 * (1 << (BIT_DEPTH - 8)); ox1 = ox1 * (1 << (BIT_DEPTH - 8)); for (y = 0; y < height; y++) { for (x = 0; x < width; x++) dst[x] = av_clip_pixel(((EPEL_FILTER(src, 1) >> (BIT_DEPTH - 8)) * wx1 + src2[x] * wx0 + ((ox0 + ox1 + 1) << log2Wd)) >> (log2Wd + 1)); src += srcstride; dst += dststride; src2 += MAX_PB_SIZE; } }"} {"target": 1, "idx": 25182, "func": "void sdl2_gl_scanout(DisplayChangeListener *dcl, uint32_t backing_id, bool backing_y_0_top, uint32_t x, uint32_t y, uint32_t w, uint32_t h) { struct sdl2_console *scon = container_of(dcl, struct sdl2_console, dcl); assert(scon->opengl); scon->x = x; scon->y = y; scon->w = w; scon->h = h; scon->tex_id = backing_id; scon->y0_top = backing_y_0_top; SDL_GL_MakeCurrent(scon->real_window, scon->winctx); if (scon->tex_id == 0 || scon->w == 0 || scon->h == 0) { sdl2_set_scanout_mode(scon, false); return; } sdl2_set_scanout_mode(scon, true); if (!scon->fbo_id) { glGenFramebuffers(1, &scon->fbo_id); } glBindFramebuffer(GL_FRAMEBUFFER_EXT, scon->fbo_id); glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, scon->tex_id, 0); }"} {"target": 1, "idx": 25185, "func": "static void mm_stop_timer(struct qemu_alarm_timer *t) { timeKillEvent(mm_timer); timeEndPeriod(mm_period); }"} {"target": 1, "idx": 25189, "func": "static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOpIdx oi) { TCGMemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); TCGReg addrz, param; tcg_insn_unit *func; tcg_insn_unit *label_ptr; addrz = tcg_out_tlb_load(s, addr, memi, memop, offsetof(CPUTLBEntry, addr_write)); /* The fast path is exactly one insn. Thus we can perform the entire TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */ /* beq,a,pt %[xi]cc, label0 */ label_ptr = s->code_ptr; tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); /* delay slot */ tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); /* TLB Miss. */ param = TCG_REG_O1; if (!SPARC64 && TARGET_LONG_BITS == 64) { /* Skip the high-part; we'll perform the extract in the trampoline. */ param++; } tcg_out_mov(s, TCG_TYPE_REG, param++, addr); if (!SPARC64 && (memop & MO_SIZE) == MO_64) { /* Skip the high-part; we'll perform the extract in the trampoline. */ param++; } tcg_out_mov(s, TCG_TYPE_REG, param++, data); func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)]; tcg_debug_assert(func != NULL); tcg_out_call_nodelay(s, func); /* delay slot */ tcg_out_movi(s, TCG_TYPE_I32, param, oi); *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); #else if (SPARC64 && TARGET_LONG_BITS == 32) { tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); addr = TCG_REG_T1; } tcg_out_ldst_rr(s, data, addr, (guest_base ? 
TCG_GUEST_BASE_REG : TCG_REG_G0), qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); #endif /* CONFIG_SOFTMMU */ }"} {"target": 0, "idx": 25223, "func": "QEMUFile *qemu_fopen_fd(int fd) { QEMUFileFD *s = qemu_mallocz(sizeof(QEMUFileFD)); if (s == NULL) return NULL; s->fd = fd; s->file = qemu_fopen_ops(s, fd_put_buffer, fd_get_buffer, fd_close, NULL); return s->file; }"} {"target": 1, "idx": 25245, "func": "static void pred_spatial_direct_motion(const H264Context *const h, H264SliceContext *sl, int *mb_type) { int b8_stride = 2; int b4_stride = h->b_stride; int mb_xy = sl->mb_xy, mb_y = sl->mb_y; int mb_type_col[2]; const int16_t (*l1mv0)[2], (*l1mv1)[2]; const int8_t *l1ref0, *l1ref1; const int is_b8x8 = IS_8X8(*mb_type); unsigned int sub_mb_type = MB_TYPE_L0L1; int i8, i4; int ref[2]; int mv[2]; int list; assert(sl->ref_list[1][0].reference & 3); await_reference_mb_row(h, sl->ref_list[1][0].parent, sl->mb_y + !!IS_INTERLACED(*mb_type)); #define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16 | MB_TYPE_INTRA4x4 | \\ MB_TYPE_INTRA16x16 | MB_TYPE_INTRA_PCM) /* ref = min(neighbors) */ for (list = 0; list < 2; list++) { int left_ref = sl->ref_cache[list][scan8[0] - 1]; int top_ref = sl->ref_cache[list][scan8[0] - 8]; int refc = sl->ref_cache[list][scan8[0] - 8 + 4]; const int16_t *C = sl->mv_cache[list][scan8[0] - 8 + 4]; if (refc == PART_NOT_AVAILABLE) { refc = sl->ref_cache[list][scan8[0] - 8 - 1]; C = sl->mv_cache[list][scan8[0] - 8 - 1]; } ref[list] = FFMIN3((unsigned)left_ref, (unsigned)top_ref, (unsigned)refc); if (ref[list] >= 0) { /* This is just pred_motion() but with the cases removed that * cannot happen for direct blocks. */ const int16_t *const A = sl->mv_cache[list][scan8[0] - 1]; const int16_t *const B = sl->mv_cache[list][scan8[0] - 8]; int match_count = (left_ref == ref[list]) + (top_ref == ref[list]) + (refc == ref[list]); if (match_count > 1) { // most common mv[list] = pack16to32(mid_pred(A[0], B[0], C[0]), mid_pred(A[1], B[1], C[1])); } else { assert(match_count == 1); if (left_ref == ref[list]) mv[list] = AV_RN32A(A); else if (top_ref == ref[list]) mv[list] = AV_RN32A(B); else mv[list] = AV_RN32A(C); } } else { int mask = ~(MB_TYPE_L0 << (2 * list)); mv[list] = 0; ref[list] = -1; if (!is_b8x8) *mb_type &= mask; sub_mb_type &= mask; } } if (ref[0] < 0 && ref[1] < 0) { ref[0] = ref[1] = 0; if (!is_b8x8) *mb_type |= MB_TYPE_L0L1; sub_mb_type |= MB_TYPE_L0L1; } if (!(is_b8x8 | mv[0] | mv[1])) { fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1); fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1); fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4); fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4); *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_P1L0 | MB_TYPE_P1L1)) | MB_TYPE_16x16 | MB_TYPE_DIRECT2; return; } if (IS_INTERLACED(sl->ref_list[1][0].parent->mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL mb_y = (sl->mb_y & ~1) + sl->col_parity; mb_xy = sl->mb_x + ((sl->mb_y & ~1) + sl->col_parity) * h->mb_stride; b8_stride = 0; } else { mb_y += sl->col_fieldoff; mb_xy += h->mb_stride * sl->col_fieldoff; // non-zero for FL -> FL & differ parity } goto single_col; } else { // AFL/AFR/FR/FL -> AFR/FR if (IS_INTERLACED(*mb_type)) { // AFL /FL -> AFR/FR mb_y = sl->mb_y & ~1; mb_xy = (sl->mb_y & ~1) * h->mb_stride + sl->mb_x; mb_type_col[0] = sl->ref_list[1][0].parent->mb_type[mb_xy]; mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy + h->mb_stride]; 
b8_stride = 2 + 4 * h->mb_stride; b4_stride *= 6; if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) { mb_type_col[0] &= ~MB_TYPE_INTERLACED; mb_type_col[1] &= ~MB_TYPE_INTERLACED; } sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */ if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) && !is_b8x8) { *mb_type |= MB_TYPE_16x8 | MB_TYPE_DIRECT2; /* B_16x8 */ } else { *mb_type |= MB_TYPE_8x8; } } else { // AFR/FR -> AFR/FR single_col: mb_type_col[0] = mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy]; sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */ if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) { *mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_16x16 */ } else if (!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) { *mb_type |= MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16)); } else { if (!h->ps.sps->direct_8x8_inference_flag) { /* FIXME: Save sub mb types from previous frames (or derive * from MVs) so we know exactly what block size to use. */ sub_mb_type += (MB_TYPE_8x8 - MB_TYPE_16x16); /* B_SUB_4x4 */ } *mb_type |= MB_TYPE_8x8; } } } await_reference_mb_row(h, sl->ref_list[1][0].parent, mb_y); l1mv0 = &sl->ref_list[1][0].parent->motion_val[0][h->mb2b_xy[mb_xy]]; l1mv1 = &sl->ref_list[1][0].parent->motion_val[1][h->mb2b_xy[mb_xy]]; l1ref0 = &sl->ref_list[1][0].parent->ref_index[0][4 * mb_xy]; l1ref1 = &sl->ref_list[1][0].parent->ref_index[1][4 * mb_xy]; if (!b8_stride) { if (sl->mb_y & 1) { l1ref0 += 2; l1ref1 += 2; l1mv0 += 2 * b4_stride; l1mv1 += 2 * b4_stride; } } if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) { int n = 0; for (i8 = 0; i8 < 4; i8++) { int x8 = i8 & 1; int y8 = i8 >> 1; int xy8 = x8 + y8 * b8_stride; int xy4 = x8 * 3 + y8 * b4_stride; int a, b; if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8])) continue; sl->sub_mb_type[i8] = sub_mb_type; fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, (uint8_t)ref[0], 1); fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, (uint8_t)ref[1], 1); if (!IS_INTRA(mb_type_col[y8]) && !sl->ref_list[1][0].parent->long_ref && ((l1ref0[xy8] == 0 && FFABS(l1mv0[xy4][0]) <= 1 && FFABS(l1mv0[xy4][1]) <= 1) || (l1ref0[xy8] < 0 && l1ref1[xy8] == 0 && FFABS(l1mv1[xy4][0]) <= 1 && FFABS(l1mv1[xy4][1]) <= 1))) { a = b = 0; if (ref[0] > 0) a = mv[0]; if (ref[1] > 0) b = mv[1]; n++; } else { a = mv[0]; b = mv[1]; } fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, a, 4); fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, b, 4); } if (!is_b8x8 && !(n & 3)) *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_P1L0 | MB_TYPE_P1L1)) | MB_TYPE_16x16 | MB_TYPE_DIRECT2; } else if (IS_16X16(*mb_type)) { int a, b; fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1); fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1); if (!IS_INTRA(mb_type_col[0]) && !sl->ref_list[1][0].parent->long_ref && ((l1ref0[0] == 0 && FFABS(l1mv0[0][0]) <= 1 && FFABS(l1mv0[0][1]) <= 1) || (l1ref0[0] < 0 && !l1ref1[0] && FFABS(l1mv1[0][0]) <= 1 && FFABS(l1mv1[0][1]) <= 1 && h->sei.unregistered.x264_build > 33U))) { a = b = 0; if (ref[0] > 0) a = mv[0]; if (ref[1] > 0) b = mv[1]; } else { a = mv[0]; b = mv[1]; } fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, a, 4); fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, b, 4); } else { int n = 0; for (i8 = 0; i8 < 4; i8++) { const int x8 = i8 & 1; const int y8 = i8 >> 1; if (is_b8x8 && 
!IS_DIRECT(sl->sub_mb_type[i8])) continue; sl->sub_mb_type[i8] = sub_mb_type; fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, mv[0], 4); fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, mv[1], 4); fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, (uint8_t)ref[0], 1); fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, (uint8_t)ref[1], 1); assert(b8_stride == 2); /* col_zero_flag */ if (!IS_INTRA(mb_type_col[0]) && !sl->ref_list[1][0].parent->long_ref && (l1ref0[i8] == 0 || (l1ref0[i8] < 0 && l1ref1[i8] == 0 && h->sei.unregistered.x264_build > 33U))) { const int16_t (*l1mv)[2] = l1ref0[i8] == 0 ? l1mv0 : l1mv1; if (IS_SUB_8X8(sub_mb_type)) { const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride]; if (FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1) { if (ref[0] == 0) fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4); if (ref[1] == 0) fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4); n += 4; } } else { int m = 0; for (i4 = 0; i4 < 4; i4++) { const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) + (y8 * 2 + (i4 >> 1)) * b4_stride]; if (FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1) { if (ref[0] == 0) AV_ZERO32(sl->mv_cache[0][scan8[i8 * 4 + i4]]); if (ref[1] == 0) AV_ZERO32(sl->mv_cache[1][scan8[i8 * 4 + i4]]); m++; } } if (!(m & 3)) sl->sub_mb_type[i8] += MB_TYPE_16x16 - MB_TYPE_8x8; n += m; } } } if (!is_b8x8 && !(n & 15)) *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_P1L0 | MB_TYPE_P1L1)) | MB_TYPE_16x16 | MB_TYPE_DIRECT2; } }"} {"target": 1, "idx": 25247, "func": "static void boston_lcd_event(void *opaque, int event) { BostonState *s = opaque; if (event == CHR_EVENT_OPENED && !s->lcd_inited) { qemu_chr_fe_printf(&s->lcd_display, \" \"); s->lcd_inited = true; } }"} {"target": 1, "idx": 25252, "func": "void add_command(const cmdinfo_t *ci) { cmdtab = realloc((void *)cmdtab, ++ncmds * sizeof(*cmdtab)); cmdtab[ncmds - 1] = *ci; qsort(cmdtab, ncmds, sizeof(*cmdtab), compare); }"} {"target": 1, "idx": 25260, "func": "static void openpic_update_irq(OpenPICState *opp, int n_IRQ) { IRQ_src_t *src; int i; src = &opp->src[n_IRQ]; if (!src->pending) { /* no irq pending */ DPRINTF(\"%s: IRQ %d is not pending\\n\", __func__, n_IRQ); return; } if (src->ipvp & IPVP_MASK_MASK) { /* Interrupt source is disabled */ DPRINTF(\"%s: IRQ %d is disabled\\n\", __func__, n_IRQ); return; } if (IPVP_PRIORITY(src->ipvp) == 0) { /* Priority set to zero */ DPRINTF(\"%s: IRQ %d has 0 priority\\n\", __func__, n_IRQ); return; } if (src->ipvp & IPVP_ACTIVITY_MASK) { /* IRQ already active */ DPRINTF(\"%s: IRQ %d is already active\\n\", __func__, n_IRQ); return; } if (src->ide == 0) { /* No target */ DPRINTF(\"%s: IRQ %d has no target\\n\", __func__, n_IRQ); return; } if (src->ide == (1 << src->last_cpu)) { /* Only one CPU is allowed to receive this IRQ */ IRQ_local_pipe(opp, src->last_cpu, n_IRQ); } else if (!(src->ipvp & IPVP_MODE_MASK)) { /* Directed delivery mode */ for (i = 0; i < opp->nb_cpus; i++) { if (src->ide & (1 << i)) { IRQ_local_pipe(opp, i, n_IRQ); } } } else { /* Distributed delivery mode */ for (i = src->last_cpu + 1; i != src->last_cpu; i++) { if (i == opp->nb_cpus) i = 0; if (src->ide & (1 << i)) { IRQ_local_pipe(opp, i, n_IRQ); src->last_cpu = i; break; } } } }"} {"target": 1, "idx": 25263, "func": "static inline void usb_bt_fifo_out_enqueue(struct USBBtState *s, struct usb_hci_out_fifo_s *fifo, void (*send)(struct HCIInfo *, const uint8_t *, int), int (*complete)(const uint8_t *, int), const uint8_t *data, int len) { if 
(fifo->len) { memcpy(fifo->data + fifo->len, data, len); fifo->len += len; if (complete(fifo->data, fifo->len)) { send(s->hci, fifo->data, fifo->len); fifo->len = 0; } } else if (complete(data, len)) send(s->hci, data, len); else { memcpy(fifo->data, data, len); fifo->len = len; } /* TODO: do we need to loop? */ }"} {"target": 0, "idx": 25265, "func": "static void compute_status(HTTPContext *c) { HTTPContext *c1; FFStream *stream; char *p; time_t ti; int i, len; AVIOContext *pb; if (avio_open_dyn_buf(&pb) < 0) { /* XXX: return an error ? */ c->buffer_ptr = c->buffer; c->buffer_end = c->buffer; return; } avio_printf(pb, \"HTTP/1.0 200 OK\\r\\n\"); avio_printf(pb, \"Content-type: %s\\r\\n\", \"text/html\"); avio_printf(pb, \"Pragma: no-cache\\r\\n\"); avio_printf(pb, \"\\r\\n\"); avio_printf(pb, \"%s Status\\n\", program_name); if (c->stream->feed_filename[0]) avio_printf(pb, \"\\n\", c->stream->feed_filename); avio_printf(pb, \"\\n\"); avio_printf(pb, \"
%s Status \\n\", program_name); /* format status */ avio_printf(pb, \" Available Streams \\n\"); avio_printf(pb, \"\\n\"); avio_printf(pb, \" PathServed Conns bytes FormatBit rate kbits/s Video kbits/s Codec Audio kbits/s Codec
Feed\\n\"); stream = first_stream; while (stream != NULL) { char sfilename[1024]; char *eosf; if (stream->feed != stream) { av_strlcpy(sfilename, stream->filename, sizeof(sfilename) - 10); eosf = sfilename + strlen(sfilename); if (eosf - sfilename >= 4) { if (strcmp(eosf - 4, \".asf\") == 0) strcpy(eosf - 4, \".asx\"); else if (strcmp(eosf - 3, \".rm\") == 0) strcpy(eosf - 3, \".ram\"); else if (stream->fmt && !strcmp(stream->fmt->name, \"rtp\")) { /* generate a sample RTSP director if unicast. Generate an SDP redirector if multicast */ eosf = strrchr(sfilename, '.'); if (!eosf) eosf = sfilename + strlen(sfilename); if (stream->is_multicast) strcpy(eosf, \".sdp\"); else strcpy(eosf, \".rtsp\"); } } avio_printf(pb, \"
%s \", sfilename, stream->filename); avio_printf(pb, \" %d \", stream->conns_served); fmt_bytecount(pb, stream->bytes_served); switch(stream->stream_type) { case STREAM_TYPE_LIVE: { int audio_bit_rate = 0; int video_bit_rate = 0; const char *audio_codec_name = \"\"; const char *video_codec_name = \"\"; const char *audio_codec_name_extra = \"\"; const char *video_codec_name_extra = \"\"; for(i=0;inb_streams;i++) { AVStream *st = stream->streams[i]; AVCodec *codec = avcodec_find_encoder(st->codec->codec_id); switch(st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: audio_bit_rate += st->codec->bit_rate; if (codec) { if (*audio_codec_name) audio_codec_name_extra = \"...\"; audio_codec_name = codec->name; } break; case AVMEDIA_TYPE_VIDEO: video_bit_rate += st->codec->bit_rate; if (codec) { if (*video_codec_name) video_codec_name_extra = \"...\"; video_codec_name = codec->name; } break; case AVMEDIA_TYPE_DATA: video_bit_rate += st->codec->bit_rate; break; default: abort(); } } avio_printf(pb, \" %s %d %d %s %s %d %s %s\", stream->fmt->name, stream->bandwidth, video_bit_rate / 1000, video_codec_name, video_codec_name_extra, audio_bit_rate / 1000, audio_codec_name, audio_codec_name_extra); if (stream->feed) avio_printf(pb, \"%s\", stream->feed->filename); else avio_printf(pb, \"%s\", stream->feed_filename); avio_printf(pb, \"\\n\"); } break; default: avio_printf(pb, \" - - - - \\n\"); break; } } stream = stream->next; } avio_printf(pb, \"
\\n\"); stream = first_stream; while (stream != NULL) { if (stream->feed == stream) { avio_printf(pb, \"
Feed %s \", stream->filename); if (stream->pid) { avio_printf(pb, \"Running as pid %d.\\n\", stream->pid); #if defined(linux) && !defined(CONFIG_NOCUTILS) { FILE *pid_stat; char ps_cmd[64]; /* This is somewhat linux specific I guess */ snprintf(ps_cmd, sizeof(ps_cmd), \"ps -o \\\"%%cpu,cputime\\\" --no-headers %d\", stream->pid); pid_stat = popen(ps_cmd, \"r\"); if (pid_stat) { char cpuperc[10]; char cpuused[64]; if (fscanf(pid_stat, \"%9s %63s\", cpuperc, cpuused) == 2) { avio_printf(pb, \"Currently using %s%% of the cpu. Total time used %s.\\n\", cpuperc, cpuused); } fclose(pid_stat); } } #endif avio_printf(pb, \" \"); } avio_printf(pb, \"
Streamtypekbits/scodecParameters\\n\"); for (i = 0; i < stream->nb_streams; i++) { AVStream *st = stream->streams[i]; AVCodec *codec = avcodec_find_encoder(st->codec->codec_id); const char *type = \"unknown\"; char parameters[64]; parameters[0] = 0; switch(st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: type = \"audio\"; snprintf(parameters, sizeof(parameters), \"%d channel(s), %d Hz\", st->codec->channels, st->codec->sample_rate); break; case AVMEDIA_TYPE_VIDEO: type = \"video\"; snprintf(parameters, sizeof(parameters), \"%dx%d, q=%d-%d, fps=%d\", st->codec->width, st->codec->height, st->codec->qmin, st->codec->qmax, st->codec->time_base.den / st->codec->time_base.num); break; default: abort(); } avio_printf(pb, \"
%d%s%d%s%s\\n\", i, type, st->codec->bit_rate/1000, codec ? codec->name : \"\", parameters); } avio_printf(pb, \"
\\n\"); } stream = stream->next; } /* connection status */ avio_printf(pb, \"
Connection Status
\\n\"); avio_printf(pb, \"Number of connections: %d / %d
\\n\", nb_connections, nb_max_connections); avio_printf(pb, \"Bandwidth in use: %\"PRIu64\"k / %\"PRIu64\"k
\\n\", current_bandwidth, max_bandwidth); avio_printf(pb, \"\\n\"); avio_printf(pb, \"
#FileIPProtoStateTarget bits/secActual bits/secBytes transferred\\n\"); c1 = first_http_ctx; i = 0; while (c1 != NULL) { int bitrate; int j; bitrate = 0; if (c1->stream) { for (j = 0; j < c1->stream->nb_streams; j++) { if (!c1->stream->feed) bitrate += c1->stream->streams[j]->codec->bit_rate; else if (c1->feed_streams[j] >= 0) bitrate += c1->stream->feed->streams[c1->feed_streams[j]]->codec->bit_rate; } } i++; p = inet_ntoa(c1->from_addr.sin_addr); avio_printf(pb, \"
%d%s%s%s%s%s\", i, c1->stream ? c1->stream->filename : \"\", c1->state == HTTPSTATE_RECEIVE_DATA ? \"(input)\" : \"\", p, c1->protocol, http_state[c1->state]); fmt_bytecount(pb, bitrate); avio_printf(pb, \"\"); fmt_bytecount(pb, compute_datarate(&c1->datarate, c1->data_count) * 8); avio_printf(pb, \"\"); fmt_bytecount(pb, c1->data_count); avio_printf(pb, \"\\n\"); c1 = c1->next; } avio_printf(pb, \"
\\n\"); /* date */ ti = time(NULL); p = ctime(&ti); avio_printf(pb, \"
Generated at %s\", p); avio_printf(pb, \"\\n\\n\"); len = avio_close_dyn_buf(pb, &c->pb_buffer); c->buffer_ptr = c->pb_buffer; c->buffer_end = c->pb_buffer + len; }"} {"target": 0, "idx": 25284, "func": "static int ram_save_compressed_page(QEMUFile *f, PageSearchStatus *pss, bool last_stage, uint64_t *bytes_transferred) { int pages = -1; uint64_t bytes_xmit; uint8_t *p; int ret; RAMBlock *block = pss->block; ram_addr_t offset = pss->offset; p = block->host + offset; bytes_xmit = 0; ret = ram_control_save_page(f, block->offset, offset, TARGET_PAGE_SIZE, &bytes_xmit); if (bytes_xmit) { *bytes_transferred += bytes_xmit; pages = 1; } if (block == last_sent_block) { offset |= RAM_SAVE_FLAG_CONTINUE; } if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { if (ret != RAM_SAVE_CONTROL_DELAYED) { if (bytes_xmit > 0) { acct_info.norm_pages++; } else if (bytes_xmit == 0) { acct_info.dup_pages++; } } } else { /* When starting the process of a new block, the first page of * the block should be sent out before other pages in the same * block, and all the pages in last block should have been sent * out, keeping this order is important, because the 'cont' flag * is used to avoid resending the block name. */ if (block != last_sent_block) { flush_compressed_data(f); pages = save_zero_page(f, block, offset, p, bytes_transferred); if (pages == -1) { set_compress_params(&comp_param[0], block, offset); /* Use the qemu thread to compress the data to make sure the * first page is sent out before other pages */ bytes_xmit = do_compress_ram_page(&comp_param[0]); acct_info.norm_pages++; qemu_put_qemu_file(f, comp_param[0].file); *bytes_transferred += bytes_xmit; pages = 1; } } else { pages = save_zero_page(f, block, offset, p, bytes_transferred); if (pages == -1) { pages = compress_page_with_multi_thread(f, block, offset, bytes_transferred); } } } return pages; }"} {"target": 0, "idx": 25310, "func": "void qmp_block_stream(bool has_job_id, const char *job_id, const char *device, bool has_base, const char *base, bool has_backing_file, const char *backing_file, bool has_speed, int64_t speed, bool has_on_error, BlockdevOnError on_error, Error **errp) { BlockDriverState *bs; BlockDriverState *base_bs = NULL; AioContext *aio_context; Error *local_err = NULL; const char *base_name = NULL; if (!has_on_error) { on_error = BLOCKDEV_ON_ERROR_REPORT; } bs = qmp_get_root_bs(device, errp); if (!bs) { return; } aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_STREAM, errp)) { goto out; } if (has_base) { base_bs = bdrv_find_backing_image(bs, base); if (base_bs == NULL) { error_setg(errp, QERR_BASE_NOT_FOUND, base); goto out; } assert(bdrv_get_aio_context(base_bs) == aio_context); base_name = base; } /* if we are streaming the entire chain, the result will have no backing * file, and specifying one is therefore an error */ if (base_bs == NULL && has_backing_file) { error_setg(errp, \"backing file specified, but streaming the \" \"entire chain\"); goto out; } /* backing_file string overrides base bs filename */ base_name = has_backing_file ? backing_file : base_name; stream_start(has_job_id ? job_id : NULL, bs, base_bs, base_name, has_speed ? 
speed : 0, on_error, block_job_cb, bs, &local_err); if (local_err) { error_propagate(errp, local_err); goto out; } trace_qmp_block_stream(bs, bs->job); out: aio_context_release(aio_context); }"} {"target": 0, "idx": 25344, "func": "int ff_dxva2_commit_buffer(AVCodecContext *avctx, AVDXVAContext *ctx, DECODER_BUFFER_DESC *dsc, unsigned type, const void *data, unsigned size, unsigned mb_count) { void *dxva_data; unsigned dxva_size; int result; HRESULT hr; #if CONFIG_D3D11VA if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) hr = ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type, &dxva_size, &dxva_data); #endif #if CONFIG_DXVA2 if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) hr = IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder, type, &dxva_data, &dxva_size); #endif if (FAILED(hr)) { av_log(avctx, AV_LOG_ERROR, \"Failed to get a buffer for %u: 0x%x\\n\", type, hr); return -1; } if (size <= dxva_size) { memcpy(dxva_data, data, size); #if CONFIG_D3D11VA if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) { D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = dsc; memset(dsc11, 0, sizeof(*dsc11)); dsc11->BufferType = type; dsc11->DataSize = size; dsc11->NumMBsInBuffer = mb_count; } #endif #if CONFIG_DXVA2 if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) { DXVA2_DecodeBufferDesc *dsc2 = dsc; memset(dsc2, 0, sizeof(*dsc2)); dsc2->CompressedBufferType = type; dsc2->DataSize = size; dsc2->NumMBsInBuffer = mb_count; } #endif result = 0; } else { av_log(avctx, AV_LOG_ERROR, \"Buffer for type %u was too small\\n\", type); result = -1; } #if CONFIG_D3D11VA if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) hr = ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type); #endif #if CONFIG_DXVA2 if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) hr = IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type); #endif if (FAILED(hr)) { av_log(avctx, AV_LOG_ERROR, \"Failed to release buffer type %u: 0x%x\\n\", type, hr); result = -1; } return result; }"} {"target": 0, "idx": 25353, "func": "static int inet_listen_saddr(InetSocketAddress *saddr, int port_offset, bool update_addr, Error **errp) { struct addrinfo ai,*res,*e; char port[33]; char uaddr[INET6_ADDRSTRLEN+1]; char uport[33]; int slisten, rc, port_min, port_max, p; Error *err = NULL; memset(&ai,0, sizeof(ai)); ai.ai_flags = AI_PASSIVE; if (saddr->has_numeric && saddr->numeric) { ai.ai_flags |= AI_NUMERICHOST | AI_NUMERICSERV; } ai.ai_family = inet_ai_family_from_address(saddr, &err); ai.ai_socktype = SOCK_STREAM; if (err) { error_propagate(errp, err); return -1; } if (saddr->host == NULL) { error_setg(errp, \"host not specified\"); return -1; } if (saddr->port != NULL) { pstrcpy(port, sizeof(port), saddr->port); } else { port[0] = '\\0'; } /* lookup */ if (port_offset) { unsigned long long baseport; if (strlen(port) == 0) { error_setg(errp, \"port not specified\"); return -1; } if (parse_uint_full(port, &baseport, 10) < 0) { error_setg(errp, \"can't convert to a number: %s\", port); return -1; } if (baseport > 65535 || baseport + port_offset > 65535) { error_setg(errp, \"port %s out of range\", port); return -1; } snprintf(port, sizeof(port), \"%d\", (int)baseport + port_offset); } rc = getaddrinfo(strlen(saddr->host) ? saddr->host : NULL, strlen(port) ? 
port : NULL, &ai, &res); if (rc != 0) { error_setg(errp, \"address resolution failed for %s:%s: %s\", saddr->host, port, gai_strerror(rc)); return -1; } /* create socket + bind */ for (e = res; e != NULL; e = e->ai_next) { getnameinfo((struct sockaddr*)e->ai_addr,e->ai_addrlen, uaddr,INET6_ADDRSTRLEN,uport,32, NI_NUMERICHOST | NI_NUMERICSERV); slisten = qemu_socket(e->ai_family, e->ai_socktype, e->ai_protocol); if (slisten < 0) { if (!e->ai_next) { error_setg_errno(errp, errno, \"Failed to create socket\"); } continue; } socket_set_fast_reuse(slisten); port_min = inet_getport(e); port_max = saddr->has_to ? saddr->to + port_offset : port_min; for (p = port_min; p <= port_max; p++) { inet_setport(e, p); if (try_bind(slisten, saddr, e) >= 0) { goto listen; } if (p == port_max) { if (!e->ai_next) { error_setg_errno(errp, errno, \"Failed to bind socket\"); } } } closesocket(slisten); } freeaddrinfo(res); return -1; listen: if (listen(slisten,1) != 0) { error_setg_errno(errp, errno, \"Failed to listen on socket\"); closesocket(slisten); freeaddrinfo(res); return -1; } if (update_addr) { g_free(saddr->host); saddr->host = g_strdup(uaddr); g_free(saddr->port); saddr->port = g_strdup_printf(\"%d\", inet_getport(e) - port_offset); saddr->has_ipv6 = saddr->ipv6 = e->ai_family == PF_INET6; saddr->has_ipv4 = saddr->ipv4 = e->ai_family != PF_INET6; } freeaddrinfo(res); return slisten; }"} {"target": 0, "idx": 25357, "func": "static int mov_read_ares(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVCodecContext *codec = c->fc->streams[c->fc->nb_streams-1]->codec; if (codec->codec_tag == MKTAG('A', 'V', 'i', 'n') && codec->codec_id == AV_CODEC_ID_H264 && atom.size > 11) { avio_skip(pb, 10); /* For AVID AVCI50, force width of 1440 to be able to select the correct SPS and PPS */ if (avio_rb16(pb) == 0xd4d) codec->width = 1440; return 0; } return mov_read_avid(c, pb, atom); }"} {"target": 0, "idx": 25370, "func": "void vmstate_unregister(const VMStateDescription *vmsd, void *opaque) { SaveStateEntry *se, *new_se; TAILQ_FOREACH_SAFE(se, &savevm_handlers, entry, new_se) { if (se->vmsd == vmsd && se->opaque == opaque) { TAILQ_REMOVE(&savevm_handlers, se, entry); qemu_free(se); } } }"} {"target": 1, "idx": 25382, "func": "static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd, struct iovec *iov, unsigned int iov_cnt) { struct virtio_net_ctrl_mac mac_data; size_t s; NetClientState *nc = qemu_get_queue(n->nic); if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) { if (iov_size(iov, iov_cnt) != sizeof(n->mac)) { return VIRTIO_NET_ERR; } s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac)); assert(s == sizeof(n->mac)); qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); rxfilter_notify(nc); return VIRTIO_NET_OK; } if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) { return VIRTIO_NET_ERR; } int in_use = 0; int first_multi = 0; uint8_t uni_overflow = 0; uint8_t multi_overflow = 0; uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN); s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries, sizeof(mac_data.entries)); mac_data.entries = ldl_p(&mac_data.entries); if (s != sizeof(mac_data.entries)) { goto error; } iov_discard_front(&iov, &iov_cnt, s); if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) { goto error; } if (mac_data.entries <= MAC_TABLE_ENTRIES) { s = iov_to_buf(iov, iov_cnt, 0, macs, mac_data.entries * ETH_ALEN); if (s != mac_data.entries * ETH_ALEN) { goto error; } in_use += mac_data.entries; } else { uni_overflow = 1; } iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN); first_multi = in_use; 
s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries, sizeof(mac_data.entries)); mac_data.entries = ldl_p(&mac_data.entries); if (s != sizeof(mac_data.entries)) { goto error; } iov_discard_front(&iov, &iov_cnt, s); if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) { goto error; } if (in_use + mac_data.entries <= MAC_TABLE_ENTRIES) { s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN], mac_data.entries * ETH_ALEN); if (s != mac_data.entries * ETH_ALEN) { goto error; } in_use += mac_data.entries; } else { multi_overflow = 1; } n->mac_table.in_use = in_use; n->mac_table.first_multi = first_multi; n->mac_table.uni_overflow = uni_overflow; n->mac_table.multi_overflow = multi_overflow; memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN); g_free(macs); rxfilter_notify(nc); return VIRTIO_NET_OK; error: g_free(macs); return VIRTIO_NET_ERR; }"} {"target": 1, "idx": 25401, "func": "static av_cold int ffmmal_init_decoder(AVCodecContext *avctx) { MMALDecodeContext *ctx = avctx->priv_data; MMAL_STATUS_T status; MMAL_ES_FORMAT_T *format_in; MMAL_COMPONENT_T *decoder; char tmp[32]; int ret = 0; bcm_host_init(); if (mmal_vc_init()) { av_log(avctx, AV_LOG_ERROR, \"Cannot initialize MMAL VC driver!\\n\"); return AVERROR(ENOSYS); if ((ret = ff_get_format(avctx, avctx->codec->pix_fmts)) < 0) return ret; avctx->pix_fmt = ret; if ((status = mmal_component_create(MMAL_COMPONENT_DEFAULT_VIDEO_DECODER, &ctx->decoder))) goto fail; decoder = ctx->decoder; format_in = decoder->input[0]->format; format_in->type = MMAL_ES_TYPE_VIDEO; switch (avctx->codec_id) { case AV_CODEC_ID_MPEG2VIDEO: format_in->encoding = MMAL_ENCODING_MP2V; break; case AV_CODEC_ID_MPEG4: format_in->encoding = MMAL_ENCODING_MP4V; break; case AV_CODEC_ID_VC1: format_in->encoding = MMAL_ENCODING_WVC1; break; case AV_CODEC_ID_H264: default: format_in->encoding = MMAL_ENCODING_H264; break; format_in->es->video.width = FFALIGN(avctx->width, 32); format_in->es->video.height = FFALIGN(avctx->height, 16); format_in->es->video.crop.width = avctx->width; format_in->es->video.crop.height = avctx->height; format_in->es->video.frame_rate.num = 24000; format_in->es->video.frame_rate.den = 1001; format_in->es->video.par.num = avctx->sample_aspect_ratio.num; format_in->es->video.par.den = avctx->sample_aspect_ratio.den; format_in->flags = MMAL_ES_FORMAT_FLAG_FRAMED; av_get_codec_tag_string(tmp, sizeof(tmp), format_in->encoding); av_log(avctx, AV_LOG_DEBUG, \"Using MMAL %s encoding.\\n\", tmp); if ((status = mmal_port_format_commit(decoder->input[0]))) goto fail; decoder->input[0]->buffer_num = FFMAX(decoder->input[0]->buffer_num_min, 20); decoder->input[0]->buffer_size = FFMAX(decoder->input[0]->buffer_size_min, 512 * 1024); ctx->pool_in = mmal_pool_create(decoder->input[0]->buffer_num, 0); if (!ctx->pool_in) { ret = AVERROR(ENOMEM); goto fail; if ((ret = ffmal_update_format(avctx)) < 0) goto fail; ctx->queue_decoded_frames = mmal_queue_create(); if (!ctx->queue_decoded_frames) goto fail; decoder->input[0]->userdata = (void*)avctx; decoder->output[0]->userdata = (void*)avctx; decoder->control->userdata = (void*)avctx; if ((status = mmal_port_enable(decoder->control, control_port_cb))) goto fail; if ((status = mmal_port_enable(decoder->input[0], input_callback))) goto fail; if ((status = mmal_port_enable(decoder->output[0], output_callback))) goto fail; if ((status = mmal_component_enable(decoder))) goto fail; return 0; fail: ffmmal_close_decoder(avctx); return ret < 0 ? 
ret : AVERROR_UNKNOWN;"} {"target": 1, "idx": 25404, "func": "static int ogg_get_length(AVFormatContext *s) { struct ogg *ogg = s->priv_data; int i; int64_t size, end; int streams_left=0; if(!s->pb->seekable) return 0; // already set if (s->duration != AV_NOPTS_VALUE) return 0; size = avio_size(s->pb); if(size < 0) return 0; end = size > MAX_PAGE_SIZE? size - MAX_PAGE_SIZE: 0; ogg_save (s); avio_seek (s->pb, end, SEEK_SET); while (!ogg_read_page (s, &i)){ if (ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0 && ogg->streams[i].codec) { s->streams[i]->duration = ogg_gptopts (s, i, ogg->streams[i].granule, NULL); if (s->streams[i]->start_time != AV_NOPTS_VALUE){ s->streams[i]->duration -= s->streams[i]->start_time; streams_left-= (ogg->streams[i].got_start==-1); ogg->streams[i].got_start= 1; }else if(!ogg->streams[i].got_start){ ogg->streams[i].got_start= -1; streams_left++; } } } ogg_restore (s, 0); ogg_save (s); avio_seek (s->pb, s->data_offset, SEEK_SET); ogg_reset(s); while (!ogg_packet(s, &i, NULL, NULL, NULL)) { int64_t pts = ogg_calc_pts(s, i, NULL); if (pts != AV_NOPTS_VALUE && s->streams[i]->start_time == AV_NOPTS_VALUE && !ogg->streams[i].got_start){ s->streams[i]->duration -= pts; ogg->streams[i].got_start= 1; streams_left--; }else if(s->streams[i]->start_time != AV_NOPTS_VALUE && !ogg->streams[i].got_start){ ogg->streams[i].got_start= 1; streams_left--; } } if(streams_left<=0) break; } ogg_restore (s, 0); return 0; }"} {"target": 1, "idx": 25413, "func": "static int mimic_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MimicContext *ctx = avctx->priv_data; GetByteContext gb; int is_pframe; int width, height; int quality, num_coeffs; int swap_buf_size = buf_size - MIMIC_HEADER_SIZE; if (buf_size <= MIMIC_HEADER_SIZE) { av_log(avctx, AV_LOG_ERROR, \"insufficient data\\n\"); return -1; } bytestream2_init(&gb, buf, MIMIC_HEADER_SIZE); bytestream2_skip(&gb, 2); /* some constant (always 256) */ quality = bytestream2_get_le16u(&gb); width = bytestream2_get_le16u(&gb); height = bytestream2_get_le16u(&gb); bytestream2_skip(&gb, 4); /* some constant */ is_pframe = bytestream2_get_le32u(&gb); num_coeffs = bytestream2_get_byteu(&gb); bytestream2_skip(&gb, 3); /* some constant */ if(!ctx->avctx) { int i; if(!(width == 160 && height == 120) && !(width == 320 && height == 240)) { av_log(avctx, AV_LOG_ERROR, \"invalid width/height!\\n\"); return -1; } ctx->avctx = avctx; avctx->width = width; avctx->height = height; avctx->pix_fmt = PIX_FMT_YUV420P; for(i = 0; i < 3; i++) { ctx->num_vblocks[i] = -((-height) >> (3 + !!i)); ctx->num_hblocks[i] = width >> (3 + !!i) ; } } else if(width != ctx->avctx->width || height != ctx->avctx->height) { av_log(avctx, AV_LOG_ERROR, \"resolution changing is not supported\\n\"); return -1; } if(is_pframe && !ctx->buf_ptrs[ctx->prev_index].data[0]) { av_log(avctx, AV_LOG_ERROR, \"decoding must start with keyframe\\n\"); return -1; } ctx->buf_ptrs[ctx->cur_index].reference = 1; ctx->buf_ptrs[ctx->cur_index].pict_type = is_pframe ? 
AV_PICTURE_TYPE_P:AV_PICTURE_TYPE_I; if(ff_thread_get_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index])) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return -1; } ctx->next_prev_index = ctx->cur_index; ctx->next_cur_index = (ctx->cur_index - 1) & 15; prepare_avpic(ctx, &ctx->flipped_ptrs[ctx->cur_index], (AVPicture*) &ctx->buf_ptrs[ctx->cur_index]); ff_thread_finish_setup(avctx); av_fast_malloc(&ctx->swap_buf, &ctx->swap_buf_size, swap_buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if(!ctx->swap_buf) return AVERROR(ENOMEM); ctx->dsp.bswap_buf(ctx->swap_buf, (const uint32_t*) (buf + MIMIC_HEADER_SIZE), swap_buf_size>>2); init_get_bits(&ctx->gb, ctx->swap_buf, swap_buf_size << 3); if(!decode(ctx, quality, num_coeffs, !is_pframe)) { if (avctx->active_thread_type&FF_THREAD_FRAME) ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], INT_MAX, 0); else { ff_thread_release_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index]); return -1; } } *(AVFrame*)data = ctx->buf_ptrs[ctx->cur_index]; *data_size = sizeof(AVFrame); ctx->prev_index = ctx->next_prev_index; ctx->cur_index = ctx->next_cur_index; /* Only release frames that aren't used for backreferences anymore */ if(ctx->buf_ptrs[ctx->cur_index].data[0]) ff_thread_release_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index]); return buf_size; }"} {"target": 0, "idx": 25448, "func": "static int ipvideo_decode_block_opcode_0x8(IpvideoContext *s) { int x, y; unsigned char P[2]; unsigned int flags = 0; /* 2-color encoding for each 4x4 quadrant, or 2-color encoding on * either top and bottom or left and right halves */ CHECK_STREAM_PTR(2); P[0] = *s->stream_ptr++; P[1] = *s->stream_ptr++; if (P[0] <= P[1]) { CHECK_STREAM_PTR(14); s->stream_ptr -= 2; for (y = 0; y < 16; y++) { // new values for each 4x4 block if (!(y & 3)) { P[0] = *s->stream_ptr++; P[1] = *s->stream_ptr++; flags = bytestream_get_le16(&s->stream_ptr); } for (x = 0; x < 4; x++, flags >>= 1) *s->pixel_ptr++ = P[flags & 1]; s->pixel_ptr += s->stride - 4; // switch to right half if (y == 7) s->pixel_ptr -= 8 * s->stride - 4; } } else { /* need 10 more bytes */ CHECK_STREAM_PTR(10); if (s->stream_ptr[4] <= s->stream_ptr[5]) { flags = bytestream_get_le32(&s->stream_ptr); /* vertical split; left & right halves are 2-color encoded */ for (y = 0; y < 16; y++) { for (x = 0; x < 4; x++, flags >>= 1) *s->pixel_ptr++ = P[flags & 1]; s->pixel_ptr += s->stride - 4; // switch to right half if (y == 7) { s->pixel_ptr -= 8 * s->stride - 4; P[0] = *s->stream_ptr++; P[1] = *s->stream_ptr++; flags = bytestream_get_le32(&s->stream_ptr); } } } else { /* horizontal split; top & bottom halves are 2-color encoded */ for (y = 0; y < 8; y++) { if (y == 4) { P[0] = *s->stream_ptr++; P[1] = *s->stream_ptr++; } flags = *s->stream_ptr++ | 0x100; for (; flags != 1; flags >>= 1) *s->pixel_ptr++ = P[flags & 1]; s->pixel_ptr += s->line_inc; } } } /* report success */ return 0; }"} {"target": 1, "idx": 25459, "func": "static int gdb_set_avr_reg(CPUState *env, uint8_t *mem_buf, int n) { if (n < 32) { #ifdef WORDS_BIGENDIAN env->avr[n].u64[0] = ldq_p(mem_buf); env->avr[n].u64[1] = ldq_p(mem_buf+8); #else env->avr[n].u64[1] = ldq_p(mem_buf); env->avr[n].u64[0] = ldq_p(mem_buf+8); #endif return 16; } if (n == 33) { env->vscr = ldl_p(mem_buf); return 4; } if (n == 34) { env->spr[SPR_VRSAVE] = (target_ulong)ldl_p(mem_buf); return 4; } return 0; }"} {"target": 1, "idx": 25472, "func": "static void vga_draw_text(VGACommonState *s, int full_update) { DisplaySurface *surface = qemu_console_surface(s->con); int cx, cy, cheight, cw, ch, 
cattr, height, width, ch_attr; int cx_min, cx_max, linesize, x_incr, line, line1; uint32_t offset, fgcol, bgcol, v, cursor_offset; uint8_t *d1, *d, *src, *dest, *cursor_ptr; const uint8_t *font_ptr, *font_base[2]; int dup9, line_offset; uint32_t *palette; uint32_t *ch_attr_ptr; int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); /* compute font data address (in plane 2) */ v = s->sr[VGA_SEQ_CHARACTER_MAP]; offset = (((v >> 4) & 1) | ((v << 1) & 6)) * 8192 * 4 + 2; if (offset != s->font_offsets[0]) { s->font_offsets[0] = offset; full_update = 1; } font_base[0] = s->vram_ptr + offset; offset = (((v >> 5) & 1) | ((v >> 1) & 6)) * 8192 * 4 + 2; font_base[1] = s->vram_ptr + offset; if (offset != s->font_offsets[1]) { s->font_offsets[1] = offset; full_update = 1; } if (s->plane_updated & (1 << 2) || s->has_chain4_alias) { /* if the plane 2 was modified since the last display, it indicates the font may have been modified */ s->plane_updated = 0; full_update = 1; } full_update |= update_basic_params(s); line_offset = s->line_offset; vga_get_text_resolution(s, &width, &height, &cw, &cheight); if ((height * width) <= 1) { /* better than nothing: exit if transient size is too small */ return; } if ((height * width) > CH_ATTR_SIZE) { /* better than nothing: exit if transient size is too big */ return; } if (width != s->last_width || height != s->last_height || cw != s->last_cw || cheight != s->last_ch || s->last_depth) { s->last_scr_width = width * cw; s->last_scr_height = height * cheight; qemu_console_resize(s->con, s->last_scr_width, s->last_scr_height); surface = qemu_console_surface(s->con); dpy_text_resize(s->con, width, height); s->last_depth = 0; s->last_width = width; s->last_height = height; s->last_ch = cheight; s->last_cw = cw; full_update = 1; } full_update |= update_palette16(s); palette = s->last_palette; x_incr = cw * surface_bytes_per_pixel(surface); if (full_update) { s->full_update_text = 1; } if (s->full_update_gfx) { s->full_update_gfx = 0; full_update |= 1; } cursor_offset = ((s->cr[VGA_CRTC_CURSOR_HI] << 8) | s->cr[VGA_CRTC_CURSOR_LO]) - s->start_addr; if (cursor_offset != s->cursor_offset || s->cr[VGA_CRTC_CURSOR_START] != s->cursor_start || s->cr[VGA_CRTC_CURSOR_END] != s->cursor_end) { /* if the cursor position changed, we update the old and new chars */ if (s->cursor_offset < CH_ATTR_SIZE) s->last_ch_attr[s->cursor_offset] = -1; if (cursor_offset < CH_ATTR_SIZE) s->last_ch_attr[cursor_offset] = -1; s->cursor_offset = cursor_offset; s->cursor_start = s->cr[VGA_CRTC_CURSOR_START]; s->cursor_end = s->cr[VGA_CRTC_CURSOR_END]; } cursor_ptr = s->vram_ptr + (s->start_addr + cursor_offset) * 4; if (now >= s->cursor_blink_time) { s->cursor_blink_time = now + VGA_TEXT_CURSOR_PERIOD_MS / 2; s->cursor_visible_phase = !s->cursor_visible_phase; } dest = surface_data(surface); linesize = surface_stride(surface); ch_attr_ptr = s->last_ch_attr; line = 0; offset = s->start_addr * 4; for(cy = 0; cy < height; cy++) { d1 = dest; src = s->vram_ptr + offset; cx_min = width; cx_max = -1; for(cx = 0; cx < width; cx++) { ch_attr = *(uint16_t *)src; if (full_update || ch_attr != *ch_attr_ptr || src == cursor_ptr) { if (cx < cx_min) cx_min = cx; if (cx > cx_max) cx_max = cx; *ch_attr_ptr = ch_attr; #ifdef HOST_WORDS_BIGENDIAN ch = ch_attr >> 8; cattr = ch_attr & 0xff; #else ch = ch_attr & 0xff; cattr = ch_attr >> 8; #endif font_ptr = font_base[(cattr >> 3) & 1]; font_ptr += 32 * 4 * ch; bgcol = palette[cattr >> 4]; fgcol = palette[cattr & 0x0f]; if (cw == 16) { vga_draw_glyph16(d1, linesize, font_ptr, 
cheight, fgcol, bgcol); } else if (cw != 9) { vga_draw_glyph8(d1, linesize, font_ptr, cheight, fgcol, bgcol); } else { dup9 = 0; if (ch >= 0xb0 && ch <= 0xdf && (s->ar[VGA_ATC_MODE] & 0x04)) { dup9 = 1; } vga_draw_glyph9(d1, linesize, font_ptr, cheight, fgcol, bgcol, dup9); } if (src == cursor_ptr && !(s->cr[VGA_CRTC_CURSOR_START] & 0x20) && s->cursor_visible_phase) { int line_start, line_last, h; /* draw the cursor */ line_start = s->cr[VGA_CRTC_CURSOR_START] & 0x1f; line_last = s->cr[VGA_CRTC_CURSOR_END] & 0x1f; /* XXX: check that */ if (line_last > cheight - 1) line_last = cheight - 1; if (line_last >= line_start && line_start < cheight) { h = line_last - line_start + 1; d = d1 + linesize * line_start; if (cw == 16) { vga_draw_glyph16(d, linesize, cursor_glyph, h, fgcol, bgcol); } else if (cw != 9) { vga_draw_glyph8(d, linesize, cursor_glyph, h, fgcol, bgcol); } else { vga_draw_glyph9(d, linesize, cursor_glyph, h, fgcol, bgcol, 1); } } } } d1 += x_incr; src += 4; ch_attr_ptr++; } if (cx_max != -1) { dpy_gfx_update(s->con, cx_min * cw, cy * cheight, (cx_max - cx_min + 1) * cw, cheight); } dest += linesize * cheight; line1 = line + cheight; offset += line_offset; if (line < s->line_compare && line1 >= s->line_compare) { offset = 0; } line = line1; } }"} {"target": 1, "idx": 25492, "func": "int64_t qemu_ftell(QEMUFile *f) { qemu_fflush(f); return f->pos; }"} {"target": 1, "idx": 25501, "func": "static int libquvi_read_packet(AVFormatContext *s, AVPacket *pkt) { LibQuviContext *qc = s->priv_data; return av_read_frame(qc->fmtctx, pkt); }"} {"target": 1, "idx": 25505, "func": "static PCIDevice *qemu_pci_hot_add_storage(Monitor *mon, const char *devaddr, const char *opts) { PCIDevice *dev; DriveInfo *dinfo = NULL; int type = -1; char buf[128]; PCIBus *bus; int devfn; if (get_param_value(buf, sizeof(buf), \"if\", opts)) { if (!strcmp(buf, \"scsi\")) type = IF_SCSI; else if (!strcmp(buf, \"virtio\")) { type = IF_VIRTIO; } else { monitor_printf(mon, \"type %s not a hotpluggable PCI device.\\n\", buf); return NULL; } } else { monitor_printf(mon, \"no if= specified\\n\"); return NULL; } if (get_param_value(buf, sizeof(buf), \"file\", opts)) { dinfo = add_init_drive(opts); if (!dinfo) return NULL; if (dinfo->devaddr) { monitor_printf(mon, \"Parameter addr not supported\\n\"); return NULL; } } else { dinfo = NULL; } bus = pci_get_bus_devfn(&devfn, devaddr); if (!bus) { monitor_printf(mon, \"Invalid PCI device address %s\\n\", devaddr); return NULL; } switch (type) { case IF_SCSI: if (!dinfo) { monitor_printf(mon, \"scsi requires a backing file/device.\\n\"); return NULL; } dev = pci_create(bus, devfn, \"lsi53c895a\"); if (qdev_init(&dev->qdev) < 0) dev = NULL; if (dev) { BusState *scsibus = QLIST_FIRST(&dev->qdev.child_bus); scsi_bus_legacy_add_drive(DO_UPCAST(SCSIBus, qbus, scsibus), dinfo, dinfo->unit); } break; case IF_VIRTIO: if (!dinfo) { monitor_printf(mon, \"virtio requires a backing file/device.\\n\"); return NULL; } dev = pci_create(bus, devfn, \"virtio-blk-pci\"); qdev_prop_set_drive(&dev->qdev, \"drive\", dinfo); if (qdev_init(&dev->qdev) < 0) dev = NULL; break; default: dev = NULL; } return dev; }"} {"target": 0, "idx": 25510, "func": "uint16_t eeprom93xx_read(eeprom_t *eeprom) { /* Return status of pin DO (0 or 1). 
*/ logout(\"CS=%u DO=%u\\n\", eeprom->eecs, eeprom->eedo); return (eeprom->eedo); }"} {"target": 0, "idx": 25515, "func": "static void sysctl_write(void *opaque, target_phys_addr_t addr, uint32_t value) { MilkymistSysctlState *s = opaque; trace_milkymist_sysctl_memory_write(addr, value); addr >>= 2; switch (addr) { case R_GPIO_OUT: case R_GPIO_INTEN: case R_TIMER0_COUNTER: if (value > s->regs[R_TIMER0_COUNTER]) { value = s->regs[R_TIMER0_COUNTER]; error_report(\"milkymist_sysctl: timer0: trying to write a \" \"value greater than the limit. Clipping.\"); } /* milkymist timer counts up */ value = s->regs[R_TIMER0_COUNTER] - value; ptimer_set_count(s->ptimer0, value); break; case R_TIMER1_COUNTER: if (value > s->regs[R_TIMER1_COUNTER]) { value = s->regs[R_TIMER1_COUNTER]; error_report(\"milkymist_sysctl: timer1: trying to write a \" \"value greater than the limit. Clipping.\"); } /* milkymist timer counts up */ value = s->regs[R_TIMER1_COUNTER] - value; ptimer_set_count(s->ptimer1, value); break; case R_TIMER0_COMPARE: ptimer_set_limit(s->ptimer0, value, 0); s->regs[addr] = value; break; case R_TIMER1_COMPARE: ptimer_set_limit(s->ptimer1, value, 0); s->regs[addr] = value; break; case R_TIMER0_CONTROL: s->regs[addr] = value; if (s->regs[R_TIMER0_CONTROL] & CTRL_ENABLE) { trace_milkymist_sysctl_start_timer1(); ptimer_run(s->ptimer0, 0); } else { trace_milkymist_sysctl_stop_timer1(); ptimer_stop(s->ptimer0); } break; case R_TIMER1_CONTROL: s->regs[addr] = value; if (s->regs[R_TIMER1_CONTROL] & CTRL_ENABLE) { trace_milkymist_sysctl_start_timer1(); ptimer_run(s->ptimer1, 0); } else { trace_milkymist_sysctl_stop_timer1(); ptimer_stop(s->ptimer1); } break; case R_ICAP: sysctl_icap_write(s, value); break; case R_SYSTEM_ID: qemu_system_reset_request(); break; case R_GPIO_IN: case R_CAPABILITIES: error_report(\"milkymist_sysctl: write to read-only register 0x\" TARGET_FMT_plx, addr << 2); break; default: error_report(\"milkymist_sysctl: write access to unkown register 0x\" TARGET_FMT_plx, addr << 2); break; } }"} {"target": 0, "idx": 25526, "func": "AVFrame *avcodec_alloc_frame(void) { AVFrame *frame = av_mallocz(sizeof(AVFrame)); if (frame == NULL) return NULL; FF_DISABLE_DEPRECATION_WARNINGS avcodec_get_frame_defaults(frame); FF_ENABLE_DEPRECATION_WARNINGS return frame; }"} {"target": 1, "idx": 25530, "func": "static int add_hfyu_left_prediction_int16_c(uint16_t *dst, const uint16_t *src, unsigned mask, int w, int acc){ int i; for(i=0; icpu_env = env; s->int_pending[0] = 0; s->int_pending[1] = 0; s->int_enabled[0] = 0; s->int_enabled[1] = 0; s->is_fiq[0] = 0; s->is_fiq[1] = 0; qi = qemu_allocate_irqs(pxa2xx_pic_set_irq, s, PXA2XX_PIC_SRCS); /* Enable IC memory-mapped registers access. */ iomemtype = cpu_register_io_memory(pxa2xx_pic_readfn, pxa2xx_pic_writefn, s, DEVICE_NATIVE_ENDIAN); cpu_register_physical_memory(base, 0x00100000, iomemtype); /* Enable IC coprocessor access. 
*/ cpu_arm_set_cp_io(env, 6, pxa2xx_pic_cp_read, pxa2xx_pic_cp_write, s); register_savevm(NULL, \"pxa2xx_pic\", 0, 0, pxa2xx_pic_save, pxa2xx_pic_load, s); return qi; }"} {"target": 0, "idx": 25550, "func": "int net_init_vde(QemuOpts *opts, const NetClientOptions *new_opts, const char *name, VLANState *vlan) { const char *sock; const char *group; int port, mode; sock = qemu_opt_get(opts, \"sock\"); group = qemu_opt_get(opts, \"group\"); port = qemu_opt_get_number(opts, \"port\", 0); mode = qemu_opt_get_number(opts, \"mode\", 0700); if (net_vde_init(vlan, \"vde\", name, sock, port, group, mode) == -1) { return -1; } return 0; }"} {"target": 1, "idx": 25557, "func": "static const ID3v2EMFunc *get_extra_meta_func(const char *tag, int isv34) { int i = 0; while (ff_id3v2_extra_meta_funcs[i].tag3) { if (!memcmp(tag, (isv34 ? ff_id3v2_extra_meta_funcs[i].tag4 : ff_id3v2_extra_meta_funcs[i].tag3), (isv34 ? 4 : 3))) return &ff_id3v2_extra_meta_funcs[i]; i++; } return NULL; }"} {"target": 1, "idx": 25566, "func": "int av_buffersrc_add_ref(AVFilterContext *buffer_filter, AVFilterBufferRef *picref, int flags) { BufferSourceContext *c = buffer_filter->priv; AVFilterBufferRef *buf; int ret; if (!picref) { c->eof = 1; return 0; } else if (c->eof) return AVERROR(EINVAL); if (!av_fifo_space(c->fifo) && (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) + sizeof(buf))) < 0) return ret; if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) { ret = check_format_change(buffer_filter, picref); if (ret < 0) return ret; } if (flags & AV_BUFFERSRC_FLAG_NO_COPY) buf = picref; else buf = copy_buffer_ref(buffer_filter, picref); if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) { if (buf != picref) avfilter_unref_buffer(buf); return ret; } c->nb_failed_requests = 0; return 0; }"} {"target": 0, "idx": 25571, "func": "static void qstring_destroy_obj(QObject *obj) { QString *qs; assert(obj != NULL); qs = qobject_to_qstring(obj); g_free(qs->string); g_free(qs); }"} {"target": 0, "idx": 25574, "func": "bool aio_pending(AioContext *ctx) { AioHandler *node; bool result = false; /* * We have to walk very carefully in case aio_set_fd_handler is * called while we're walking. 
*/ qemu_lockcnt_inc(&ctx->list_lock); QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { if (node->pfd.revents && node->io_notify) { result = true; break; } if ((node->pfd.revents & G_IO_IN) && node->io_read) { result = true; break; } if ((node->pfd.revents & G_IO_OUT) && node->io_write) { result = true; break; } } qemu_lockcnt_dec(&ctx->list_lock); return result; }"} {"target": 0, "idx": 25578, "func": "static size_t get_request_size(VirtQueue *vq) { unsigned int in, out; virtqueue_get_avail_bytes(vq, &in, &out); return in; }"} {"target": 0, "idx": 25580, "func": "static void do_quit(int argc, const char **argv) { exit(0); }"} {"target": 0, "idx": 25583, "func": "static void pc_init1(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, int pci_enabled, int kvmclock_enabled) { int i; ram_addr_t below_4g_mem_size, above_4g_mem_size; PCIBus *pci_bus; PCII440FXState *i440fx_state; int piix3_devfn = -1; qemu_irq *cpu_irq; qemu_irq *isa_irq; qemu_irq *i8259; qemu_irq *cmos_s3; qemu_irq *smi_irq; IsaIrqState *isa_irq_state; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; FDCtrl *floppy_controller; BusState *idebus[MAX_IDE_BUS]; ISADevice *rtc_state; pc_cpus_init(cpu_model); if (kvmclock_enabled) { kvmclock_create(); } /* allocate ram and load rom/bios */ pc_memory_init(ram_size, kernel_filename, kernel_cmdline, initrd_filename, &below_4g_mem_size, &above_4g_mem_size); cpu_irq = pc_allocate_cpu_irq(); i8259 = i8259_init(cpu_irq[0]); isa_irq_state = qemu_mallocz(sizeof(*isa_irq_state)); isa_irq_state->i8259 = i8259; if (pci_enabled) { ioapic_init(isa_irq_state); } isa_irq = qemu_allocate_irqs(isa_irq_handler, isa_irq_state, 24); if (pci_enabled) { pci_bus = i440fx_init(&i440fx_state, &piix3_devfn, isa_irq, ram_size); } else { pci_bus = NULL; i440fx_state = NULL; isa_bus_new(NULL); } isa_bus_irqs(isa_irq); pc_register_ferr_irq(isa_reserve_irq(13)); pc_vga_init(pci_enabled? pci_bus: NULL); /* init basic PC hardware */ pc_basic_device_init(isa_irq, &floppy_controller, &rtc_state); for(i = 0; i < nb_nics; i++) { NICInfo *nd = &nd_table[i]; if (!pci_enabled || (nd->model && strcmp(nd->model, \"ne2k_isa\") == 0)) pc_init_ne2k_isa(nd); else pci_nic_init_nofail(nd, \"e1000\", NULL); } if (drive_get_max_bus(IF_IDE) >= MAX_IDE_BUS) { fprintf(stderr, \"qemu: too many IDE bus\\n\"); exit(1); } for(i = 0; i < MAX_IDE_BUS * MAX_IDE_DEVS; i++) { hd[i] = drive_get(IF_IDE, i / MAX_IDE_DEVS, i % MAX_IDE_DEVS); } if (pci_enabled) { PCIDevice *dev; dev = pci_piix3_ide_init(pci_bus, hd, piix3_devfn + 1); idebus[0] = qdev_get_child_bus(&dev->qdev, \"ide.0\"); idebus[1] = qdev_get_child_bus(&dev->qdev, \"ide.1\"); } else { for(i = 0; i < MAX_IDE_BUS; i++) { ISADevice *dev; dev = isa_ide_init(ide_iobase[i], ide_iobase2[i], ide_irq[i], hd[MAX_IDE_DEVS * i], hd[MAX_IDE_DEVS * i + 1]); idebus[i] = qdev_get_child_bus(&dev->qdev, \"ide.0\"); } } audio_init(isa_irq, pci_enabled ? pci_bus : NULL); pc_cmos_init(below_4g_mem_size, above_4g_mem_size, boot_device, idebus[0], idebus[1], floppy_controller, rtc_state); if (pci_enabled && usb_enabled) { usb_uhci_piix3_init(pci_bus, piix3_devfn + 2); } if (pci_enabled && acpi_enabled) { uint8_t *eeprom_buf = qemu_mallocz(8 * 256); /* XXX: make this persistent */ i2c_bus *smbus; cmos_s3 = qemu_allocate_irqs(pc_cmos_set_s3_resume, rtc_state, 1); smi_irq = qemu_allocate_irqs(pc_acpi_smi_interrupt, first_cpu, 1); /* TODO: Populate SPD eeprom data. 
*/ smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100, isa_reserve_irq(9), *cmos_s3, *smi_irq, kvm_enabled()); for (i = 0; i < 8; i++) { DeviceState *eeprom; eeprom = qdev_create((BusState *)smbus, \"smbus-eeprom\"); qdev_prop_set_uint8(eeprom, \"address\", 0x50 + i); qdev_prop_set_ptr(eeprom, \"data\", eeprom_buf + (i * 256)); qdev_init_nofail(eeprom); } } if (i440fx_state) { i440fx_init_memory_mappings(i440fx_state); } if (pci_enabled) { pc_pci_device_init(pci_bus); } }"} {"target": 0, "idx": 25593, "func": "static av_cold int roq_decode_init(AVCodecContext *avctx) { RoqContext *s = avctx->priv_data; s->avctx = avctx; if (avctx->width % 16 || avctx->height % 16) { av_log(avctx, AV_LOG_ERROR, \"Dimensions must be a multiple of 16\\n\"); return AVERROR_PATCHWELCOME; } s->width = avctx->width; s->height = avctx->height; s->last_frame = av_frame_alloc(); s->current_frame = av_frame_alloc(); if (!s->current_frame || !s->last_frame) { av_frame_free(&s->current_frame); av_frame_free(&s->last_frame); return AVERROR(ENOMEM); } avctx->pix_fmt = AV_PIX_FMT_YUV444P; return 0; }"} {"target": 1, "idx": 25602, "func": "static int usb_bt_handle_data(USBDevice *dev, USBPacket *p) { struct USBBtState *s = (struct USBBtState *) dev->opaque; int ret = 0; if (!s->config) goto fail; switch (p->pid) { case USB_TOKEN_IN: switch (p->devep & 0xf) { case USB_EVT_EP: ret = usb_bt_fifo_dequeue(&s->evt, p); break; case USB_ACL_EP: ret = usb_bt_fifo_dequeue(&s->acl, p); break; case USB_SCO_EP: ret = usb_bt_fifo_dequeue(&s->sco, p); break; default: goto fail; } break; case USB_TOKEN_OUT: switch (p->devep & 0xf) { case USB_ACL_EP: usb_bt_fifo_out_enqueue(s, &s->outacl, s->hci->acl_send, usb_bt_hci_acl_complete, p->data, p->len); break; case USB_SCO_EP: usb_bt_fifo_out_enqueue(s, &s->outsco, s->hci->sco_send, usb_bt_hci_sco_complete, p->data, p->len); break; default: goto fail; } break; default: fail: ret = USB_RET_STALL; break; } return ret; }"} {"target": 0, "idx": 25619, "func": "static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu, sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { sPAPRPHBState *sphb; sPAPRPHBClass *spc; uint64_t buid; int state, ret; if ((nargs != 3) || (nret != 4 && nret != 5)) { goto param_error_exit; } buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2); sphb = find_phb(spapr, buid); if (!sphb) { goto param_error_exit; } spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb); if (!spc->eeh_get_state) { goto param_error_exit; } ret = spc->eeh_get_state(sphb, &state); rtas_st(rets, 0, ret); if (ret != RTAS_OUT_SUCCESS) { return; } rtas_st(rets, 1, state); rtas_st(rets, 2, RTAS_EEH_SUPPORT); rtas_st(rets, 3, RTAS_EEH_PE_UNAVAIL_INFO); if (nret >= 5) { rtas_st(rets, 4, RTAS_EEH_PE_RECOVER_INFO); } return; param_error_exit: rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); }"} {"target": 0, "idx": 25636, "func": "void qmp_migrate_set_cache_size(int64_t value, Error **errp) { MigrationState *s = migrate_get_current(); /* Check for truncation */ if (value != (size_t)value) { error_set(errp, QERR_INVALID_PARAMETER_VALUE, \"cache size\", \"exceeding address space\"); return; } s->xbzrle_cache_size = xbzrle_cache_resize(value); }"} {"target": 1, "idx": 25669, "func": "int path_is_absolute(const char *path) { const char *p; #ifdef _WIN32 /* specific case for names like: \"\\\\.\\d:\" */ if (*path == '/' || *path == '\\\\') return 1; #endif p = strchr(path, ':'); if (p) p++; else p = path; #ifdef _WIN32 return (*p == '/' || *p == '\\\\'); #else 
return (*p == '/'); #endif }"} {"target": 1, "idx": 25670, "func": "opts_end_struct(Visitor *v, Error **errp) { OptsVisitor *ov = to_ov(v); GHashTableIter iter; GQueue *any; if (--ov->depth > 0) { return; } /* we should have processed all (distinct) QemuOpt instances */ g_hash_table_iter_init(&iter, ov->unprocessed_opts); if (g_hash_table_iter_next(&iter, NULL, (void **)&any)) { const QemuOpt *first; first = g_queue_peek_head(any); error_setg(errp, QERR_INVALID_PARAMETER, first->name); } g_hash_table_destroy(ov->unprocessed_opts); ov->unprocessed_opts = NULL; if (ov->fake_id_opt) { g_free(ov->fake_id_opt->name); g_free(ov->fake_id_opt->str); g_free(ov->fake_id_opt); } ov->fake_id_opt = NULL; }"} {"target": 1, "idx": 25672, "func": "static av_cold int shorten_decode_close(AVCodecContext *avctx) { ShortenContext *s = avctx->priv_data; int i; for (i = 0; i < s->channels; i++) { s->decoded[i] -= s->nwrap; av_freep(&s->decoded[i]); av_freep(&s->offset[i]); } av_freep(&s->bitstream); av_freep(&s->coeffs); return 0; }"} {"target": 0, "idx": 25693, "func": "void ff_avg_h264_qpel4_mc13_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_hv_qrt_and_aver_dst_4x4_msa(src + stride - 2, src - (stride * 2), stride, dst, stride); }"} {"target": 0, "idx": 25704, "func": "static uint32_t apic_mem_readl(void *opaque, target_phys_addr_t addr) { DeviceState *d; APICCommonState *s; uint32_t val; int index; d = cpu_get_current_apic(); if (!d) { return 0; } s = DO_UPCAST(APICCommonState, busdev.qdev, d); index = (addr >> 4) & 0xff; switch(index) { case 0x02: /* id */ val = s->id << 24; break; case 0x03: /* version */ val = 0x11 | ((APIC_LVT_NB - 1) << 16); /* version 0x11 */ break; case 0x08: apic_sync_vapic(s, SYNC_FROM_VAPIC); if (apic_report_tpr_access) { cpu_report_tpr_access(s->cpu_env, TPR_ACCESS_READ); } val = s->tpr; break; case 0x09: val = apic_get_arb_pri(s); break; case 0x0a: /* ppr */ val = apic_get_ppr(s); break; case 0x0b: val = 0; break; case 0x0d: val = s->log_dest << 24; break; case 0x0e: val = s->dest_mode << 28; break; case 0x0f: val = s->spurious_vec; break; case 0x10 ... 0x17: val = s->isr[index & 7]; break; case 0x18 ... 0x1f: val = s->tmr[index & 7]; break; case 0x20 ... 0x27: val = s->irr[index & 7]; break; case 0x28: val = s->esr; break; case 0x30: case 0x31: val = s->icr[index & 1]; break; case 0x32 ... 0x37: val = s->lvt[index - 0x32]; break; case 0x38: val = s->initial_count; break; case 0x39: val = apic_get_current_count(s); break; case 0x3e: val = s->divide_conf; break; default: s->esr |= ESR_ILLEGAL_ADDRESS; val = 0; break; } trace_apic_mem_readl(addr, val); return val; }"} {"target": 0, "idx": 25711, "func": "static SocketAddressLegacy *tcp_build_address(const char *host_port, Error **errp) { InetSocketAddress *iaddr = g_new(InetSocketAddress, 1); SocketAddressLegacy *saddr; if (inet_parse(iaddr, host_port, errp)) { qapi_free_InetSocketAddress(iaddr); return NULL; } saddr = g_new0(SocketAddressLegacy, 1); saddr->type = SOCKET_ADDRESS_LEGACY_KIND_INET; saddr->u.inet.data = iaddr; return saddr; }"} {"target": 1, "idx": 25729, "func": "static void render_line(int x0, uint8_t y0, int x1, int y1, float *buf) { int dy = y1 - y0; int adx = x1 - x0; int ady = FFABS(dy); int sy = dy < 0 ? 
-1 : 1; buf[x0] = ff_vorbis_floor1_inverse_db_table[y0]; if (ady*2 <= adx) { // optimized common case render_line_unrolled(x0, y0, x1, sy, ady, adx, buf); } else { int base = dy / adx; int x = x0; uint8_t y = y0; int err = -adx; ady -= FFABS(base) * adx; while (++x < x1) { y += base; err += ady; if (err >= 0) { err -= adx; y += sy; } buf[x] = ff_vorbis_floor1_inverse_db_table[y]; } } }"} {"target": 1, "idx": 25734, "func": "static const QObject *qmp_input_get_object(QmpInputVisitor *qiv, const char *name) { const QObject *qobj; if (qiv->nb_stack == 0) { qobj = qiv->obj; } else { qobj = qiv->stack[qiv->nb_stack - 1].obj; } if (name && qobject_type(qobj) == QTYPE_QDICT) { return qdict_get(qobject_to_qdict(qobj), name); } else if (qiv->nb_stack > 0 && qobject_type(qobj) == QTYPE_QLIST) { return qlist_entry_obj(qiv->stack[qiv->nb_stack - 1].entry); } return qobj; }"} {"target": 1, "idx": 25737, "func": "static BufferPoolEntry *get_pool(AVBufferPool *pool) { BufferPoolEntry *cur = NULL, *last = NULL; do { FFSWAP(BufferPoolEntry*, cur, last); cur = avpriv_atomic_ptr_cas((void * volatile *)&pool->pool, last, NULL); if (!cur) return NULL; } while (cur != last); return cur; }"} {"target": 1, "idx": 25738, "func": "static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos, int *end_pos2) { if (s->in_gb.buffer && *pos >= s->gb.size_in_bits) { s->gb = s->in_gb; s->in_gb.buffer = NULL; assert((get_bits_count(&s->gb) & 7) == 0); skip_bits_long(&s->gb, *pos - *end_pos); *end_pos2 = *end_pos = *end_pos2 + get_bits_count(&s->gb) - *pos; *pos = get_bits_count(&s->gb); } }"} {"target": 1, "idx": 25741, "func": "static int vpc_write(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors) { BDRVVPCState *s = bs->opaque; int64_t offset; int64_t sectors, sectors_per_block; int ret; VHDFooter *footer = (VHDFooter *) s->footer_buf; if (cpu_to_be32(footer->type) == VHD_FIXED) { return bdrv_write(bs->file, sector_num, buf, nb_sectors); } while (nb_sectors > 0) { offset = get_sector_offset(bs, sector_num, 1); sectors_per_block = s->block_size >> BDRV_SECTOR_BITS; sectors = sectors_per_block - (sector_num % sectors_per_block); if (sectors > nb_sectors) { sectors = nb_sectors; } if (offset == -1) { offset = alloc_block(bs, sector_num); if (offset < 0) return -1; } ret = bdrv_pwrite(bs->file, offset, buf, sectors * BDRV_SECTOR_SIZE); if (ret != sectors * BDRV_SECTOR_SIZE) { return -1; } nb_sectors -= sectors; sector_num += sectors; buf += sectors * BDRV_SECTOR_SIZE; } return 0; }"} {"target": 1, "idx": 25748, "func": "static int read_old_huffman_tables(HYuvContext *s){ #if 1 GetBitContext gb; int i; init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8); if(read_len_table(s->len[0], &gb)<0) return -1; init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8); if(read_len_table(s->len[1], &gb)<0) return -1; for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i]; for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i]; if(s->bitstream_bpp >= 24){ memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t)); memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t)); } memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t)); memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t)); for(i=0; i<3; i++){ ff_free_vlc(&s->vlc[i]); init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0); } generate_joint_tables(s); return 0; #else av_log(s->avctx, AV_LOG_DEBUG, \"v1 huffyuv is not supported \\n\"); return -1; #endif }"} {"target": 0, "idx": 25753, "func": 
"exynos4_boards_init_common(MachineState *machine, Exynos4BoardType board_type) { Exynos4BoardState *s = g_new(Exynos4BoardState, 1); MachineClass *mc = MACHINE_GET_CLASS(machine); if (smp_cpus != EXYNOS4210_NCPUS && !qtest_enabled()) { error_report(\"%s board supports only %d CPU cores, ignoring smp_cpus\" \" value\", mc->name, EXYNOS4210_NCPUS); } exynos4_board_binfo.ram_size = exynos4_board_ram_size[board_type]; exynos4_board_binfo.board_id = exynos4_board_id[board_type]; exynos4_board_binfo.smp_bootreg_addr = exynos4_board_smp_bootreg_addr[board_type]; exynos4_board_binfo.kernel_filename = machine->kernel_filename; exynos4_board_binfo.initrd_filename = machine->initrd_filename; exynos4_board_binfo.kernel_cmdline = machine->kernel_cmdline; exynos4_board_binfo.gic_cpu_if_addr = EXYNOS4210_SMP_PRIVATE_BASE_ADDR + 0x100; PRINT_DEBUG(\"\\n ram_size: %luMiB [0x%08lx]\\n\" \" kernel_filename: %s\\n\" \" kernel_cmdline: %s\\n\" \" initrd_filename: %s\\n\", exynos4_board_ram_size[board_type] / 1048576, exynos4_board_ram_size[board_type], machine->kernel_filename, machine->kernel_cmdline, machine->initrd_filename); exynos4_boards_init_ram(s, get_system_memory(), exynos4_board_ram_size[board_type]); s->soc = exynos4210_init(get_system_memory()); return s; }"} {"target": 1, "idx": 25787, "func": "static void json_print_section_header(WriterContext *wctx) { JSONContext *json = wctx->priv; AVBPrint buf; const struct section *section = wctx->section[wctx->level]; const struct section *parent_section = wctx->level ? wctx->section[wctx->level-1] : NULL; if (wctx->level && wctx->nb_item[wctx->level-1]) printf(\",\\n\"); if (section->flags & SECTION_FLAG_IS_WRAPPER) { printf(\"{\\n\"); json->indent_level++; } else { av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED); json_escape_str(&buf, section->name, wctx); JSON_INDENT(); json->indent_level++; if (section->flags & SECTION_FLAG_IS_ARRAY) { printf(\"\\\"%s\\\": [\\n\", buf.str); } else if (!(parent_section->flags & SECTION_FLAG_IS_ARRAY)) { printf(\"\\\"%s\\\": {%s\", buf.str, json->item_start_end); } else { printf(\"{%s\", json->item_start_end); /* this is required so the parser can distinguish between packets and frames */ if (parent_section->id == SECTION_ID_PACKETS_AND_FRAMES) { if (!json->compact) JSON_INDENT(); printf(\"\\\"type\\\": \\\"%s\\\"%s\", section->name, json->item_sep); } } av_bprint_finalize(&buf, NULL); } }"} {"target": 0, "idx": 25810, "func": "build_madt(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info) { int madt_start = table_data->len; const MemMapEntry *memmap = guest_info->memmap; const int *irqmap = guest_info->irqmap; AcpiMultipleApicTable *madt; AcpiMadtGenericDistributor *gicd; AcpiMadtGenericMsiFrame *gic_msi; int i; madt = acpi_data_push(table_data, sizeof *madt); gicd = acpi_data_push(table_data, sizeof *gicd); gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR; gicd->length = sizeof(*gicd); gicd->base_address = memmap[VIRT_GIC_DIST].base; gicd->version = guest_info->gic_version; for (i = 0; i < guest_info->smp_cpus; i++) { AcpiMadtGenericInterrupt *gicc = acpi_data_push(table_data, sizeof *gicc); ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i)); gicc->type = ACPI_APIC_GENERIC_INTERRUPT; gicc->length = sizeof(*gicc); if (guest_info->gic_version == 2) { gicc->base_address = memmap[VIRT_GIC_CPU].base; } gicc->cpu_interface_number = i; gicc->arm_mpidr = armcpu->mp_affinity; gicc->uid = i; gicc->flags = cpu_to_le32(ACPI_GICC_ENABLED); if (armcpu->has_pmu) { gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ)); } 
} if (guest_info->gic_version == 3) { AcpiMadtGenericTranslator *gic_its; AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data, sizeof *gicr); gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR; gicr->length = sizeof(*gicr); gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base); gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size); if (its_class_name()) { gic_its = acpi_data_push(table_data, sizeof *gic_its); gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR; gic_its->length = sizeof(*gic_its); gic_its->translation_id = 0; gic_its->base_address = cpu_to_le64(memmap[VIRT_GIC_ITS].base); } } else { gic_msi = acpi_data_push(table_data, sizeof *gic_msi); gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME; gic_msi->length = sizeof(*gic_msi); gic_msi->gic_msi_frame_id = 0; gic_msi->base_address = cpu_to_le64(memmap[VIRT_GIC_V2M].base); gic_msi->flags = cpu_to_le32(1); gic_msi->spi_count = cpu_to_le16(NUM_GICV2M_SPIS); gic_msi->spi_base = cpu_to_le16(irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE); } build_header(linker, table_data, (void *)(table_data->data + madt_start), \"APIC\", table_data->len - madt_start, 3, NULL, NULL); }"} {"target": 0, "idx": 25813, "func": "int slirp_can_output(void) { return !slirp_vc || qemu_can_send_packet(slirp_vc); }"} {"target": 0, "idx": 25814, "func": "static void usb_tablet_class_initfn(ObjectClass *klass, void *data) { USBDeviceClass *uc = USB_DEVICE_CLASS(klass); uc->init = usb_tablet_initfn; uc->product_desc = \"QEMU USB Tablet\"; uc->usb_desc = &desc_tablet; uc->handle_packet = usb_generic_handle_packet; uc->handle_reset = usb_hid_handle_reset; uc->handle_control = usb_hid_handle_control; uc->handle_data = usb_hid_handle_data; uc->handle_destroy = usb_hid_handle_destroy; }"} {"target": 1, "idx": 25828, "func": "static int pixlet_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { PixletContext *ctx = avctx->priv_data; int i, w, h, width, height, ret, version; AVFrame *p = data; ThreadFrame frame = { .f = data }; uint32_t pktsize; bytestream2_init(&ctx->gb, avpkt->data, avpkt->size); pktsize = bytestream2_get_be32(&ctx->gb); if (pktsize <= 44 || pktsize - 4 > bytestream2_get_bytes_left(&ctx->gb)) { av_log(avctx, AV_LOG_ERROR, \"Invalid packet size %\"PRIu32\"\\n\", pktsize); } version = bytestream2_get_le32(&ctx->gb); if (version != 1) avpriv_request_sample(avctx, \"Version %d\", version); bytestream2_skip(&ctx->gb, 4); if (bytestream2_get_be32(&ctx->gb) != 1) bytestream2_skip(&ctx->gb, 4); width = bytestream2_get_be32(&ctx->gb); height = bytestream2_get_be32(&ctx->gb); w = FFALIGN(width, 1 << (NB_LEVELS + 1)); h = FFALIGN(height, 1 << (NB_LEVELS + 1)); ctx->levels = bytestream2_get_be32(&ctx->gb); if (ctx->levels != NB_LEVELS) ctx->depth = bytestream2_get_be32(&ctx->gb); if (ctx->depth < 8 || ctx->depth > 15) { avpriv_request_sample(avctx, \"Depth %d\", ctx->depth); } ret = ff_set_dimensions(avctx, w, h); if (ret < 0) return ret; avctx->width = width; avctx->height = height; if (ctx->w != w || ctx->h != h) { free_buffers(avctx); ctx->w = w; ctx->h = h; ret = init_decoder(avctx); if (ret < 0) { free_buffers(avctx); ctx->w = 0; ctx->h = 0; return ret; } } bytestream2_skip(&ctx->gb, 8); p->pict_type = AV_PICTURE_TYPE_I; p->key_frame = 1; p->color_range = AVCOL_RANGE_JPEG; ret = ff_thread_get_buffer(avctx, &frame, 0); if (ret < 0) return ret; for (i = 0; i < 3; i++) { ret = decode_plane(avctx, i, avpkt, frame.f); if (ret < 0) return ret; if (avctx->flags & AV_CODEC_FLAG_GRAY) break; } postprocess_luma(frame.f, ctx->w, ctx->h, 
ctx->depth); postprocess_chroma(frame.f, ctx->w >> 1, ctx->h >> 1, ctx->depth); *got_frame = 1; return pktsize; }"} {"target": 1, "idx": 25831, "func": "static void ccw_machine_2_9_class_options(MachineClass *mc) { S390CcwMachineClass *s390mc = S390_MACHINE_CLASS(mc); s390mc->gs_allowed = false; ccw_machine_2_10_class_options(mc); SET_MACHINE_COMPAT(mc, CCW_COMPAT_2_9); s390mc->css_migration_enabled = false; }"} {"target": 1, "idx": 25854, "func": "static int matroska_read_header(AVFormatContext *s) { MatroskaDemuxContext *matroska = s->priv_data; EbmlList *attachements_list = &matroska->attachments; MatroskaAttachement *attachements; EbmlList *chapters_list = &matroska->chapters; MatroskaChapter *chapters; MatroskaTrack *tracks; uint64_t max_start = 0; int64_t pos; Ebml ebml = { 0 }; AVStream *st; int i, j, k, res; matroska->ctx = s; /* First read the EBML header. */ if (ebml_parse(matroska, ebml_syntax, &ebml) || ebml.version > EBML_VERSION || ebml.max_size > sizeof(uint64_t) || ebml.id_length > sizeof(uint32_t) || ebml.doctype_version > 3) { av_log(matroska->ctx, AV_LOG_ERROR, \"EBML header using unsupported features\\n\" \"(EBML version %\"PRIu64\", doctype %s, doc version %\"PRIu64\")\\n\", ebml.version, ebml.doctype, ebml.doctype_version); ebml_free(ebml_syntax, &ebml); return AVERROR_PATCHWELCOME; } else if (ebml.doctype_version == 3) { av_log(matroska->ctx, AV_LOG_WARNING, \"EBML header using unsupported features\\n\" \"(EBML version %\"PRIu64\", doctype %s, doc version %\"PRIu64\")\\n\", ebml.version, ebml.doctype, ebml.doctype_version); } for (i = 0; i < FF_ARRAY_ELEMS(matroska_doctypes); i++) if (!strcmp(ebml.doctype, matroska_doctypes[i])) break; if (i >= FF_ARRAY_ELEMS(matroska_doctypes)) { av_log(s, AV_LOG_WARNING, \"Unknown EBML doctype '%s'\\n\", ebml.doctype); } ebml_free(ebml_syntax, &ebml); /* The next thing is a segment. */ pos = avio_tell(matroska->ctx->pb); res = ebml_parse(matroska, matroska_segments, matroska); // try resyncing until we find a EBML_STOP type element. while (res != 1) { res = matroska_resync(matroska, pos); if (res < 0) return res; pos = avio_tell(matroska->ctx->pb); res = ebml_parse(matroska, matroska_segment, matroska); } matroska_execute_seekhead(matroska); if (!matroska->time_scale) matroska->time_scale = 1000000; if (matroska->duration) matroska->ctx->duration = matroska->duration * matroska->time_scale * 1000 / AV_TIME_BASE; av_dict_set(&s->metadata, \"title\", matroska->title, 0); if (matroska->date_utc.size == 8) matroska_metadata_creation_time(&s->metadata, AV_RB64(matroska->date_utc.data)); tracks = matroska->tracks.elem; for (i=0; i < matroska->tracks.nb_elem; i++) { MatroskaTrack *track = &tracks[i]; enum CodecID codec_id = CODEC_ID_NONE; EbmlList *encodings_list = &track->encodings; MatroskaTrackEncoding *encodings = encodings_list->elem; uint8_t *extradata = NULL; int extradata_size = 0; int extradata_offset = 0; uint32_t fourcc = 0; AVIOContext b; /* Apply some sanity checks. 
*/ if (track->type != MATROSKA_TRACK_TYPE_VIDEO && track->type != MATROSKA_TRACK_TYPE_AUDIO && track->type != MATROSKA_TRACK_TYPE_SUBTITLE) { av_log(matroska->ctx, AV_LOG_INFO, \"Unknown or unsupported track type %\"PRIu64\"\\n\", track->type); continue; } if (track->codec_id == NULL) continue; if (track->type == MATROSKA_TRACK_TYPE_VIDEO) { if (!track->default_duration) track->default_duration = 1000000000/track->video.frame_rate; if (!track->video.display_width) track->video.display_width = track->video.pixel_width; if (!track->video.display_height) track->video.display_height = track->video.pixel_height; if (track->video.color_space.size == 4) fourcc = AV_RL32(track->video.color_space.data); } else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) { if (!track->audio.out_samplerate) track->audio.out_samplerate = track->audio.samplerate; } if (encodings_list->nb_elem > 1) { av_log(matroska->ctx, AV_LOG_ERROR, \"Multiple combined encodings not supported\"); } else if (encodings_list->nb_elem == 1) { if (encodings[0].type || (encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_HEADERSTRIP && #if CONFIG_ZLIB encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_ZLIB && #endif #if CONFIG_BZLIB encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_BZLIB && #endif encodings[0].compression.algo != MATROSKA_TRACK_ENCODING_COMP_LZO)) { encodings[0].scope = 0; av_log(matroska->ctx, AV_LOG_ERROR, \"Unsupported encoding type\"); } else if (track->codec_priv.size && encodings[0].scope&2) { uint8_t *codec_priv = track->codec_priv.data; int offset = matroska_decode_buffer(&track->codec_priv.data, &track->codec_priv.size, track); if (offset < 0) { track->codec_priv.data = NULL; track->codec_priv.size = 0; av_log(matroska->ctx, AV_LOG_ERROR, \"Failed to decode codec private data\\n\"); } else if (offset > 0) { track->codec_priv.data = av_malloc(track->codec_priv.size + offset); memcpy(track->codec_priv.data, encodings[0].compression.settings.data, offset); memcpy(track->codec_priv.data+offset, codec_priv, track->codec_priv.size); track->codec_priv.size += offset; } if (codec_priv != track->codec_priv.data) av_free(codec_priv); } } for(j=0; ff_mkv_codec_tags[j].id != CODEC_ID_NONE; j++){ if(!strncmp(ff_mkv_codec_tags[j].str, track->codec_id, strlen(ff_mkv_codec_tags[j].str))){ codec_id= ff_mkv_codec_tags[j].id; break; } } st = track->stream = avformat_new_stream(s, NULL); if (st == NULL) return AVERROR(ENOMEM); if (!strcmp(track->codec_id, \"V_MS/VFW/FOURCC\") && track->codec_priv.size >= 40 && track->codec_priv.data != NULL) { track->ms_compat = 1; fourcc = AV_RL32(track->codec_priv.data + 16); codec_id = ff_codec_get_id(ff_codec_bmp_tags, fourcc); extradata_offset = 40; } else if (!strcmp(track->codec_id, \"A_MS/ACM\") && track->codec_priv.size >= 14 && track->codec_priv.data != NULL) { int ret; ffio_init_context(&b, track->codec_priv.data, track->codec_priv.size, AVIO_FLAG_READ, NULL, NULL, NULL, NULL); ret = ff_get_wav_header(&b, st->codec, track->codec_priv.size); if (ret < 0) return ret; codec_id = st->codec->codec_id; extradata_offset = FFMIN(track->codec_priv.size, 18); } else if (!strcmp(track->codec_id, \"V_QUICKTIME\") && (track->codec_priv.size >= 86) && (track->codec_priv.data != NULL)) { fourcc = AV_RL32(track->codec_priv.data); codec_id = ff_codec_get_id(ff_codec_movvideo_tags, fourcc); } else if (codec_id == CODEC_ID_PCM_S16BE) { switch (track->audio.bitdepth) { case 8: codec_id = CODEC_ID_PCM_U8; break; case 24: codec_id = CODEC_ID_PCM_S24BE; break; case 32: codec_id 
= CODEC_ID_PCM_S32BE; break; } } else if (codec_id == CODEC_ID_PCM_S16LE) { switch (track->audio.bitdepth) { case 8: codec_id = CODEC_ID_PCM_U8; break; case 24: codec_id = CODEC_ID_PCM_S24LE; break; case 32: codec_id = CODEC_ID_PCM_S32LE; break; } } else if (codec_id==CODEC_ID_PCM_F32LE && track->audio.bitdepth==64) { codec_id = CODEC_ID_PCM_F64LE; } else if (codec_id == CODEC_ID_AAC && !track->codec_priv.size) { int profile = matroska_aac_profile(track->codec_id); int sri = matroska_aac_sri(track->audio.samplerate); extradata = av_mallocz(5 + FF_INPUT_BUFFER_PADDING_SIZE); if (extradata == NULL) return AVERROR(ENOMEM); extradata[0] = (profile << 3) | ((sri&0x0E) >> 1); extradata[1] = ((sri&0x01) << 7) | (track->audio.channels<<3); if (strstr(track->codec_id, \"SBR\")) { sri = matroska_aac_sri(track->audio.out_samplerate); extradata[2] = 0x56; extradata[3] = 0xE5; extradata[4] = 0x80 | (sri<<3); extradata_size = 5; } else extradata_size = 2; } else if (codec_id == CODEC_ID_TTA) { extradata_size = 30; extradata = av_mallocz(extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); if (extradata == NULL) return AVERROR(ENOMEM); ffio_init_context(&b, extradata, extradata_size, 1, NULL, NULL, NULL, NULL); avio_write(&b, \"TTA1\", 4); avio_wl16(&b, 1); avio_wl16(&b, track->audio.channels); avio_wl16(&b, track->audio.bitdepth); avio_wl32(&b, track->audio.out_samplerate); avio_wl32(&b, matroska->ctx->duration * track->audio.out_samplerate); } else if (codec_id == CODEC_ID_RV10 || codec_id == CODEC_ID_RV20 || codec_id == CODEC_ID_RV30 || codec_id == CODEC_ID_RV40) { extradata_offset = 26; } else if (codec_id == CODEC_ID_RA_144) { track->audio.out_samplerate = 8000; track->audio.channels = 1; } else if (codec_id == CODEC_ID_RA_288 || codec_id == CODEC_ID_COOK || codec_id == CODEC_ID_ATRAC3 || codec_id == CODEC_ID_SIPR) { int flavor; ffio_init_context(&b, track->codec_priv.data,track->codec_priv.size, 0, NULL, NULL, NULL, NULL); avio_skip(&b, 22); flavor = avio_rb16(&b); track->audio.coded_framesize = avio_rb32(&b); avio_skip(&b, 12); track->audio.sub_packet_h = avio_rb16(&b); track->audio.frame_size = avio_rb16(&b); track->audio.sub_packet_size = avio_rb16(&b); track->audio.buf = av_malloc(track->audio.frame_size * track->audio.sub_packet_h); if (codec_id == CODEC_ID_RA_288) { st->codec->block_align = track->audio.coded_framesize; track->codec_priv.size = 0; } else { if (codec_id == CODEC_ID_SIPR && flavor < 4) { const int sipr_bit_rate[4] = { 6504, 8496, 5000, 16000 }; track->audio.sub_packet_size = ff_sipr_subpk_size[flavor]; st->codec->bit_rate = sipr_bit_rate[flavor]; } st->codec->block_align = track->audio.sub_packet_size; extradata_offset = 78; } } track->codec_priv.size -= extradata_offset; if (codec_id == CODEC_ID_NONE) av_log(matroska->ctx, AV_LOG_INFO, \"Unknown/unsupported CodecID %s.\\n\", track->codec_id); if (track->time_scale < 0.01) track->time_scale = 1.0; avpriv_set_pts_info(st, 64, matroska->time_scale*track->time_scale, 1000*1000*1000); /* 64 bit pts in ns */ st->codec->codec_id = codec_id; st->start_time = 0; if (strcmp(track->language, \"und\")) av_dict_set(&st->metadata, \"language\", track->language, 0); av_dict_set(&st->metadata, \"title\", track->name, 0); if (track->flag_default) st->disposition |= AV_DISPOSITION_DEFAULT; if (track->flag_forced) st->disposition |= AV_DISPOSITION_FORCED; if (!st->codec->extradata) { if(extradata){ st->codec->extradata = extradata; st->codec->extradata_size = extradata_size; } else if(track->codec_priv.data && track->codec_priv.size > 0){ 
st->codec->extradata = av_mallocz(track->codec_priv.size + FF_INPUT_BUFFER_PADDING_SIZE); if(st->codec->extradata == NULL) return AVERROR(ENOMEM); st->codec->extradata_size = track->codec_priv.size; memcpy(st->codec->extradata, track->codec_priv.data + extradata_offset, track->codec_priv.size); } } if (track->type == MATROSKA_TRACK_TYPE_VIDEO) { MatroskaTrackPlane *planes = track->operation.combine_planes.elem; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_tag = fourcc; st->codec->width = track->video.pixel_width; st->codec->height = track->video.pixel_height; av_reduce(&st->sample_aspect_ratio.num, &st->sample_aspect_ratio.den, st->codec->height * track->video.display_width, st->codec-> width * track->video.display_height, 255); st->need_parsing = AVSTREAM_PARSE_HEADERS; if (track->default_duration) st->avg_frame_rate = av_d2q(1000000000.0/track->default_duration, INT_MAX); /* export stereo mode flag as metadata tag */ if (track->video.stereo_mode && track->video.stereo_mode < MATROSKA_VIDEO_STEREO_MODE_COUNT) av_dict_set(&st->metadata, \"stereo_mode\", matroska_video_stereo_mode[track->video.stereo_mode], 0); /* if we have virtual track, mark the real tracks */ for (j=0; j < track->operation.combine_planes.nb_elem; j++) { char buf[32]; if (planes[j].type >= MATROSKA_VIDEO_STEREO_PLANE_COUNT) continue; snprintf(buf, sizeof(buf), \"%s_%d\", matroska_video_stereo_plane[planes[j].type], i); for (k=0; k < matroska->tracks.nb_elem; k++) if (planes[j].uid == tracks[k].uid) { av_dict_set(&s->streams[k]->metadata, \"stereo_mode\", buf, 0); break; } } } else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) { st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->sample_rate = track->audio.out_samplerate; st->codec->channels = track->audio.channels; if (st->codec->codec_id != CODEC_ID_AAC) st->need_parsing = AVSTREAM_PARSE_HEADERS; } else if (track->type == MATROSKA_TRACK_TYPE_SUBTITLE) { st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; } } attachements = attachements_list->elem; for (j=0; jnb_elem; j++) { if (!(attachements[j].filename && attachements[j].mime && attachements[j].bin.data && attachements[j].bin.size > 0)) { av_log(matroska->ctx, AV_LOG_ERROR, \"incomplete attachment\\n\"); } else { AVStream *st = avformat_new_stream(s, NULL); if (st == NULL) break; av_dict_set(&st->metadata, \"filename\",attachements[j].filename, 0); av_dict_set(&st->metadata, \"mimetype\", attachements[j].mime, 0); st->codec->codec_id = CODEC_ID_NONE; st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT; st->codec->extradata = av_malloc(attachements[j].bin.size + FF_INPUT_BUFFER_PADDING_SIZE); if(st->codec->extradata == NULL) break; st->codec->extradata_size = attachements[j].bin.size; memcpy(st->codec->extradata, attachements[j].bin.data, attachements[j].bin.size); for (i=0; ff_mkv_mime_tags[i].id != CODEC_ID_NONE; i++) { if (!strncmp(ff_mkv_mime_tags[i].str, attachements[j].mime, strlen(ff_mkv_mime_tags[i].str))) { st->codec->codec_id = ff_mkv_mime_tags[i].id; break; } } attachements[j].stream = st; } } chapters = chapters_list->elem; for (i=0; inb_elem; i++) if (chapters[i].start != AV_NOPTS_VALUE && chapters[i].uid && (max_start==0 || chapters[i].start > max_start)) { chapters[i].chapter = avpriv_new_chapter(s, chapters[i].uid, (AVRational){1, 1000000000}, chapters[i].start, chapters[i].end, chapters[i].title); av_dict_set(&chapters[i].chapter->metadata, \"title\", chapters[i].title, 0); max_start = chapters[i].start; } matroska_add_index_entries(matroska); matroska_convert_tags(s); return 0; }"} 
{"target": 1, "idx": 25859, "func": "static int filter_frame(AVFilterLink *inlink, AVFrame *picref) { AVFilterContext *ctx = inlink->dst; SignatureContext *sic = ctx->priv; StreamContext *sc = &(sic->streamcontexts[FF_INLINK_IDX(inlink)]); FineSignature* fs; static const uint8_t pot3[5] = { 3*3*3*3, 3*3*3, 3*3, 3, 1 }; /* indexes of words : 210,217,219,274,334 44,175,233,270,273 57,70,103,237,269 100,285,295,337,354 101,102,111,275,296 s2usw = sorted to unsorted wordvec: 44 is at index 5, 57 at index 10... */ static const unsigned int wordvec[25] = {44,57,70,100,101,102,103,111,175,210,217,219,233,237,269,270,273,274,275,285,295,296,334,337,354}; static const uint8_t s2usw[25] = { 5,10,11, 15, 20, 21, 12, 22, 6, 0, 1, 2, 7, 13, 14, 8, 9, 3, 23, 16, 17, 24, 4, 18, 19}; uint8_t wordt2b[5] = { 0, 0, 0, 0, 0 }; /* word ternary to binary */ uint64_t intpic[32][32]; uint64_t rowcount; uint8_t *p = picref->data[0]; int inti, intj; int *intjlut; uint64_t conflist[DIFFELEM_SIZE]; int f = 0, g = 0, w = 0; int32_t dh1 = 1, dh2 = 1, dw1 = 1, dw2 = 1, a, b; int64_t denom; int i, j, k, ternary; uint64_t blocksum; int blocksize; int64_t th; /* threshold */ int64_t sum; int64_t precfactor = (sc->divide) ? 65536 : BLOCK_LCM; /* initialize fs */ if (sc->curfinesig) { fs = av_mallocz(sizeof(FineSignature)); if (!fs) return AVERROR(ENOMEM); sc->curfinesig->next = fs; fs->prev = sc->curfinesig; sc->curfinesig = fs; } else { fs = sc->curfinesig = sc->finesiglist; sc->curcoarsesig1->first = fs; } fs->pts = picref->pts; fs->index = sc->lastindex++; memset(intpic, 0, sizeof(uint64_t)*32*32); intjlut = av_malloc_array(inlink->w, sizeof(int)); if (!intjlut) return AVERROR(ENOMEM); for (i = 0; i < inlink->w; i++) { intjlut[i] = (i*32)/inlink->w; } for (i = 0; i < inlink->h; i++) { inti = (i*32)/inlink->h; for (j = 0; j < inlink->w; j++) { intj = intjlut[j]; intpic[inti][intj] += p[j]; } p += picref->linesize[0]; } av_freep(&intjlut); /* The following calculates a summed area table (intpic) and brings the numbers * in intpic to the same denominator. * So you only have to handle the numinator in the following sections. */ dh1 = inlink->h / 32; if (inlink->h % 32) dh2 = dh1 + 1; dw1 = inlink->w / 32; if (inlink->w % 32) dw2 = dw1 + 1; denom = (sc->divide) ? dh1 * dh2 * dw1 * dw2 : 1; for (i = 0; i < 32; i++) { rowcount = 0; a = 1; if (dh2 > 1) { a = ((inlink->h*(i+1))%32 == 0) ? (inlink->h*(i+1))/32 - 1 : (inlink->h*(i+1))/32; a -= ((inlink->h*i)%32 == 0) ? (inlink->h*i)/32 - 1 : (inlink->h*i)/32; a = (a == dh1)? dh2 : dh1; } for (j = 0; j < 32; j++) { b = 1; if (dw2 > 1) { b = ((inlink->w*(j+1))%32 == 0) ? (inlink->w*(j+1))/32 - 1 : (inlink->w*(j+1))/32; b -= ((inlink->w*j)%32 == 0) ? (inlink->w*j)/32 - 1 : (inlink->w*j)/32; b = (b == dw1)? dw2 : dw1; } rowcount += intpic[i][j] * a * b * precfactor / denom; if (i > 0) { intpic[i][j] = intpic[i-1][j] + rowcount; } else { intpic[i][j] = rowcount; } } } denom = (sc->divide) ? 
1 : dh1 * dh2 * dw1 * dw2; for (i = 0; i < ELEMENT_COUNT; i++) { const ElemCat* elemcat = elements[i]; int64_t* elemsignature; uint64_t* sortsignature; elemsignature = av_malloc_array(elemcat->elem_count, sizeof(int64_t)); if (!elemsignature) return AVERROR(ENOMEM); sortsignature = av_malloc_array(elemcat->elem_count, sizeof(int64_t)); if (!sortsignature) return AVERROR(ENOMEM); for (j = 0; j < elemcat->elem_count; j++) { blocksum = 0; blocksize = 0; for (k = 0; k < elemcat->left_count; k++) { blocksum += get_block_sum(sc, intpic, &elemcat->blocks[j*elemcat->block_count+k]); blocksize += get_block_size(&elemcat->blocks[j*elemcat->block_count+k]); } sum = blocksum / blocksize; if (elemcat->av_elem) { sum -= 128 * precfactor * denom; } else { blocksum = 0; blocksize = 0; for (; k < elemcat->block_count; k++) { blocksum += get_block_sum(sc, intpic, &elemcat->blocks[j*elemcat->block_count+k]); blocksize += get_block_size(&elemcat->blocks[j*elemcat->block_count+k]); } sum -= blocksum / blocksize; conflist[g++] = FFABS(sum * 8 / (precfactor * denom)); } elemsignature[j] = sum; sortsignature[j] = FFABS(sum); } /* get threshold */ qsort(sortsignature, elemcat->elem_count, sizeof(uint64_t), (void*) cmp); th = sortsignature[(int) (elemcat->elem_count*0.333)]; /* ternarize */ for (j = 0; j < elemcat->elem_count; j++) { if (elemsignature[j] < -th) { ternary = 0; } else if (elemsignature[j] <= th) { ternary = 1; } else { ternary = 2; } fs->framesig[f/5] += ternary * pot3[f%5]; if (f == wordvec[w]) { fs->words[s2usw[w]/5] += ternary * pot3[wordt2b[s2usw[w]/5]++]; if (w < 24) w++; } f++; } av_freep(&elemsignature); av_freep(&sortsignature); } /* confidence */ qsort(conflist, DIFFELEM_SIZE, sizeof(uint64_t), (void*) cmp); fs->confidence = FFMIN(conflist[DIFFELEM_SIZE/2], 255); /* coarsesignature */ if (sc->coarsecount == 0) { if (sc->curcoarsesig2) { sc->curcoarsesig1 = av_mallocz(sizeof(CoarseSignature)); if (!sc->curcoarsesig1) return AVERROR(ENOMEM); sc->curcoarsesig1->first = fs; sc->curcoarsesig2->next = sc->curcoarsesig1; sc->coarseend = sc->curcoarsesig1; } } if (sc->coarsecount == 45) { sc->midcoarse = 1; sc->curcoarsesig2 = av_mallocz(sizeof(CoarseSignature)); if (!sc->curcoarsesig2) return AVERROR(ENOMEM); sc->curcoarsesig2->first = fs; sc->curcoarsesig1->next = sc->curcoarsesig2; sc->coarseend = sc->curcoarsesig2; } for (i = 0; i < 5; i++) { set_bit(sc->curcoarsesig1->data[i], fs->words[i]); } /* assuming the actual frame is the last */ sc->curcoarsesig1->last = fs; if (sc->midcoarse) { for (i = 0; i < 5; i++) { set_bit(sc->curcoarsesig2->data[i], fs->words[i]); } sc->curcoarsesig2->last = fs; } sc->coarsecount = (sc->coarsecount+1)%90; /* debug printing finesignature */ if (av_log_get_level() == AV_LOG_DEBUG) { av_log(ctx, AV_LOG_DEBUG, \"input %d, confidence: %d\\n\", FF_INLINK_IDX(inlink), fs->confidence); av_log(ctx, AV_LOG_DEBUG, \"words:\"); for (i = 0; i < 5; i++) { av_log(ctx, AV_LOG_DEBUG, \" %d:\", fs->words[i] ); av_log(ctx, AV_LOG_DEBUG, \" %d\", fs->words[i] / pot3[0] ); for (j = 1; j < 5; j++) av_log(ctx, AV_LOG_DEBUG, \",%d\", fs->words[i] % pot3[j-1] / pot3[j] ); av_log(ctx, AV_LOG_DEBUG, \";\"); } av_log(ctx, AV_LOG_DEBUG, \"\\n\"); av_log(ctx, AV_LOG_DEBUG, \"framesignature:\"); for (i = 0; i < SIGELEM_SIZE/5; i++) { av_log(ctx, AV_LOG_DEBUG, \" %d\", fs->framesig[i] / pot3[0] ); for (j = 1; j < 5; j++) av_log(ctx, AV_LOG_DEBUG, \",%d\", fs->framesig[i] % pot3[j-1] / pot3[j] ); } av_log(ctx, AV_LOG_DEBUG, \"\\n\"); } if (FF_INLINK_IDX(inlink) == 0) return 
ff_filter_frame(inlink->dst->outputs[0], picref); return 1; }"} {"target": 1, "idx": 25862, "func": "static void test_dma_fragmented(void) { AHCIQState *ahci; AHCICommand *cmd; uint8_t px; size_t bufsize = 4096; unsigned char *tx = g_malloc(bufsize); unsigned char *rx = g_malloc0(bufsize); uint64_t ptr; ahci = ahci_boot_and_enable(NULL); px = ahci_port_select(ahci); ahci_port_clear(ahci, px); /* create pattern */ generate_pattern(tx, bufsize, AHCI_SECTOR_SIZE); /* Create a DMA buffer in guest memory, and write our pattern to it. */ ptr = guest_alloc(ahci->parent->alloc, bufsize); g_assert(ptr); bufwrite(ptr, tx, bufsize); cmd = ahci_command_create(CMD_WRITE_DMA); ahci_command_adjust(cmd, 0, ptr, bufsize, 32); ahci_command_commit(ahci, cmd, px); ahci_command_issue(ahci, cmd); ahci_command_verify(ahci, cmd); g_free(cmd); cmd = ahci_command_create(CMD_READ_DMA); ahci_command_adjust(cmd, 0, ptr, bufsize, 32); ahci_command_commit(ahci, cmd, px); ahci_command_issue(ahci, cmd); ahci_command_verify(ahci, cmd); g_free(cmd); /* Read back the guest's receive buffer into local memory */ bufread(ptr, rx, bufsize); guest_free(ahci->parent->alloc, ptr); g_assert_cmphex(memcmp(tx, rx, bufsize), ==, 0); ahci_shutdown(ahci); g_free(rx); g_free(tx); }"} {"target": 1, "idx": 25865, "func": "static uint32_t pcihotplug_read(void *opaque, uint32_t addr) { uint32_t val = 0; struct pci_status *g = opaque; switch (addr) { case PCI_BASE: val = g->up; break; case PCI_BASE + 4: val = g->down; break; default: break; } PIIX4_DPRINTF(\"pcihotplug read %x == %x\\n\", addr, val); return val; }"} {"target": 1, "idx": 25868, "func": "static int w64_read_header(AVFormatContext *s, AVFormatParameters *ap) { int64_t size; AVIOContext *pb = s->pb; WAVContext *wav = s->priv_data; AVStream *st; uint8_t guid[16]; avio_read(pb, guid, 16); if (memcmp(guid, guid_riff, 16)) return -1; if (avio_rl64(pb) < 16 + 8 + 16 + 8 + 16 + 8) /* riff + wave + fmt + sizes */ return -1; avio_read(pb, guid, 16); if (memcmp(guid, guid_wave, 16)) { av_log(s, AV_LOG_ERROR, \"could not find wave guid\\n\"); return -1; } size = find_guid(pb, guid_fmt); if (size < 0) { av_log(s, AV_LOG_ERROR, \"could not find fmt guid\\n\"); return -1; } st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); /* subtract chunk header size - normal wav file doesn't count it */ ff_get_wav_header(pb, st->codec, size - 24); avio_skip(pb, FFALIGN(size, INT64_C(8)) - size); st->need_parsing = AVSTREAM_PARSE_FULL; av_set_pts_info(st, 64, 1, st->codec->sample_rate); size = find_guid(pb, guid_data); if (size < 0) { av_log(s, AV_LOG_ERROR, \"could not find data guid\\n\"); return -1; } wav->data_end = avio_tell(pb) + size - 24; wav->w64 = 1; return 0; }"} {"target": 1, "idx": 25869, "func": "void rgb15tobgr15(const uint8_t *src, uint8_t *dst, long src_size) { long i; long num_pixels = src_size >> 1; for(i=0; i>5; b = (rgb&0x7C00)>>10; dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10); } }"} {"target": 1, "idx": 25872, "func": "int vnc_job_add_rect(VncJob *job, int x, int y, int w, int h) { VncRectEntry *entry = g_malloc0(sizeof(VncRectEntry)); entry->rect.x = x; entry->rect.y = y; entry->rect.w = w; entry->rect.h = h; vnc_lock_queue(queue); QLIST_INSERT_HEAD(&job->rectangles, entry, next); vnc_unlock_queue(queue); return 1; }"} {"target": 0, "idx": 25925, "func": "static void uhci_async_complete_packet(USBPacket * packet, void *opaque) { UHCIState *s = opaque; UHCI_QH qh; UHCI_TD td; uint32_t link; uint32_t old_td_ctrl; uint32_t val; uint32_t frame_addr; int ret; /* Handle 
async isochronous packet completion */ frame_addr = s->async_frame_addr; if (frame_addr) { cpu_physical_memory_read(frame_addr, (uint8_t *)&link, 4); le32_to_cpus(&link); cpu_physical_memory_read(link & ~0xf, (uint8_t *)&td, sizeof(td)); le32_to_cpus(&td.link); le32_to_cpus(&td.ctrl); le32_to_cpus(&td.token); le32_to_cpus(&td.buffer); old_td_ctrl = td.ctrl; ret = uhci_handle_td(s, &td, &s->pending_int_mask, 1); /* update the status bits of the TD */ if (old_td_ctrl != td.ctrl) { val = cpu_to_le32(td.ctrl); cpu_physical_memory_write((link & ~0xf) + 4, (const uint8_t *)&val, sizeof(val)); } if (ret == 2) { s->async_frame_addr = frame_addr; } else if (ret == 0) { /* update qh element link */ val = cpu_to_le32(td.link); cpu_physical_memory_write(frame_addr, (const uint8_t *)&val, sizeof(val)); } return; } link = s->async_qh; if (!link) { /* This should never happen. It means a TD somehow got removed without cancelling the associated async IO request. */ return; } cpu_physical_memory_read(link & ~0xf, (uint8_t *)&qh, sizeof(qh)); le32_to_cpus(&qh.link); le32_to_cpus(&qh.el_link); /* Re-process the queue containing the async packet. */ while (1) { cpu_physical_memory_read(qh.el_link & ~0xf, (uint8_t *)&td, sizeof(td)); le32_to_cpus(&td.link); le32_to_cpus(&td.ctrl); le32_to_cpus(&td.token); le32_to_cpus(&td.buffer); old_td_ctrl = td.ctrl; ret = uhci_handle_td(s, &td, &s->pending_int_mask, 1); /* update the status bits of the TD */ if (old_td_ctrl != td.ctrl) { val = cpu_to_le32(td.ctrl); cpu_physical_memory_write((qh.el_link & ~0xf) + 4, (const uint8_t *)&val, sizeof(val)); } if (ret < 0) break; /* interrupted frame */ if (ret == 2) { s->async_qh = link; break; } else if (ret == 0) { /* update qh element link */ qh.el_link = td.link; val = cpu_to_le32(qh.el_link); cpu_physical_memory_write((link & ~0xf) + 4, (const uint8_t *)&val, sizeof(val)); if (!(qh.el_link & 4)) break; } break; } }"} {"target": 0, "idx": 25932, "func": "static void tracked_request_end(BdrvTrackedRequest *req) { if (req->serialising) { req->bs->serialising_in_flight--; } QLIST_REMOVE(req, list); qemu_co_queue_restart_all(&req->wait_queue); }"} {"target": 0, "idx": 25936, "func": "static int adpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; ADPCMDecodeContext *c = avctx->priv_data; ADPCMChannelStatus *cs; int n, m, channel, i; int block_predictor[2]; short *samples; short *samples_end; const uint8_t *src; int st; /* stereo */ /* DK3 ADPCM accounting variables */ unsigned char last_byte = 0; unsigned char nibble; int decode_top_nibble_next = 0; int diff_channel; /* EA ADPCM state variables */ uint32_t samples_in_chunk; int32_t previous_left_sample, previous_right_sample; int32_t current_left_sample, current_right_sample; int32_t next_left_sample, next_right_sample; int32_t coeff1l, coeff2l, coeff1r, coeff2r; uint8_t shift_left, shift_right; int count1, count2; int coeff[2][2], shift[2];//used in EA MAXIS ADPCM if (!buf_size) return 0; //should protect all 4bit ADPCM variants //8 is needed for CODEC_ID_ADPCM_IMA_WAV with 2 channels // if(*data_size/4 < buf_size + 8) return -1; samples = data; samples_end= samples + *data_size/2; *data_size= 0; src = buf; st = avctx->channels == 2 ? 
1 : 0; switch(avctx->codec->id) { case CODEC_ID_ADPCM_IMA_QT: n = buf_size - 2*avctx->channels; for (channel = 0; channel < avctx->channels; channel++) { int16_t predictor; int step_index; cs = &(c->status[channel]); /* (pppppp) (piiiiiii) */ /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */ predictor = AV_RB16(src); step_index = predictor & 0x7F; predictor &= 0xFF80; src += 2; if (cs->step_index == step_index) { int diff = (int)predictor - cs->predictor; if (diff < 0) diff = - diff; if (diff > 0x7f) goto update; } else { update: cs->step_index = step_index; cs->predictor = predictor; } if (cs->step_index > 88){ av_log(avctx, AV_LOG_ERROR, \"ERROR: step_index = %i\\n\", cs->step_index); cs->step_index = 88; } samples = (short*)data + channel; for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded by chuncks of 34 bytes (=64 samples) */ *samples = adpcm_ima_qt_expand_nibble(cs, src[0] & 0x0F, 3); samples += avctx->channels; *samples = adpcm_ima_qt_expand_nibble(cs, src[0] >> 4 , 3); samples += avctx->channels; src ++; } } if (st) samples--; break; case CODEC_ID_ADPCM_IMA_WAV: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; // samples_per_block= (block_align-4*chanels)*8 / (bits_per_sample * chanels) + 1; for(i=0; i<avctx->channels; i++){ cs = &(c->status[i]); cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src); cs->step_index = *src++; if (cs->step_index > 88){ av_log(avctx, AV_LOG_ERROR, \"ERROR: step_index = %i\\n\", cs->step_index); cs->step_index = 88; } if (*src++) av_log(avctx, AV_LOG_ERROR, \"unused byte should be null but is %d!!\\n\", src[-1]); /* unused */ } while(src < buf + buf_size){ for(m=0; m<4; m++){ for(i=0; i<=st; i++) *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3); for(i=0; i<=st; i++) *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4 , 3); src++; } src += 4*st; } break; case CODEC_ID_ADPCM_4XM: cs = &(c->status[0]); c->status[0].predictor= (int16_t)bytestream_get_le16(&src); if(st){ c->status[1].predictor= (int16_t)bytestream_get_le16(&src); } c->status[0].step_index= (int16_t)bytestream_get_le16(&src); if(st){ c->status[1].step_index= (int16_t)bytestream_get_le16(&src); } if (cs->step_index < 0) cs->step_index = 0; if (cs->step_index > 88) cs->step_index = 88; m= (buf_size - (src - buf))>>st; for(i=0; i<m; i++) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4); if (st) *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4); *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4); if (st) *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4); } src += m<<st; break; case CODEC_ID_ADPCM_MS: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; n = buf_size - 7 * avctx->channels; if (n < 0) return -1; block_predictor[0] = av_clip(*src++, 0, 6); block_predictor[1] = 0; if (st) block_predictor[1] = av_clip(*src++, 0, 6); c->status[0].idelta = (int16_t)bytestream_get_le16(&src); if (st){ c->status[1].idelta = (int16_t)bytestream_get_le16(&src); } c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor[0]]; c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor[0]]; c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor[1]]; c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor[1]]; c->status[0].sample1 = bytestream_get_le16(&src); if (st) c->status[1].sample1 = bytestream_get_le16(&src); c->status[0].sample2 = bytestream_get_le16(&src); if (st) c->status[1].sample2 = bytestream_get_le16(&src); *samples++ = c->status[0].sample2; if
(st) *samples++ = c->status[1].sample2; *samples++ = c->status[0].sample1; if (st) *samples++ = c->status[1].sample1; for(;n>0;n--) { *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4 ); *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F); src ++; } break; case CODEC_ID_ADPCM_IMA_DK4: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; c->status[0].predictor = (int16_t)bytestream_get_le16(&src); c->status[0].step_index = *src++; src++; *samples++ = c->status[0].predictor; if (st) { c->status[1].predictor = (int16_t)bytestream_get_le16(&src); c->status[1].step_index = *src++; src++; *samples++ = c->status[1].predictor; } while (src < buf + buf_size) { uint8_t v = *src++; *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3); } break; case CODEC_ID_ADPCM_IMA_DK3: if (avctx->block_align != 0 && buf_size > avctx->block_align) buf_size = avctx->block_align; if(buf_size + 16 > (samples_end - samples)*3/8) return -1; c->status[0].predictor = (int16_t)AV_RL16(src + 10); c->status[1].predictor = (int16_t)AV_RL16(src + 12); c->status[0].step_index = src[14]; c->status[1].step_index = src[15]; /* sign extend the predictors */ src += 16; diff_channel = c->status[1].predictor; /* the DK3_GET_NEXT_NIBBLE macro issues the break statement when * the buffer is consumed */ while (1) { /* for this algorithm, c->status[0] is the sum channel and * c->status[1] is the diff channel */ /* process the first predictor of the sum channel */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[0], nibble, 3); /* process the diff channel predictor */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[1], nibble, 3); /* process the first pair of stereo PCM samples */ diff_channel = (diff_channel + c->status[1].predictor) / 2; *samples++ = c->status[0].predictor + c->status[1].predictor; *samples++ = c->status[0].predictor - c->status[1].predictor; /* process the second predictor of the sum channel */ DK3_GET_NEXT_NIBBLE(); adpcm_ima_expand_nibble(&c->status[0], nibble, 3); /* process the second pair of stereo PCM samples */ diff_channel = (diff_channel + c->status[1].predictor) / 2; *samples++ = c->status[0].predictor + c->status[1].predictor; *samples++ = c->status[0].predictor - c->status[1].predictor; } break; case CODEC_ID_ADPCM_IMA_ISS: c->status[0].predictor = (int16_t)AV_RL16(src + 0); c->status[0].step_index = src[2]; src += 4; if(st) { c->status[1].predictor = (int16_t)AV_RL16(src + 0); c->status[1].step_index = src[2]; src += 4; } while (src < buf + buf_size) { uint8_t v1, v2; uint8_t v = *src++; /* nibbles are swapped for mono */ if (st) { v1 = v >> 4; v2 = v & 0x0F; } else { v2 = v >> 4; v1 = v & 0x0F; } *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3); } break; case CODEC_ID_ADPCM_IMA_WS: while (src < buf + buf_size) { uint8_t v = *src++; *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3); } break; case CODEC_ID_ADPCM_XA: while (buf_size >= 128) { xa_decode(samples, src, &c->status[0], &c->status[1], avctx->channels); src += 128; samples += 28 * 8; buf_size -= 128; } break; case CODEC_ID_ADPCM_IMA_EA_EACS: samples_in_chunk = bytestream_get_le32(&src) >> (1-st); if (samples_in_chunk > buf_size-4-(8<<st)) { src += buf_size - 4; break; } for (i=0; i<=st; i++) c->status[i].step_index = bytestream_get_le32(&src); for (i=0; i<=st; i++) c->status[i].predictor =
bytestream_get_le32(&src); for (; samples_in_chunk; samples_in_chunk--, src++) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], *src>>4, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3); } break; case CODEC_ID_ADPCM_IMA_EA_SEAD: for (; src < buf+buf_size; src++) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6); *samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6); } break; case CODEC_ID_ADPCM_EA: /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces, each coding 28 stereo samples. */ if (buf_size < 12) { av_log(avctx, AV_LOG_ERROR, \"frame too small\\n\"); return AVERROR(EINVAL); } samples_in_chunk = AV_RL32(src); if (samples_in_chunk / 28 > (buf_size - 12) / 30) { av_log(avctx, AV_LOG_ERROR, \"invalid frame\\n\"); return AVERROR(EINVAL); } src += 4; current_left_sample = (int16_t)bytestream_get_le16(&src); previous_left_sample = (int16_t)bytestream_get_le16(&src); current_right_sample = (int16_t)bytestream_get_le16(&src); previous_right_sample = (int16_t)bytestream_get_le16(&src); for (count1 = 0; count1 < samples_in_chunk/28;count1++) { coeff1l = ea_adpcm_table[ *src >> 4 ]; coeff2l = ea_adpcm_table[(*src >> 4 ) + 4]; coeff1r = ea_adpcm_table[*src & 0x0F]; coeff2r = ea_adpcm_table[(*src & 0x0F) + 4]; src++; shift_left = (*src >> 4 ) + 8; shift_right = (*src & 0x0F) + 8; src++; for (count2 = 0; count2 < 28; count2++) { next_left_sample = (int32_t)((*src & 0xF0) << 24) >> shift_left; next_right_sample = (int32_t)((*src & 0x0F) << 28) >> shift_right; src++; next_left_sample = (next_left_sample + (current_left_sample * coeff1l) + (previous_left_sample * coeff2l) + 0x80) >> 8; next_right_sample = (next_right_sample + (current_right_sample * coeff1r) + (previous_right_sample * coeff2r) + 0x80) >> 8; previous_left_sample = current_left_sample; current_left_sample = av_clip_int16(next_left_sample); previous_right_sample = current_right_sample; current_right_sample = av_clip_int16(next_right_sample); *samples++ = (unsigned short)current_left_sample; *samples++ = (unsigned short)current_right_sample; } } if (src - buf == buf_size - 2) src += 2; // Skip terminating 0x0000 break; case CODEC_ID_ADPCM_EA_MAXIS_XA: for(channel = 0; channel < avctx->channels; channel++) { for (i=0; i<2; i++) coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i]; shift[channel] = (*src & 0x0F) + 8; src++; } for (count1 = 0; count1 < (buf_size - avctx->channels) / avctx->channels; count1++) { for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */ for(channel = 0; channel < avctx->channels; channel++) { int32_t sample = (int32_t)(((*(src+channel) >> i) & 0x0F) << 0x1C) >> shift[channel]; sample = (sample + c->status[channel].sample1 * coeff[channel][0] + c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8; c->status[channel].sample2 = c->status[channel].sample1; c->status[channel].sample1 = av_clip_int16(sample); *samples++ = c->status[channel].sample1; } } src+=avctx->channels; } break; case CODEC_ID_ADPCM_EA_R1: case CODEC_ID_ADPCM_EA_R2: case CODEC_ID_ADPCM_EA_R3: { /* channel numbering 2chan: 0=fl, 1=fr 4chan: 0=fl, 1=rl, 2=fr, 3=rr 6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */ const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3; int32_t previous_sample, current_sample, next_sample; int32_t coeff1, coeff2; uint8_t shift; unsigned int channel; uint16_t *samplesC; const uint8_t *srcC; const uint8_t *src_end = buf + buf_size; samples_in_chunk = (big_endian ? 
bytestream_get_be32(&src) : bytestream_get_le32(&src)) / 28; if (samples_in_chunk > UINT32_MAX/(28*avctx->channels) || 28*samples_in_chunk*avctx->channels > samples_end-samples) { src += buf_size - 4; break; } for (channel=0; channel<avctx->channels; channel++) { int32_t offset = (big_endian ? bytestream_get_be32(&src) : bytestream_get_le32(&src)) + (avctx->channels-channel-1) * 4; if ((offset < 0) || (offset >= src_end - src - 4)) break; srcC = src + offset; samplesC = samples + channel; if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) { current_sample = (int16_t)bytestream_get_le16(&srcC); previous_sample = (int16_t)bytestream_get_le16(&srcC); } else { current_sample = c->status[channel].predictor; previous_sample = c->status[channel].prev_sample; } for (count1=0; count1<samples_in_chunk/28; count1++) { if (*srcC == 0xEE) { /* only seen in R2 and R3 */ srcC++; if (srcC > src_end - 30*2) break; current_sample = (int16_t)bytestream_get_be16(&srcC); previous_sample = (int16_t)bytestream_get_be16(&srcC); for (count2=0; count2<28; count2++) { *samplesC = (int16_t)bytestream_get_be16(&srcC); samplesC += avctx->channels; } } else { coeff1 = ea_adpcm_table[ *srcC>>4 ]; coeff2 = ea_adpcm_table[(*srcC>>4) + 4]; shift = (*srcC++ & 0x0F) + 8; if (srcC > src_end - 14) break; for (count2=0; count2<28; count2++) { if (count2 & 1) next_sample = (int32_t)((*srcC++ & 0x0F) << 28) >> shift; else next_sample = (int32_t)((*srcC & 0xF0) << 24) >> shift; next_sample += (current_sample * coeff1) + (previous_sample * coeff2); next_sample = av_clip_int16(next_sample >> 8); previous_sample = current_sample; current_sample = next_sample; *samplesC = current_sample; samplesC += avctx->channels; } } } if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) { c->status[channel].predictor = current_sample; c->status[channel].prev_sample = previous_sample; } } src = src + buf_size - (4 + 4*avctx->channels); samples += 28 * samples_in_chunk * avctx->channels; break; } case CODEC_ID_ADPCM_EA_XAS: if (samples_end-samples < 32*4*avctx->channels || buf_size < (4+15)*4*avctx->channels) { src += buf_size; break; } for (channel=0; channel<avctx->channels; channel++) { int coeff[2][4], shift[4]; short *s2, *s = &samples[channel]; for (n=0; n<4; n++, s+=32*avctx->channels) { for (i=0; i<2; i++) coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i]; shift[n] = (src[2]&0x0F) + 8; for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels) s2[0] = (src[0]&0xF0) + (src[1]<<8); } for (m=2; m<32; m+=2) { s = &samples[m*avctx->channels + channel]; for (n=0; n<4; n++, src++, s+=32*avctx->channels) { for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) { int level = (int32_t)((*src & (0xF0>>i)) << (24+i)) >> shift[n]; int pred = s2[-1*avctx->channels] * coeff[0][n] + s2[-2*avctx->channels] * coeff[1][n]; s2[0] = av_clip_int16((level + pred + 0x80) >> 8); } } } } samples += 32*4*avctx->channels; break; case CODEC_ID_ADPCM_IMA_AMV: case CODEC_ID_ADPCM_IMA_SMJPEG: c->status[0].predictor = (int16_t)bytestream_get_le16(&src); c->status[0].step_index = bytestream_get_le16(&src); if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) src+=4; while (src < buf + buf_size) { char hi, lo; lo = *src & 0x0F; hi = *src >> 4; if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) FFSWAP(char, hi, lo); *samples++ = adpcm_ima_expand_nibble(&c->status[0], lo, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[0], hi, 3); src++; } break; case CODEC_ID_ADPCM_CT: while (src < buf + buf_size) { uint8_t v = *src++; *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 ); *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F); } break; case CODEC_ID_ADPCM_SBPRO_4: case CODEC_ID_ADPCM_SBPRO_3: case
CODEC_ID_ADPCM_SBPRO_2: if (!c->status[0].step_index) { /* the first byte is a raw sample */ *samples++ = 128 * (*src++ - 0x80); if (st) *samples++ = 128 * (*src++ - 0x80); c->status[0].step_index = 1; } if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) { while (src < buf + buf_size) { *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] >> 4, 4, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], src[0] & 0x0F, 4, 0); src++; } } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) { while (src < buf + buf_size && samples + 2 < samples_end) { *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] >> 5 , 3, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], (src[0] >> 2) & 0x07, 3, 0); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] & 0x03, 2, 0); src++; } } else { while (src < buf + buf_size && samples + 3 < samples_end) { *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], src[0] >> 6 , 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], (src[0] >> 4) & 0x03, 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], (src[0] >> 2) & 0x03, 2, 2); *samples++ = adpcm_sbpro_expand_nibble(&c->status[st], src[0] & 0x03, 2, 2); src++; } } break; case CODEC_ID_ADPCM_SWF: { GetBitContext gb; const int *table; int k0, signmask, nb_bits, count; int size = buf_size*8; init_get_bits(&gb, buf, size); //read bits & initial values nb_bits = get_bits(&gb, 2)+2; //av_log(NULL,AV_LOG_INFO,\"nb_bits: %d\\n\", nb_bits); table = swf_index_tables[nb_bits-2]; k0 = 1 << (nb_bits-2); signmask = 1 << (nb_bits-1); while (get_bits_count(&gb) <= size - 22*avctx->channels) { for (i = 0; i < avctx->channels; i++) { *samples++ = c->status[i].predictor = get_sbits(&gb, 16); c->status[i].step_index = get_bits(&gb, 6); } for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) { int i; for (i = 0; i < avctx->channels; i++) { // similar to IMA adpcm int delta = get_bits(&gb, nb_bits); int step = ff_adpcm_step_table[c->status[i].step_index]; long vpdiff = 0; // vpdiff = (delta+0.5)*step/4 int k = k0; do { if (delta & k) vpdiff += step; step >>= 1; k >>= 1; } while(k); vpdiff += step; if (delta & signmask) c->status[i].predictor -= vpdiff; else c->status[i].predictor += vpdiff; c->status[i].step_index += table[delta & (~signmask)]; c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88); c->status[i].predictor = av_clip_int16(c->status[i].predictor); *samples++ = c->status[i].predictor; if (samples >= samples_end) { av_log(avctx, AV_LOG_ERROR, \"allocated output buffer is too small\\n\"); return -1; } } } } src += buf_size; break; } case CODEC_ID_ADPCM_YAMAHA: while (src < buf + buf_size) { uint8_t v = *src++; *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F); *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 ); } break; case CODEC_ID_ADPCM_THP: { int table[2][16]; unsigned int samplecnt; int prev[2][2]; int ch; if (buf_size < 80) { av_log(avctx, AV_LOG_ERROR, \"frame too small\\n\"); return -1; } src+=4; samplecnt = bytestream_get_be32(&src); for (i = 0; i < 32; i++) table[0][i] = (int16_t)bytestream_get_be16(&src); /* Initialize the previous sample. */ for (i = 0; i < 4; i++) prev[0][i] = (int16_t)bytestream_get_be16(&src); if (samplecnt >= (samples_end - samples) / (st + 1)) { av_log(avctx, AV_LOG_ERROR, \"allocated output buffer is too small\\n\"); return -1; } for (ch = 0; ch <= st; ch++) { samples = (unsigned short *) data + ch; /* Read in every sample for this channel. 
*/ for (i = 0; i < samplecnt / 14; i++) { int index = (*src >> 4) & 7; unsigned int exp = 28 - (*src++ & 15); int factor1 = table[ch][index * 2]; int factor2 = table[ch][index * 2 + 1]; /* Decode 14 samples. */ for (n = 0; n < 14; n++) { int32_t sampledat; if(n&1) sampledat= *src++ <<28; else sampledat= (*src&0xF0)<<24; sampledat = ((prev[ch][0]*factor1 + prev[ch][1]*factor2) >> 11) + (sampledat>>exp); *samples = av_clip_int16(sampledat); prev[ch][1] = prev[ch][0]; prev[ch][0] = *samples++; /* In case of stereo, skip one sample, this sample is for the other channel. */ samples += st; } } } /* In the previous loop, in case stereo is used, samples is increased exactly one time too often. */ samples -= st; break; } default: return -1; } *data_size = (uint8_t *)samples - (uint8_t *)data; return src - buf; }"} {"target": 0, "idx": 25937, "func": "static int smacker_decode_header_tree(SmackVContext *smk, GetBitContext *gb, int **recodes, int *last, int size) { int res; HuffContext huff; HuffContext tmp1, tmp2; VLC vlc[2] = { { 0 } }; int escapes[3]; DBCtx ctx; int err = 0; if(size >= UINT_MAX>>4){ // (((size + 3) >> 2) + 3) << 2 must not overflow av_log(smk->avctx, AV_LOG_ERROR, \"size too large\\n\"); return AVERROR_INVALIDDATA; } tmp1.length = 256; tmp1.maxlength = 0; tmp1.current = 0; tmp1.bits = av_mallocz(256 * 4); tmp1.lengths = av_mallocz(256 * sizeof(int)); tmp1.values = av_mallocz(256 * sizeof(int)); tmp2.length = 256; tmp2.maxlength = 0; tmp2.current = 0; tmp2.bits = av_mallocz(256 * 4); tmp2.lengths = av_mallocz(256 * sizeof(int)); tmp2.values = av_mallocz(256 * sizeof(int)); if(get_bits1(gb)) { smacker_decode_tree(gb, &tmp1, 0, 0); skip_bits1(gb); if(tmp1.current > 1) { res = init_vlc(&vlc[0], SMKTREE_BITS, tmp1.length, tmp1.lengths, sizeof(int), sizeof(int), tmp1.bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE); if(res < 0) { av_log(smk->avctx, AV_LOG_ERROR, \"Cannot build VLC table\\n\"); return AVERROR_INVALIDDATA; } } } if (!vlc[0].table) { av_log(smk->avctx, AV_LOG_ERROR, \"Skipping low bytes tree\\n\"); } if(get_bits1(gb)){ smacker_decode_tree(gb, &tmp2, 0, 0); skip_bits1(gb); if(tmp2.current > 1) { res = init_vlc(&vlc[1], SMKTREE_BITS, tmp2.length, tmp2.lengths, sizeof(int), sizeof(int), tmp2.bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE); if(res < 0) { av_log(smk->avctx, AV_LOG_ERROR, \"Cannot build VLC table\\n\"); return AVERROR_INVALIDDATA; } } } if (!vlc[1].table) { av_log(smk->avctx, AV_LOG_ERROR, \"Skipping high bytes tree\\n\"); } escapes[0] = get_bits(gb, 16); escapes[1] = get_bits(gb, 16); escapes[2] = get_bits(gb, 16); last[0] = last[1] = last[2] = -1; ctx.escapes[0] = escapes[0]; ctx.escapes[1] = escapes[1]; ctx.escapes[2] = escapes[2]; ctx.v1 = &vlc[0]; ctx.v2 = &vlc[1]; ctx.recode1 = tmp1.values; ctx.recode2 = tmp2.values; ctx.last = last; huff.length = ((size + 3) >> 2) + 3; huff.maxlength = 0; huff.current = 0; huff.values = av_mallocz(huff.length * sizeof(int)); if (smacker_decode_bigtree(gb, &huff, &ctx) < 0) err = -1; skip_bits1(gb); if(ctx.last[0] == -1) ctx.last[0] = huff.current++; if(ctx.last[1] == -1) ctx.last[1] = huff.current++; if(ctx.last[2] == -1) ctx.last[2] = huff.current++; if(huff.current > huff.length){ ctx.last[0] = ctx.last[1] = ctx.last[2] = 1; av_log(smk->avctx, AV_LOG_ERROR, \"bigtree damaged\\n\"); return AVERROR_INVALIDDATA; } *recodes = huff.values; if(vlc[0].table) ff_free_vlc(&vlc[0]); if(vlc[1].table) ff_free_vlc(&vlc[1]); av_free(tmp1.bits); av_free(tmp1.lengths); av_free(tmp1.values); av_free(tmp2.bits); 
av_free(tmp2.lengths); av_free(tmp2.values); return err; }"} {"target": 1, "idx": 25947, "func": "static int standard_decode_picture_primary_header(VC9Context *v) { GetBitContext *gb = &v->s.gb; int status = 0; if (v->finterpflag) v->interpfrm = get_bits(gb, 1); skip_bits(gb, 2); //framecnt unused if (v->rangered) v->rangeredfrm = get_bits(gb, 1); v->s.pict_type = get_bits(gb, 1); if (v->s.avctx->max_b_frames) { if (!v->s.pict_type) { if (get_bits(gb, 1)) v->s.pict_type = I_TYPE; else v->s.pict_type = B_TYPE; } else v->s.pict_type = P_TYPE; } else v->s.pict_type++; switch (v->s.pict_type) { case I_TYPE: status = decode_i_picture_header(v); break; case P_TYPE: status = decode_p_picture_primary_header(v); break; case BI_TYPE: case B_TYPE: status = decode_b_picture_primary_header(v); break; } if (status == FRAME_SKIPED) { av_log(v->s.avctx, AV_LOG_INFO, \"Skipping frame...\\n\"); return status; } return 0; }"} {"target": 0, "idx": 25948, "func": "static FFServerIPAddressACL* parse_dynamic_acl(FFServerStream *stream, HTTPContext *c) { FILE* f; char line[1024]; char cmd[1024]; FFServerIPAddressACL *acl = NULL; int line_num = 0; const char *p; f = fopen(stream->dynamic_acl, \"r\"); if (!f) { perror(stream->dynamic_acl); return NULL; } acl = av_mallocz(sizeof(FFServerIPAddressACL)); /* Build ACL */ for(;;) { if (fgets(line, sizeof(line), f) == NULL) break; line_num++; p = line; while (av_isspace(*p)) p++; if (*p == '\\0' || *p == '#') continue; ffserver_get_arg(cmd, sizeof(cmd), &p); if (!av_strcasecmp(cmd, \"ACL\")) ffserver_parse_acl_row(NULL, NULL, acl, p, stream->dynamic_acl, line_num); } fclose(f); return acl; }"} {"target": 0, "idx": 25958, "func": "static int discard_f(BlockBackend *blk, int argc, char **argv) { struct timeval t1, t2; int Cflag = 0, qflag = 0; int c, ret; int64_t offset, count; while ((c = getopt(argc, argv, \"Cq\")) != -1) { switch (c) { case 'C': Cflag = 1; break; case 'q': qflag = 1; break; default: return qemuio_command_usage(&discard_cmd); } } if (optind != argc - 2) { return qemuio_command_usage(&discard_cmd); } offset = cvtnum(argv[optind]); if (offset < 0) { print_cvtnum_err(offset, argv[optind]); return 0; } optind++; count = cvtnum(argv[optind]); if (count < 0) { print_cvtnum_err(count, argv[optind]); return 0; } else if (count >> BDRV_SECTOR_BITS > INT_MAX) { printf(\"length cannot exceed %\"PRIu64\", given %s\\n\", (uint64_t)INT_MAX << BDRV_SECTOR_BITS, argv[optind]); return 0; } gettimeofday(&t1, NULL); ret = blk_discard(blk, offset >> BDRV_SECTOR_BITS, count >> BDRV_SECTOR_BITS); gettimeofday(&t2, NULL); if (ret < 0) { printf(\"discard failed: %s\\n\", strerror(-ret)); goto out; } /* Finally, report back -- -C gives a parsable format */ if (!qflag) { t2 = tsub(t2, t1); print_report(\"discard\", &t2, offset, count, count, 1, Cflag); } out: return 0; }"} {"target": 0, "idx": 25997, "func": "void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, hwaddr size) { assert(mr->terminates); cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size, memory_region_get_dirty_log_mask(mr)); }"} {"target": 0, "idx": 26003, "func": "static void vnc_display_print_local_addr(VncDisplay *vd) { SocketAddressLegacy *addr; Error *err = NULL; if (!vd->nlsock) { return; } addr = qio_channel_socket_get_local_address(vd->lsock[0], &err); if (!addr) { return; } if (addr->type != SOCKET_ADDRESS_LEGACY_KIND_INET) { qapi_free_SocketAddressLegacy(addr); return; } error_printf_unless_qmp(\"VNC server running on %s:%s\\n\", addr->u.inet.data->host, addr->u.inet.data->port); 
qapi_free_SocketAddressLegacy(addr); }"} {"target": 0, "idx": 26008, "func": "static int v9fs_synth_lstat(FsContext *fs_ctx, V9fsPath *fs_path, struct stat *stbuf) { V9fsSynthNode *node = *(V9fsSynthNode **)fs_path->data; v9fs_synth_fill_statbuf(node, stbuf); return 0; }"} {"target": 1, "idx": 26030, "func": "static av_cold int qsv_decode_close(AVCodecContext *avctx) { QSVOtherContext *s = avctx->priv_data; ff_qsv_decode_close(&s->qsv); qsv_clear_buffers(s); av_fifo_free(s->packet_fifo); return 0; }"} {"target": 1, "idx": 26031, "func": "int ff_ass_split_override_codes(const ASSCodesCallbacks *callbacks, void *priv, const char *buf) { const char *text = NULL; char new_line[2]; int text_len = 0; while (*buf) { if (text && callbacks->text && (sscanf(buf, \"\\\\%1[nN]\", new_line) == 1 || !strncmp(buf, \"{\\\\\", 2))) { callbacks->text(priv, text, text_len); text = NULL; } if (sscanf(buf, \"\\\\%1[nN]\", new_line) == 1) { if (callbacks->new_line) callbacks->new_line(priv, new_line[0] == 'N'); buf += 2; } else if (!strncmp(buf, \"{\\\\\", 2)) { buf++; while (*buf == '\\\\') { char style[2], c[2], sep[2], c_num[2] = \"0\", tmp[128] = {0}; unsigned int color = 0xFFFFFFFF; int len, size = -1, an = -1, alpha = -1; int x1, y1, x2, y2, t1 = -1, t2 = -1; if (sscanf(buf, \"\\\\%1[bisu]%1[01\\\\}]%n\", style, c, &len) > 1) { int close = c[0] == '0' ? 1 : c[0] == '1' ? 0 : -1; len += close != -1; if (callbacks->style) callbacks->style(priv, style[0], close); } else if (sscanf(buf, \"\\\\c%1[\\\\}]%n\", sep, &len) > 0 || sscanf(buf, \"\\\\c&H%X&%1[\\\\}]%n\", &color, sep, &len) > 1 || sscanf(buf, \"\\\\%1[1234]c%1[\\\\}]%n\", c_num, sep, &len) > 1 || sscanf(buf, \"\\\\%1[1234]c&H%X&%1[\\\\}]%n\", c_num, &color, sep, &len) > 2) { if (callbacks->color) callbacks->color(priv, color, c_num[0] - '0'); } else if (sscanf(buf, \"\\\\alpha%1[\\\\}]%n\", sep, &len) > 0 || sscanf(buf, \"\\\\alpha&H%2X&%1[\\\\}]%n\", &alpha, sep, &len) > 1 || sscanf(buf, \"\\\\%1[1234]a%1[\\\\}]%n\", c_num, sep, &len) > 1 || sscanf(buf, \"\\\\%1[1234]a&H%2X&%1[\\\\}]%n\", c_num, &alpha, sep, &len) > 2) { if (callbacks->alpha) callbacks->alpha(priv, alpha, c_num[0] - '0'); } else if (sscanf(buf, \"\\\\fn%1[\\\\}]%n\", sep, &len) > 0 || sscanf(buf, \"\\\\fn%127[^\\\\}]%1[\\\\}]%n\", tmp, sep, &len) > 1) { if (callbacks->font_name) callbacks->font_name(priv, tmp[0] ? tmp : NULL); } else if (sscanf(buf, \"\\\\fs%1[\\\\}]%n\", sep, &len) > 0 || sscanf(buf, \"\\\\fs%u%1[\\\\}]%n\", &size, sep, &len) > 1) { if (callbacks->font_size) callbacks->font_size(priv, size); } else if (sscanf(buf, \"\\\\a%1[\\\\}]%n\", sep, &len) > 0 || sscanf(buf, \"\\\\a%2u%1[\\\\}]%n\", &an, sep, &len) > 1 || sscanf(buf, \"\\\\an%1[\\\\}]%n\", sep, &len) > 0 || sscanf(buf, \"\\\\an%1u%1[\\\\}]%n\", &an, sep, &len) > 1) { if (an != -1 && buf[2] != 'n') an = (an&3) + (an&4 ? 6 : an&8 ? 
3 : 0); if (callbacks->alignment) callbacks->alignment(priv, an); } else if (sscanf(buf, \"\\\\r%1[\\\\}]%n\", sep, &len) > 0 || sscanf(buf, \"\\\\r%127[^\\\\}]%1[\\\\}]%n\", tmp, sep, &len) > 1) { if (callbacks->cancel_overrides) callbacks->cancel_overrides(priv, tmp); } else if (sscanf(buf, \"\\\\move(%d,%d,%d,%d)%1[\\\\}]%n\", &x1, &y1, &x2, &y2, sep, &len) > 4 || sscanf(buf, \"\\\\move(%d,%d,%d,%d,%d,%d)%1[\\\\}]%n\", &x1, &y1, &x2, &y2, &t1, &t2, sep, &len) > 6) { if (callbacks->move) callbacks->move(priv, x1, y1, x2, y2, t1, t2); } else if (sscanf(buf, \"\\\\pos(%d,%d)%1[\\\\}]%n\", &x1, &y1, sep, &len) > 2) { if (callbacks->move) callbacks->move(priv, x1, y1, x1, y1, -1, -1); } else if (sscanf(buf, \"\\\\org(%d,%d)%1[\\\\}]%n\", &x1, &y1, sep, &len) > 2) { if (callbacks->origin) callbacks->origin(priv, x1, y1); } else { len = strcspn(buf+1, \"\\\\}\") + 2; /* skip unknown code */ } buf += len - 1; } if (*buf++ != '}') return AVERROR_INVALIDDATA; } else { if (!text) { text = buf; text_len = 1; } else text_len++; buf++; } } if (text && callbacks->text) callbacks->text(priv, text, text_len); if (callbacks->end) callbacks->end(priv); return 0; }"} {"target": 1, "idx": 26038, "func": "static void compute_scale_factors(unsigned char scale_code[SBLIMIT], unsigned char scale_factors[SBLIMIT][3], int sb_samples[3][12][SBLIMIT], int sblimit) { int *p, vmax, v, n, i, j, k, code; int index, d1, d2; unsigned char *sf = &scale_factors[0][0]; for(j=0;j<sblimit;j++) { for(i=0;i<3;i++) { /* find the max absolute value */ p = &sb_samples[i][0][j]; vmax = abs(*p); for(k=1;k<12;k++) { p += SBLIMIT; v = abs(*p); if (v > vmax) vmax = v; } /* compute the scale factor index using log 2 computations */ if (vmax > 0) { n = av_log2(vmax); /* n is the position of the MSB of vmax. now use at most 2 compares to find the index */ index = (21 - n) * 3 - 3; if (index >= 0) { while (vmax <= scale_factor_table[index+1]) index++; } else { index = 0; /* very unlikely case of overflow */ } } else { index = 62; /* value 63 is not allowed */ } #if 0 printf(\"%2d:%d in=%x %x %d\\n\", j, i, vmax, scale_factor_table[index], index); #endif /* store the scale factor */ assert(index >=0 && index <= 63); sf[i] = index; } /* compute the transmission factor : look if the scale factors are close enough to each other */ d1 = scale_diff_table[sf[0] - sf[1] + 64]; d2 = scale_diff_table[sf[1] - sf[2] + 64]; /* handle the 25 cases */ switch(d1 * 5 + d2) { case 0*5+0: case 0*5+4: case 3*5+4: case 4*5+0: case 4*5+4: code = 0; break; case 0*5+1: case 0*5+2: case 4*5+1: case 4*5+2: code = 3; sf[2] = sf[1]; break; case 0*5+3: case 4*5+3: code = 3; sf[1] = sf[2]; break; case 1*5+0: case 1*5+4: case 2*5+4: code = 1; sf[1] = sf[0]; break; case 1*5+1: case 1*5+2: case 2*5+0: case 2*5+1: case 2*5+2: code = 2; sf[1] = sf[2] = sf[0]; break; case 2*5+3: case 3*5+3: code = 2; sf[0] = sf[1] = sf[2]; break; case 3*5+0: case 3*5+1: case 3*5+2: code = 2; sf[0] = sf[2] = sf[1]; break; case 1*5+3: code = 2; if (sf[0] > sf[2]) sf[0] = sf[2]; sf[1] = sf[2] = sf[0]; break; default: assert(0); //cannot happen code = 0; /* kill warning */ } #if 0 printf(\"%d: %2d %2d %2d %d %d -> %d\\n\", j, sf[0], sf[1], sf[2], d1, d2, code); #endif scale_code[j] = code; sf += 3; } }"} {"target": 0, "idx": 26059, "func": "static void armv7m_nvic_clear_pending(void *opaque, int irq) { NVICState *s = (NVICState *)opaque; VecInfo *vec; assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); vec = &s->vectors[irq]; trace_nvic_clear_pending(irq, vec->enabled, vec->prio); if (vec->pending) { vec->pending = 0; nvic_irq_update(s); } }"} {"target": 0, "idx": 26061, "func": "static void vararg_string(void) { int i; struct { const char *decoded; }
test_cases[] = { { \"hello world\" }, { \"the quick brown fox jumped over the fence\" }, {} }; for (i = 0; test_cases[i].decoded; i++) { QObject *obj; QString *str; obj = qobject_from_jsonf(\"%s\", test_cases[i].decoded); g_assert(obj != NULL); g_assert(qobject_type(obj) == QTYPE_QSTRING); str = qobject_to_qstring(obj); g_assert(strcmp(qstring_get_str(str), test_cases[i].decoded) == 0); QDECREF(str); } }"} {"target": 0, "idx": 26069, "func": "static int scsi_qdev_exit(DeviceState *qdev) { SCSIDevice *dev = SCSI_DEVICE(qdev); if (dev->vmsentry) { qemu_del_vm_change_state_handler(dev->vmsentry); } scsi_device_destroy(dev); return 0; }"} {"target": 0, "idx": 26075, "func": "static int gen_sub_bitmap(TeletextContext *ctx, AVSubtitleRect *sub_rect, vbi_page *page, int chop_top) { int resx = page->columns * BITMAP_CHAR_WIDTH; int resy = (page->rows - chop_top) * BITMAP_CHAR_HEIGHT; uint8_t ci, cmax = 0; int ret; vbi_char *vc = page->text + (chop_top * page->columns); vbi_char *vcend = page->text + (page->rows * page->columns); for (; vc < vcend; vc++) { if (vc->opacity != VBI_TRANSPARENT_SPACE) { cmax = VBI_NB_COLORS; break; } } if (cmax == 0) { av_log(ctx, AV_LOG_DEBUG, \"dropping empty page %3x\\n\", page->pgno); sub_rect->type = SUBTITLE_NONE; return 0; } if ((ret = avpicture_alloc(&sub_rect->pict, AV_PIX_FMT_PAL8, resx, resy)) < 0) return ret; // Yes, we want to allocate the palette on our own because AVSubtitle works this way sub_rect->pict.data[1] = NULL; vbi_draw_vt_page_region(page, VBI_PIXFMT_PAL8, sub_rect->pict.data[0], sub_rect->pict.linesize[0], 0, chop_top, page->columns, page->rows - chop_top, /*reveal*/ 1, /*flash*/ 1); fix_transparency(ctx, sub_rect, page, chop_top, cmax, resx, resy); sub_rect->x = ctx->x_offset; sub_rect->y = ctx->y_offset + chop_top * BITMAP_CHAR_HEIGHT; sub_rect->w = resx; sub_rect->h = resy; sub_rect->nb_colors = (int)cmax + 1; sub_rect->pict.data[1] = av_mallocz(AVPALETTE_SIZE); if (!sub_rect->pict.data[1]) { av_freep(&sub_rect->pict.data[0]); return AVERROR(ENOMEM); } for (ci = 0; ci < cmax; ci++) { int r, g, b, a; r = VBI_R(page->color_map[ci]); g = VBI_G(page->color_map[ci]); b = VBI_B(page->color_map[ci]); a = VBI_A(page->color_map[ci]); ((uint32_t *)sub_rect->pict.data[1])[ci] = RGBA(r, g, b, a); av_dlog(ctx, \"palette %0x\\n\", ((uint32_t *)sub_rect->pict.data[1])[ci]); } ((uint32_t *)sub_rect->pict.data[1])[cmax] = RGBA(0, 0, 0, 0); sub_rect->type = SUBTITLE_BITMAP; return 0; }"} {"target": 1, "idx": 26087, "func": "int opt_default(const char *opt, const char *arg) { const AVOption *oc, *of, *os, *oswr; char opt_stripped[128]; const char *p; const AVClass *cc = avcodec_get_class(), *fc = avformat_get_class(), *sc, *swr_class; if (!(p = strchr(opt, ':'))) p = opt + strlen(opt); av_strlcpy(opt_stripped, opt, FFMIN(sizeof(opt_stripped), p - opt + 1)); if ((oc = av_opt_find(&cc, opt_stripped, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) || ((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') && (oc = av_opt_find(&cc, opt + 1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ)))) av_dict_set(&codec_opts, opt, arg, FLAGS(oc)); if ((of = av_opt_find(&fc, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) av_dict_set(&format_opts, opt, arg, FLAGS(of)); #if CONFIG_SWSCALE sc = sws_get_class(); if ((os = av_opt_find(&sc, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) { // XXX we only support sws_flags, not arbitrary sws options int ret = av_opt_set(sws_opts, opt, arg, 0); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, \"Error setting 
option %s.\\n\", opt); return ret; } } #endif swr_class = swr_get_class(); if (!oc && !of && !os && (oswr = av_opt_find(&swr_class, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) { int ret = av_opt_set(swr_opts, opt, arg, 0); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, \"Error setting option %s.\\n\", opt); return ret; } } if (oc || of || os || oswr) return 0; av_log(NULL, AV_LOG_ERROR, \"Unrecognized option '%s'\\n\", opt); return AVERROR_OPTION_NOT_FOUND; }"} {"target": 1, "idx": 26090, "func": "static void slavio_misc_init(target_phys_addr_t base, target_phys_addr_t aux1_base, target_phys_addr_t aux2_base, qemu_irq irq, qemu_irq fdc_tc) { DeviceState *dev; SysBusDevice *s; dev = qdev_create(NULL, \"slavio_misc\"); qdev_init(dev); s = sysbus_from_qdev(dev); if (base) { /* 8 bit registers */ /* Slavio control */ sysbus_mmio_map(s, 0, base + MISC_CFG); /* Diagnostics */ sysbus_mmio_map(s, 1, base + MISC_DIAG); /* Modem control */ sysbus_mmio_map(s, 2, base + MISC_MDM); /* 16 bit registers */ /* ss600mp diag LEDs */ sysbus_mmio_map(s, 3, base + MISC_LEDS); /* 32 bit registers */ /* System control */ sysbus_mmio_map(s, 4, base + MISC_SYS); } if (aux1_base) { /* AUX 1 (Misc System Functions) */ sysbus_mmio_map(s, 5, aux1_base); } if (aux2_base) { /* AUX 2 (Software Powerdown Control) */ sysbus_mmio_map(s, 6, aux2_base); } sysbus_connect_irq(s, 0, irq); sysbus_connect_irq(s, 1, fdc_tc); qemu_system_powerdown = qdev_get_gpio_in(dev, 0); }"} {"target": 1, "idx": 26100, "func": "int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y) { if (!mb_x) { return mb_y ? VERT_VP8_PRED : DC_129_PRED; } else { return mb_y ? mode : HOR_VP8_PRED; } }"} {"target": 1, "idx": 26110, "func": "static struct ResampleContext *create(struct ResampleContext *c, int out_rate, int in_rate, int filter_size, int phase_shift, int linear, double cutoff, enum AVSampleFormat format, enum SwrFilterType filter_type, double kaiser_beta, double precision, int cheby, int exact_rational){ soxr_error_t error; soxr_datatype_t type = format == AV_SAMPLE_FMT_S16P? SOXR_INT16_S : format == AV_SAMPLE_FMT_S16 ? SOXR_INT16_I : format == AV_SAMPLE_FMT_S32P? SOXR_INT32_S : format == AV_SAMPLE_FMT_S32 ? SOXR_INT32_I : format == AV_SAMPLE_FMT_FLTP? SOXR_FLOAT32_S : format == AV_SAMPLE_FMT_FLT ? SOXR_FLOAT32_I : format == AV_SAMPLE_FMT_DBLP? SOXR_FLOAT64_S : format == AV_SAMPLE_FMT_DBL ? SOXR_FLOAT64_I : (soxr_datatype_t)-1; soxr_io_spec_t io_spec = soxr_io_spec(type, type); soxr_quality_spec_t q_spec = soxr_quality_spec((int)((precision-2)/4), (SOXR_HI_PREC_CLOCK|SOXR_ROLLOFF_NONE)*!!cheby); q_spec.precision = linear? 0 : precision; #if !defined SOXR_VERSION /* Deprecated @ March 2013: */ q_spec.bw_pc = cutoff? FFMAX(FFMIN(cutoff,.995),.8)*100 : q_spec.bw_pc; #else q_spec.passband_end = cutoff? FFMAX(FFMIN(cutoff,.995),.8) : q_spec.passband_end; #endif soxr_delete((soxr_t)c); c = (struct ResampleContext *) soxr_create(in_rate, out_rate, 0, &error, &io_spec, &q_spec, 0); if (!c) av_log(NULL, AV_LOG_ERROR, \"soxr_create: %s\\n\", error); return c; }"} {"target": 0, "idx": 26113, "func": "static void bdrv_put_buffer(void *opaque, const uint8_t *buf, int64_t pos, int size) { QEMUFileBdrv *s = opaque; bdrv_pwrite(s->bs, s->base_offset + pos, buf, size); }"} {"target": 0, "idx": 26117, "func": "static void qemu_clock_init(QEMUClockType type) { QEMUClock *clock = qemu_clock_ptr(type); /* Assert that the clock of type TYPE has not been initialized yet. 
*/ assert(main_loop_tlg.tl[type] == NULL); clock->type = type; clock->enabled = (type == QEMU_CLOCK_VIRTUAL ? false : true); clock->last = INT64_MIN; QLIST_INIT(&clock->timerlists); notifier_list_init(&clock->reset_notifiers); main_loop_tlg.tl[type] = timerlist_new(type, NULL, NULL); }"} {"target": 1, "idx": 26136, "func": "static CharDriverState *qemu_chr_open_pty(QemuOpts *opts) { CharDriverState *chr; PtyCharDriver *s; struct termios tty; int master_fd, slave_fd, len; #if defined(__OpenBSD__) || defined(__DragonFly__) char pty_name[PATH_MAX]; #define q_ptsname(x) pty_name #else char *pty_name = NULL; #define q_ptsname(x) ptsname(x) #endif if (openpty(&master_fd, &slave_fd, pty_name, NULL, NULL) < 0) { return NULL; } /* Set raw attributes on the pty. */ tcgetattr(slave_fd, &tty); cfmakeraw(&tty); tcsetattr(slave_fd, TCSAFLUSH, &tty); close(slave_fd); chr = g_malloc0(sizeof(CharDriverState)); len = strlen(q_ptsname(master_fd)) + 5; chr->filename = g_malloc(len); snprintf(chr->filename, len, \"pty:%s\", q_ptsname(master_fd)); qemu_opt_set(opts, \"path\", q_ptsname(master_fd)); fprintf(stderr, \"char device redirected to %s\\n\", q_ptsname(master_fd)); s = g_malloc0(sizeof(PtyCharDriver)); chr->opaque = s; chr->chr_write = pty_chr_write; chr->chr_update_read_handler = pty_chr_update_read_handler; chr->chr_close = pty_chr_close; s->fd = master_fd; s->timer = qemu_new_timer_ms(rt_clock, pty_chr_timer, chr); return chr; }"} {"target": 0, "idx": 26152, "func": "uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1, uint64_t cpu_addr) { int cc = SIGP_CC_ORDER_CODE_ACCEPTED; HELPER_LOG(\"%s: %016\" PRIx64 \" %08x %016\" PRIx64 \"\\n\", __func__, order_code, r1, cpu_addr); /* Remember: Use \"R1 or R1 + 1, whichever is the odd-numbered register\" as parameter (input). Status (output) is always R1. */ switch (order_code) { case SIGP_SET_ARCH: /* switch arch */ break; case SIGP_SENSE: /* enumerate CPU status */ if (cpu_addr) { /* XXX implement when SMP comes */ return 3; } env->regs[r1] &= 0xffffffff00000000ULL; cc = 1; break; #if !defined(CONFIG_USER_ONLY) case SIGP_RESTART: qemu_system_reset_request(); cpu_loop_exit(CPU(s390_env_get_cpu(env))); break; case SIGP_STOP: qemu_system_shutdown_request(); cpu_loop_exit(CPU(s390_env_get_cpu(env))); break; #endif default: /* unknown sigp */ fprintf(stderr, \"XXX unknown sigp: 0x%\" PRIx64 \"\\n\", order_code); cc = SIGP_CC_NOT_OPERATIONAL; } return cc; }"} {"target": 0, "idx": 26154, "func": "yuv2rgb_1_c_template(SwsContext *c, const uint16_t *buf0, const uint16_t *ubuf0, const uint16_t *ubuf1, const uint16_t *vbuf0, const uint16_t *vbuf1, const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y, enum PixelFormat target, int hasAlpha) { int i; if (uvalpha < 2048) { for (i = 0; i < (dstW >> 1); i++) { int Y1 = buf0[i * 2] >> 7; int Y2 = buf0[i * 2 + 1] >> 7; int U = ubuf1[i] >> 7; int V = vbuf1[i] >> 7; int A1, A2; const void *r = c->table_rV[V], *g = (c->table_gU[U] + c->table_gV[V]), *b = c->table_bU[U]; if (hasAlpha) { A1 = abuf0[i * 2 ] >> 7; A2 = abuf0[i * 2 + 1] >> 7; } yuv2rgb_write(dest, i, Y1, Y2, U, V, hasAlpha ? A1 : 0, hasAlpha ? 
A2 : 0, r, g, b, y, target, hasAlpha); } } else { for (i = 0; i < (dstW >> 1); i++) { int Y1 = buf0[i * 2] >> 7; int Y2 = buf0[i * 2 + 1] >> 7; int U = (ubuf0[i] + ubuf1[i]) >> 8; int V = (vbuf0[i] + vbuf1[i]) >> 8; int A1, A2; const void *r = c->table_rV[V], *g = (c->table_gU[U] + c->table_gV[V]), *b = c->table_bU[U]; if (hasAlpha) { A1 = abuf0[i * 2 ] >> 7; A2 = abuf0[i * 2 + 1] >> 7; } yuv2rgb_write(dest, i, Y1, Y2, U, V, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0, r, g, b, y, target, hasAlpha); } } }"} {"target": 0, "idx": 26158, "func": "static inline int onenand_prog_main(OneNANDState *s, int sec, int secn, void *src) { int result = 0; if (secn > 0) { uint32_t size = (uint32_t)secn * 512; const uint8_t *sp = (const uint8_t *)src; uint8_t *dp = 0; if (s->bdrv_cur) { dp = g_malloc(size); if (!dp || bdrv_read(s->bdrv_cur, sec, dp, secn) < 0) { result = 1; } } else { if (sec + secn > s->secs_cur) { result = 1; } else { dp = (uint8_t *)s->current + (sec << 9); } } if (!result) { uint32_t i; for (i = 0; i < size; i++) { dp[i] &= sp[i]; } if (s->bdrv_cur) { result = bdrv_write(s->bdrv_cur, sec, dp, secn) < 0; } } if (dp && s->bdrv_cur) { g_free(dp); } } return result; }"} {"target": 0, "idx": 26160, "func": "static int nbd_receive_list(QIOChannel *ioc, char **name, Error **errp) { uint64_t magic; uint32_t opt; uint32_t type; uint32_t len; uint32_t namelen; int error; *name = NULL; if (read_sync(ioc, &magic, sizeof(magic)) != sizeof(magic)) { error_setg(errp, \"failed to read list option magic\"); return -1; } magic = be64_to_cpu(magic); if (magic != NBD_REP_MAGIC) { error_setg(errp, \"Unexpected option list magic\"); return -1; } if (read_sync(ioc, &opt, sizeof(opt)) != sizeof(opt)) { error_setg(errp, \"failed to read list option\"); return -1; } opt = be32_to_cpu(opt); if (opt != NBD_OPT_LIST) { error_setg(errp, \"Unexpected option type %\" PRIx32 \" expected %x\", opt, NBD_OPT_LIST); return -1; } if (read_sync(ioc, &type, sizeof(type)) != sizeof(type)) { error_setg(errp, \"failed to read list option type\"); return -1; } type = be32_to_cpu(type); error = nbd_handle_reply_err(ioc, opt, type, errp); if (error <= 0) { return error; } if (read_sync(ioc, &len, sizeof(len)) != sizeof(len)) { error_setg(errp, \"failed to read option length\"); return -1; } len = be32_to_cpu(len); if (type == NBD_REP_ACK) { if (len != 0) { error_setg(errp, \"length too long for option end\"); return -1; } } else if (type == NBD_REP_SERVER) { if (len < sizeof(namelen) || len > NBD_MAX_BUFFER_SIZE) { error_setg(errp, \"incorrect option length\"); return -1; } if (read_sync(ioc, &namelen, sizeof(namelen)) != sizeof(namelen)) { error_setg(errp, \"failed to read option name length\"); return -1; } namelen = be32_to_cpu(namelen); len -= sizeof(namelen); if (len < namelen) { error_setg(errp, \"incorrect option name length\"); return -1; } if (namelen > 255) { error_setg(errp, \"export name length too long %\" PRIu32, namelen); return -1; } *name = g_new0(char, namelen + 1); if (read_sync(ioc, *name, namelen) != namelen) { error_setg(errp, \"failed to read export name\"); g_free(*name); *name = NULL; return -1; } (*name)[namelen] = '\\0'; len -= namelen; if (len) { char *buf = g_malloc(len + 1); if (read_sync(ioc, buf, len) != len) { error_setg(errp, \"failed to read export description\"); g_free(*name); g_free(buf); *name = NULL; return -1; } buf[len] = '\\0'; TRACE(\"Ignoring export description: %s\", buf); g_free(buf); } } else { error_setg(errp, \"Unexpected reply type %\" PRIx32 \" expected %x\", type, NBD_REP_SERVER); 
return -1; } return 1; }"} {"target": 1, "idx": 26173, "func": "static int hwupload_query_formats(AVFilterContext *avctx) { HWUploadContext *ctx = avctx->priv; AVHWFramesConstraints *constraints = NULL; const enum AVPixelFormat *input_pix_fmts, *output_pix_fmts; AVFilterFormats *input_formats = NULL; int err, i; if (!avctx->hw_device_ctx) { av_log(ctx, AV_LOG_ERROR, \"A hardware device reference is required \" \"to upload frames to.\\n\"); return AVERROR(EINVAL); } ctx->hwdevice_ref = av_buffer_ref(avctx->hw_device_ctx); if (!ctx->hwdevice_ref) return AVERROR(ENOMEM); ctx->hwdevice = (AVHWDeviceContext*)ctx->hwdevice_ref->data; constraints = av_hwdevice_get_hwframe_constraints(ctx->hwdevice_ref, NULL); if (!constraints) { err = AVERROR(EINVAL); goto fail; } input_pix_fmts = constraints->valid_sw_formats; output_pix_fmts = constraints->valid_hw_formats; input_formats = ff_make_format_list(output_pix_fmts); if (!input_formats) { err = AVERROR(ENOMEM); goto fail; } if (input_pix_fmts) { for (i = 0; input_pix_fmts[i] != AV_PIX_FMT_NONE; i++) { err = ff_add_format(&input_formats, input_pix_fmts[i]); if (err < 0) { ff_formats_unref(&input_formats); goto fail; } } } ff_formats_ref(input_formats, &avctx->inputs[0]->out_formats); ff_formats_ref(ff_make_format_list(output_pix_fmts), &avctx->outputs[0]->in_formats); av_hwframe_constraints_free(&constraints); return 0; fail: av_buffer_unref(&ctx->hwdevice_ref); av_hwframe_constraints_free(&constraints); return err; }"} {"target": 1, "idx": 26175, "func": "void error_setg_win32(Error **errp, int win32_err, const char *fmt, ...) { va_list ap; char *msg1, *msg2; if (errp == NULL) { return; } va_start(ap, fmt); error_setv(errp, ERROR_CLASS_GENERIC_ERROR, fmt, ap); va_end(ap); if (win32_err != 0) { msg1 = (*errp)->msg; msg2 = g_win32_error_message(win32_err); (*errp)->msg = g_strdup_printf(\"%s: %s (error: %x)\", msg1, msg2, (unsigned)win32_err); g_free(msg2); g_free(msg1); } }"} {"target": 1, "idx": 26183, "func": "void ff_MPV_frame_end(MpegEncContext *s) { int i; /* redraw edges for the frame if decoding didn't complete */ // just to make sure that all data is rendered. 
if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) { ff_xvmc_field_end(s); } else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) && !s->avctx->hwaccel && !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) && s->unrestricted_mv && s->current_picture.f.reference && !s->intra_only && !(s->flags & CODEC_FLAG_EMU_EDGE)) { int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w; int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h; s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0], s->h_edge_pos, s->v_edge_pos, EDGE_WIDTH, EDGE_WIDTH, EDGE_TOP | EDGE_BOTTOM); s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1], s->h_edge_pos >> hshift, s->v_edge_pos >> vshift, EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, EDGE_TOP | EDGE_BOTTOM); s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2], s->h_edge_pos >> hshift, s->v_edge_pos >> vshift, EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, EDGE_TOP | EDGE_BOTTOM); } emms_c(); s->last_pict_type = s->pict_type; s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality; if (s->pict_type!= AV_PICTURE_TYPE_B) { s->last_non_b_pict_type = s->pict_type; } #if 0 /* copy back current_picture variables */ for (i = 0; i < MAX_PICTURE_COUNT; i++) { if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) { s->picture[i] = s->current_picture; break; } } assert(i < MAX_PICTURE_COUNT); #endif if (s->encoding) { /* release non-reference frames */ for (i = 0; i < s->picture_count; i++) { if (s->picture[i].f.data[0] && !s->picture[i].f.reference /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) { free_frame_buffer(s, &s->picture[i]); } } } // clear copies, to avoid confusion #if 0 memset(&s->last_picture, 0, sizeof(Picture)); memset(&s->next_picture, 0, sizeof(Picture)); memset(&s->current_picture, 0, sizeof(Picture)); #endif s->avctx->coded_frame = &s->current_picture_ptr->f; if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) { ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0); } }"} {"target": 0, "idx": 26191, "func": "int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size1) { int size, l; if (f->is_write) abort(); size = size1; while (size > 0) { l = f->buf_size - f->buf_index; if (l == 0) { qemu_fill_buffer(f); l = f->buf_size - f->buf_index; if (l == 0) break; } if (l > size) l = size; memcpy(buf, f->buf + f->buf_index, l); f->buf_index += l; buf += l; size -= l; } return size1 - size; }"} {"target": 0, "idx": 26225, "func": "void *rom_ptr(target_phys_addr_t addr) { Rom *rom; rom = find_rom(addr); if (!rom || !rom->data) return NULL; return rom->data + (addr - rom->addr); }"} {"target": 0, "idx": 26226, "func": "static void *do_data_decompress(void *opaque) { DecompressParam *param = opaque; unsigned long pagesize; while (!quit_decomp_thread) { qemu_mutex_lock(¶m->mutex); while (!param->start && !quit_decomp_thread) { qemu_cond_wait(¶m->cond, ¶m->mutex); } if (!quit_decomp_thread) { pagesize = TARGET_PAGE_SIZE; /* uncompress() will return failed in some case, especially * when the page is dirted when doing the compression, it's * not a problem because the dirty page will be retransferred * and uncompress() won't break the data in other pages. 
*/ uncompress((Bytef *)param->des, &pagesize, (const Bytef *)param->compbuf, param->len); } param->start = false; qemu_mutex_unlock(¶m->mutex); qemu_mutex_lock(&decomp_done_lock); param->done = true; qemu_cond_signal(&decomp_done_cond); qemu_mutex_unlock(&decomp_done_lock); } return NULL; }"} {"target": 1, "idx": 26234, "func": "long do_sigreturn(CPUSH4State *regs) { struct target_sigframe *frame; abi_ulong frame_addr; sigset_t blocked; target_sigset_t target_set; target_ulong r0; int i; int err = 0; #if defined(DEBUG_SIGNAL) fprintf(stderr, \"do_sigreturn\\n\"); #endif frame_addr = regs->gregs[15]; if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) goto badframe; __get_user(target_set.sig[0], &frame->sc.oldmask); for(i = 1; i < TARGET_NSIG_WORDS; i++) { __get_user(target_set.sig[i], &frame->extramask[i - 1]); } if (err) goto badframe; target_to_host_sigset_internal(&blocked, &target_set); do_sigprocmask(SIG_SETMASK, &blocked, NULL); if (restore_sigcontext(regs, &frame->sc, &r0)) goto badframe; unlock_user_struct(frame, frame_addr, 0); return r0; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); return 0; }"} {"target": 0, "idx": 26242, "func": "static void apic_reset_common(DeviceState *dev) { APICCommonState *s = APIC_COMMON(dev); APICCommonClass *info = APIC_COMMON_GET_CLASS(s); bool bsp; bsp = cpu_is_bsp(s->cpu); s->apicbase = APIC_DEFAULT_ADDRESS | (bsp ? MSR_IA32_APICBASE_BSP : 0) | MSR_IA32_APICBASE_ENABLE; s->vapic_paddr = 0; info->vapic_base_update(s); apic_init_reset(dev); if (bsp) { /* * LINT0 delivery mode on CPU #0 is set to ExtInt at initialization * time typically by BIOS, so PIC interrupt can be delivered to the * processor when local APIC is enabled. */ s->lvt[APIC_LVT_LINT0] = 0x700; } }"} {"target": 0, "idx": 26243, "func": "static void omap_pwt_init(target_phys_addr_t base, struct omap_mpu_state_s *s, omap_clk clk) { int iomemtype; s->pwt.base = base; s->pwt.clk = clk; omap_pwt_reset(s); iomemtype = cpu_register_io_memory(0, omap_pwt_readfn, omap_pwt_writefn, s); cpu_register_physical_memory(s->pwt.base, 0x800, iomemtype); }"} {"target": 0, "idx": 26245, "func": "int vnc_hextile_send_framebuffer_update(VncState *vs, int x, int y, int w, int h) { int i, j; int has_fg, has_bg; uint8_t *last_fg, *last_bg; VncDisplay *vd = vs->vd; last_fg = (uint8_t *) qemu_malloc(vd->server->pf.bytes_per_pixel); last_bg = (uint8_t *) qemu_malloc(vd->server->pf.bytes_per_pixel); has_fg = has_bg = 0; for (j = y; j < (y + h); j += 16) { for (i = x; i < (x + w); i += 16) { vs->send_hextile_tile(vs, i, j, MIN(16, x + w - i), MIN(16, y + h - j), last_bg, last_fg, &has_bg, &has_fg); } } free(last_fg); free(last_bg); return 1; }"} {"target": 0, "idx": 26251, "func": "static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors) { int async_ret; BlockDriverAIOCB *acb; struct iovec iov; QEMUIOVector qiov; async_ret = NOT_DONE; iov.iov_base = (void *)buf; iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE; qemu_iovec_init_external(&qiov, &iov, 1); acb = bs->drv->bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors, bdrv_rw_em_cb, &async_ret); if (acb == NULL) { async_ret = -1; goto fail; } while (async_ret == NOT_DONE) { qemu_aio_wait(); } fail: return async_ret; }"} {"target": 0, "idx": 26257, "func": "static int kvmppc_get_books_sregs(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; struct kvm_sregs sregs; int ret; int i; ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); if (ret < 0) { return ret; } if (!env->external_htab) { 
ppc_store_sdr1(env, sregs.u.s.sdr1); } /* Sync SLB */ #ifdef TARGET_PPC64 /* * The packed SLB array we get from KVM_GET_SREGS only contains * information about valid entries. So we flush our internal copy * to get rid of stale ones, then put all valid SLB entries back * in. */ memset(env->slb, 0, sizeof(env->slb)); for (i = 0; i < ARRAY_SIZE(env->slb); i++) { target_ulong rb = sregs.u.s.ppc64.slb[i].slbe; target_ulong rs = sregs.u.s.ppc64.slb[i].slbv; /* * Only restore valid entries */ if (rb & SLB_ESID_V) { ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs); } } #endif /* Sync SRs */ for (i = 0; i < 16; i++) { env->sr[i] = sregs.u.s.ppc32.sr[i]; } /* Sync BATs */ for (i = 0; i < 8; i++) { env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff; env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32; env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff; env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32; } return 0; }"} {"target": 0, "idx": 26259, "func": "static void integratorcp_init(ram_addr_t ram_size, int vga_ram_size, const char *boot_device, DisplayState *ds, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env; uint32_t ram_offset; qemu_irq *pic; qemu_irq *cpu_pic; int sd; if (!cpu_model) cpu_model = \"arm926\"; env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"Unable to find CPU definition\\n\"); exit(1); } ram_offset = qemu_ram_alloc(ram_size); /* ??? On a real system the first 1Mb is mapped as SSRAM or boot flash. */ /* ??? RAM should repeat to fill physical memory space. */ /* SDRAM at address zero*/ cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM); /* And again at address 0x80000000 */ cpu_register_physical_memory(0x80000000, ram_size, ram_offset | IO_MEM_RAM); integratorcm_init(ram_size >> 20); cpu_pic = arm_pic_init_cpu(env); pic = icp_pic_init(0x14000000, cpu_pic[ARM_PIC_CPU_IRQ], cpu_pic[ARM_PIC_CPU_FIQ]); icp_pic_init(0xca000000, pic[26], NULL); icp_pit_init(0x13000000, pic, 5); pl031_init(0x15000000, pic[8]); pl011_init(0x16000000, pic[1], serial_hds[0], PL011_ARM); pl011_init(0x17000000, pic[2], serial_hds[1], PL011_ARM); icp_control_init(0xcb000000); pl050_init(0x18000000, pic[3], 0); pl050_init(0x19000000, pic[4], 1); sd = drive_get_index(IF_SD, 0, 0); if (sd == -1) { fprintf(stderr, \"qemu: missing SecureDigital card\\n\"); exit(1); } pl181_init(0x1c000000, drives_table[sd].bdrv, pic[23], pic[24]); if (nd_table[0].vlan) { if (nd_table[0].model == NULL || strcmp(nd_table[0].model, \"smc91c111\") == 0) { smc91c111_init(&nd_table[0], 0xc8000000, pic[27]); } else if (strcmp(nd_table[0].model, \"?\") == 0) { fprintf(stderr, \"qemu: Supported NICs: smc91c111\\n\"); exit (1); } else { fprintf(stderr, \"qemu: Unsupported NIC: %s\\n\", nd_table[0].model); exit (1); } } pl110_init(ds, 0xc0000000, pic[22], 0); integrator_binfo.ram_size = ram_size; integrator_binfo.kernel_filename = kernel_filename; integrator_binfo.kernel_cmdline = kernel_cmdline; integrator_binfo.initrd_filename = initrd_filename; arm_load_kernel(env, &integrator_binfo); }"} {"target": 1, "idx": 26291, "func": "void hmp_cont(Monitor *mon, const QDict *qdict) { BlockInfoList *bdev_list, *bdev; Error *err = NULL; bdev_list = qmp_query_block(NULL); for (bdev = bdev_list; bdev; bdev = bdev->next) { if (key_is_missing(bdev->value)) { monitor_read_block_device_key(mon, bdev->value->device, hmp_cont_cb, NULL); goto out; } } qmp_cont(&err); hmp_handle_error(mon, &err); out: qapi_free_BlockInfoList(bdev_list); }"} {"target": 1, 
"idx": 26294, "func": "static int decode_5(SANMVideoContext *ctx) { #if HAVE_BIGENDIAN uint16_t *frm; int npixels; #endif uint8_t *dst = (uint8_t*)ctx->frm0; if (rle_decode(ctx, dst, ctx->buf_size)) return AVERROR_INVALIDDATA; #if HAVE_BIGENDIAN npixels = ctx->npixels; frm = ctx->frm0; while (npixels--) *frm++ = av_bswap16(*frm); #endif return 0; }"} {"target": 1, "idx": 26306, "func": "static void aux_bridge_init(Object *obj) { AUXTOI2CState *s = AUXTOI2C(obj); s->i2c_bus = i2c_init_bus(DEVICE(obj), \"aux-i2c\"); }"} {"target": 1, "idx": 26315, "func": "static void bdrv_co_drain_bh_cb(void *opaque) { BdrvCoDrainData *data = opaque; Coroutine *co = data->co; qemu_bh_delete(data->bh); bdrv_drain_poll(data->bs); data->done = true; qemu_coroutine_enter(co, NULL); }"} {"target": 1, "idx": 26320, "func": "static int add_graphics_client(Monitor *mon, const QDict *qdict, QObject **ret_data) { const char *protocol = qdict_get_str(qdict, \"protocol\"); const char *fdname = qdict_get_str(qdict, \"fdname\"); CharDriverState *s; if (strcmp(protocol, \"spice\") == 0) { int fd = monitor_get_fd(mon, fdname, NULL); int skipauth = qdict_get_try_bool(qdict, \"skipauth\", 0); int tls = qdict_get_try_bool(qdict, \"tls\", 0); if (!using_spice) { /* correct one? spice isn't a device ,,, */ qerror_report(QERR_DEVICE_NOT_ACTIVE, \"spice\"); return -1; } if (qemu_spice_display_add_client(fd, skipauth, tls) < 0) { close(fd); } return 0; #ifdef CONFIG_VNC } else if (strcmp(protocol, \"vnc\") == 0) { int fd = monitor_get_fd(mon, fdname, NULL); int skipauth = qdict_get_try_bool(qdict, \"skipauth\", 0); vnc_display_add_client(NULL, fd, skipauth); return 0; #endif } else if ((s = qemu_chr_find(protocol)) != NULL) { int fd = monitor_get_fd(mon, fdname, NULL); if (qemu_chr_add_client(s, fd) < 0) { qerror_report(QERR_ADD_CLIENT_FAILED); return -1; } return 0; } qerror_report(QERR_INVALID_PARAMETER, \"protocol\"); return -1; }"} {"target": 1, "idx": 26330, "func": "static gboolean qio_channel_tls_handshake_io(QIOChannel *ioc, GIOCondition condition, gpointer user_data) { QIOTask *task = user_data; QIOChannelTLS *tioc = QIO_CHANNEL_TLS( qio_task_get_source(task)); qio_channel_tls_handshake_task( tioc, task); object_unref(OBJECT(tioc)); return FALSE; }"} {"target": 1, "idx": 26333, "func": "static void pm_update_sci(VT686PMState *s) { int sci_level, pmsts; pmsts = acpi_pm1_evt_get_sts(&s->ar, s->ar.tmr.overflow_time); sci_level = (((pmsts & s->ar.pm1.evt.en) & (ACPI_BITMASK_RT_CLOCK_ENABLE | ACPI_BITMASK_POWER_BUTTON_ENABLE | ACPI_BITMASK_GLOBAL_LOCK_ENABLE | ACPI_BITMASK_TIMER_ENABLE)) != 0); qemu_set_irq(s->dev.irq[0], sci_level); /* schedule a timer interruption if needed */ acpi_pm_tmr_update(&s->ar, (s->ar.pm1.evt.en & ACPI_BITMASK_TIMER_ENABLE) && !(pmsts & ACPI_BITMASK_TIMER_STATUS)); }"} {"target": 0, "idx": 26360, "func": "static void flat_print_key_prefix(WriterContext *wctx) { FlatContext *flat = wctx->priv; const struct section *parent_section = wctx->section[wctx->level-1]; printf(\"%s\", flat->section_header[wctx->level].str); if (parent_section->flags & SECTION_FLAG_IS_ARRAY) { int n = parent_section->id == SECTION_ID_PACKETS_AND_FRAMES ? 
wctx->nb_section_packet_frame : wctx->nb_item[wctx->level-1]; printf(\"%d%s\", n, flat->sep_str); } }"} {"target": 1, "idx": 26367, "func": "static int sdp_parse_fmtp_config_h264(AVStream *stream, PayloadContext *h264_data, char *attr, char *value) { AVCodecContext *codec = stream->codec; assert(codec->codec_id == CODEC_ID_H264); assert(h264_data != NULL); if (!strcmp(attr, \"packetization-mode\")) { av_log(codec, AV_LOG_DEBUG, \"RTP Packetization Mode: %d\\n\", atoi(value)); h264_data->packetization_mode = atoi(value); /* * Packetization Mode: * 0 or not present: Single NAL mode (Only nals from 1-23 are allowed) * 1: Non-interleaved Mode: 1-23, 24 (STAP-A), 28 (FU-A) are allowed. * 2: Interleaved Mode: 25 (STAP-B), 26 (MTAP16), 27 (MTAP24), 28 (FU-A), * and 29 (FU-B) are allowed. */ if (h264_data->packetization_mode > 1) av_log(codec, AV_LOG_ERROR, \"Interleaved RTP mode is not supported yet.\"); } else if (!strcmp(attr, \"profile-level-id\")) { if (strlen(value) == 6) { char buffer[3]; // 6 characters=3 bytes, in hex. uint8_t profile_idc; uint8_t profile_iop; uint8_t level_idc; buffer[0] = value[0]; buffer[1] = value[1]; buffer[2] = '\\0'; profile_idc = strtol(buffer, NULL, 16); buffer[0] = value[2]; buffer[1] = value[3]; profile_iop = strtol(buffer, NULL, 16); buffer[0] = value[4]; buffer[1] = value[5]; level_idc = strtol(buffer, NULL, 16); av_log(codec, AV_LOG_DEBUG, \"RTP Profile IDC: %x Profile IOP: %x Level: %x\\n\", profile_idc, profile_iop, level_idc); h264_data->profile_idc = profile_idc; h264_data->profile_iop = profile_iop; h264_data->level_idc = level_idc; } } else if (!strcmp(attr, \"sprop-parameter-sets\")) { codec->extradata_size = 0; codec->extradata = NULL; while (*value) { char base64packet[1024]; uint8_t decoded_packet[1024]; int packet_size; char *dst = base64packet; while (*value && *value != ',' && (dst - base64packet) < sizeof(base64packet) - 1) { *dst++ = *value++; } *dst++ = '\\0'; if (*value == ',') value++; packet_size = av_base64_decode(decoded_packet, base64packet, sizeof(decoded_packet)); if (packet_size > 0) { uint8_t *dest = av_malloc(packet_size + sizeof(start_sequence) + codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!dest) { av_log(codec, AV_LOG_ERROR, \"Unable to allocate memory for extradata!\"); return AVERROR(ENOMEM); } if (codec->extradata_size) { memcpy(dest, codec->extradata, codec->extradata_size); av_free(codec->extradata); } memcpy(dest + codec->extradata_size, start_sequence, sizeof(start_sequence)); memcpy(dest + codec->extradata_size + sizeof(start_sequence), decoded_packet, packet_size); memset(dest + codec->extradata_size + sizeof(start_sequence) + packet_size, 0, FF_INPUT_BUFFER_PADDING_SIZE); codec->extradata = dest; codec->extradata_size += sizeof(start_sequence) + packet_size; } } av_log(codec, AV_LOG_DEBUG, \"Extradata set to %p (size: %d)!\", codec->extradata, codec->extradata_size); } return 0; }"} {"target": 1, "idx": 26388, "func": "static int qcow2_create2(const char *filename, int64_t total_size, const char *backing_file, const char *backing_format, int flags, size_t cluster_size, int prealloc, QEMUOptionParameter *options, int version, Error **errp) { /* Calculate cluster_bits */ int cluster_bits; cluster_bits = ffs(cluster_size) - 1; if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || (1 << cluster_bits) != cluster_size) { error_setg(errp, \"Cluster size must be a power of two between %d and \" \"%dk\", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); return -EINVAL; /* * Open the image 
file and write a minimal qcow2 header. * * We keep things simple and start with a zero-sized image. We also * do without refcount blocks or a L1 table for now. We'll fix the * inconsistency later. * * We do need a refcount table because growing the refcount table means * allocating two new refcount blocks - the seconds of which would be at * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file * size for any qcow2 image. */ BlockDriverState* bs; QCowHeader header; uint8_t* refcount_table; Error *local_err = NULL; int ret; ret = bdrv_create_file(filename, options, &local_err); if (ret < 0) { return ret; ret = bdrv_file_open(&bs, filename, NULL, BDRV_O_RDWR, &local_err); if (ret < 0) { return ret; /* Write the header */ memset(&header, 0, sizeof(header)); header.magic = cpu_to_be32(QCOW_MAGIC); header.version = cpu_to_be32(version); header.cluster_bits = cpu_to_be32(cluster_bits); header.size = cpu_to_be64(0); header.l1_table_offset = cpu_to_be64(0); header.l1_size = cpu_to_be32(0); header.refcount_table_offset = cpu_to_be64(cluster_size); header.refcount_table_clusters = cpu_to_be32(1); header.refcount_order = cpu_to_be32(3 + REFCOUNT_SHIFT); header.header_length = cpu_to_be32(sizeof(header)); if (flags & BLOCK_FLAG_ENCRYPT) { header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES); } else { header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); if (flags & BLOCK_FLAG_LAZY_REFCOUNTS) { header.compatible_features |= cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); ret = bdrv_pwrite(bs, 0, &header, sizeof(header)); if (ret < 0) { error_setg_errno(errp, -ret, \"Could not write qcow2 header\"); /* Write an empty refcount table */ refcount_table = g_malloc0(cluster_size); ret = bdrv_pwrite(bs, cluster_size, refcount_table, cluster_size); g_free(refcount_table); if (ret < 0) { error_setg_errno(errp, -ret, \"Could not write refcount table\"); /* * And now open the image and make it consistent first (i.e. increase the * refcount of the cluster that is occupied by the header and the refcount * table) */ BlockDriver* drv = bdrv_find_format(\"qcow2\"); assert(drv != NULL); BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, drv, &local_err); if (ret < 0) { ret = qcow2_alloc_clusters(bs, 2 * cluster_size); if (ret < 0) { error_setg_errno(errp, -ret, \"Could not allocate clusters for qcow2 \" \"header and refcount table\"); } else if (ret != 0) { error_report(\"Huh, first cluster in empty image is already in use?\"); abort(); /* Okay, now that we have a valid image, let's give it the right size */ ret = bdrv_truncate(bs, total_size * BDRV_SECTOR_SIZE); if (ret < 0) { error_setg_errno(errp, -ret, \"Could not resize image\"); /* Want a backing file? 
There you go.*/ if (backing_file) { ret = bdrv_change_backing_file(bs, backing_file, backing_format); if (ret < 0) { error_setg_errno(errp, -ret, \"Could not assign backing file '%s' \" \"with format '%s'\", backing_file, backing_format); /* And if we're supposed to preallocate metadata, do that now */ if (prealloc) { BDRVQcowState *s = bs->opaque; qemu_co_mutex_lock(&s->lock); ret = preallocate(bs); qemu_co_mutex_unlock(&s->lock); if (ret < 0) { error_setg_errno(errp, -ret, \"Could not preallocate metadata\"); ret = 0; out: bdrv_unref(bs); return ret;"} {"target": 0, "idx": 26390, "func": "static void idr(H264Context *h){ int i; ff_h264_remove_all_refs(h); h->prev_frame_num= -1; h->prev_frame_num_offset= 0; h->prev_poc_msb= 1<<16; h->prev_poc_lsb= 0; for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) h->last_pocs[i] = INT_MIN; }"} {"target": 0, "idx": 26396, "func": "static void intel_hda_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { IntelHDAState *d = opaque; const IntelHDAReg *reg = intel_hda_reg_find(d, addr); intel_hda_reg_write(d, reg, val, 0xffffffff); }"} {"target": 1, "idx": 26428, "func": "static void abort_codec_experimental(AVCodec *c, int encoder) { const char *codec_string = encoder ? \"encoder\" : \"decoder\"; AVCodec *codec; av_log(NULL, AV_LOG_FATAL, \"%s '%s' is experimental and might produce bad \" \"results.\\nAdd '-strict experimental' if you want to use it.\\n\", codec_string, c->name); codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id); if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL)) av_log(NULL, AV_LOG_FATAL, \"Or use the non experimental %s '%s'.\\n\", codec_string, codec->name); exit(1); }"} {"target": 1, "idx": 26435, "func": "static inline void hyscale_fast_c(SwsContext *c, int16_t *dst, int dstWidth, const uint8_t *src, int srcW, int xInc) { int i; unsigned int xpos=0; for (i=0;i<dstWidth;i++) { register unsigned int xx=xpos>>16; register unsigned int xalpha=(xpos&0xFFFF)>>9; dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha; xpos+=xInc; } }"} {"target": 1, "idx": 26437, "func": "static int standard_decode_i_mbs(VC9Context *v) { GetBitContext *gb = &v->s.gb; MpegEncContext *s = &v->s; int current_mb = 0; /* MB/Block Position info */ uint8_t cbpcy[4], previous_cbpcy[4], predicted_cbpcy, *p_cbpcy /* Pointer to skip some math */; /* Reset CBPCY predictors */ memset(v->previous_line_cbpcy, 0, s->mb_stride<<2); /* Select ttmb table depending on pq */ if (v->pq < 5) v->ttmb_vlc = &vc9_ttmb_vlc[0]; else if (v->pq < 13) v->ttmb_vlc = &vc9_ttmb_vlc[1]; else v->ttmb_vlc = &vc9_ttmb_vlc[2]; for (s->mb_y=0; s->mb_y<s->mb_height; s->mb_y++) { /* Init CBPCY for line */ *((uint32_t*)previous_cbpcy) = 0x00000000; p_cbpcy = v->previous_line_cbpcy+4; for (s->mb_x=0; s->mb_x<s->mb_width; s->mb_x++, p_cbpcy += 4) { /* Get CBPCY */ GET_CBPCY(ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS); s->ac_pred = get_bits(gb, 1); /* TODO: Decode blocks from that mb wrt cbpcy */ /* Update for next block */ #if TRACE > 2 av_log(s->avctx, AV_LOG_DEBUG, \"Block %4i: p_cbpcy=%i%i%i%i, previous_cbpcy=%i%i%i%i,\" \" cbpcy=%i%i%i%i\\n\", current_mb, p_cbpcy[0], p_cbpcy[1], p_cbpcy[2], p_cbpcy[3], previous_cbpcy[0], previous_cbpcy[1], previous_cbpcy[2], previous_cbpcy[3], cbpcy[0], cbpcy[1], cbpcy[2], cbpcy[3]); #endif *((uint32_t*)p_cbpcy) = *((uint32_t*)previous_cbpcy); *((uint32_t*)previous_cbpcy) = *((uint32_t*)cbpcy); current_mb++; } } return 0; }"} {"target": 1, "idx": 26443, "func": "static int jazz_led_init(SysBusDevice *dev) { LedState *s = FROM_SYSBUS(LedState, dev); memory_region_init_io(&s->iomem, 
&led_ops, s, \"led\", 1); sysbus_init_mmio(dev, &s->iomem); s->ds = graphic_console_init(jazz_led_update_display, jazz_led_invalidate_display, jazz_led_screen_dump, jazz_led_text_update, s); return 0; }"} {"target": 0, "idx": 26451, "func": "dma_read(void *opaque, target_phys_addr_t addr, unsigned int size) { struct fs_dma_ctrl *ctrl = opaque; int c; uint32_t r = 0; if (size != 4) { dma_rinvalid(opaque, addr); } /* Make addr relative to this channel and bounded to nr regs. */ c = fs_channel(addr); addr &= 0xff; addr >>= 2; switch (addr) { case RW_STAT: r = ctrl->channels[c].state & 7; r |= ctrl->channels[c].eol << 5; r |= ctrl->channels[c].stream_cmd_src << 8; break; default: r = ctrl->channels[c].regs[addr]; D(printf (\"%s c=%d addr=\" TARGET_FMT_plx \"\\n\", __func__, c, addr)); break; } return r; }"} {"target": 0, "idx": 26458, "func": "static void pxa2xx_descriptor_load(struct pxa2xx_lcdc_s *s) { struct pxa_frame_descriptor_s desc; target_phys_addr_t descptr; int i; for (i = 0; i < PXA_LCDDMA_CHANS; i ++) { s->dma_ch[i].source = 0; if (!s->dma_ch[i].up) continue; if (s->dma_ch[i].branch & FBR_BRA) { descptr = s->dma_ch[i].branch & FBR_SRCADDR; if (s->dma_ch[i].branch & FBR_BINT) pxa2xx_dma_bs_set(s, i); s->dma_ch[i].branch &= ~FBR_BRA; } else descptr = s->dma_ch[i].descriptor; if (!(descptr >= PXA2XX_SDRAM_BASE && descptr + sizeof(desc) <= PXA2XX_SDRAM_BASE + phys_ram_size)) continue; cpu_physical_memory_read(descptr, (void *)&desc, sizeof(desc)); s->dma_ch[i].descriptor = tswap32(desc.fdaddr); s->dma_ch[i].source = tswap32(desc.fsaddr); s->dma_ch[i].id = tswap32(desc.fidr); s->dma_ch[i].command = tswap32(desc.ldcmd); } }"} {"target": 0, "idx": 26468, "func": "int spapr_populate_vdevice(VIOsPAPRBus *bus, void *fdt) { DeviceState *qdev, **qdevs; BusChild *kid; int i, num, ret = 0; /* Count qdevs on the bus list */ num = 0; QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { num++; } /* Copy out into an array of pointers */ qdevs = g_malloc(sizeof(qdev) * num); num = 0; QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { qdevs[num++] = kid->child; } /* Sort the array */ qsort(qdevs, num, sizeof(qdev), compare_reg); /* Hack alert. Give the devices to libfdt in reverse order, we happen * to know that will mean they are in forward order in the tree. 
*/ for (i = num - 1; i >= 0; i--) { VIOsPAPRDevice *dev = (VIOsPAPRDevice *)(qdevs[i]); ret = vio_make_devnode(dev, fdt); if (ret < 0) { goto out; } } ret = 0; out: free(qdevs); return ret; }"} {"target": 1, "idx": 26472, "func": "void ppc_tlb_invalidate_all(CPUPPCState *env) { switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: ppc6xx_tlb_invalidate_all(env); break; case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: ppc4xx_tlb_invalidate_all(env); break; case POWERPC_MMU_REAL: cpu_abort(env, \"No TLB for PowerPC 4xx in real mode\\n\"); break; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ cpu_abort(env, \"MPC8xx MMU model is not implemented\\n\"); break; case POWERPC_MMU_BOOKE: tlb_flush(env, 1); break; case POWERPC_MMU_BOOKE206: booke206_flush_tlb(env, -1, 0); break; case POWERPC_MMU_32B: case POWERPC_MMU_601: #if defined(TARGET_PPC64) case POWERPC_MMU_620: case POWERPC_MMU_64B: case POWERPC_MMU_2_06: #endif /* defined(TARGET_PPC64) */ tlb_flush(env, 1); break; default: /* XXX: TODO */ cpu_abort(env, \"Unknown MMU model\\n\"); break; } }"} {"target": 0, "idx": 26480, "func": "uint32_t HELPER(lcebr)(CPUS390XState *env, uint32_t f1, uint32_t f2) { env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper); return set_cc_nz_f32(env->fregs[f1].l.upper); }"} {"target": 0, "idx": 26481, "func": "static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url, AVDictionary *opts, AVDictionary *opts2, int *is_http) { HLSContext *c = s->priv_data; AVDictionary *tmp = NULL; const char *proto_name = NULL; int ret; av_dict_copy(&tmp, opts, 0); av_dict_copy(&tmp, opts2, 0); if (av_strstart(url, \"crypto\", NULL)) { if (url[6] == '+' || url[6] == ':') proto_name = avio_find_protocol_name(url + 7); } if (!proto_name) proto_name = avio_find_protocol_name(url); if (!proto_name) return AVERROR_INVALIDDATA; // only http(s) & file are allowed if (av_strstart(proto_name, \"file\", NULL)) { if (strcmp(c->allowed_extensions, \"ALL\") && !av_match_ext(url, c->allowed_extensions)) { av_log(s, AV_LOG_ERROR, \"Filename extension of \\'%s\\' is not a common multimedia extension, blocked for security reasons.\\n\" \"If you wish to override this adjust allowed_extensions, you can set it to \\'ALL\\' to allow all\\n\", url); return AVERROR_INVALIDDATA; } } else if (av_strstart(proto_name, \"http\", NULL)) { ; } else return AVERROR_INVALIDDATA; if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':') ; else if (av_strstart(url, \"crypto\", NULL) && !strncmp(proto_name, url + 7, strlen(proto_name)) && url[7 + strlen(proto_name)] == ':') ; else if (strcmp(proto_name, \"file\") || !strncmp(url, \"file,\", 5)) return AVERROR_INVALIDDATA; if (c->http_persistent && *pb && av_strstart(proto_name, \"http\", NULL)) { ret = open_url_keepalive(c->ctx, pb, url); if (ret == AVERROR_EXIT) { return ret; } else if (ret < 0) { if (ret != AVERROR_EOF) av_log(s, AV_LOG_WARNING, \"keepalive request failed for '%s', retrying with new connection: %s\\n\", url, av_err2str(ret)); ret = s->io_open(s, pb, url, AVIO_FLAG_READ, &tmp); } } else { ret = s->io_open(s, pb, url, AVIO_FLAG_READ, &tmp); } if (ret >= 0) { // update cookies on http response with setcookies. 
char *new_cookies = NULL; if (!(s->flags & AVFMT_FLAG_CUSTOM_IO)) av_opt_get(*pb, \"cookies\", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&new_cookies); if (new_cookies) { av_free(c->cookies); c->cookies = new_cookies; } av_dict_set(&opts, \"cookies\", c->cookies, 0); } av_dict_free(&tmp); if (is_http) *is_http = av_strstart(proto_name, \"http\", NULL); return ret; }"} {"target": 0, "idx": 26485, "func": "static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp) { int ret = -EINVAL, idx; struct rdma_cm_id *listen_id; char ip[40] = \"unknown\"; struct rdma_addrinfo *res; char port_str[16]; for (idx = 0; idx < RDMA_WRID_MAX; idx++) { rdma->wr_data[idx].control_len = 0; rdma->wr_data[idx].control_curr = NULL; } if (rdma->host == NULL) { ERROR(errp, \"RDMA host is not set!\"); rdma->error_state = -EINVAL; return -1; } /* create CM channel */ rdma->channel = rdma_create_event_channel(); if (!rdma->channel) { ERROR(errp, \"could not create rdma event channel\"); rdma->error_state = -EINVAL; return -1; } /* create CM id */ ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP); if (ret) { ERROR(errp, \"could not create cm_id!\"); goto err_dest_init_create_listen_id; } snprintf(port_str, 16, \"%d\", rdma->port); port_str[15] = '\\0'; if (rdma->host && strcmp(\"\", rdma->host)) { struct rdma_addrinfo *e; ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res); if (ret < 0) { ERROR(errp, \"could not rdma_getaddrinfo address %s\", rdma->host); goto err_dest_init_bind_addr; } for (e = res; e != NULL; e = e->ai_next) { inet_ntop(e->ai_family, &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip); trace_qemu_rdma_dest_init_trying(rdma->host, ip); ret = rdma_bind_addr(listen_id, e->ai_dst_addr); if (!ret) { if (e->ai_family == AF_INET6) { ret = qemu_rdma_broken_ipv6_kernel(errp, listen_id->verbs); if (ret) { continue; } } goto listen; } } ERROR(errp, \"Error: could not rdma_bind_addr!\"); goto err_dest_init_bind_addr; } else { ERROR(errp, \"migration host and port not specified!\"); ret = -EINVAL; goto err_dest_init_bind_addr; } listen: rdma->listen_id = listen_id; qemu_rdma_dump_gid(\"dest_init\", listen_id); return 0; err_dest_init_bind_addr: rdma_destroy_id(listen_id); err_dest_init_create_listen_id: rdma_destroy_event_channel(rdma->channel); rdma->channel = NULL; rdma->error_state = ret; return ret; }"} {"target": 0, "idx": 26489, "func": "int cksum(struct mbuf *m, int len) { register uint16_t *w; register int sum = 0; register int mlen = 0; int byte_swapped = 0; union { uint8_t c[2]; uint16_t s; } s_util; union { uint16_t s[2]; uint32_t l; } l_util; if (m->m_len == 0) goto cont; w = mtod(m, uint16_t *); mlen = m->m_len; if (len < mlen) mlen = len; #ifdef DEBUG len -= mlen; #endif /* * Force to even boundary. */ if ((1 & (long) w) && (mlen > 0)) { REDUCE; sum <<= 8; s_util.c[0] = *(uint8_t *)w; w = (uint16_t *)((int8_t *)w + 1); mlen--; byte_swapped = 1; } /* * Unroll the loop to make overhead from * branches &c small. 
*/ while ((mlen -= 32) >= 0) { sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3]; sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7]; sum += w[8]; sum += w[9]; sum += w[10]; sum += w[11]; sum += w[12]; sum += w[13]; sum += w[14]; sum += w[15]; w += 16; } mlen += 32; while ((mlen -= 8) >= 0) { sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3]; w += 4; } mlen += 8; if (mlen == 0 && byte_swapped == 0) goto cont; REDUCE; while ((mlen -= 2) >= 0) { sum += *w++; } if (byte_swapped) { REDUCE; sum <<= 8; if (mlen == -1) { s_util.c[1] = *(uint8_t *)w; sum += s_util.s; mlen = 0; } else mlen = -1; } else if (mlen == -1) s_util.c[0] = *(uint8_t *)w; cont: #ifdef DEBUG if (len) { DEBUG_ERROR((dfd, \"cksum: out of data\\n\")); DEBUG_ERROR((dfd, \" len = %d\\n\", len)); } #endif if (mlen == -1) { /* The last mbuf has odd # of bytes. Follow the standard (the odd byte may be shifted left by 8 bits or not as determined by endian-ness of the machine) */ s_util.c[1] = 0; sum += s_util.s; } REDUCE; return (~sum & 0xffff); }"} {"target": 0, "idx": 26496, "func": "void address_space_unmap(AddressSpace *as, void *buffer, target_phys_addr_t len, int is_write, target_phys_addr_t access_len) { if (buffer != bounce.buffer) { if (is_write) { ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer); while (access_len) { unsigned l; l = TARGET_PAGE_SIZE; if (l > access_len) l = access_len; invalidate_and_set_dirty(addr1, l); addr1 += l; access_len -= l; } } if (xen_enabled()) { xen_invalidate_map_cache_entry(buffer); } return; } if (is_write) { address_space_write(as, bounce.addr, bounce.buffer, access_len); } qemu_vfree(bounce.buffer); bounce.buffer = NULL; cpu_notify_map_clients(); }"} {"target": 1, "idx": 26512, "func": "static av_cold int atrac3_decode_init(AVCodecContext *avctx) { int i, ret; int version, delay, samples_per_frame, frame_factor; const uint8_t *edata_ptr = avctx->extradata; ATRAC3Context *q = avctx->priv_data; if (avctx->channels <= 0 || avctx->channels > 2) { av_log(avctx, AV_LOG_ERROR, \"Channel configuration error!\\n\"); } /* Take care of the codec-specific extradata. */ if (avctx->extradata_size == 14) { /* Parse the extradata, WAV format */ av_log(avctx, AV_LOG_DEBUG, \"[0-1] %d\\n\", bytestream_get_le16(&edata_ptr)); // Unknown value always 1 edata_ptr += 4; // samples per channel q->coding_mode = bytestream_get_le16(&edata_ptr); av_log(avctx, AV_LOG_DEBUG,\"[8-9] %d\\n\", bytestream_get_le16(&edata_ptr)); //Dupe of coding mode frame_factor = bytestream_get_le16(&edata_ptr); // Unknown always 1 av_log(avctx, AV_LOG_DEBUG,\"[12-13] %d\\n\", bytestream_get_le16(&edata_ptr)); // Unknown always 0 /* setup */ samples_per_frame = SAMPLES_PER_FRAME * avctx->channels; version = 4; delay = 0x88E; q->coding_mode = q->coding_mode ? JOINT_STEREO : STEREO; q->scrambled_stream = 0; if (avctx->block_align != 96 * avctx->channels * frame_factor && avctx->block_align != 152 * avctx->channels * frame_factor && avctx->block_align != 192 * avctx->channels * frame_factor) { av_log(avctx, AV_LOG_ERROR, \"Unknown frame/channel/frame_factor \" \"configuration %d/%d/%d\\n\", avctx->block_align, avctx->channels, frame_factor); return AVERROR_INVALIDDATA; } } else if (avctx->extradata_size == 10) { /* Parse the extradata, RM format. 
*/ version = bytestream_get_be32(&edata_ptr); samples_per_frame = bytestream_get_be16(&edata_ptr); delay = bytestream_get_be16(&edata_ptr); q->coding_mode = bytestream_get_be16(&edata_ptr); q->scrambled_stream = 1; } else { av_log(NULL, AV_LOG_ERROR, \"Unknown extradata size %d.\\n\", avctx->extradata_size); } /* Check the extradata */ if (version != 4) { av_log(avctx, AV_LOG_ERROR, \"Version %d != 4.\\n\", version); return AVERROR_INVALIDDATA; } if (samples_per_frame != SAMPLES_PER_FRAME && samples_per_frame != SAMPLES_PER_FRAME * 2) { av_log(avctx, AV_LOG_ERROR, \"Unknown amount of samples per frame %d.\\n\", samples_per_frame); return AVERROR_INVALIDDATA; } if (delay != 0x88E) { av_log(avctx, AV_LOG_ERROR, \"Unknown amount of delay %x != 0x88E.\\n\", delay); return AVERROR_INVALIDDATA; } if (q->coding_mode == STEREO) av_log(avctx, AV_LOG_DEBUG, \"Normal stereo detected.\\n\"); else if (q->coding_mode == JOINT_STEREO) av_log(avctx, AV_LOG_DEBUG, \"Joint stereo detected.\\n\"); else { av_log(avctx, AV_LOG_ERROR, \"Unknown channel coding mode %x!\\n\", q->coding_mode); return AVERROR_INVALIDDATA; } if (avctx->block_align >= UINT_MAX / 2) q->decoded_bytes_buffer = av_mallocz(FFALIGN(avctx->block_align, 4) + FF_INPUT_BUFFER_PADDING_SIZE); if (q->decoded_bytes_buffer == NULL) return AVERROR(ENOMEM); avctx->sample_fmt = AV_SAMPLE_FMT_FLTP; /* initialize the MDCT transform */ if ((ret = ff_mdct_init(&q->mdct_ctx, 9, 1, 1.0 / 32768)) < 0) { av_log(avctx, AV_LOG_ERROR, \"Error initializing MDCT\\n\"); av_freep(&q->decoded_bytes_buffer); return ret; } /* init the joint-stereo decoding data */ q->weighting_delay[0] = 0; q->weighting_delay[1] = 7; q->weighting_delay[2] = 0; q->weighting_delay[3] = 7; q->weighting_delay[4] = 0; q->weighting_delay[5] = 7; for (i = 0; i < 4; i++) { q->matrix_coeff_index_prev[i] = 3; q->matrix_coeff_index_now[i] = 3; q->matrix_coeff_index_next[i] = 3; } avpriv_float_dsp_init(&q->fdsp, avctx->flags & CODEC_FLAG_BITEXACT); ff_fmt_convert_init(&q->fmt_conv, avctx); q->units = av_mallocz(sizeof(*q->units) * avctx->channels); if (!q->units) { atrac3_decode_close(avctx); return AVERROR(ENOMEM); } avcodec_get_frame_defaults(&q->frame); avctx->coded_frame = &q->frame; return 0; }"} {"target": 1, "idx": 26530, "func": "static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr) { #ifdef DEBUG_UNASSIGNED printf(\"Unassigned mem read \" TARGET_FMT_plx \"\\n\", addr); #endif #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 0, 0, 0, 4); #endif return 0; }"} {"target": 1, "idx": 26531, "func": "static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b) { TCGv_i64 tmp64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(tmp64, b); dead_tmp(b); tcg_gen_shli_i64(tmp64, tmp64, 32); tcg_gen_add_i64(a, tmp64, a); tcg_temp_free_i64(tmp64); return a; }"} {"target": 0, "idx": 26535, "func": "static void qvirtio_scsi_pci_free(QVirtIOSCSI *vs) { int i; for (i = 0; i < vs->num_queues + 2; i++) { qvirtqueue_cleanup(vs->dev->bus, vs->vq[i], vs->alloc); } pc_alloc_uninit(vs->alloc); qvirtio_pci_device_disable(container_of(vs->dev, QVirtioPCIDevice, vdev)); g_free(vs->dev); qpci_free_pc(vs->bus); g_free(vs); }"} {"target": 0, "idx": 26538, "func": "void spapr_clear_pending_events(sPAPRMachineState *spapr) { sPAPREventLogEntry *entry = NULL; QTAILQ_FOREACH(entry, &spapr->pending_events, next) { QTAILQ_REMOVE(&spapr->pending_events, entry, next); g_free(entry->extended_log); g_free(entry); } }"} {"target": 0, "idx": 26540, "func": "static void 
sclp_set_write_mask(void) { WriteEventMask *sccb = (void*)_sccb; sccb->h.length = sizeof(WriteEventMask); sccb->mask_length = sizeof(unsigned int); sccb->receive_mask = SCLP_EVENT_MASK_MSG_ASCII; sccb->cp_receive_mask = SCLP_EVENT_MASK_MSG_ASCII; sccb->send_mask = SCLP_EVENT_MASK_MSG_ASCII; sccb->cp_send_mask = SCLP_EVENT_MASK_MSG_ASCII; sclp_service_call(SCLP_CMD_WRITE_EVENT_MASK, sccb); }"} {"target": 0, "idx": 26543, "func": "void json_prop_int(QJSON *json, const char *name, int64_t val) { json_emit_element(json, name); qstring_append_int(json->str, val); }"} {"target": 0, "idx": 26552, "func": "static void pm_update_sci(PIIX4PMState *s) { int sci_level, pmsts; pmsts = acpi_pm1_evt_get_sts(&s->ar); sci_level = (((pmsts & s->ar.pm1.evt.en) & (ACPI_BITMASK_RT_CLOCK_ENABLE | ACPI_BITMASK_POWER_BUTTON_ENABLE | ACPI_BITMASK_GLOBAL_LOCK_ENABLE | ACPI_BITMASK_TIMER_ENABLE)) != 0) || (((s->ar.gpe.sts[0] & s->ar.gpe.en[0]) & (PIIX4_PCI_HOTPLUG_STATUS | PIIX4_CPU_HOTPLUG_STATUS)) != 0); qemu_set_irq(s->irq, sci_level); /* schedule a timer interruption if needed */ acpi_pm_tmr_update(&s->ar, (s->ar.pm1.evt.en & ACPI_BITMASK_TIMER_ENABLE) && !(pmsts & ACPI_BITMASK_TIMER_STATUS)); }"} {"target": 1, "idx": 26559, "func": "static inline int wv_get_value_integer(WavpackFrameContext *s, uint32_t *crc, int S) { int bit; if(s->extra_bits){ S <<= s->extra_bits; if(s->got_extra_bits){ S |= get_bits(&s->gb_extra_bits, s->extra_bits); *crc = *crc * 9 + (S&0xffff) * 3 + ((unsigned)S>>16); } } bit = (S & s->and) | s->or; return (((S + bit) << s->shift) - bit) << s->post_shift; }"} {"target": 0, "idx": 26568, "func": "static int mxf_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags) { AVStream *st = s->streams[stream_index]; int64_t seconds; MXFContext* mxf = s->priv_data; int64_t seekpos; int ret; MXFIndexTable *t; if (mxf->nb_index_tables <= 0) { if (!s->bit_rate) return AVERROR_INVALIDDATA; if (sample_time < 0) sample_time = 0; seconds = av_rescale(sample_time, st->time_base.num, st->time_base.den); if ((ret = avio_seek(s->pb, (s->bit_rate * seconds) >> 3, SEEK_SET)) < 0) return ret; ff_update_cur_dts(s, st, sample_time); mxf->current_edit_unit = sample_time; } else { t = &mxf->index_tables[0]; /* clamp above zero, else ff_index_search_timestamp() returns negative * this also means we allow seeking before the start */ sample_time = FFMAX(sample_time, 0); if (t->fake_index) { /* behave as if we have a proper index */ if ((sample_time = ff_index_search_timestamp(t->fake_index, t->nb_ptses, sample_time, flags)) < 0) return sample_time; } else { /* no IndexEntryArray (one or more CBR segments) * make sure we don't seek past the end */ sample_time = FFMIN(sample_time, st->duration - 1); } if ((ret = mxf_edit_unit_absolute_offset(mxf, t, sample_time, &sample_time, &seekpos, 1)) << 0) return ret; ff_update_cur_dts(s, st, sample_time); mxf->current_edit_unit = sample_time; avio_seek(s->pb, seekpos, SEEK_SET); } return 0; }"} {"target": 1, "idx": 26587, "func": "if_start(Slirp *slirp) { uint64_t now = qemu_get_clock_ns(rt_clock); int requeued = 0; struct mbuf *ifm, *ifqt; DEBUG_CALL(\"if_start\"); if (slirp->if_queued == 0) return; /* Nothing to do */ again: /* check if we can really output */ if (!slirp_can_output(slirp->opaque)) return; /* * See which queue to get next packet from * If there's something in the fastq, select it immediately */ if (slirp->if_fastq.ifq_next != &slirp->if_fastq) { ifm = slirp->if_fastq.ifq_next; } else { /* Nothing on fastq, see if next_m is valid */ if 
(slirp->next_m != &slirp->if_batchq) ifm = slirp->next_m; else ifm = slirp->if_batchq.ifq_next; /* Set which packet to send on next iteration */ slirp->next_m = ifm->ifq_next; } /* Remove it from the queue */ ifqt = ifm->ifq_prev; remque(ifm); slirp->if_queued--; /* If there are more packets for this session, re-queue them */ if (ifm->ifs_next != /* ifm->ifs_prev != */ ifm) { insque(ifm->ifs_next, ifqt); ifs_remque(ifm); } /* Update so_queued */ if (ifm->ifq_so) { if (--ifm->ifq_so->so_queued == 0) /* If there's no more queued, reset nqueued */ ifm->ifq_so->so_nqueued = 0; } if (ifm->expiration_date < now) { /* Expired */ m_free(ifm); } else { /* Encapsulate the packet for sending */ if (if_encap(slirp, ifm)) { m_free(ifm); } else { /* re-queue */ insque(ifm, ifqt); requeued++; } } if (slirp->if_queued) goto again; slirp->if_queued = requeued; }"} {"target": 0, "idx": 26595, "func": "static void dump_human_image_check(ImageCheck *check) { if (!(check->corruptions || check->leaks || check->check_errors)) { printf(\"No errors were found on the image.\\n\"); } else { if (check->corruptions) { printf(\"\\n%\" PRId64 \" errors were found on the image.\\n\" \"Data may be corrupted, or further writes to the image \" \"may corrupt it.\\n\", check->corruptions); } if (check->leaks) { printf(\"\\n%\" PRId64 \" leaked clusters were found on the image.\\n\" \"This means waste of disk space, but no harm to data.\\n\", check->leaks); } if (check->check_errors) { printf(\"\\n%\" PRId64 \" internal errors have occurred during the check.\\n\", check->check_errors); } } if (check->total_clusters != 0 && check->allocated_clusters != 0) { printf(\"%\" PRId64 \"/%\" PRId64 \"= %0.2f%% allocated, %0.2f%% fragmented\\n\", check->allocated_clusters, check->total_clusters, check->allocated_clusters * 100.0 / check->total_clusters, check->fragmented_clusters * 100.0 / check->allocated_clusters); } if (check->image_end_offset) { printf(\"Image end offset: %\" PRId64 \"\\n\", check->image_end_offset); } }"} {"target": 0, "idx": 26599, "func": "float32 int32_to_float32( int32 a STATUS_PARAM ) { flag zSign; if ( a == 0 ) return 0; if ( a == (sbits32) 0x80000000 ) return packFloat32( 1, 0x9E, 0 ); zSign = ( a < 0 ); return normalizeRoundAndPackFloat32( zSign, 0x9C, zSign ? 
- a : a STATUS_VAR ); }"} {"target": 1, "idx": 26620, "func": "static int vfio_start_eventfd_injection(VFIOINTp *intp) { int ret; ret = vfio_set_trigger_eventfd(intp, vfio_intp_interrupt); if (ret) { error_report(\"vfio: Error: Failed to pass IRQ fd to the driver: %m\"); } return ret; }"} {"target": 1, "idx": 26642, "func": "static int bt_hid_in(struct bt_hid_device_s *s) { USBPacket p; p.pid = USB_TOKEN_IN; p.devep = 1; p.data = s->datain.buffer; p.len = sizeof(s->datain.buffer); s->datain.len = s->usbdev->info->handle_data(s->usbdev, &p); return s->datain.len; }"} {"target": 0, "idx": 26653, "func": "void qmp_inject_nmi(Error **errp) { #if defined(TARGET_I386) CPUState *cs; CPU_FOREACH(cs) { X86CPU *cpu = X86_CPU(cs); if (!cpu->apic_state) { cpu_interrupt(cs, CPU_INTERRUPT_NMI); } else { apic_deliver_nmi(cpu->apic_state); } } #else nmi_monitor_handle(monitor_get_cpu_index(), errp); #endif }"} {"target": 0, "idx": 26663, "func": "eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len, void *l3hdr, size_t l3hdr_len, size_t l3payload_len, size_t frag_offset, bool more_frags) { if (eth_get_l3_proto(l2hdr, l2hdr_len) == ETH_P_IP) { uint16_t orig_flags; struct ip_header *iphdr = (struct ip_header *) l3hdr; uint16_t frag_off_units = frag_offset / IP_FRAG_UNIT_SIZE; uint16_t new_ip_off; assert(frag_offset % IP_FRAG_UNIT_SIZE == 0); assert((frag_off_units & ~IP_OFFMASK) == 0); orig_flags = be16_to_cpu(iphdr->ip_off) & ~(IP_OFFMASK|IP_MF); new_ip_off = frag_off_units | orig_flags | (more_frags ? IP_MF : 0); iphdr->ip_off = cpu_to_be16(new_ip_off); iphdr->ip_len = cpu_to_be16(l3payload_len + l3hdr_len); } }"} {"target": 0, "idx": 26671, "func": "float32 HELPER(ucf64_subs)(float32 a, float32 b, CPUUniCore32State *env) { return float32_sub(a, b, &env->ucf64.fp_status); }"} {"target": 0, "idx": 26673, "func": "do_cksum(uint8_t *dp, uint8_t *de) { unsigned int bsum[2] = {0, 0}, i, sum; for (i = 1; dp < de; bsum[i^=1] += *dp++) ; sum = (bsum[0] << 8) + bsum[1]; sum = (sum >> 16) + (sum & 0xffff); return ~(sum + (sum >> 16)); }"} {"target": 0, "idx": 26680, "func": "static void ics_simple_realize(DeviceState *dev, Error **errp) { ICSState *ics = ICS_SIMPLE(dev); if (!ics->nr_irqs) { error_setg(errp, \"Number of interrupts needs to be greater 0\"); return; } ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState)); ics->qirqs = qemu_allocate_irqs(ics_simple_set_irq, ics, ics->nr_irqs); qemu_register_reset(ics_simple_reset, dev); }"} {"target": 0, "idx": 26701, "func": "static void vnc_flush(VncState *vs) { if (vs->output.offset) vnc_client_write(vs); }"} {"target": 0, "idx": 26711, "func": "static void do_subchannel_work(SubchDev *sch, ORB *orb) { SCSW *s = &sch->curr_status.scsw; if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) { sch_handle_clear_func(sch); } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) { sch_handle_halt_func(sch); } else if (s->ctrl & SCSW_FCTL_START_FUNC) { /* Triggered by both ssch and rsch. */ sch_handle_start_func(sch, orb); } else { /* Cannot happen. 
*/ return; } css_inject_io_interrupt(sch); }"} {"target": 1, "idx": 26725, "func": "static int sd_snapshot_delete(BlockDriverState *bs, const char *snapshot_id, const char *name, Error **errp) { unsigned long snap_id = 0; char snap_tag[SD_MAX_VDI_TAG_LEN]; Error *local_err = NULL; int fd, ret; char buf[SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN]; BDRVSheepdogState *s = bs->opaque; unsigned int wlen = SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN, rlen = 0; uint32_t vid; SheepdogVdiReq hdr = { .opcode = SD_OP_DEL_VDI, .data_length = wlen, .flags = SD_FLAG_CMD_WRITE, }; SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr; if (!remove_objects(s)) { return -1; } memset(buf, 0, sizeof(buf)); memset(snap_tag, 0, sizeof(snap_tag)); pstrcpy(buf, SD_MAX_VDI_LEN, s->name); ret = qemu_strtoul(snapshot_id, NULL, 10, &snap_id); if (ret || snap_id > UINT32_MAX) { error_setg(errp, \"Invalid snapshot ID: %s\", snapshot_id ? snapshot_id : \"\"); return -EINVAL; } if (snap_id) { hdr.snapid = (uint32_t) snap_id; } else { pstrcpy(snap_tag, sizeof(snap_tag), snapshot_id); pstrcpy(buf + SD_MAX_VDI_LEN, SD_MAX_VDI_TAG_LEN, snap_tag); } ret = find_vdi_name(s, s->name, snap_id, snap_tag, &vid, true, &local_err); if (ret) { return ret; } fd = connect_to_sdog(s, &local_err); if (fd < 0) { error_report_err(local_err); return -1; } ret = do_req(fd, s->bs, (SheepdogReq *)&hdr, buf, &wlen, &rlen); closesocket(fd); if (ret) { return ret; } switch (rsp->result) { case SD_RES_NO_VDI: error_report(\"%s was already deleted\", s->name); case SD_RES_SUCCESS: break; default: error_report(\"%s, %s\", sd_strerror(rsp->result), s->name); return -1; } return ret; }"} {"target": 1, "idx": 26726, "func": "static int block_save_iterate(QEMUFile *f, void *opaque) { int ret; int64_t last_ftell = qemu_ftell(f); DPRINTF(\"Enter save live iterate submitted %d transferred %d\\n\", block_mig_state.submitted, block_mig_state.transferred); ret = flush_blks(f); if (ret) { return ret; } blk_mig_reset_dirty_cursor(); /* control the rate of transfer */ blk_mig_lock(); while ((block_mig_state.submitted + block_mig_state.read_done) * BLOCK_SIZE < qemu_file_get_rate_limit(f)) { blk_mig_unlock(); if (block_mig_state.bulk_completed == 0) { /* first finish the bulk phase */ if (blk_mig_save_bulked_block(f) == 0) { /* finished saving bulk on all devices */ block_mig_state.bulk_completed = 1; } ret = 0; } else { /* Always called with iothread lock taken for * simplicity, block_save_complete also calls it. 
*/ qemu_mutex_lock_iothread(); ret = blk_mig_save_dirty_block(f, 1); qemu_mutex_unlock_iothread(); } if (ret < 0) { return ret; } blk_mig_lock(); if (ret != 0) { /* no more dirty blocks */ break; } } blk_mig_unlock(); ret = flush_blks(f); if (ret) { return ret; } qemu_put_be64(f, BLK_MIG_FLAG_EOS); return qemu_ftell(f) - last_ftell; }"} {"target": 0, "idx": 26730, "func": "static av_cold int fft_init(AVCodecContext *avctx, AC3MDCTContext *mdct, int ln) { int i, n, n2; float alpha; n = 1 << ln; n2 = n >> 1; FF_ALLOC_OR_GOTO(avctx, mdct->costab, n2 * sizeof(*mdct->costab), fft_alloc_fail); FF_ALLOC_OR_GOTO(avctx, mdct->sintab, n2 * sizeof(*mdct->sintab), fft_alloc_fail); for (i = 0; i < n2; i++) { alpha = 2.0 * M_PI * i / n; mdct->costab[i] = FIX15(cos(alpha)); mdct->sintab[i] = FIX15(sin(alpha)); } return 0; fft_alloc_fail: mdct_end(mdct); return AVERROR(ENOMEM); }"} {"target": 1, "idx": 26734, "func": "static always_inline void gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb, int search_pc) { #if defined ALPHA_DEBUG_DISAS static int insn_count; #endif DisasContext ctx, *ctxp = &ctx; target_ulong pc_start; uint32_t insn; uint16_t *gen_opc_end; CPUBreakpoint *bp; int j, lj = -1; int ret; int num_insns; int max_insns; pc_start = tb->pc; gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; ctx.pc = pc_start; ctx.amask = env->amask; #if defined (CONFIG_USER_ONLY) ctx.mem_idx = 0; #else ctx.mem_idx = ((env->ps >> 3) & 3); ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1; #endif num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) max_insns = CF_COUNT_MASK; gen_icount_start(); for (ret = 0; ret == 0;) { if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) { TAILQ_FOREACH(bp, &env->breakpoints, entry) { if (bp->pc == ctx.pc) { gen_excp(&ctx, EXCP_DEBUG, 0); break; } } } if (search_pc) { j = gen_opc_ptr - gen_opc_buf; if (lj < j) { lj++; while (lj < j) gen_opc_instr_start[lj++] = 0; gen_opc_pc[lj] = ctx.pc; gen_opc_instr_start[lj] = 1; gen_opc_icount[lj] = num_insns; } } if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) gen_io_start(); #if defined ALPHA_DEBUG_DISAS insn_count++; LOG_DISAS(\"pc \" TARGET_FMT_lx \" mem_idx %d\\n\", ctx.pc, ctx.mem_idx); #endif insn = ldl_code(ctx.pc); #if defined ALPHA_DEBUG_DISAS insn_count++; LOG_DISAS(\"opcode %08x %d\\n\", insn, insn_count); #endif num_insns++; ctx.pc += 4; ret = translate_one(ctxp, insn); if (ret != 0) break; /* if we reach a page boundary or are single stepping, stop * generation */ if (((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) || num_insns >= max_insns) { break; } if (env->singlestep_enabled) { gen_excp(&ctx, EXCP_DEBUG, 0); break; } #if defined (DO_SINGLE_STEP) break; #endif } if (ret != 1 && ret != 3) { tcg_gen_movi_i64(cpu_pc, ctx.pc); } #if defined (DO_TB_FLUSH) gen_helper_tb_flush(); #endif if (tb->cflags & CF_LAST_IO) gen_io_end(); /* Generate the return instruction */ tcg_gen_exit_tb(0); gen_icount_end(tb, num_insns); *gen_opc_ptr = INDEX_op_end; if (search_pc) { j = gen_opc_ptr - gen_opc_buf; lj++; while (lj <= j) gen_opc_instr_start[lj++] = 0; } else { tb->size = ctx.pc - pc_start; tb->icount = num_insns; } #if defined ALPHA_DEBUG_DISAS log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0); if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log(\"IN: %s\\n\", lookup_symbol(pc_start)); log_target_disas(pc_start, ctx.pc - pc_start, 1); qemu_log(\"\\n\"); } #endif }"} {"target": 1, "idx": 26738, "func": "VncJob *vnc_job_new(VncState *vs) { VncJob *job = g_malloc0(sizeof(VncJob)); job->vs = vs; vnc_lock_queue(queue); 
QLIST_INIT(&job->rectangles); vnc_unlock_queue(queue); return job; }"} {"target": 1, "idx": 26745, "func": "static void nbd_client_closed(NBDClient *client) { nb_fds--; if (nb_fds == 0 && !persistent && state == RUNNING) { state = TERMINATE; } nbd_update_server_watch(); nbd_client_put(client); }"} {"target": 1, "idx": 26749, "func": "static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd) { NvmeSQueue *sq; NvmeCreateSq *c = (NvmeCreateSq *)cmd; uint16_t cqid = le16_to_cpu(c->cqid); uint16_t sqid = le16_to_cpu(c->sqid); uint16_t qsize = le16_to_cpu(c->qsize); uint16_t qflags = le16_to_cpu(c->sq_flags); uint64_t prp1 = le64_to_cpu(c->prp1); if (!cqid || nvme_check_cqid(n, cqid)) { return NVME_INVALID_CQID | NVME_DNR; } if (!sqid || !nvme_check_sqid(n, sqid)) { return NVME_INVALID_QID | NVME_DNR; } if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) { return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR; } if (!prp1 || prp1 & (n->page_size - 1)) { return NVME_INVALID_FIELD | NVME_DNR; } if (!(NVME_SQ_FLAGS_PC(qflags))) { return NVME_INVALID_FIELD | NVME_DNR; } sq = g_malloc0(sizeof(*sq)); nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1); return NVME_SUCCESS; }"} {"target": 0, "idx": 26774, "func": "void ff_ac3_bit_alloc_calc_mask(AC3BitAllocParameters *s, int16_t *band_psd, int start, int end, int fast_gain, int is_lfe, int dba_mode, int dba_nsegs, uint8_t *dba_offsets, uint8_t *dba_lengths, uint8_t *dba_values, int16_t *mask) { int16_t excite[50]; /* excitation */ int bin, k; int bndstrt, bndend, begin, end1, tmp; int lowcomp, fastleak, slowleak; /* excitation function */ bndstrt = bin_to_band_tab[start]; bndend = bin_to_band_tab[end-1] + 1; if (bndstrt == 0) { lowcomp = 0; lowcomp = calc_lowcomp1(lowcomp, band_psd[0], band_psd[1], 384); excite[0] = band_psd[0] - fast_gain - lowcomp; lowcomp = calc_lowcomp1(lowcomp, band_psd[1], band_psd[2], 384); excite[1] = band_psd[1] - fast_gain - lowcomp; begin = 7; for (bin = 2; bin < 7; bin++) { if (!(is_lfe && bin == 6)) lowcomp = calc_lowcomp1(lowcomp, band_psd[bin], band_psd[bin+1], 384); fastleak = band_psd[bin] - fast_gain; slowleak = band_psd[bin] - s->slow_gain; excite[bin] = fastleak - lowcomp; if (!(is_lfe && bin == 6)) { if (band_psd[bin] <= band_psd[bin+1]) { begin = bin + 1; break; } } } end1=bndend; if (end1 > 22) end1=22; for (bin = begin; bin < end1; bin++) { if (!(is_lfe && bin == 6)) lowcomp = calc_lowcomp(lowcomp, band_psd[bin], band_psd[bin+1], bin); fastleak = FFMAX(fastleak - s->fast_decay, band_psd[bin] - fast_gain); slowleak = FFMAX(slowleak - s->slow_decay, band_psd[bin] - s->slow_gain); excite[bin] = FFMAX(fastleak - lowcomp, slowleak); } begin = 22; } else { /* coupling channel */ begin = bndstrt; fastleak = (s->cpl_fast_leak << 8) + 768; slowleak = (s->cpl_slow_leak << 8) + 768; } for (bin = begin; bin < bndend; bin++) { fastleak = FFMAX(fastleak - s->fast_decay, band_psd[bin] - fast_gain); slowleak = FFMAX(slowleak - s->slow_decay, band_psd[bin] - s->slow_gain); excite[bin] = FFMAX(fastleak, slowleak); } /* compute masking curve */ for (bin = bndstrt; bin < bndend; bin++) { tmp = s->db_per_bit - band_psd[bin]; if (tmp > 0) { excite[bin] += tmp >> 2; } mask[bin] = FFMAX(ff_ac3_hearing_threshold_tab[bin >> s->sr_shift][s->sr_code], excite[bin]); } /* delta bit allocation */ if (dba_mode == DBA_REUSE || dba_mode == DBA_NEW) { int band, seg, delta; band = 0; for (seg = 0; seg < FFMIN(8, dba_nsegs); seg++) { band = FFMIN(49, band + dba_offsets[seg]); if (dba_values[seg] >= 4) { delta = (dba_values[seg] - 3) << 7; } else { delta = 
(dba_values[seg] - 4) << 7; } for (k = 0; k < dba_lengths[seg]; k++) { mask[band] += delta; band++; } } } }"} {"target": 0, "idx": 26776, "func": "static inline void vmsvga_update_rect_delayed(struct vmsvga_state_s *s, int x, int y, int w, int h) { struct vmsvga_rect_s *rect = &s->redraw_fifo[s->redraw_fifo_last ++]; s->redraw_fifo_last &= REDRAW_FIFO_LEN - 1; rect->x = x; rect->y = y; rect->w = w; rect->h = h; }"} {"target": 0, "idx": 26777, "func": "static inline void t_gen_addx_carry(DisasContext *dc, TCGv d) { if (dc->flagx_known) { if (dc->flags_x) { TCGv c; c = tcg_temp_new(TCG_TYPE_TL); t_gen_mov_TN_preg(c, PR_CCS); /* C flag is already at bit 0. */ tcg_gen_andi_tl(c, c, C_FLAG); tcg_gen_add_tl(d, d, c); tcg_temp_free(c); } } else { TCGv x, c; x = tcg_temp_new(TCG_TYPE_TL); c = tcg_temp_new(TCG_TYPE_TL); t_gen_mov_TN_preg(x, PR_CCS); tcg_gen_mov_tl(c, x); /* Propagate carry into d if X is set. Branch free. */ tcg_gen_andi_tl(c, c, C_FLAG); tcg_gen_andi_tl(x, x, X_FLAG); tcg_gen_shri_tl(x, x, 4); tcg_gen_and_tl(x, x, c); tcg_gen_add_tl(d, d, x); tcg_temp_free(x); tcg_temp_free(c); } }"} {"target": 0, "idx": 26780, "func": "static void rtas_start_cpu(sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { target_ulong id, start, r3; CPUState *cs; if (nargs != 3 || nret != 1) { rtas_st(rets, 0, -3); return; } id = rtas_ld(args, 0); start = rtas_ld(args, 1); r3 = rtas_ld(args, 2); cs = qemu_get_cpu(id); if (cs != NULL) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; if (!cs->halted) { rtas_st(rets, 0, -1); return; } /* This will make sure qemu state is up to date with kvm, and * mark it dirty so our changes get flushed back before the * new cpu enters */ kvm_cpu_synchronize_state(cs); env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME); env->nip = start; env->gpr[3] = r3; cs->halted = 0; qemu_cpu_kick(cs); rtas_st(rets, 0, 0); return; } /* Didn't find a matching cpu */ rtas_st(rets, 0, -3); }"} {"target": 0, "idx": 26792, "func": "int i2c_start_transfer(i2c_bus *bus, int address, int recv) { DeviceState *qdev; i2c_slave *slave = NULL; LIST_FOREACH(qdev, &bus->qbus.children, sibling) { slave = I2C_SLAVE_FROM_QDEV(qdev); if (slave->address == address) break; } if (!slave) return 1; /* If the bus is already busy, assume this is a repeated start condition. */ bus->current_dev = slave; slave->info->event(slave, recv ? 
I2C_START_RECV : I2C_START_SEND); return 0; }"} {"target": 0, "idx": 26819, "func": "static int aasc_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AascContext *s = avctx->priv_data; int compr, i, stride; s->frame.reference = 3; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame)) { av_log(avctx, AV_LOG_ERROR, \"reget_buffer() failed\\n\"); return -1; } compr = AV_RL32(buf); buf += 4; buf_size -= 4; switch (avctx->codec_tag) { case MKTAG('A', 'A', 'S', '4'): bytestream2_init(&s->gb, buf - 4, buf_size + 4); ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb); break; case MKTAG('A', 'A', 'S', 'C'): switch(compr){ case 0: stride = (avctx->width * 3 + 3) & ~3; for(i = avctx->height - 1; i >= 0; i--){ if(avctx->width*3 > buf_size){ av_log(avctx, AV_LOG_ERROR, \"Next line is beyond buffer bounds\\n\"); break; } memcpy(s->frame.data[0] + i*s->frame.linesize[0], buf, avctx->width*3); buf += stride; buf_size -= stride; } break; case 1: bytestream2_init(&s->gb, buf, buf_size); ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb); break; default: av_log(avctx, AV_LOG_ERROR, \"Unknown compression type %d\\n\", compr); return -1; } break; default: av_log(avctx, AV_LOG_ERROR, \"Unknown FourCC: %X\\n\", avctx->codec_tag); return -1; } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; /* report that the buffer was completely consumed */ return buf_size; }"} {"target": 1, "idx": 26836, "func": "int ff_h264_field_end(H264Context *h, int in_setup) { AVCodecContext *const avctx = h->avctx; int err = 0; h->mb_y = 0; if (CONFIG_H264_VDPAU_DECODER && h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) ff_vdpau_h264_set_reference_frames(h); if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) { if (!h->droppable) { err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); h->prev_poc_msb = h->poc_msb; h->prev_poc_lsb = h->poc_lsb; } h->prev_frame_num_offset = h->frame_num_offset; h->prev_frame_num = h->frame_num; h->outputed_poc = h->next_outputed_poc; } if (avctx->hwaccel) { if (avctx->hwaccel->end_frame(avctx) < 0) av_log(avctx, AV_LOG_ERROR, \"hardware accelerator failed to decode picture\\n\"); } if (CONFIG_H264_VDPAU_DECODER && h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) ff_vdpau_h264_picture_complete(h); #if CONFIG_ERROR_RESILIENCE /* * FIXME: Error handling code does not seem to support interlaced * when slices span multiple rows * The ff_er_add_slice calls don't work right for bottom * fields; they cause massive erroneous error concealing * Error marking covers both fields (top and bottom). * This causes a mismatched s->error_count * and a bad error table. Further, the error count goes to * INT_MAX when called for bottom field, because mb_y is * past end by one (callers fault) and resync_mb_y != 0 * causes problems for the first MB line, too. 
*/ if (!FIELD_PICTURE(h) && h->current_slice && !h->sps.new) { ff_h264_set_erpic(&h->er.cur_pic, h->cur_pic_ptr); ff_er_frame_end(&h->er); } #endif /* CONFIG_ERROR_RESILIENCE */ if (!in_setup && !h->droppable) ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure == PICT_BOTTOM_FIELD); emms_c(); h->current_slice = 0; return err; }"} {"target": 1, "idx": 26841, "func": "static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size) { unsigned i; /* Check that there is free space left in a buffer */ if (!(s->prnsts & SDHC_SPACE_AVAILABLE)) { ERRPRINT(\"Can't write to data buffer: buffer full\\n\"); return; } for (i = 0; i < size; i++) { s->fifo_buffer[s->data_count] = value & 0xFF; s->data_count++; value >>= 8; if (s->data_count >= (s->blksize & 0x0fff)) { DPRINT_L2(\"write buffer filled with %u bytes of data\\n\", s->data_count); s->data_count = 0; s->prnsts &= ~SDHC_SPACE_AVAILABLE; if (s->prnsts & SDHC_DOING_WRITE) { sdhci_write_block_to_card(s); } } } }"} {"target": 1, "idx": 26848, "func": "static CharDriverState *qemu_chr_open_pipe(ChardevHostdev *opts) { const char *filename = opts->device; CharDriverState *chr; WinCharState *s; chr = qemu_chr_alloc(); s = g_malloc0(sizeof(WinCharState)); chr->opaque = s; chr->chr_write = win_chr_write; chr->chr_close = win_chr_close; if (win_chr_pipe_init(chr, filename) < 0) { g_free(s); g_free(chr); return NULL; } return chr; }"} {"target": 1, "idx": 26862, "func": "static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr) { int i, k, sb = 0; int msb = sbr->k[0]; int usb = sbr->kx[1]; int goal_sb = ((1000 << 11) + (sbr->sample_rate >> 1)) / sbr->sample_rate; sbr->num_patches = 0; if (goal_sb < sbr->kx[1] + sbr->m[1]) { for (k = 0; sbr->f_master[k] < goal_sb; k++) ; } else k = sbr->n_master; do { int odd = 0; for (i = k; i == k || sb > (sbr->k[0] - 1 + msb - odd); i--) { sb = sbr->f_master[i]; odd = (sb + sbr->k[0]) & 1; } sbr->patch_num_subbands[sbr->num_patches] = FFMAX(sb - usb, 0); sbr->patch_start_subband[sbr->num_patches] = sbr->k[0] - odd - sbr->patch_num_subbands[sbr->num_patches]; if (sbr->patch_num_subbands[sbr->num_patches] > 0) { usb = sb; msb = sb; sbr->num_patches++; } else msb = sbr->kx[1]; if (sbr->f_master[k] - sb < 3) k = sbr->n_master; } while (sb != sbr->kx[1] + sbr->m[1]); if (sbr->patch_num_subbands[sbr->num_patches-1] < 3 && sbr->num_patches > 1) sbr->num_patches--; // Requirements (14496-3 sp04 p205) sets the maximum number of patches to 5 // However the Coding Technologies decoder check uses 6 patches if (sbr->num_patches > 6) { av_log(ac->avccontext, AV_LOG_ERROR, \"Too many patches: %d\\n\", sbr->num_patches); return -1; } return 0; }"} {"target": 0, "idx": 26873, "func": "static void *thread_func(void *p) { struct thread_info *info = p; rcu_register_thread(); atomic_inc(&n_ready_threads); while (!atomic_mb_read(&test_start)) { cpu_relax(); } rcu_read_lock(); while (!atomic_read(&test_stop)) { info->r = xorshift64star(info->r); info->func(info); } rcu_read_unlock(); rcu_unregister_thread(); return NULL; }"} {"target": 0, "idx": 26880, "func": "static int http_start_receive_data(HTTPContext *c) { int fd; if (c->stream->feed_opened) return -1; /* Don't permit writing to this one */ if (c->stream->readonly) return -1; /* open feed */ fd = open(c->stream->feed_filename, O_RDWR); if (fd < 0) { http_log(\"Error opening feeder file: %s\\n\", strerror(errno)); return -1; } c->feed_fd = fd; if (c->stream->truncate) { /* truncate feed file */ ffm_write_write_index(c->feed_fd, 
FFM_PACKET_SIZE); ftruncate(c->feed_fd, FFM_PACKET_SIZE); http_log(\"Truncating feed file '%s'\\n\", c->stream->feed_filename); } else { if ((c->stream->feed_write_index = ffm_read_write_index(fd)) < 0) { http_log(\"Error reading write index from feed file: %s\\n\", strerror(errno)); return -1; } } c->stream->feed_write_index = FFMAX(ffm_read_write_index(fd), FFM_PACKET_SIZE); c->stream->feed_size = lseek(fd, 0, SEEK_END); lseek(fd, 0, SEEK_SET); /* init buffer input */ c->buffer_ptr = c->buffer; c->buffer_end = c->buffer + FFM_PACKET_SIZE; c->stream->feed_opened = 1; c->chunked_encoding = !!av_stristr(c->buffer, \"Transfer-Encoding: chunked\"); return 0; }"} {"target": 0, "idx": 26884, "func": "static int usb_host_scan_dev(void *opaque, USBScanFunc *func) { FILE *f = NULL; char line[1024]; char buf[1024]; int bus_num, addr, speed, device_count, class_id, product_id, vendor_id; char product_name[512]; int ret = 0; if (!usb_host_device_path) { perror(\"husb: USB Host Device Path not set\"); goto the_end; } snprintf(line, sizeof(line), \"%s/devices\", usb_host_device_path); f = fopen(line, \"r\"); if (!f) { perror(\"husb: cannot open devices file\"); goto the_end; } device_count = 0; bus_num = addr = class_id = product_id = vendor_id = 0; speed = -1; /* Can't get the speed from /[proc|dev]/bus/usb/devices */ for(;;) { if (fgets(line, sizeof(line), f) == NULL) { break; } if (strlen(line) > 0) { line[strlen(line) - 1] = '\\0'; } if (line[0] == 'T' && line[1] == ':') { if (device_count && (vendor_id || product_id)) { /* New device. Add the previously discovered device. */ ret = func(opaque, bus_num, addr, 0, class_id, vendor_id, product_id, product_name, speed); if (ret) { goto the_end; } } if (get_tag_value(buf, sizeof(buf), line, \"Bus=\", \" \") < 0) { goto fail; } bus_num = atoi(buf); if (get_tag_value(buf, sizeof(buf), line, \"Dev#=\", \" \") < 0) { goto fail; } addr = atoi(buf); if (get_tag_value(buf, sizeof(buf), line, \"Spd=\", \" \") < 0) { goto fail; } if (!strcmp(buf, \"5000\")) { speed = USB_SPEED_SUPER; } else if (!strcmp(buf, \"480\")) { speed = USB_SPEED_HIGH; } else if (!strcmp(buf, \"1.5\")) { speed = USB_SPEED_LOW; } else { speed = USB_SPEED_FULL; } product_name[0] = '\\0'; class_id = 0xff; device_count++; product_id = 0; vendor_id = 0; } else if (line[0] == 'P' && line[1] == ':') { if (get_tag_value(buf, sizeof(buf), line, \"Vendor=\", \" \") < 0) { goto fail; } vendor_id = strtoul(buf, NULL, 16); if (get_tag_value(buf, sizeof(buf), line, \"ProdID=\", \" \") < 0) { goto fail; } product_id = strtoul(buf, NULL, 16); } else if (line[0] == 'S' && line[1] == ':') { if (get_tag_value(buf, sizeof(buf), line, \"Product=\", \"\") < 0) { goto fail; } pstrcpy(product_name, sizeof(product_name), buf); } else if (line[0] == 'D' && line[1] == ':') { if (get_tag_value(buf, sizeof(buf), line, \"Cls=\", \" (\") < 0) { goto fail; } class_id = strtoul(buf, NULL, 16); } fail: ; } if (device_count && (vendor_id || product_id)) { /* Add the last device. 
*/ ret = func(opaque, bus_num, addr, 0, class_id, vendor_id, product_id, product_name, speed); } the_end: if (f) { fclose(f); } return ret; }"} {"target": 0, "idx": 26886, "func": "static void ppc_cpu_class_init(ObjectClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); pcc->parent_realize = dc->realize; pcc->pvr = CPU_POWERPC_DEFAULT_MASK; pcc->pvr_mask = CPU_POWERPC_DEFAULT_MASK; pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_always; dc->realize = ppc_cpu_realizefn; dc->unrealize = ppc_cpu_unrealizefn; pcc->parent_reset = cc->reset; cc->reset = ppc_cpu_reset; cc->class_by_name = ppc_cpu_class_by_name; cc->has_work = ppc_cpu_has_work; cc->do_interrupt = ppc_cpu_do_interrupt; cc->dump_state = ppc_cpu_dump_state; cc->dump_statistics = ppc_cpu_dump_statistics; cc->set_pc = ppc_cpu_set_pc; cc->gdb_read_register = ppc_cpu_gdb_read_register; cc->gdb_write_register = ppc_cpu_gdb_write_register; #ifdef CONFIG_USER_ONLY cc->handle_mmu_fault = ppc_cpu_handle_mmu_fault; #else cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug; cc->vmsd = &vmstate_ppc_cpu; #if defined(TARGET_PPC64) cc->write_elf64_note = ppc64_cpu_write_elf64_note; cc->write_elf64_qemunote = ppc64_cpu_write_elf64_qemunote; #endif #endif cc->gdb_num_core_regs = 71; #ifdef USE_APPLE_GDB cc->gdb_read_register = ppc_cpu_gdb_read_register_apple; cc->gdb_write_register = ppc_cpu_gdb_write_register_apple; cc->gdb_num_core_regs = 71 + 32; #endif #if defined(TARGET_PPC64) cc->gdb_core_xml_file = \"power64-core.xml\"; #else cc->gdb_core_xml_file = \"power-core.xml\"; #endif #ifndef CONFIG_USER_ONLY cc->virtio_is_big_endian = ppc_cpu_is_big_endian; #endif dc->fw_name = \"PowerPC,UNKNOWN\"; }"} {"target": 0, "idx": 26904, "func": "qemu_deliver_packet(VLANClientState *sender, const uint8_t *buf, int size) { VLANClientState *vc; for (vc = sender->vlan->first_client; vc != NULL; vc = vc->next) { if (vc != sender && !vc->link_down) { vc->receive(vc->opaque, buf, size); } } }"} {"target": 1, "idx": 26935, "func": "void ppc970_irq_init (CPUState *env) { env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, env, 7); }"} {"target": 0, "idx": 26941, "func": "static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, int error_code, unsigned int next_eip, int is_hw) { SegmentCache *dt; target_ulong ptr, ssp; int type, dpl, selector, ss_dpl, cpl; int has_error_code, new_stack, shift; uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0; uint32_t old_eip, sp_mask; int vm86 = env->eflags & VM_MASK; has_error_code = 0; if (!is_int && !is_hw) { has_error_code = exception_has_error_code(intno); } if (is_int) { old_eip = next_eip; } else { old_eip = env->eip; } dt = &env->idt; if (intno * 8 + 7 > dt->limit) { raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); } ptr = dt->base + intno * 8; e1 = cpu_ldl_kernel(env, ptr); e2 = cpu_ldl_kernel(env, ptr + 4); /* check gate type */ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; switch (type) { case 5: /* task gate */ /* must do that check here to return the correct error code */ if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); } switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); if (has_error_code) { int type; uint32_t mask; /* push the error code */ type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; shift = type >> 3; if (env->segs[R_SS].flags & DESC_B_MASK) { mask = 0xffffffff; } else { mask = 0xffff; } esp = (env->regs[R_ESP] - (2 << shift)) & mask; 
ssp = env->segs[R_SS].base + esp; if (shift) { cpu_stl_kernel(env, ssp, error_code); } else { cpu_stw_kernel(env, ssp, error_code); } SET_ESP(esp, mask); } return; case 6: /* 286 interrupt gate */ case 7: /* 286 trap gate */ case 14: /* 386 interrupt gate */ case 15: /* 386 trap gate */ break; default: raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); break; } dpl = (e2 >> DESC_DPL_SHIFT) & 3; cpl = env->hflags & HF_CPL_MASK; /* check privilege if software int */ if (is_int && dpl < cpl) { raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); } /* check valid bit */ if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); } selector = e1 >> 16; offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); if ((selector & 0xfffc) == 0) { raise_exception_err(env, EXCP0D_GPF, 0); } if (load_segment(env, &e1, &e2, selector) != 0) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (dpl > cpl) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); } if (!(e2 & DESC_C_MASK) && dpl < cpl) { /* to inner privilege */ get_ss_esp_from_tss(env, &ss, &esp, dpl, 0); if ((ss & 0xfffc) == 0) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } if ((ss & 3) != dpl) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; if (ss_dpl != dpl) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } if (!(ss_e2 & DESC_S_MASK) || (ss_e2 & DESC_CS_MASK) || !(ss_e2 & DESC_W_MASK)) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } if (!(ss_e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } new_stack = 1; sp_mask = get_sp_mask(ss_e2); ssp = get_seg_base(ss_e1, ss_e2); } else if ((e2 & DESC_C_MASK) || dpl == cpl) { /* to same privilege */ if (vm86) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } new_stack = 0; sp_mask = get_sp_mask(env->segs[R_SS].flags); ssp = env->segs[R_SS].base; esp = env->regs[R_ESP]; dpl = cpl; } else { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); new_stack = 0; /* avoid warning */ sp_mask = 0; /* avoid warning */ ssp = 0; /* avoid warning */ esp = 0; /* avoid warning */ } shift = type >> 3; #if 0 /* XXX: check that enough room is available */ push_size = 6 + (new_stack << 2) + (has_error_code << 1); if (vm86) { push_size += 8; } push_size <<= shift; #endif if (shift == 1) { if (new_stack) { if (vm86) { PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector); PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); } PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector); PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]); } PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env)); PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector); PUSHL(ssp, esp, sp_mask, old_eip); if (has_error_code) { PUSHL(ssp, esp, sp_mask, error_code); } } else { if (new_stack) { if (vm86) { PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector); PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector); PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector); PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector); } PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector); PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]); } 
PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env)); PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector); PUSHW(ssp, esp, sp_mask, old_eip); if (has_error_code) { PUSHW(ssp, esp, sp_mask, error_code); } } /* interrupt gate clear IF mask */ if ((type & 1) == 0) { env->eflags &= ~IF_MASK; } env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); if (new_stack) { if (vm86) { cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); } ss = (ss & ~3) | dpl; cpu_x86_load_seg_cache(env, R_SS, ss, ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); } SET_ESP(esp, sp_mask); selector = (selector & ~3) | dpl; cpu_x86_load_seg_cache(env, R_CS, selector, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); env->eip = offset; }"} {"target": 0, "idx": 26945, "func": "static void kvm_start_vcpu(CPUState *env) { env->thread = qemu_mallocz(sizeof(QemuThread)); env->halt_cond = qemu_mallocz(sizeof(QemuCond)); qemu_cond_init(env->halt_cond); qemu_thread_create(env->thread, kvm_cpu_thread_fn, env); while (env->created == 0) qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100); }"} {"target": 0, "idx": 26946, "func": "static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash, bool secondary, target_ulong ptem, ppc_hash_pte64_t *pte) { CPUPPCState *env = &cpu->env; int i; uint64_t token; target_ulong pte0, pte1; target_ulong pte_index; pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP; token = ppc_hash64_start_access(cpu, pte_index); if (!token) { return -1; } for (i = 0; i < HPTES_PER_GROUP; i++) { pte0 = ppc_hash64_load_hpte0(cpu, token, i); pte1 = ppc_hash64_load_hpte1(cpu, token, i); if ((pte0 & HPTE64_V_VALID) && (secondary == !!(pte0 & HPTE64_V_SECONDARY)) && HPTE64_V_COMPARE(pte0, ptem)) { pte->pte0 = pte0; pte->pte1 = pte1; ppc_hash64_stop_access(token); return (pte_index + i) * HASH_PTE_SIZE_64; } } ppc_hash64_stop_access(token); /* * We didn't find a valid entry. */ return -1; }"} {"target": 0, "idx": 26947, "func": "static av_cold int dcadec_init(AVCodecContext *avctx) { DCAContext *s = avctx->priv_data; s->avctx = avctx; s->core.avctx = avctx; s->exss.avctx = avctx; s->xll.avctx = avctx; s->lbr.avctx = avctx; ff_dca_init_vlcs(); if (ff_dca_core_init(&s->core) < 0) return AVERROR(ENOMEM); if (ff_dca_lbr_init(&s->lbr) < 0) return AVERROR(ENOMEM); ff_dcadsp_init(&s->dcadsp); s->core.dcadsp = &s->dcadsp; s->xll.dcadsp = &s->dcadsp; s->lbr.dcadsp = &s->dcadsp; s->crctab = av_crc_get_table(AV_CRC_16_CCITT); switch (avctx->request_channel_layout & ~AV_CH_LAYOUT_NATIVE) { case 0: s->request_channel_layout = 0; break; case AV_CH_LAYOUT_STEREO: case AV_CH_LAYOUT_STEREO_DOWNMIX: s->request_channel_layout = DCA_SPEAKER_LAYOUT_STEREO; break; case AV_CH_LAYOUT_5POINT0: s->request_channel_layout = DCA_SPEAKER_LAYOUT_5POINT0; break; case AV_CH_LAYOUT_5POINT1: s->request_channel_layout = DCA_SPEAKER_LAYOUT_5POINT1; break; default: av_log(avctx, AV_LOG_WARNING, \"Invalid request_channel_layout\\n\"); break; } avctx->sample_fmt = AV_SAMPLE_FMT_S32P; avctx->bits_per_raw_sample = 24; return 0; }"} {"target": 0, "idx": 26971, "func": "static int vorbis_parse_setup_hdr_mappings(vorbis_context *vc) { GetBitContext *gb=&vc->gb; uint_fast8_t i, j; vc->mapping_count=get_bits(gb, 6)+1; vc->mappings=(vorbis_mapping *)av_mallocz(vc->mapping_count * sizeof(vorbis_mapping)); AV_DEBUG(\" There are %d mappings. 
\\n\", vc->mapping_count); for(i=0;imapping_count;++i) { vorbis_mapping *mapping_setup=&vc->mappings[i]; if (get_bits(gb, 16)) { av_log(vc->avccontext, AV_LOG_ERROR, \"Other mappings than type 0 are not compliant with the Vorbis I specification. \\n\"); return 1; } if (get_bits1(gb)) { mapping_setup->submaps=get_bits(gb, 4)+1; } else { mapping_setup->submaps=1; } if (get_bits1(gb)) { mapping_setup->coupling_steps=get_bits(gb, 8)+1; mapping_setup->magnitude=(uint_fast8_t *)av_mallocz(mapping_setup->coupling_steps * sizeof(uint_fast8_t)); mapping_setup->angle=(uint_fast8_t *)av_mallocz(mapping_setup->coupling_steps * sizeof(uint_fast8_t)); for(j=0;jcoupling_steps;++j) { mapping_setup->magnitude[j]=get_bits(gb, ilog(vc->audio_channels-1)); mapping_setup->angle[j]=get_bits(gb, ilog(vc->audio_channels-1)); // FIXME: sanity checks } } else { mapping_setup->coupling_steps=0; } AV_DEBUG(\" %d mapping coupling steps: %d \\n\", i, mapping_setup->coupling_steps); if(get_bits(gb, 2)) { av_log(vc->avccontext, AV_LOG_ERROR, \"%d. mapping setup data invalid. \\n\", i); return 1; // following spec. } if (mapping_setup->submaps>1) { mapping_setup->mux=(uint_fast8_t *)av_mallocz(vc->audio_channels * sizeof(uint_fast8_t)); for(j=0;jaudio_channels;++j) { mapping_setup->mux[j]=get_bits(gb, 4); } } for(j=0;jsubmaps;++j) { skip_bits(gb, 8); // FIXME check? mapping_setup->submap_floor[j]=get_bits(gb, 8); mapping_setup->submap_residue[j]=get_bits(gb, 8); AV_DEBUG(\" %d mapping %d submap : floor %d, residue %d \\n\", i, j, mapping_setup->submap_floor[j], mapping_setup->submap_residue[j]); } } return 0; }"} {"target": 1, "idx": 26987, "func": "void stq_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint64_t val) { val = tswap64(val); spapr_tce_dma_write(dev, taddr, &val, sizeof(val)); }"} {"target": 0, "idx": 26988, "func": "static void vp6_parse_coeff_models(VP56Context *s) { VP56RangeCoder *c = &s->c; VP56Model *model = s->modelp; int def_prob[11]; int node, cg, ctx, pos; int ct; /* code type */ int pt; /* plane type (0 for Y, 1 for U or V) */ memset(def_prob, 0x80, sizeof(def_prob)); for (pt=0; pt<2; pt++) for (node=0; node<11; node++) if (vp56_rac_get_prob(c, vp6_dccv_pct[pt][node])) { def_prob[node] = vp56_rac_gets_nn(c, 7); model->coeff_dccv[pt][node] = def_prob[node]; } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) { model->coeff_dccv[pt][node] = def_prob[node]; } if (vp56_rac_get(c)) { for (pos=1; pos<64; pos++) if (vp56_rac_get_prob(c, vp6_coeff_reorder_pct[pos])) model->coeff_reorder[pos] = vp56_rac_gets(c, 4); vp6_coeff_order_table_init(s); } for (cg=0; cg<2; cg++) for (node=0; node<14; node++) if (vp56_rac_get_prob(c, vp6_runv_pct[cg][node])) model->coeff_runv[cg][node] = vp56_rac_gets_nn(c, 7); for (ct=0; ct<3; ct++) for (pt=0; pt<2; pt++) for (cg=0; cg<6; cg++) for (node=0; node<11; node++) if (vp56_rac_get_prob(c, vp6_ract_pct[ct][pt][cg][node])) { def_prob[node] = vp56_rac_gets_nn(c, 7); model->coeff_ract[pt][ct][cg][node] = def_prob[node]; } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) { model->coeff_ract[pt][ct][cg][node] = def_prob[node]; } if (s->use_huffman) { for (pt=0; pt<2; pt++) { vp6_build_huff_tree(s, model->coeff_dccv[pt], vp6_huff_coeff_map, 12, &s->dccv_vlc[pt]); vp6_build_huff_tree(s, model->coeff_runv[pt], vp6_huff_run_map, 9, &s->runv_vlc[pt]); for (ct=0; ct<3; ct++) for (cg = 0; cg < 6; cg++) vp6_build_huff_tree(s, model->coeff_ract[pt][ct][cg], vp6_huff_coeff_map, 12, &s->ract_vlc[pt][ct][cg]); } memset(s->nb_null, 0, sizeof(s->nb_null)); } else { /* coeff_dcct is a linear 
combination of coeff_dccv */ for (pt=0; pt<2; pt++) for (ctx=0; ctx<3; ctx++) for (node=0; node<5; node++) model->coeff_dcct[pt][ctx][node] = av_clip(((model->coeff_dccv[pt][node] * vp6_dccv_lc[ctx][node][0] + 128) >> 8) + vp6_dccv_lc[ctx][node][1], 1, 255); } }"} {"target": 1, "idx": 26993, "func": "test_opts_dict_unvisited(void) { QemuOpts *opts; Visitor *v; UserDefOptions *userdef; opts = qemu_opts_parse(qemu_find_opts(\"userdef\"), \"i64x=0,bogus=1\", false, &error_abort); v = opts_visitor_new(opts); /* BUG: bogus should be diagnosed */ visit_type_UserDefOptions(v, NULL, &userdef, &error_abort); visit_free(v); qemu_opts_del(opts); qapi_free_UserDefOptions(userdef); }"} {"target": 0, "idx": 27000, "func": "static int64_t coroutine_fn parallels_co_get_block_status(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum) { BDRVParallelsState *s = bs->opaque; int64_t offset; qemu_co_mutex_lock(&s->lock); offset = block_status(s, sector_num, nb_sectors, pnum); qemu_co_mutex_unlock(&s->lock); if (offset < 0) { return 0; } return (offset << BDRV_SECTOR_BITS) | BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID; }"} {"target": 0, "idx": 27007, "func": "void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val) { ppcemb_tlb_t *tlb; target_ulong page, end; LOG_SWTLB(\"%s entry %d val \" TARGET_FMT_lx \"\\n\", __func__, (int)entry, val); entry &= 0x3F; tlb = &env->tlb[entry].tlbe; /* Invalidate previous TLB (if it's valid) */ if (tlb->prot & PAGE_VALID) { end = tlb->EPN + tlb->size; LOG_SWTLB(\"%s: invalidate old TLB %d start \" TARGET_FMT_lx \" end \" TARGET_FMT_lx \"\\n\", __func__, (int)entry, tlb->EPN, end); for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) tlb_flush_page(env, page); } tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7); /* We cannot handle TLB size < TARGET_PAGE_SIZE. * If this ever occurs, one should use the ppcemb target instead * of the ppc or ppc64 one */ if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) { cpu_abort(env, \"TLB size \" TARGET_FMT_lu \" < %u \" \"are not supported (%d)\\n\", tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7)); } tlb->EPN = val & ~(tlb->size - 1); if (val & 0x40) tlb->prot |= PAGE_VALID; else tlb->prot &= ~PAGE_VALID; if (val & 0x20) { /* XXX: TO BE FIXED */ cpu_abort(env, \"Little-endian TLB entries are not supported by now\\n\"); } tlb->PID = env->spr[SPR_40x_PID]; /* PID */ LOG_SWTLB(\"%s: set up TLB %d RPN \" TARGET_FMT_plx \" EPN \" TARGET_FMT_lx \" size \" TARGET_FMT_lx \" prot %c%c%c%c PID %d\\n\", __func__, (int)entry, tlb->RPN, tlb->EPN, tlb->size, tlb->prot & PAGE_READ ? 'r' : '-', tlb->prot & PAGE_WRITE ? 'w' : '-', tlb->prot & PAGE_EXEC ? 'x' : '-', tlb->prot & PAGE_VALID ? 
'v' : '-', (int)tlb->PID); /* Invalidate new TLB (if valid) */ if (tlb->prot & PAGE_VALID) { end = tlb->EPN + tlb->size; LOG_SWTLB(\"%s: invalidate TLB %d start \" TARGET_FMT_lx \" end \" TARGET_FMT_lx \"\\n\", __func__, (int)entry, tlb->EPN, end); for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) tlb_flush_page(env, page); } }"} {"target": 1, "idx": 27021, "func": "static void kvm_update_msi_routes_all(void *private, bool global, uint32_t index, uint32_t mask) { int cnt = 0; MSIRouteEntry *entry; MSIMessage msg; /* TODO: explicit route update */ QLIST_FOREACH(entry, &msi_route_list, list) { cnt++; msg = pci_get_msi_message(entry->dev, entry->vector); kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, entry->dev); } kvm_irqchip_commit_routes(kvm_state); trace_kvm_x86_update_msi_routes(cnt); }"} {"target": 0, "idx": 27045, "func": "static void spr_write_601_ubatu (void *opaque, int sprn) { DisasContext *ctx = opaque; gen_op_store_601_batu((sprn - SPR_IBAT0U) / 2); RET_STOP(ctx); }"} {"target": 0, "idx": 27046, "func": "static void ide_sector_write(IDEState *s) { int64_t sector_num; int ret, n, n1; s->status = READY_STAT | SEEK_STAT; sector_num = ide_get_sector(s); #if defined(DEBUG_IDE) printf(\"write sector=%Ld\\n\", sector_num); #endif n = s->nsector; if (n > s->req_nb_sectors) n = s->req_nb_sectors; ret = bdrv_write(s->bs, sector_num, s->io_buffer, n); s->nsector -= n; if (s->nsector == 0) { /* no more sector to write */ ide_transfer_stop(s); } else { n1 = s->nsector; if (n1 > s->req_nb_sectors) n1 = s->req_nb_sectors; ide_transfer_start(s, s->io_buffer, 512 * n1, ide_sector_write); } ide_set_sector(s, sector_num + n); #ifdef TARGET_I386 if (win2k_install_hack) { /* It seems there is a bug in the Windows 2000 installer HDD IDE driver which fills the disk with empty logs when the IDE write IRQ comes too early. This hack tries to correct that at the expense of slower write performances. Use this option _only_ to install Windows 2000. You must disable it for normal use. */ qemu_mod_timer(s->sector_write_timer, qemu_get_clock(vm_clock) + (ticks_per_sec / 1000)); } else #endif { ide_set_irq(s); } }"} {"target": 0, "idx": 27048, "func": "AioContext *iohandler_get_aio_context(void) { iohandler_init(); return iohandler_ctx; }"} {"target": 0, "idx": 27056, "func": "abi_long do_syscall(void *cpu_env, int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) { abi_long ret; struct stat st; struct statfs stfs; void *p; #ifdef DEBUG gemu_log(\"syscall %d\", num); #endif if(do_strace) print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); switch(num) { case TARGET_NR_exit: #ifdef CONFIG_USE_NPTL /* In old applications this may be used to implement _exit(2). However in threaded applictions it is used for thread termination, and _exit_group is used for application termination. Do thread termination if we have more then one thread. */ /* FIXME: This probably breaks if a signal arrives. We should probably be disabling signals. */ if (first_cpu->next_cpu) { TaskState *ts; CPUState **lastp; CPUState *p; cpu_list_lock(); lastp = &first_cpu; p = first_cpu; while (p && p != (CPUState *)cpu_env) { lastp = &p->next_cpu; p = p->next_cpu; } /* If we didn't find the CPU for this thread then something is horribly wrong. */ if (!p) abort(); /* Remove the CPU from the list. 
*/ *lastp = p->next_cpu; cpu_list_unlock(); ts = ((CPUState *)cpu_env)->opaque; if (ts->child_tidptr) { put_user_u32(0, ts->child_tidptr); sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, NULL, NULL, 0); } /* TODO: Free CPU state. */ pthread_exit(NULL); } #endif #ifdef TARGET_GPROF _mcleanup(); #endif gdb_exit(cpu_env, arg1); _exit(arg1); ret = 0; /* avoid warning */ break; case TARGET_NR_read: if (arg3 == 0) ret = 0; else { if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) goto efault; ret = get_errno(read(arg1, p, arg3)); unlock_user(p, arg2, ret); } break; case TARGET_NR_write: if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) goto efault; ret = get_errno(write(arg1, p, arg3)); unlock_user(p, arg2, 0); break; case TARGET_NR_open: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(open(path(p), target_to_host_bitmask(arg2, fcntl_flags_tbl), arg3)); unlock_user(p, arg1, 0); break; #if defined(TARGET_NR_openat) && defined(__NR_openat) case TARGET_NR_openat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(sys_openat(arg1, path(p), target_to_host_bitmask(arg3, fcntl_flags_tbl), arg4)); unlock_user(p, arg2, 0); break; #endif case TARGET_NR_close: ret = get_errno(close(arg1)); break; case TARGET_NR_brk: ret = do_brk(arg1); break; case TARGET_NR_fork: ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); break; #ifdef TARGET_NR_waitpid case TARGET_NR_waitpid: { int status; ret = get_errno(waitpid(arg1, &status, arg3)); if (!is_error(ret) && arg2 && put_user_s32(host_to_target_waitstatus(status), arg2)) goto efault; } break; #endif #ifdef TARGET_NR_waitid case TARGET_NR_waitid: { siginfo_t info; info.si_pid = 0; ret = get_errno(waitid(arg1, arg2, &info, arg4)); if (!is_error(ret) && arg3 && info.si_pid != 0) { if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) goto efault; host_to_target_siginfo(p, &info); unlock_user(p, arg3, sizeof(target_siginfo_t)); } } break; #endif #ifdef TARGET_NR_creat /* not on alpha */ case TARGET_NR_creat: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(creat(p, arg2)); unlock_user(p, arg1, 0); break; #endif case TARGET_NR_link: { void * p2; p = lock_user_string(arg1); p2 = lock_user_string(arg2); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(link(p, p2)); unlock_user(p2, arg2, 0); unlock_user(p, arg1, 0); } break; #if defined(TARGET_NR_linkat) && defined(__NR_linkat) case TARGET_NR_linkat: { void * p2 = NULL; if (!arg2 || !arg4) goto efault; p = lock_user_string(arg2); p2 = lock_user_string(arg4); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5)); unlock_user(p, arg2, 0); unlock_user(p2, arg4, 0); } break; #endif case TARGET_NR_unlink: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(unlink(p)); unlock_user(p, arg1, 0); break; #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) case TARGET_NR_unlinkat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(sys_unlinkat(arg1, p, arg3)); unlock_user(p, arg2, 0); break; #endif case TARGET_NR_execve: { char **argp, **envp; int argc, envc; abi_ulong gp; abi_ulong guest_argp; abi_ulong guest_envp; abi_ulong addr; char **q; argc = 0; guest_argp = arg2; for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { if (get_user_ual(addr, gp)) goto efault; if (!addr) break; argc++; } envc = 0; guest_envp = arg3; for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { if (get_user_ual(addr, gp)) goto efault; if (!addr) break; envc++; } argp = alloca((argc + 1) * sizeof(void *)); 
envp = alloca((envc + 1) * sizeof(void *)); for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) { if (get_user_ual(addr, gp)) goto execve_efault; if (!addr) break; if (!(*q = lock_user_string(addr))) goto execve_efault; } *q = NULL; for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) { if (get_user_ual(addr, gp)) goto execve_efault; if (!addr) break; if (!(*q = lock_user_string(addr))) goto execve_efault; } *q = NULL; if (!(p = lock_user_string(arg1))) goto execve_efault; ret = get_errno(execve(p, argp, envp)); unlock_user(p, arg1, 0); goto execve_end; execve_efault: ret = -TARGET_EFAULT; execve_end: for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) { if (get_user_ual(addr, gp) || !addr) break; unlock_user(*q, addr, 0); } for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) { if (get_user_ual(addr, gp) || !addr) break; unlock_user(*q, addr, 0); } } break; case TARGET_NR_chdir: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chdir(p)); unlock_user(p, arg1, 0); break; #ifdef TARGET_NR_time case TARGET_NR_time: { time_t host_time; ret = get_errno(time(&host_time)); if (!is_error(ret) && arg1 && put_user_sal(host_time, arg1)) goto efault; } break; #endif case TARGET_NR_mknod: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(mknod(p, arg2, arg3)); unlock_user(p, arg1, 0); break; #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) case TARGET_NR_mknodat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(sys_mknodat(arg1, p, arg3, arg4)); unlock_user(p, arg2, 0); break; #endif case TARGET_NR_chmod: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chmod(p, arg2)); unlock_user(p, arg1, 0); break; #ifdef TARGET_NR_break case TARGET_NR_break: goto unimplemented; #endif #ifdef TARGET_NR_oldstat case TARGET_NR_oldstat: goto unimplemented; #endif case TARGET_NR_lseek: ret = get_errno(lseek(arg1, arg2, arg3)); break; #ifdef TARGET_NR_getxpid case TARGET_NR_getxpid: #else case TARGET_NR_getpid: #endif ret = get_errno(getpid()); break; case TARGET_NR_mount: { /* need to look at the data field */ void *p2, *p3; p = lock_user_string(arg1); p2 = lock_user_string(arg2); p3 = lock_user_string(arg3); if (!p || !p2 || !p3) ret = -TARGET_EFAULT; else { /* FIXME - arg5 should be locked, but it isn't clear how to * do that since it's not guaranteed to be a NULL-terminated * string. */ if ( ! 
arg5 ) ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); else ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); } unlock_user(p, arg1, 0); unlock_user(p2, arg2, 0); unlock_user(p3, arg3, 0); break; } #ifdef TARGET_NR_umount case TARGET_NR_umount: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(umount(p)); unlock_user(p, arg1, 0); break; #endif #ifdef TARGET_NR_stime /* not on alpha */ case TARGET_NR_stime: { time_t host_time; if (get_user_sal(host_time, arg1)) goto efault; ret = get_errno(stime(&host_time)); } break; #endif case TARGET_NR_ptrace: goto unimplemented; #ifdef TARGET_NR_alarm /* not on alpha */ case TARGET_NR_alarm: ret = alarm(arg1); break; #endif #ifdef TARGET_NR_oldfstat case TARGET_NR_oldfstat: goto unimplemented; #endif #ifdef TARGET_NR_pause /* not on alpha */ case TARGET_NR_pause: ret = get_errno(pause()); break; #endif #ifdef TARGET_NR_utime case TARGET_NR_utime: { struct utimbuf tbuf, *host_tbuf; struct target_utimbuf *target_tbuf; if (arg2) { if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) goto efault; tbuf.actime = tswapl(target_tbuf->actime); tbuf.modtime = tswapl(target_tbuf->modtime); unlock_user_struct(target_tbuf, arg2, 0); host_tbuf = &tbuf; } else { host_tbuf = NULL; } if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(utime(p, host_tbuf)); unlock_user(p, arg1, 0); } break; #endif case TARGET_NR_utimes: { struct timeval *tvp, tv[2]; if (arg2) { if (copy_from_user_timeval(&tv[0], arg2) || copy_from_user_timeval(&tv[1], arg2 + sizeof(struct target_timeval))) goto efault; tvp = tv; } else { tvp = NULL; } if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(utimes(p, tvp)); unlock_user(p, arg1, 0); } break; #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) case TARGET_NR_futimesat: { struct timeval *tvp, tv[2]; if (arg3) { if (copy_from_user_timeval(&tv[0], arg3) || copy_from_user_timeval(&tv[1], arg3 + sizeof(struct target_timeval))) goto efault; tvp = tv; } else { tvp = NULL; } if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(sys_futimesat(arg1, path(p), tvp)); unlock_user(p, arg2, 0); } break; #endif #ifdef TARGET_NR_stty case TARGET_NR_stty: goto unimplemented; #endif #ifdef TARGET_NR_gtty case TARGET_NR_gtty: goto unimplemented; #endif case TARGET_NR_access: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(access(path(p), arg2)); unlock_user(p, arg1, 0); break; #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) case TARGET_NR_faccessat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(sys_faccessat(arg1, p, arg3)); unlock_user(p, arg2, 0); break; #endif #ifdef TARGET_NR_nice /* not on alpha */ case TARGET_NR_nice: ret = get_errno(nice(arg1)); break; #endif #ifdef TARGET_NR_ftime case TARGET_NR_ftime: goto unimplemented; #endif case TARGET_NR_sync: sync(); ret = 0; break; case TARGET_NR_kill: ret = get_errno(kill(arg1, target_to_host_signal(arg2))); break; case TARGET_NR_rename: { void *p2; p = lock_user_string(arg1); p2 = lock_user_string(arg2); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(rename(p, p2)); unlock_user(p2, arg2, 0); unlock_user(p, arg1, 0); } break; #if defined(TARGET_NR_renameat) && defined(__NR_renameat) case TARGET_NR_renameat: { void *p2; p = lock_user_string(arg2); p2 = lock_user_string(arg4); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(sys_renameat(arg1, p, arg3, p2)); unlock_user(p2, arg4, 0); unlock_user(p, arg2, 0); } break; #endif case TARGET_NR_mkdir: if (!(p = 
lock_user_string(arg1))) goto efault; ret = get_errno(mkdir(p, arg2)); unlock_user(p, arg1, 0); break; #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) case TARGET_NR_mkdirat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(sys_mkdirat(arg1, p, arg3)); unlock_user(p, arg2, 0); break; #endif case TARGET_NR_rmdir: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(rmdir(p)); unlock_user(p, arg1, 0); break; case TARGET_NR_dup: ret = get_errno(dup(arg1)); break; case TARGET_NR_pipe: ret = do_pipe(cpu_env, arg1, 0); break; #ifdef TARGET_NR_pipe2 case TARGET_NR_pipe2: ret = do_pipe(cpu_env, arg1, arg2); break; #endif case TARGET_NR_times: { struct target_tms *tmsp; struct tms tms; ret = get_errno(times(&tms)); if (arg1) { tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); if (!tmsp) goto efault; tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime)); tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime)); tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime)); tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime)); } if (!is_error(ret)) ret = host_to_target_clock_t(ret); } break; #ifdef TARGET_NR_prof case TARGET_NR_prof: goto unimplemented; #endif #ifdef TARGET_NR_signal case TARGET_NR_signal: goto unimplemented; #endif case TARGET_NR_acct: if (arg1 == 0) { ret = get_errno(acct(NULL)); } else { if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(acct(path(p))); unlock_user(p, arg1, 0); } break; #ifdef TARGET_NR_umount2 /* not on alpha */ case TARGET_NR_umount2: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(umount2(p, arg2)); unlock_user(p, arg1, 0); break; #endif #ifdef TARGET_NR_lock case TARGET_NR_lock: goto unimplemented; #endif case TARGET_NR_ioctl: ret = do_ioctl(arg1, arg2, arg3); break; case TARGET_NR_fcntl: ret = do_fcntl(arg1, arg2, arg3); break; #ifdef TARGET_NR_mpx case TARGET_NR_mpx: goto unimplemented; #endif case TARGET_NR_setpgid: ret = get_errno(setpgid(arg1, arg2)); break; #ifdef TARGET_NR_ulimit case TARGET_NR_ulimit: goto unimplemented; #endif #ifdef TARGET_NR_oldolduname case TARGET_NR_oldolduname: goto unimplemented; #endif case TARGET_NR_umask: ret = get_errno(umask(arg1)); break; case TARGET_NR_chroot: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chroot(p)); unlock_user(p, arg1, 0); break; case TARGET_NR_ustat: goto unimplemented; case TARGET_NR_dup2: ret = get_errno(dup2(arg1, arg2)); break; #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) case TARGET_NR_dup3: ret = get_errno(dup3(arg1, arg2, arg3)); break; #endif #ifdef TARGET_NR_getppid /* not on alpha */ case TARGET_NR_getppid: ret = get_errno(getppid()); break; #endif case TARGET_NR_getpgrp: ret = get_errno(getpgrp()); break; case TARGET_NR_setsid: ret = get_errno(setsid()); break; #ifdef TARGET_NR_sigaction case TARGET_NR_sigaction: { #if !defined(TARGET_MIPS) struct target_old_sigaction *old_act; struct target_sigaction act, oact, *pact; if (arg2) { if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) goto efault; act._sa_handler = old_act->_sa_handler; target_siginitset(&act.sa_mask, old_act->sa_mask); act.sa_flags = old_act->sa_flags; act.sa_restorer = old_act->sa_restorer; unlock_user_struct(old_act, arg2, 0); pact = &act; } else { pact = NULL; } ret = get_errno(do_sigaction(arg1, pact, &oact)); if (!is_error(ret) && arg3) { if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) goto efault; old_act->_sa_handler = oact._sa_handler; old_act->sa_mask = 
oact.sa_mask.sig[0]; old_act->sa_flags = oact.sa_flags; old_act->sa_restorer = oact.sa_restorer; unlock_user_struct(old_act, arg3, 1); } #else struct target_sigaction act, oact, *pact, *old_act; if (arg2) { if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) goto efault; act._sa_handler = old_act->_sa_handler; target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); act.sa_flags = old_act->sa_flags; unlock_user_struct(old_act, arg2, 0); pact = &act; } else { pact = NULL; } ret = get_errno(do_sigaction(arg1, pact, &oact)); if (!is_error(ret) && arg3) { if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) goto efault; old_act->_sa_handler = oact._sa_handler; old_act->sa_flags = oact.sa_flags; old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; old_act->sa_mask.sig[1] = 0; old_act->sa_mask.sig[2] = 0; old_act->sa_mask.sig[3] = 0; unlock_user_struct(old_act, arg3, 1); } #endif } break; #endif case TARGET_NR_rt_sigaction: { struct target_sigaction *act; struct target_sigaction *oact; if (arg2) { if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) goto efault; } else act = NULL; if (arg3) { if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { ret = -TARGET_EFAULT; goto rt_sigaction_fail; } } else oact = NULL; ret = get_errno(do_sigaction(arg1, act, oact)); rt_sigaction_fail: if (act) unlock_user_struct(act, arg2, 0); if (oact) unlock_user_struct(oact, arg3, 1); } break; #ifdef TARGET_NR_sgetmask /* not on alpha */ case TARGET_NR_sgetmask: { sigset_t cur_set; abi_ulong target_set; sigprocmask(0, NULL, &cur_set); host_to_target_old_sigset(&target_set, &cur_set); ret = target_set; } break; #endif #ifdef TARGET_NR_ssetmask /* not on alpha */ case TARGET_NR_ssetmask: { sigset_t set, oset, cur_set; abi_ulong target_set = arg1; sigprocmask(0, NULL, &cur_set); target_to_host_old_sigset(&set, &target_set); sigorset(&set, &set, &cur_set); sigprocmask(SIG_SETMASK, &set, &oset); host_to_target_old_sigset(&target_set, &oset); ret = target_set; } break; #endif #ifdef TARGET_NR_sigprocmask case TARGET_NR_sigprocmask: { int how = arg1; sigset_t set, oldset, *set_ptr; if (arg2) { switch(how) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; break; case TARGET_SIG_UNBLOCK: how = SIG_UNBLOCK; break; case TARGET_SIG_SETMASK: how = SIG_SETMASK; break; default: ret = -TARGET_EINVAL; goto fail; } if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) goto efault; target_to_host_old_sigset(&set, p); unlock_user(p, arg2, 0); set_ptr = &set; } else { how = 0; set_ptr = NULL; } ret = get_errno(sigprocmask(arg1, set_ptr, &oldset)); if (!is_error(ret) && arg3) { if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) goto efault; host_to_target_old_sigset(p, &oldset); unlock_user(p, arg3, sizeof(target_sigset_t)); } } break; #endif case TARGET_NR_rt_sigprocmask: { int how = arg1; sigset_t set, oldset, *set_ptr; if (arg2) { switch(how) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; break; case TARGET_SIG_UNBLOCK: how = SIG_UNBLOCK; break; case TARGET_SIG_SETMASK: how = SIG_SETMASK; break; default: ret = -TARGET_EINVAL; goto fail; } if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) goto efault; target_to_host_sigset(&set, p); unlock_user(p, arg2, 0); set_ptr = &set; } else { how = 0; set_ptr = NULL; } ret = get_errno(sigprocmask(how, set_ptr, &oldset)); if (!is_error(ret) && arg3) { if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) goto efault; host_to_target_sigset(p, &oldset); unlock_user(p, arg3, sizeof(target_sigset_t)); } } break; #ifdef TARGET_NR_sigpending case 
TARGET_NR_sigpending: { sigset_t set; ret = get_errno(sigpending(&set)); if (!is_error(ret)) { if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) goto efault; host_to_target_old_sigset(p, &set); unlock_user(p, arg1, sizeof(target_sigset_t)); } } break; #endif case TARGET_NR_rt_sigpending: { sigset_t set; ret = get_errno(sigpending(&set)); if (!is_error(ret)) { if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) goto efault; host_to_target_sigset(p, &set); unlock_user(p, arg1, sizeof(target_sigset_t)); } } break; #ifdef TARGET_NR_sigsuspend case TARGET_NR_sigsuspend: { sigset_t set; if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) goto efault; target_to_host_old_sigset(&set, p); unlock_user(p, arg1, 0); ret = get_errno(sigsuspend(&set)); } break; #endif case TARGET_NR_rt_sigsuspend: { sigset_t set; if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) goto efault; target_to_host_sigset(&set, p); unlock_user(p, arg1, 0); ret = get_errno(sigsuspend(&set)); } break; case TARGET_NR_rt_sigtimedwait: { sigset_t set; struct timespec uts, *puts; siginfo_t uinfo; if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) goto efault; target_to_host_sigset(&set, p); unlock_user(p, arg1, 0); if (arg3) { puts = &uts; target_to_host_timespec(puts, arg3); } else { puts = NULL; } ret = get_errno(sigtimedwait(&set, &uinfo, puts)); if (!is_error(ret) && arg2) { if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0))) goto efault; host_to_target_siginfo(p, &uinfo); unlock_user(p, arg2, sizeof(target_siginfo_t)); } } break; case TARGET_NR_rt_sigqueueinfo: { siginfo_t uinfo; if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) goto efault; target_to_host_siginfo(&uinfo, p); unlock_user(p, arg1, 0); ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); } break; #ifdef TARGET_NR_sigreturn case TARGET_NR_sigreturn: /* NOTE: ret is eax, so not transcoding must be done */ ret = do_sigreturn(cpu_env); break; #endif case TARGET_NR_rt_sigreturn: /* NOTE: ret is eax, so not transcoding must be done */ ret = do_rt_sigreturn(cpu_env); break; case TARGET_NR_sethostname: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(sethostname(p, arg2)); unlock_user(p, arg1, 0); break; case TARGET_NR_setrlimit: { /* XXX: convert resource ? */ int resource = arg1; struct target_rlimit *target_rlim; struct rlimit rlim; if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) goto efault; rlim.rlim_cur = tswapl(target_rlim->rlim_cur); rlim.rlim_max = tswapl(target_rlim->rlim_max); unlock_user_struct(target_rlim, arg2, 0); ret = get_errno(setrlimit(resource, &rlim)); } break; case TARGET_NR_getrlimit: { /* XXX: convert resource ? 
*/ int resource = arg1; struct target_rlimit *target_rlim; struct rlimit rlim; ret = get_errno(getrlimit(resource, &rlim)); if (!is_error(ret)) { if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) goto efault; target_rlim->rlim_cur = tswapl(rlim.rlim_cur); target_rlim->rlim_max = tswapl(rlim.rlim_max); unlock_user_struct(target_rlim, arg2, 1); } } break; case TARGET_NR_getrusage: { struct rusage rusage; ret = get_errno(getrusage(arg1, &rusage)); if (!is_error(ret)) { host_to_target_rusage(arg2, &rusage); } } break; case TARGET_NR_gettimeofday: { struct timeval tv; ret = get_errno(gettimeofday(&tv, NULL)); if (!is_error(ret)) { if (copy_to_user_timeval(arg1, &tv)) goto efault; } } break; case TARGET_NR_settimeofday: { struct timeval tv; if (copy_from_user_timeval(&tv, arg1)) goto efault; ret = get_errno(settimeofday(&tv, NULL)); } break; #ifdef TARGET_NR_select case TARGET_NR_select: { struct target_sel_arg_struct *sel; abi_ulong inp, outp, exp, tvp; long nsel; if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) goto efault; nsel = tswapl(sel->n); inp = tswapl(sel->inp); outp = tswapl(sel->outp); exp = tswapl(sel->exp); tvp = tswapl(sel->tvp); unlock_user_struct(sel, arg1, 0); ret = do_select(nsel, inp, outp, exp, tvp); } break; #endif case TARGET_NR_symlink: { void *p2; p = lock_user_string(arg1); p2 = lock_user_string(arg2); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(symlink(p, p2)); unlock_user(p2, arg2, 0); unlock_user(p, arg1, 0); } break; #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) case TARGET_NR_symlinkat: { void *p2; p = lock_user_string(arg1); p2 = lock_user_string(arg3); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(sys_symlinkat(p, arg2, p2)); unlock_user(p2, arg3, 0); unlock_user(p, arg1, 0); } break; #endif #ifdef TARGET_NR_oldlstat case TARGET_NR_oldlstat: goto unimplemented; #endif case TARGET_NR_readlink: { void *p2, *temp; p = lock_user_string(arg1); p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); if (!p || !p2) ret = -TARGET_EFAULT; else { if (strncmp((const char *)p, \"/proc/self/exe\", 14) == 0) { char real[PATH_MAX]; temp = realpath(exec_path,real); ret = (temp==NULL) ? 
get_errno(-1) : strlen(real) ; snprintf((char *)p2, arg3, \"%s\", real); } else ret = get_errno(readlink(path(p), p2, arg3)); } unlock_user(p2, arg2, ret); unlock_user(p, arg1, 0); } break; #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) case TARGET_NR_readlinkat: { void *p2; p = lock_user_string(arg2); p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4)); unlock_user(p2, arg3, ret); unlock_user(p, arg2, 0); } break; #endif #ifdef TARGET_NR_uselib case TARGET_NR_uselib: goto unimplemented; #endif #ifdef TARGET_NR_swapon case TARGET_NR_swapon: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(swapon(p, arg2)); unlock_user(p, arg1, 0); break; #endif case TARGET_NR_reboot: goto unimplemented; #ifdef TARGET_NR_readdir case TARGET_NR_readdir: goto unimplemented; #endif #ifdef TARGET_NR_mmap case TARGET_NR_mmap: #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) { abi_ulong *v; abi_ulong v1, v2, v3, v4, v5, v6; if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) goto efault; v1 = tswapl(v[0]); v2 = tswapl(v[1]); v3 = tswapl(v[2]); v4 = tswapl(v[3]); v5 = tswapl(v[4]); v6 = tswapl(v[5]); unlock_user(v, arg1, 0); ret = get_errno(target_mmap(v1, v2, v3, target_to_host_bitmask(v4, mmap_flags_tbl), v5, v6)); } #else ret = get_errno(target_mmap(arg1, arg2, arg3, target_to_host_bitmask(arg4, mmap_flags_tbl), arg5, arg6)); #endif break; #endif #ifdef TARGET_NR_mmap2 case TARGET_NR_mmap2: #ifndef MMAP_SHIFT #define MMAP_SHIFT 12 #endif ret = get_errno(target_mmap(arg1, arg2, arg3, target_to_host_bitmask(arg4, mmap_flags_tbl), arg5, arg6 << MMAP_SHIFT)); break; #endif case TARGET_NR_munmap: ret = get_errno(target_munmap(arg1, arg2)); break; case TARGET_NR_mprotect: ret = get_errno(target_mprotect(arg1, arg2, arg3)); break; #ifdef TARGET_NR_mremap case TARGET_NR_mremap: ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); break; #endif /* ??? msync/mlock/munlock are broken for softmmu. */ #ifdef TARGET_NR_msync case TARGET_NR_msync: ret = get_errno(msync(g2h(arg1), arg2, arg3)); break; #endif #ifdef TARGET_NR_mlock case TARGET_NR_mlock: ret = get_errno(mlock(g2h(arg1), arg2)); break; #endif #ifdef TARGET_NR_munlock case TARGET_NR_munlock: ret = get_errno(munlock(g2h(arg1), arg2)); break; #endif #ifdef TARGET_NR_mlockall case TARGET_NR_mlockall: ret = get_errno(mlockall(arg1)); break; #endif #ifdef TARGET_NR_munlockall case TARGET_NR_munlockall: ret = get_errno(munlockall()); break; #endif case TARGET_NR_truncate: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(truncate(p, arg2)); unlock_user(p, arg1, 0); break; case TARGET_NR_ftruncate: ret = get_errno(ftruncate(arg1, arg2)); break; case TARGET_NR_fchmod: ret = get_errno(fchmod(arg1, arg2)); break; #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) case TARGET_NR_fchmodat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(sys_fchmodat(arg1, p, arg3)); unlock_user(p, arg2, 0); break; #endif case TARGET_NR_getpriority: /* libc does special remapping of the return value of * sys_getpriority() so it's just easiest to call * sys_getpriority() directly rather than through libc. 
*/ ret = get_errno(sys_getpriority(arg1, arg2)); break; case TARGET_NR_setpriority: ret = get_errno(setpriority(arg1, arg2, arg3)); break; #ifdef TARGET_NR_profil case TARGET_NR_profil: goto unimplemented; #endif case TARGET_NR_statfs: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(statfs(path(p), &stfs)); unlock_user(p, arg1, 0); convert_statfs: if (!is_error(ret)) { struct target_statfs *target_stfs; if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) goto efault; __put_user(stfs.f_type, &target_stfs->f_type); __put_user(stfs.f_bsize, &target_stfs->f_bsize); __put_user(stfs.f_blocks, &target_stfs->f_blocks); __put_user(stfs.f_bfree, &target_stfs->f_bfree); __put_user(stfs.f_bavail, &target_stfs->f_bavail); __put_user(stfs.f_files, &target_stfs->f_files); __put_user(stfs.f_ffree, &target_stfs->f_ffree); __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); __put_user(stfs.f_namelen, &target_stfs->f_namelen); unlock_user_struct(target_stfs, arg2, 1); } break; case TARGET_NR_fstatfs: ret = get_errno(fstatfs(arg1, &stfs)); goto convert_statfs; #ifdef TARGET_NR_statfs64 case TARGET_NR_statfs64: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(statfs(path(p), &stfs)); unlock_user(p, arg1, 0); convert_statfs64: if (!is_error(ret)) { struct target_statfs64 *target_stfs; if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) goto efault; __put_user(stfs.f_type, &target_stfs->f_type); __put_user(stfs.f_bsize, &target_stfs->f_bsize); __put_user(stfs.f_blocks, &target_stfs->f_blocks); __put_user(stfs.f_bfree, &target_stfs->f_bfree); __put_user(stfs.f_bavail, &target_stfs->f_bavail); __put_user(stfs.f_files, &target_stfs->f_files); __put_user(stfs.f_ffree, &target_stfs->f_ffree); __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); __put_user(stfs.f_namelen, &target_stfs->f_namelen); unlock_user_struct(target_stfs, arg3, 1); } break; case TARGET_NR_fstatfs64: ret = get_errno(fstatfs(arg1, &stfs)); goto convert_statfs64; #endif #ifdef TARGET_NR_ioperm case TARGET_NR_ioperm: goto unimplemented; #endif #ifdef TARGET_NR_socketcall case TARGET_NR_socketcall: ret = do_socketcall(arg1, arg2); break; #endif #ifdef TARGET_NR_accept case TARGET_NR_accept: ret = do_accept(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_bind case TARGET_NR_bind: ret = do_bind(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_connect case TARGET_NR_connect: ret = do_connect(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_getpeername case TARGET_NR_getpeername: ret = do_getpeername(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_getsockname case TARGET_NR_getsockname: ret = do_getsockname(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_getsockopt case TARGET_NR_getsockopt: ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); break; #endif #ifdef TARGET_NR_listen case TARGET_NR_listen: ret = get_errno(listen(arg1, arg2)); break; #endif #ifdef TARGET_NR_recv case TARGET_NR_recv: ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); break; #endif #ifdef TARGET_NR_recvfrom case TARGET_NR_recvfrom: ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); break; #endif #ifdef TARGET_NR_recvmsg case TARGET_NR_recvmsg: ret = do_sendrecvmsg(arg1, arg2, arg3, 0); break; #endif #ifdef TARGET_NR_send case TARGET_NR_send: ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); break; #endif #ifdef TARGET_NR_sendmsg case TARGET_NR_sendmsg: ret = do_sendrecvmsg(arg1, 
arg2, arg3, 1); break; #endif #ifdef TARGET_NR_sendto case TARGET_NR_sendto: ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); break; #endif #ifdef TARGET_NR_shutdown case TARGET_NR_shutdown: ret = get_errno(shutdown(arg1, arg2)); break; #endif #ifdef TARGET_NR_socket case TARGET_NR_socket: ret = do_socket(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_socketpair case TARGET_NR_socketpair: ret = do_socketpair(arg1, arg2, arg3, arg4); break; #endif #ifdef TARGET_NR_setsockopt case TARGET_NR_setsockopt: ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); break; #endif case TARGET_NR_syslog: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); unlock_user(p, arg2, 0); break; case TARGET_NR_setitimer: { struct itimerval value, ovalue, *pvalue; if (arg2) { pvalue = &value; if (copy_from_user_timeval(&pvalue->it_interval, arg2) || copy_from_user_timeval(&pvalue->it_value, arg2 + sizeof(struct target_timeval))) goto efault; } else { pvalue = NULL; } ret = get_errno(setitimer(arg1, pvalue, &ovalue)); if (!is_error(ret) && arg3) { if (copy_to_user_timeval(arg3, &ovalue.it_interval) || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), &ovalue.it_value)) goto efault; } } break; case TARGET_NR_getitimer: { struct itimerval value; ret = get_errno(getitimer(arg1, &value)); if (!is_error(ret) && arg2) { if (copy_to_user_timeval(arg2, &value.it_interval) || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), &value.it_value)) goto efault; } } break; case TARGET_NR_stat: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(stat(path(p), &st)); unlock_user(p, arg1, 0); goto do_stat; case TARGET_NR_lstat: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lstat(path(p), &st)); unlock_user(p, arg1, 0); goto do_stat; case TARGET_NR_fstat: { ret = get_errno(fstat(arg1, &st)); do_stat: if (!is_error(ret)) { struct target_stat *target_st; if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) goto efault; memset(target_st, 0, sizeof(*target_st)); __put_user(st.st_dev, &target_st->st_dev); __put_user(st.st_ino, &target_st->st_ino); __put_user(st.st_mode, &target_st->st_mode); __put_user(st.st_uid, &target_st->st_uid); __put_user(st.st_gid, &target_st->st_gid); __put_user(st.st_nlink, &target_st->st_nlink); __put_user(st.st_rdev, &target_st->st_rdev); __put_user(st.st_size, &target_st->st_size); __put_user(st.st_blksize, &target_st->st_blksize); __put_user(st.st_blocks, &target_st->st_blocks); __put_user(st.st_atime, &target_st->target_st_atime); __put_user(st.st_mtime, &target_st->target_st_mtime); __put_user(st.st_ctime, &target_st->target_st_ctime); unlock_user_struct(target_st, arg2, 1); } } break; #ifdef TARGET_NR_olduname case TARGET_NR_olduname: goto unimplemented; #endif #ifdef TARGET_NR_iopl case TARGET_NR_iopl: goto unimplemented; #endif case TARGET_NR_vhangup: ret = get_errno(vhangup()); break; #ifdef TARGET_NR_idle case TARGET_NR_idle: goto unimplemented; #endif #ifdef TARGET_NR_syscall case TARGET_NR_syscall: ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0); break; #endif case TARGET_NR_wait4: { int status; abi_long status_ptr = arg2; struct rusage rusage, *rusage_ptr; abi_ulong target_rusage = arg4; if (target_rusage) rusage_ptr = &rusage; else rusage_ptr = NULL; ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); if (!is_error(ret)) { if (status_ptr) { status = host_to_target_waitstatus(status); if (put_user_s32(status, status_ptr)) goto efault; } if (target_rusage) 
host_to_target_rusage(target_rusage, &rusage); } } break; #ifdef TARGET_NR_swapoff case TARGET_NR_swapoff: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(swapoff(p)); unlock_user(p, arg1, 0); break; #endif case TARGET_NR_sysinfo: { struct target_sysinfo *target_value; struct sysinfo value; ret = get_errno(sysinfo(&value)); if (!is_error(ret) && arg1) { if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) goto efault; __put_user(value.uptime, &target_value->uptime); __put_user(value.loads[0], &target_value->loads[0]); __put_user(value.loads[1], &target_value->loads[1]); __put_user(value.loads[2], &target_value->loads[2]); __put_user(value.totalram, &target_value->totalram); __put_user(value.freeram, &target_value->freeram); __put_user(value.sharedram, &target_value->sharedram); __put_user(value.bufferram, &target_value->bufferram); __put_user(value.totalswap, &target_value->totalswap); __put_user(value.freeswap, &target_value->freeswap); __put_user(value.procs, &target_value->procs); __put_user(value.totalhigh, &target_value->totalhigh); __put_user(value.freehigh, &target_value->freehigh); __put_user(value.mem_unit, &target_value->mem_unit); unlock_user_struct(target_value, arg1, 1); } } break; #ifdef TARGET_NR_ipc case TARGET_NR_ipc: ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); break; #endif #ifdef TARGET_NR_semget case TARGET_NR_semget: ret = get_errno(semget(arg1, arg2, arg3)); break; #endif #ifdef TARGET_NR_semop case TARGET_NR_semop: ret = get_errno(do_semop(arg1, arg2, arg3)); break; #endif #ifdef TARGET_NR_semctl case TARGET_NR_semctl: ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); break; #endif #ifdef TARGET_NR_msgctl case TARGET_NR_msgctl: ret = do_msgctl(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_msgget case TARGET_NR_msgget: ret = get_errno(msgget(arg1, arg2)); break; #endif #ifdef TARGET_NR_msgrcv case TARGET_NR_msgrcv: ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); break; #endif #ifdef TARGET_NR_msgsnd case TARGET_NR_msgsnd: ret = do_msgsnd(arg1, arg2, arg3, arg4); break; #endif #ifdef TARGET_NR_shmget case TARGET_NR_shmget: ret = get_errno(shmget(arg1, arg2, arg3)); break; #endif #ifdef TARGET_NR_shmctl case TARGET_NR_shmctl: ret = do_shmctl(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_shmat case TARGET_NR_shmat: ret = do_shmat(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_shmdt case TARGET_NR_shmdt: ret = do_shmdt(arg1); break; #endif case TARGET_NR_fsync: ret = get_errno(fsync(arg1)); break; case TARGET_NR_clone: #if defined(TARGET_SH4) ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); #elif defined(TARGET_CRIS) ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5)); #else ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); #endif break; #ifdef __NR_exit_group /* new thread calls */ case TARGET_NR_exit_group: #ifdef TARGET_GPROF _mcleanup(); #endif gdb_exit(cpu_env, arg1); ret = get_errno(exit_group(arg1)); break; #endif case TARGET_NR_setdomainname: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(setdomainname(p, arg2)); unlock_user(p, arg1, 0); break; case TARGET_NR_uname: /* no need to transcode because we use the linux syscall */ { struct new_utsname * buf; if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) goto efault; ret = get_errno(sys_uname(buf)); if (!is_error(ret)) { /* Overrite the native machine name with whatever is being emulated. */ strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); /* Allow the user to override the reported release. 
*/ if (qemu_uname_release && *qemu_uname_release) strcpy (buf->release, qemu_uname_release); } unlock_user_struct(buf, arg1, 1); } break; #ifdef TARGET_I386 case TARGET_NR_modify_ldt: ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); break; #if !defined(TARGET_X86_64) case TARGET_NR_vm86old: goto unimplemented; case TARGET_NR_vm86: ret = do_vm86(cpu_env, arg1, arg2); break; #endif #endif case TARGET_NR_adjtimex: goto unimplemented; #ifdef TARGET_NR_create_module case TARGET_NR_create_module: #endif case TARGET_NR_init_module: case TARGET_NR_delete_module: #ifdef TARGET_NR_get_kernel_syms case TARGET_NR_get_kernel_syms: #endif goto unimplemented; case TARGET_NR_quotactl: goto unimplemented; case TARGET_NR_getpgid: ret = get_errno(getpgid(arg1)); break; case TARGET_NR_fchdir: ret = get_errno(fchdir(arg1)); break; #ifdef TARGET_NR_bdflush /* not on x86_64 */ case TARGET_NR_bdflush: goto unimplemented; #endif #ifdef TARGET_NR_sysfs case TARGET_NR_sysfs: goto unimplemented; #endif case TARGET_NR_personality: ret = get_errno(personality(arg1)); break; #ifdef TARGET_NR_afs_syscall case TARGET_NR_afs_syscall: goto unimplemented; #endif #ifdef TARGET_NR__llseek /* Not on alpha */ case TARGET_NR__llseek: { #if defined (__x86_64__) ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5)); if (put_user_s64(ret, arg4)) goto efault; #else int64_t res; ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); if (put_user_s64(res, arg4)) goto efault; #endif } break; #endif case TARGET_NR_getdents: #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 { struct target_dirent *target_dirp; struct linux_dirent *dirp; abi_long count = arg3; dirp = malloc(count); if (!dirp) { ret = -TARGET_ENOMEM; goto fail; } ret = get_errno(sys_getdents(arg1, dirp, count)); if (!is_error(ret)) { struct linux_dirent *de; struct target_dirent *tde; int len = ret; int reclen, treclen; int count1, tnamelen; count1 = 0; de = dirp; if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) goto efault; tde = target_dirp; while (len > 0) { reclen = de->d_reclen; treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long))); tde->d_reclen = tswap16(treclen); tde->d_ino = tswapl(de->d_ino); tde->d_off = tswapl(de->d_off); tnamelen = treclen - (2 * sizeof(abi_long) + 2); if (tnamelen > 256) tnamelen = 256; /* XXX: may not be correct */ pstrcpy(tde->d_name, tnamelen, de->d_name); de = (struct linux_dirent *)((char *)de + reclen); len -= reclen; tde = (struct target_dirent *)((char *)tde + treclen); count1 += treclen; } ret = count1; unlock_user(target_dirp, arg2, ret); } free(dirp); } #else { struct linux_dirent *dirp; abi_long count = arg3; if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) goto efault; ret = get_errno(sys_getdents(arg1, dirp, count)); if (!is_error(ret)) { struct linux_dirent *de; int len = ret; int reclen; de = dirp; while (len > 0) { reclen = de->d_reclen; if (reclen > len) break; de->d_reclen = tswap16(reclen); tswapls(&de->d_ino); tswapls(&de->d_off); de = (struct linux_dirent *)((char *)de + reclen); len -= reclen; } } unlock_user(dirp, arg2, ret); } #endif break; #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) case TARGET_NR_getdents64: { struct linux_dirent64 *dirp; abi_long count = arg3; if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) goto efault; ret = get_errno(sys_getdents64(arg1, dirp, count)); if (!is_error(ret)) { struct linux_dirent64 *de; int len = ret; int reclen; de = dirp; while (len > 0) { reclen = de->d_reclen; if (reclen > len) break; de->d_reclen = tswap16(reclen); 
tswap64s((uint64_t *)&de->d_ino); tswap64s((uint64_t *)&de->d_off); de = (struct linux_dirent64 *)((char *)de + reclen); len -= reclen; } } unlock_user(dirp, arg2, ret); } break; #endif /* TARGET_NR_getdents64 */ #ifdef TARGET_NR__newselect case TARGET_NR__newselect: ret = do_select(arg1, arg2, arg3, arg4, arg5); break; #endif #ifdef TARGET_NR_poll case TARGET_NR_poll: { struct target_pollfd *target_pfd; unsigned int nfds = arg2; int timeout = arg3; struct pollfd *pfd; unsigned int i; target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); if (!target_pfd) goto efault; pfd = alloca(sizeof(struct pollfd) * nfds); for(i = 0; i < nfds; i++) { pfd[i].fd = tswap32(target_pfd[i].fd); pfd[i].events = tswap16(target_pfd[i].events); } ret = get_errno(poll(pfd, nfds, timeout)); if (!is_error(ret)) { for(i = 0; i < nfds; i++) { target_pfd[i].revents = tswap16(pfd[i].revents); } ret += nfds * (sizeof(struct target_pollfd) - sizeof(struct pollfd)); } unlock_user(target_pfd, arg1, ret); } break; #endif case TARGET_NR_flock: /* NOTE: the flock constant seems to be the same for every Linux platform */ ret = get_errno(flock(arg1, arg2)); break; case TARGET_NR_readv: { int count = arg3; struct iovec *vec; vec = alloca(count * sizeof(struct iovec)); if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0) goto efault; ret = get_errno(readv(arg1, vec, count)); unlock_iovec(vec, arg2, count, 1); } break; case TARGET_NR_writev: { int count = arg3; struct iovec *vec; vec = alloca(count * sizeof(struct iovec)); if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0) goto efault; ret = get_errno(writev(arg1, vec, count)); unlock_iovec(vec, arg2, count, 0); } break; case TARGET_NR_getsid: ret = get_errno(getsid(arg1)); break; #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ case TARGET_NR_fdatasync: ret = get_errno(fdatasync(arg1)); break; #endif case TARGET_NR__sysctl: /* We don't implement this, but ENOTDIR is always a safe return value. 
*/ ret = -TARGET_ENOTDIR; break; case TARGET_NR_sched_setparam: { struct sched_param *target_schp; struct sched_param schp; if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) goto efault; schp.sched_priority = tswap32(target_schp->sched_priority); unlock_user_struct(target_schp, arg2, 0); ret = get_errno(sched_setparam(arg1, &schp)); } break; case TARGET_NR_sched_getparam: { struct sched_param *target_schp; struct sched_param schp; ret = get_errno(sched_getparam(arg1, &schp)); if (!is_error(ret)) { if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) goto efault; target_schp->sched_priority = tswap32(schp.sched_priority); unlock_user_struct(target_schp, arg2, 1); } } break; case TARGET_NR_sched_setscheduler: { struct sched_param *target_schp; struct sched_param schp; if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) goto efault; schp.sched_priority = tswap32(target_schp->sched_priority); unlock_user_struct(target_schp, arg3, 0); ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); } break; case TARGET_NR_sched_getscheduler: ret = get_errno(sched_getscheduler(arg1)); break; case TARGET_NR_sched_yield: ret = get_errno(sched_yield()); break; case TARGET_NR_sched_get_priority_max: ret = get_errno(sched_get_priority_max(arg1)); break; case TARGET_NR_sched_get_priority_min: ret = get_errno(sched_get_priority_min(arg1)); break; case TARGET_NR_sched_rr_get_interval: { struct timespec ts; ret = get_errno(sched_rr_get_interval(arg1, &ts)); if (!is_error(ret)) { host_to_target_timespec(arg2, &ts); } } break; case TARGET_NR_nanosleep: { struct timespec req, rem; target_to_host_timespec(&req, arg1); ret = get_errno(nanosleep(&req, &rem)); if (is_error(ret) && arg2) { host_to_target_timespec(arg2, &rem); } } break; #ifdef TARGET_NR_query_module case TARGET_NR_query_module: goto unimplemented; #endif #ifdef TARGET_NR_nfsservctl case TARGET_NR_nfsservctl: goto unimplemented; #endif case TARGET_NR_prctl: switch (arg1) { case PR_GET_PDEATHSIG: { int deathsig; ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); if (!is_error(ret) && arg2 && put_user_ual(deathsig, arg2)) goto efault; } break; default: ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); break; } break; #ifdef TARGET_NR_arch_prctl case TARGET_NR_arch_prctl: #if defined(TARGET_I386) && !defined(TARGET_ABI32) ret = do_arch_prctl(cpu_env, arg1, arg2); break; #else goto unimplemented; #endif #endif #ifdef TARGET_NR_pread case TARGET_NR_pread: #ifdef TARGET_ARM if (((CPUARMState *)cpu_env)->eabi) arg4 = arg5; #endif if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) goto efault; ret = get_errno(pread(arg1, p, arg3, arg4)); unlock_user(p, arg2, ret); break; case TARGET_NR_pwrite: #ifdef TARGET_ARM if (((CPUARMState *)cpu_env)->eabi) arg4 = arg5; #endif if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) goto efault; ret = get_errno(pwrite(arg1, p, arg3, arg4)); unlock_user(p, arg2, 0); break; #endif #ifdef TARGET_NR_pread64 case TARGET_NR_pread64: if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) goto efault; ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); unlock_user(p, arg2, ret); break; case TARGET_NR_pwrite64: if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) goto efault; ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); unlock_user(p, arg2, 0); break; #endif case TARGET_NR_getcwd: if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) goto efault; ret = get_errno(sys_getcwd1(p, arg2)); unlock_user(p, arg1, ret); break; case TARGET_NR_capget: goto unimplemented; case 
TARGET_NR_capset: goto unimplemented; case TARGET_NR_sigaltstack: #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \\ defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \\ defined(TARGET_M68K) ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env)); break; #else goto unimplemented; #endif case TARGET_NR_sendfile: goto unimplemented; #ifdef TARGET_NR_getpmsg case TARGET_NR_getpmsg: goto unimplemented; #endif #ifdef TARGET_NR_putpmsg case TARGET_NR_putpmsg: goto unimplemented; #endif #ifdef TARGET_NR_vfork case TARGET_NR_vfork: ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 0, 0, 0)); break; #endif #ifdef TARGET_NR_ugetrlimit case TARGET_NR_ugetrlimit: { struct rlimit rlim; ret = get_errno(getrlimit(arg1, &rlim)); if (!is_error(ret)) { struct target_rlimit *target_rlim; if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) goto efault; target_rlim->rlim_cur = tswapl(rlim.rlim_cur); target_rlim->rlim_max = tswapl(rlim.rlim_max); unlock_user_struct(target_rlim, arg2, 1); } break; } #endif #ifdef TARGET_NR_truncate64 case TARGET_NR_truncate64: if (!(p = lock_user_string(arg1))) goto efault; ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); unlock_user(p, arg1, 0); break; #endif #ifdef TARGET_NR_ftruncate64 case TARGET_NR_ftruncate64: ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); break; #endif #ifdef TARGET_NR_stat64 case TARGET_NR_stat64: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(stat(path(p), &st)); unlock_user(p, arg1, 0); if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg2, &st); break; #endif #ifdef TARGET_NR_lstat64 case TARGET_NR_lstat64: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lstat(path(p), &st)); unlock_user(p, arg1, 0); if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg2, &st); break; #endif #ifdef TARGET_NR_fstat64 case TARGET_NR_fstat64: ret = get_errno(fstat(arg1, &st)); if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg2, &st); break; #endif #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \\ (defined(__NR_fstatat64) || defined(__NR_newfstatat)) #ifdef TARGET_NR_fstatat64 case TARGET_NR_fstatat64: #endif #ifdef TARGET_NR_newfstatat case TARGET_NR_newfstatat: #endif if (!(p = lock_user_string(arg2))) goto efault; #ifdef __NR_fstatat64 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4)); #else ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4)); #endif if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg3, &st); break; #endif #ifdef USE_UID16 case TARGET_NR_lchown: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); unlock_user(p, arg1, 0); break; case TARGET_NR_getuid: ret = get_errno(high2lowuid(getuid())); break; case TARGET_NR_getgid: ret = get_errno(high2lowgid(getgid())); break; case TARGET_NR_geteuid: ret = get_errno(high2lowuid(geteuid())); break; case TARGET_NR_getegid: ret = get_errno(high2lowgid(getegid())); break; case TARGET_NR_setreuid: ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); break; case TARGET_NR_setregid: ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); break; case TARGET_NR_getgroups: { int gidsetsize = arg1; uint16_t *target_grouplist; gid_t *grouplist; int i; grouplist = alloca(gidsetsize * sizeof(gid_t)); ret = get_errno(getgroups(gidsetsize, grouplist)); if (gidsetsize == 0) break; if (!is_error(ret)) { target_grouplist = 
lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0); if (!target_grouplist) goto efault; for(i = 0;i < ret; i++) target_grouplist[i] = tswap16(grouplist[i]); unlock_user(target_grouplist, arg2, gidsetsize * 2); } } break; case TARGET_NR_setgroups: { int gidsetsize = arg1; uint16_t *target_grouplist; gid_t *grouplist; int i; grouplist = alloca(gidsetsize * sizeof(gid_t)); target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1); if (!target_grouplist) { ret = -TARGET_EFAULT; goto fail; } for(i = 0;i < gidsetsize; i++) grouplist[i] = tswap16(target_grouplist[i]); unlock_user(target_grouplist, arg2, 0); ret = get_errno(setgroups(gidsetsize, grouplist)); } break; case TARGET_NR_fchown: ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); break; #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) case TARGET_NR_fchownat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5)); unlock_user(p, arg2, 0); break; #endif #ifdef TARGET_NR_setresuid case TARGET_NR_setresuid: ret = get_errno(setresuid(low2highuid(arg1), low2highuid(arg2), low2highuid(arg3))); break; #endif #ifdef TARGET_NR_getresuid case TARGET_NR_getresuid: { uid_t ruid, euid, suid; ret = get_errno(getresuid(&ruid, &euid, &suid)); if (!is_error(ret)) { if (put_user_u16(high2lowuid(ruid), arg1) || put_user_u16(high2lowuid(euid), arg2) || put_user_u16(high2lowuid(suid), arg3)) goto efault; } } break; #endif #ifdef TARGET_NR_getresgid case TARGET_NR_setresgid: ret = get_errno(setresgid(low2highgid(arg1), low2highgid(arg2), low2highgid(arg3))); break; #endif #ifdef TARGET_NR_getresgid case TARGET_NR_getresgid: { gid_t rgid, egid, sgid; ret = get_errno(getresgid(&rgid, &egid, &sgid)); if (!is_error(ret)) { if (put_user_u16(high2lowgid(rgid), arg1) || put_user_u16(high2lowgid(egid), arg2) || put_user_u16(high2lowgid(sgid), arg3)) goto efault; } } break; #endif case TARGET_NR_chown: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); unlock_user(p, arg1, 0); break; case TARGET_NR_setuid: ret = get_errno(setuid(low2highuid(arg1))); break; case TARGET_NR_setgid: ret = get_errno(setgid(low2highgid(arg1))); break; case TARGET_NR_setfsuid: ret = get_errno(setfsuid(arg1)); break; case TARGET_NR_setfsgid: ret = get_errno(setfsgid(arg1)); break; #endif /* USE_UID16 */ #ifdef TARGET_NR_lchown32 case TARGET_NR_lchown32: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lchown(p, arg2, arg3)); unlock_user(p, arg1, 0); break; #endif #ifdef TARGET_NR_getuid32 case TARGET_NR_getuid32: ret = get_errno(getuid()); break; #endif #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_getxuid: { uid_t euid; euid=geteuid(); ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; } ret = get_errno(getuid()); break; #endif #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_getxgid: { uid_t egid; egid=getegid(); ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; } ret = get_errno(getgid()); break; #endif #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_osf_getsysinfo: ret = -TARGET_EOPNOTSUPP; switch (arg1) { case TARGET_GSI_IEEE_FP_CONTROL: { uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); /* Copied from linux ieee_fpcr_to_swcr. 
*/ swcr = (fpcr >> 35) & SWCR_STATUS_MASK; swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV | SWCR_TRAP_ENABLE_DZE | SWCR_TRAP_ENABLE_OVF); swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF | SWCR_TRAP_ENABLE_INE); swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; if (put_user_u64 (swcr, arg2)) goto efault; ret = 0; } break; /* case GSI_IEEE_STATE_AT_SIGNAL: -- Not implemented in linux kernel. case GSI_UACPROC: -- Retrieves current unaligned access state; not much used. case GSI_PROC_TYPE: -- Retrieves implver information; surely not used. case GSI_GET_HWRPB: -- Grabs a copy of the HWRPB; surely not used. */ } break; #endif #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_osf_setsysinfo: ret = -TARGET_EOPNOTSUPP; switch (arg1) { case TARGET_SSI_IEEE_FP_CONTROL: case TARGET_SSI_IEEE_RAISE_EXCEPTION: { uint64_t swcr, fpcr, orig_fpcr; if (get_user_u64 (swcr, arg2)) goto efault; orig_fpcr = cpu_alpha_load_fpcr (cpu_env); fpcr = orig_fpcr & FPCR_DYN_MASK; /* Copied from linux ieee_swcr_to_fpcr. */ fpcr |= (swcr & SWCR_STATUS_MASK) << 35; fpcr |= (swcr & SWCR_MAP_DMZ) << 36; fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV | SWCR_TRAP_ENABLE_DZE | SWCR_TRAP_ENABLE_OVF)) << 48; fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF | SWCR_TRAP_ENABLE_INE)) << 57; fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; cpu_alpha_store_fpcr (cpu_env, fpcr); ret = 0; if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) { /* Old exceptions are not signaled. */ fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); /* If any exceptions set by this call, and are unmasked, send a signal. */ /* ??? FIXME */ } } break; /* case SSI_NVPAIRS: -- Used with SSIN_UACPROC to enable unaligned accesses. case SSI_IEEE_STATE_AT_SIGNAL: case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: -- Not implemented in linux kernel */ } break; #endif #ifdef TARGET_NR_osf_sigprocmask /* Alpha specific. 
*/ case TARGET_NR_osf_sigprocmask: { abi_ulong mask; int how = arg1; sigset_t set, oldset; switch(arg1) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; break; case TARGET_SIG_UNBLOCK: how = SIG_UNBLOCK; break; case TARGET_SIG_SETMASK: how = SIG_SETMASK; break; default: ret = -TARGET_EINVAL; goto fail; } mask = arg2; target_to_host_old_sigset(&set, &mask); sigprocmask(arg1, &set, &oldset); host_to_target_old_sigset(&mask, &oldset); ret = mask; } break; #endif #ifdef TARGET_NR_getgid32 case TARGET_NR_getgid32: ret = get_errno(getgid()); break; #endif #ifdef TARGET_NR_geteuid32 case TARGET_NR_geteuid32: ret = get_errno(geteuid()); break; #endif #ifdef TARGET_NR_getegid32 case TARGET_NR_getegid32: ret = get_errno(getegid()); break; #endif #ifdef TARGET_NR_setreuid32 case TARGET_NR_setreuid32: ret = get_errno(setreuid(arg1, arg2)); break; #endif #ifdef TARGET_NR_setregid32 case TARGET_NR_setregid32: ret = get_errno(setregid(arg1, arg2)); break; #endif #ifdef TARGET_NR_getgroups32 case TARGET_NR_getgroups32: { int gidsetsize = arg1; uint32_t *target_grouplist; gid_t *grouplist; int i; grouplist = alloca(gidsetsize * sizeof(gid_t)); ret = get_errno(getgroups(gidsetsize, grouplist)); if (gidsetsize == 0) break; if (!is_error(ret)) { target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); if (!target_grouplist) { ret = -TARGET_EFAULT; goto fail; } for(i = 0;i < ret; i++) target_grouplist[i] = tswap32(grouplist[i]); unlock_user(target_grouplist, arg2, gidsetsize * 4); } } break; #endif #ifdef TARGET_NR_setgroups32 case TARGET_NR_setgroups32: { int gidsetsize = arg1; uint32_t *target_grouplist; gid_t *grouplist; int i; grouplist = alloca(gidsetsize * sizeof(gid_t)); target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); if (!target_grouplist) { ret = -TARGET_EFAULT; goto fail; } for(i = 0;i < gidsetsize; i++) grouplist[i] = tswap32(target_grouplist[i]); unlock_user(target_grouplist, arg2, 0); ret = get_errno(setgroups(gidsetsize, grouplist)); } break; #endif #ifdef TARGET_NR_fchown32 case TARGET_NR_fchown32: ret = get_errno(fchown(arg1, arg2, arg3)); break; #endif #ifdef TARGET_NR_setresuid32 case TARGET_NR_setresuid32: ret = get_errno(setresuid(arg1, arg2, arg3)); break; #endif #ifdef TARGET_NR_getresuid32 case TARGET_NR_getresuid32: { uid_t ruid, euid, suid; ret = get_errno(getresuid(&ruid, &euid, &suid)); if (!is_error(ret)) { if (put_user_u32(ruid, arg1) || put_user_u32(euid, arg2) || put_user_u32(suid, arg3)) goto efault; } } break; #endif #ifdef TARGET_NR_setresgid32 case TARGET_NR_setresgid32: ret = get_errno(setresgid(arg1, arg2, arg3)); break; #endif #ifdef TARGET_NR_getresgid32 case TARGET_NR_getresgid32: { gid_t rgid, egid, sgid; ret = get_errno(getresgid(&rgid, &egid, &sgid)); if (!is_error(ret)) { if (put_user_u32(rgid, arg1) || put_user_u32(egid, arg2) || put_user_u32(sgid, arg3)) goto efault; } } break; #endif #ifdef TARGET_NR_chown32 case TARGET_NR_chown32: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chown(p, arg2, arg3)); unlock_user(p, arg1, 0); break; #endif #ifdef TARGET_NR_setuid32 case TARGET_NR_setuid32: ret = get_errno(setuid(arg1)); break; #endif #ifdef TARGET_NR_setgid32 case TARGET_NR_setgid32: ret = get_errno(setgid(arg1)); break; #endif #ifdef TARGET_NR_setfsuid32 case TARGET_NR_setfsuid32: ret = get_errno(setfsuid(arg1)); break; #endif #ifdef TARGET_NR_setfsgid32 case TARGET_NR_setfsgid32: ret = get_errno(setfsgid(arg1)); break; #endif case TARGET_NR_pivot_root: goto unimplemented; #ifdef TARGET_NR_mincore case TARGET_NR_mincore: { 
void *a; ret = -TARGET_EFAULT; if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) goto efault; if (!(p = lock_user_string(arg3))) goto mincore_fail; ret = get_errno(mincore(a, arg2, p)); unlock_user(p, arg3, ret); mincore_fail: unlock_user(a, arg1, 0); } break; #endif #ifdef TARGET_NR_arm_fadvise64_64 case TARGET_NR_arm_fadvise64_64: { /* * arm_fadvise64_64 looks like fadvise64_64 but * with different argument order */ abi_long temp; temp = arg3; arg3 = arg4; arg4 = temp; } #endif #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) #ifdef TARGET_NR_fadvise64_64 case TARGET_NR_fadvise64_64: #endif #ifdef TARGET_NR_fadvise64 case TARGET_NR_fadvise64: #endif #ifdef TARGET_S390X switch (arg4) { case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ case 6: arg4 = POSIX_FADV_DONTNEED; break; case 7: arg4 = POSIX_FADV_NOREUSE; break; default: break; } #endif ret = -posix_fadvise(arg1, arg2, arg3, arg4); break; #endif #ifdef TARGET_NR_madvise case TARGET_NR_madvise: /* A straight passthrough may not be safe because qemu sometimes turns private flie-backed mappings into anonymous mappings. This will break MADV_DONTNEED. This is a hint, so ignoring and returning success is ok. */ ret = get_errno(0); break; #endif #if TARGET_ABI_BITS == 32 case TARGET_NR_fcntl64: { int cmd; struct flock64 fl; struct target_flock64 *target_fl; #ifdef TARGET_ARM struct target_eabi_flock64 *target_efl; #endif cmd = target_to_host_fcntl_cmd(arg2); if (cmd == -TARGET_EINVAL) return cmd; switch(arg2) { case TARGET_F_GETLK64: #ifdef TARGET_ARM if (((CPUARMState *)cpu_env)->eabi) { if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) goto efault; fl.l_type = tswap16(target_efl->l_type); fl.l_whence = tswap16(target_efl->l_whence); fl.l_start = tswap64(target_efl->l_start); fl.l_len = tswap64(target_efl->l_len); fl.l_pid = tswap32(target_efl->l_pid); unlock_user_struct(target_efl, arg3, 0); } else #endif { if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) goto efault; fl.l_type = tswap16(target_fl->l_type); fl.l_whence = tswap16(target_fl->l_whence); fl.l_start = tswap64(target_fl->l_start); fl.l_len = tswap64(target_fl->l_len); fl.l_pid = tswap32(target_fl->l_pid); unlock_user_struct(target_fl, arg3, 0); } ret = get_errno(fcntl(arg1, cmd, &fl)); if (ret == 0) { #ifdef TARGET_ARM if (((CPUARMState *)cpu_env)->eabi) { if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) goto efault; target_efl->l_type = tswap16(fl.l_type); target_efl->l_whence = tswap16(fl.l_whence); target_efl->l_start = tswap64(fl.l_start); target_efl->l_len = tswap64(fl.l_len); target_efl->l_pid = tswap32(fl.l_pid); unlock_user_struct(target_efl, arg3, 1); } else #endif { if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) goto efault; target_fl->l_type = tswap16(fl.l_type); target_fl->l_whence = tswap16(fl.l_whence); target_fl->l_start = tswap64(fl.l_start); target_fl->l_len = tswap64(fl.l_len); target_fl->l_pid = tswap32(fl.l_pid); unlock_user_struct(target_fl, arg3, 1); } } break; case TARGET_F_SETLK64: case TARGET_F_SETLKW64: #ifdef TARGET_ARM if (((CPUARMState *)cpu_env)->eabi) { if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) goto efault; fl.l_type = tswap16(target_efl->l_type); fl.l_whence = tswap16(target_efl->l_whence); fl.l_start = tswap64(target_efl->l_start); fl.l_len = tswap64(target_efl->l_len); fl.l_pid = tswap32(target_efl->l_pid); unlock_user_struct(target_efl, arg3, 0); } 
else #endif { if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) goto efault; fl.l_type = tswap16(target_fl->l_type); fl.l_whence = tswap16(target_fl->l_whence); fl.l_start = tswap64(target_fl->l_start); fl.l_len = tswap64(target_fl->l_len); fl.l_pid = tswap32(target_fl->l_pid); unlock_user_struct(target_fl, arg3, 0); } ret = get_errno(fcntl(arg1, cmd, &fl)); break; default: ret = do_fcntl(arg1, arg2, arg3); break; } break; } #endif #ifdef TARGET_NR_cacheflush case TARGET_NR_cacheflush: /* self-modifying code is handled automatically, so nothing needed */ ret = 0; break; #endif #ifdef TARGET_NR_security case TARGET_NR_security: goto unimplemented; #endif #ifdef TARGET_NR_getpagesize case TARGET_NR_getpagesize: ret = TARGET_PAGE_SIZE; break; #endif case TARGET_NR_gettid: ret = get_errno(gettid()); break; #ifdef TARGET_NR_readahead case TARGET_NR_readahead: #if TARGET_ABI_BITS == 32 #ifdef TARGET_ARM if (((CPUARMState *)cpu_env)->eabi) { arg2 = arg3; arg3 = arg4; arg4 = arg5; } #endif ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); #else ret = get_errno(readahead(arg1, arg2, arg3)); #endif break; #endif #ifdef TARGET_NR_setxattr case TARGET_NR_setxattr: case TARGET_NR_lsetxattr: case TARGET_NR_fsetxattr: case TARGET_NR_getxattr: case TARGET_NR_lgetxattr: case TARGET_NR_fgetxattr: case TARGET_NR_listxattr: case TARGET_NR_llistxattr: case TARGET_NR_flistxattr: case TARGET_NR_removexattr: case TARGET_NR_lremovexattr: case TARGET_NR_fremovexattr: ret = -TARGET_EOPNOTSUPP; break; #endif #ifdef TARGET_NR_set_thread_area case TARGET_NR_set_thread_area: #if defined(TARGET_MIPS) ((CPUMIPSState *) cpu_env)->tls_value = arg1; ret = 0; break; #elif defined(TARGET_CRIS) if (arg1 & 0xff) ret = -TARGET_EINVAL; else { ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; ret = 0; } break; #elif defined(TARGET_I386) && defined(TARGET_ABI32) ret = do_set_thread_area(cpu_env, arg1); break; #else goto unimplemented_nowarn; #endif #endif #ifdef TARGET_NR_get_thread_area case TARGET_NR_get_thread_area: #if defined(TARGET_I386) && defined(TARGET_ABI32) ret = do_get_thread_area(cpu_env, arg1); #else goto unimplemented_nowarn; #endif #endif #ifdef TARGET_NR_getdomainname case TARGET_NR_getdomainname: goto unimplemented_nowarn; #endif #ifdef TARGET_NR_clock_gettime case TARGET_NR_clock_gettime: { struct timespec ts; ret = get_errno(clock_gettime(arg1, &ts)); if (!is_error(ret)) { host_to_target_timespec(arg2, &ts); } break; } #endif #ifdef TARGET_NR_clock_getres case TARGET_NR_clock_getres: { struct timespec ts; ret = get_errno(clock_getres(arg1, &ts)); if (!is_error(ret)) { host_to_target_timespec(arg2, &ts); } break; } #endif #ifdef TARGET_NR_clock_nanosleep case TARGET_NR_clock_nanosleep: { struct timespec ts; target_to_host_timespec(&ts, arg3); ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? 
&ts : NULL)); if (arg4) host_to_target_timespec(arg4, &ts); break; } #endif #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) case TARGET_NR_set_tid_address: ret = get_errno(set_tid_address((int *)g2h(arg1))); break; #endif #if defined(TARGET_NR_tkill) && defined(__NR_tkill) case TARGET_NR_tkill: ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); break; #endif #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) case TARGET_NR_tgkill: ret = get_errno(sys_tgkill((int)arg1, (int)arg2, target_to_host_signal(arg3))); break; #endif #ifdef TARGET_NR_set_robust_list case TARGET_NR_set_robust_list: goto unimplemented_nowarn; #endif #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat) case TARGET_NR_utimensat: { struct timespec *tsp, ts[2]; if (!arg3) { tsp = NULL; } else { target_to_host_timespec(ts, arg3); target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); tsp = ts; } if (!arg2) ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); else { if (!(p = lock_user_string(arg2))) { ret = -TARGET_EFAULT; goto fail; } ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); unlock_user(p, arg2, 0); } } break; #endif #if defined(CONFIG_USE_NPTL) case TARGET_NR_futex: ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); break; #endif #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) case TARGET_NR_inotify_init: ret = get_errno(sys_inotify_init()); break; #endif #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) case TARGET_NR_inotify_add_watch: p = lock_user_string(arg2); ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); unlock_user(p, arg2, 0); break; #endif #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) case TARGET_NR_inotify_rm_watch: ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); break; #endif #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) case TARGET_NR_mq_open: { struct mq_attr posix_mq_attr; p = lock_user_string(arg1 - 1); if (arg4 != 0) copy_from_user_mq_attr (&posix_mq_attr, arg4); ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr)); unlock_user (p, arg1, 0); } break; case TARGET_NR_mq_unlink: p = lock_user_string(arg1 - 1); ret = get_errno(mq_unlink(p)); unlock_user (p, arg1, 0); break; case TARGET_NR_mq_timedsend: { struct timespec ts; p = lock_user (VERIFY_READ, arg2, arg3, 1); if (arg5 != 0) { target_to_host_timespec(&ts, arg5); ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts)); host_to_target_timespec(arg5, &ts); } else ret = get_errno(mq_send(arg1, p, arg3, arg4)); unlock_user (p, arg2, arg3); } break; case TARGET_NR_mq_timedreceive: { struct timespec ts; unsigned int prio; p = lock_user (VERIFY_READ, arg2, arg3, 1); if (arg5 != 0) { target_to_host_timespec(&ts, arg5); ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts)); host_to_target_timespec(arg5, &ts); } else ret = get_errno(mq_receive(arg1, p, arg3, &prio)); unlock_user (p, arg2, arg3); if (arg4 != 0) put_user_u32(prio, arg4); } break; /* Not implemented for now... 
*/ /* case TARGET_NR_mq_notify: */ /* break; */ case TARGET_NR_mq_getsetattr: { struct mq_attr posix_mq_attr_in, posix_mq_attr_out; ret = 0; if (arg3 != 0) { ret = mq_getattr(arg1, &posix_mq_attr_out); copy_to_user_mq_attr(arg3, &posix_mq_attr_out); } if (arg2 != 0) { copy_from_user_mq_attr(&posix_mq_attr_in, arg2); ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); } } break; #endif #ifdef CONFIG_SPLICE #ifdef TARGET_NR_tee case TARGET_NR_tee: { ret = get_errno(tee(arg1,arg2,arg3,arg4)); } break; #endif #ifdef TARGET_NR_splice case TARGET_NR_splice: { loff_t loff_in, loff_out; loff_t *ploff_in = NULL, *ploff_out = NULL; if(arg2) { get_user_u64(loff_in, arg2); ploff_in = &loff_in; } if(arg4) { get_user_u64(loff_out, arg2); ploff_out = &loff_out; } ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); } break; #endif #ifdef TARGET_NR_vmsplice case TARGET_NR_vmsplice: { int count = arg3; struct iovec *vec; vec = alloca(count * sizeof(struct iovec)); if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0) goto efault; ret = get_errno(vmsplice(arg1, vec, count, arg4)); unlock_iovec(vec, arg2, count, 0); } break; #endif #endif /* CONFIG_SPLICE */ #ifdef CONFIG_EVENTFD #if defined(TARGET_NR_eventfd) case TARGET_NR_eventfd: ret = get_errno(eventfd(arg1, 0)); break; #endif #if defined(TARGET_NR_eventfd2) case TARGET_NR_eventfd2: ret = get_errno(eventfd(arg1, arg2)); break; #endif #endif /* CONFIG_EVENTFD */ #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) case TARGET_NR_fallocate: ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); break; #endif default: unimplemented: gemu_log(\"qemu: Unsupported syscall: %d\\n\", num); #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) unimplemented_nowarn: #endif ret = -TARGET_ENOSYS; break; } fail: #ifdef DEBUG gemu_log(\" = \" TARGET_ABI_FMT_ld \"\\n\", ret); #endif if(do_strace) print_syscall_ret(num, ret); return ret; efault: ret = -TARGET_EFAULT; goto fail; }"} {"target": 0, "idx": 27058, "func": "cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf, const char *name, struct SegmentCache *sc) { #ifdef TARGET_X86_64 if (env->hflags & HF_CS64_MASK) { cpu_fprintf(f, \"%-3s=%04x %016\" PRIx64 \" %08x %08x\", name, sc->selector, sc->base, sc->limit, sc->flags); } else #endif { cpu_fprintf(f, \"%-3s=%04x %08x %08x %08x\", name, sc->selector, (uint32_t)sc->base, sc->limit, sc->flags); } if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK)) goto done; cpu_fprintf(f, \" DPL=%d \", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT); if (sc->flags & DESC_S_MASK) { if (sc->flags & DESC_CS_MASK) { cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? \"CS64\" : ((sc->flags & DESC_B_MASK) ? \"CS32\" : \"CS16\")); cpu_fprintf(f, \" [%c%c\", (sc->flags & DESC_C_MASK) ? 'C' : '-', (sc->flags & DESC_R_MASK) ? 'R' : '-'); } else { cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? \"DS \" : \"DS16\"); cpu_fprintf(f, \" [%c%c\", (sc->flags & DESC_E_MASK) ? 'E' : '-', (sc->flags & DESC_W_MASK) ? 'W' : '-'); } cpu_fprintf(f, \"%c]\", (sc->flags & DESC_A_MASK) ? 
'A' : '-'); } else { static const char *sys_type_name[2][16] = { { /* 32 bit mode */ \"Reserved\", \"TSS16-avl\", \"LDT\", \"TSS16-busy\", \"CallGate16\", \"TaskGate\", \"IntGate16\", \"TrapGate16\", \"Reserved\", \"TSS32-avl\", \"Reserved\", \"TSS32-busy\", \"CallGate32\", \"Reserved\", \"IntGate32\", \"TrapGate32\" }, { /* 64 bit mode */ \"\", \"Reserved\", \"LDT\", \"Reserved\", \"Reserved\", \"Reserved\", \"Reserved\", \"Reserved\", \"Reserved\", \"TSS64-avl\", \"Reserved\", \"TSS64-busy\", \"CallGate64\", \"Reserved\", \"IntGate64\", \"TrapGate64\" } }; cpu_fprintf(f, \"%s\", sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0] [(sc->flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT]); } done: cpu_fprintf(f, \"\\n\"); }"} {"target": 1, "idx": 27081, "func": "void FUNCC(ff_h264_idct_add)(uint8_t *_dst, DCTELEM *_block, int stride) { int i; INIT_CLIP pixel *dst = (pixel*)_dst; dctcoef *block = (dctcoef*)_block; stride /= sizeof(pixel); block[0] += 1 << 5; for(i=0; i<4; i++){ const int z0= block[i + 4*0] + block[i + 4*2]; const int z1= block[i + 4*0] - block[i + 4*2]; const int z2= (block[i + 4*1]>>1) - block[i + 4*3]; const int z3= block[i + 4*1] + (block[i + 4*3]>>1); block[i + 4*0]= z0 + z3; block[i + 4*1]= z1 + z2; block[i + 4*2]= z1 - z2; block[i + 4*3]= z0 - z3; } for(i=0; i<4; i++){ const int z0= block[0 + 4*i] + block[2 + 4*i]; const int z1= block[0 + 4*i] - block[2 + 4*i]; const int z2= (block[1 + 4*i]>>1) - block[3 + 4*i]; const int z3= block[1 + 4*i] + (block[3 + 4*i]>>1); dst[i + 0*stride]= CLIP(dst[i + 0*stride] + ((z0 + z3) >> 6)); dst[i + 1*stride]= CLIP(dst[i + 1*stride] + ((z1 + z2) >> 6)); dst[i + 2*stride]= CLIP(dst[i + 2*stride] + ((z1 - z2) >> 6)); dst[i + 3*stride]= CLIP(dst[i + 3*stride] + ((z0 - z3) >> 6)); } }"} {"target": 0, "idx": 27083, "func": "static void disas_fp_ccomp(DisasContext *s, uint32_t insn) { unsigned int mos, type, rm, cond, rn, op, nzcv; TCGv_i64 tcg_flags; int label_continue = -1; mos = extract32(insn, 29, 3); type = extract32(insn, 22, 2); /* 0 = single, 1 = double */ rm = extract32(insn, 16, 5); cond = extract32(insn, 12, 4); rn = extract32(insn, 5, 5); op = extract32(insn, 4, 1); nzcv = extract32(insn, 0, 4); if (mos || type > 1) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } if (cond < 0x0e) { /* not always */ int label_match = gen_new_label(); label_continue = gen_new_label(); arm_gen_test_cc(cond, label_match); /* nomatch: */ tcg_flags = tcg_const_i64(nzcv << 28); gen_set_nzcv(tcg_flags); tcg_temp_free_i64(tcg_flags); tcg_gen_br(label_continue); gen_set_label(label_match); } handle_fp_compare(s, type, rn, rm, false, op); if (cond < 0x0e) { gen_set_label(label_continue); } }"} {"target": 0, "idx": 27088, "func": "static inline int test_bit(unsigned int bit, const unsigned long *map) { return !!((map)[(bit) / BITS_PER_LONG] & (1UL << ((bit) % BITS_PER_LONG))); }"} {"target": 0, "idx": 27100, "func": "static bool try_poll_mode(AioContext *ctx, bool blocking) { if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) { /* See qemu_soonest_timeout() uint64_t hack */ int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx), (uint64_t)ctx->poll_ns); if (max_ns) { poll_set_started(ctx, true); if (run_poll_handlers(ctx, max_ns)) { return true; } } } poll_set_started(ctx, false); /* Even if we don't run busy polling, try polling once in case it can make * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2). 
*/ return run_poll_handlers_once(ctx); }"} {"target": 1, "idx": 27107, "func": "static int qmp_tmp105_get_temperature(const char *id) { QDict *response; int ret; response = qmp(\"{ 'execute': 'qom-get', 'arguments': { 'path': '%s', \" \"'property': 'temperature' } }\", id); g_assert(qdict_haskey(response, \"return\")); ret = qdict_get_int(response, \"return\"); QDECREF(response); return ret; }"} {"target": 1, "idx": 27129, "func": "static void decode_sys_interrupts(CPUTriCoreState *env, DisasContext *ctx) { uint32_t op2; uint32_t r1; TCGLabel *l1; TCGv tmp; op2 = MASK_OP_SYS_OP2(ctx->opcode); r1 = MASK_OP_SYS_S1D(ctx->opcode); switch (op2) { case OPC2_32_SYS_DEBUG: /* raise EXCP_DEBUG */ break; case OPC2_32_SYS_DISABLE: tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~MASK_ICR_IE); break; case OPC2_32_SYS_DSYNC: break; case OPC2_32_SYS_ENABLE: tcg_gen_ori_tl(cpu_ICR, cpu_ICR, MASK_ICR_IE); break; case OPC2_32_SYS_ISYNC: break; case OPC2_32_SYS_NOP: break; case OPC2_32_SYS_RET: gen_compute_branch(ctx, op2, 0, 0, 0, 0); break; case OPC2_32_SYS_FRET: gen_fret(ctx); break; case OPC2_32_SYS_RFE: gen_helper_rfe(cpu_env); tcg_gen_exit_tb(0); ctx->bstate = BS_BRANCH; break; case OPC2_32_SYS_RFM: if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) { tmp = tcg_temp_new(); l1 = gen_new_label(); tcg_gen_ld32u_tl(tmp, cpu_env, offsetof(CPUTriCoreState, DBGSR)); tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE); tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1); gen_helper_rfm(cpu_env); gen_set_label(l1); tcg_gen_exit_tb(0); ctx->bstate = BS_BRANCH; tcg_temp_free(tmp); } else { /* generate privilege trap */ } break; case OPC2_32_SYS_RSLCX: gen_helper_rslcx(cpu_env); break; case OPC2_32_SYS_SVLCX: gen_helper_svlcx(cpu_env); break; case OPC2_32_SYS_RESTORE: if (tricore_feature(env, TRICORE_FEATURE_16)) { if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM || (ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_UM1) { tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1], 8, 1); } /* else raise privilege trap */ } /* else raise illegal opcode trap */ break; case OPC2_32_SYS_TRAPSV: l1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_SV, 0, l1); generate_trap(ctx, TRAPC_ASSERT, TIN5_SOVF); gen_set_label(l1); break; case OPC2_32_SYS_TRAPV: l1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_V, 0, l1); generate_trap(ctx, TRAPC_ASSERT, TIN5_OVF); gen_set_label(l1); break; } }"} {"target": 1, "idx": 27148, "func": "static void pci_hotplug(void) { QVirtioPCIDevice *dev; QOSState *qs; const char *arch = qtest_get_arch(); qs = pci_test_start(); /* plug secondary disk */ qpci_plug_device_test(\"virtio-blk-pci\", \"drv1\", PCI_SLOT_HP, \"'drive': 'drive1'\"); dev = virtio_blk_pci_init(qs->pcibus, PCI_SLOT_HP); g_assert(dev); qvirtio_pci_device_disable(dev); g_free(dev); /* unplug secondary disk */ if (strcmp(arch, \"i386\") == 0 || strcmp(arch, \"x86_64\") == 0) { qpci_unplug_acpi_device_test(\"drv1\", PCI_SLOT_HP); } qtest_shutdown(qs); }"} {"target": 1, "idx": 27151, "func": "static unsigned long iv_decode_frame(Indeo3DecodeContext *s, unsigned char *buf, int buf_size) { unsigned int hdr_width, hdr_height, chroma_width, chroma_height; unsigned long fflags1, fflags2, fflags3, offs1, offs2, offs3, offs; unsigned char *hdr_pos, *buf_pos; buf_pos = buf; buf_pos += 18; fflags1 = le2me_16(*(uint16_t *)buf_pos); buf_pos += 2; fflags3 = le2me_32(*(uint32_t *)buf_pos); buf_pos += 4; fflags2 = *buf_pos++; buf_pos += 3; hdr_height = le2me_16(*(uint16_t *)buf_pos); buf_pos += 2; hdr_width = le2me_16(*(uint16_t *)buf_pos); 
buf_pos += 2; chroma_height = ((hdr_height >> 2) + 3) & 0x7ffc; chroma_width = ((hdr_width >> 2) + 3) & 0x7ffc; offs1 = le2me_32(*(uint32_t *)buf_pos); buf_pos += 4; offs2 = le2me_32(*(uint32_t *)buf_pos); buf_pos += 4; offs3 = le2me_32(*(uint32_t *)buf_pos); buf_pos += 8; hdr_pos = buf_pos; if(fflags3 == 0x80) return 4; if(fflags1 & 0x200) { s->cur_frame = s->iv_frame + 1; s->ref_frame = s->iv_frame; } else { s->cur_frame = s->iv_frame; s->ref_frame = s->iv_frame + 1; } buf_pos = buf + 16 + offs1; offs = le2me_32(*(uint32_t *)buf_pos); buf_pos += 4; iv_Decode_Chunk(s, s->cur_frame->Ybuf, s->ref_frame->Ybuf, hdr_width, hdr_height, buf_pos + offs * 2, fflags2, hdr_pos, buf_pos, min(hdr_width, 160)); if (!(s->avctx->flags & CODEC_FLAG_GRAY)) { buf_pos = buf + 16 + offs2; offs = le2me_32(*(uint32_t *)buf_pos); buf_pos += 4; iv_Decode_Chunk(s, s->cur_frame->Vbuf, s->ref_frame->Vbuf, chroma_width, chroma_height, buf_pos + offs * 2, fflags2, hdr_pos, buf_pos, min(chroma_width, 40)); buf_pos = buf + 16 + offs3; offs = le2me_32(*(uint32_t *)buf_pos); buf_pos += 4; iv_Decode_Chunk(s, s->cur_frame->Ubuf, s->ref_frame->Ubuf, chroma_width, chroma_height, buf_pos + offs * 2, fflags2, hdr_pos, buf_pos, min(chroma_width, 40)); } return 8; }"} {"target": 1, "idx": 27169, "func": "int gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb, int search_pc) { DisasContext ctx, *ctxp = &ctx; opc_handler_t **table, *handler; target_ulong pc_start; uint16_t *gen_opc_end; int j, lj = -1; pc_start = tb->pc; gen_opc_ptr = gen_opc_buf; gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; gen_opparam_ptr = gen_opparam_buf; nb_gen_labels = 0; ctx.nip = pc_start; ctx.tb = tb; ctx.exception = EXCP_NONE; ctx.spr_cb = env->spr_cb; #if defined(CONFIG_USER_ONLY) ctx.mem_idx = msr_le; #else ctx.supervisor = 1 - msr_pr; ctx.mem_idx = ((1 - msr_pr) << 1) | msr_le; #endif ctx.fpu_enabled = msr_fp; ctx.singlestep_enabled = env->singlestep_enabled; #if defined (DO_SINGLE_STEP) && 0 /* Single step trace mode */ msr_se = 1; #endif /* Set env in case of segfault during code fetch */ while (ctx.exception == EXCP_NONE && gen_opc_ptr < gen_opc_end) { if (unlikely(env->nb_breakpoints > 0)) { for (j = 0; j < env->nb_breakpoints; j++) { if (env->breakpoints[j] == ctx.nip) { gen_op_update_nip(ctx.nip); gen_op_debug(); break; } } } if (unlikely(search_pc)) { j = gen_opc_ptr - gen_opc_buf; if (lj < j) { lj++; while (lj < j) gen_opc_instr_start[lj++] = 0; gen_opc_pc[lj] = ctx.nip; gen_opc_instr_start[lj] = 1; } } #if defined PPC_DEBUG_DISAS if (loglevel & CPU_LOG_TB_IN_ASM) { fprintf(logfile, \"----------------\\n\"); fprintf(logfile, \"nip=%08x super=%d ir=%d\\n\", ctx.nip, 1 - msr_pr, msr_ir); } #endif ctx.opcode = ldl_code(ctx.nip); if (msr_le) { ctx.opcode = ((ctx.opcode & 0xFF000000) >> 24) | ((ctx.opcode & 0x00FF0000) >> 8) | ((ctx.opcode & 0x0000FF00) << 8) | ((ctx.opcode & 0x000000FF) << 24); } #if defined PPC_DEBUG_DISAS if (loglevel & CPU_LOG_TB_IN_ASM) { fprintf(logfile, \"translate opcode %08x (%02x %02x %02x) (%s)\\n\", ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode), opc3(ctx.opcode), msr_le ? \"little\" : \"big\"); } #endif ctx.nip += 4; table = env->opcodes; handler = table[opc1(ctx.opcode)]; if (is_indirect_opcode(handler)) { table = ind_table(handler); handler = table[opc2(ctx.opcode)]; if (is_indirect_opcode(handler)) { table = ind_table(handler); handler = table[opc3(ctx.opcode)]; } } /* Is opcode *REALLY* valid ? 
*/ if (unlikely(handler->handler == &gen_invalid)) { if (loglevel > 0) { fprintf(logfile, \"invalid/unsupported opcode: \" \"%02x - %02x - %02x (%08x) 0x%08x %d\\n\", opc1(ctx.opcode), opc2(ctx.opcode), opc3(ctx.opcode), ctx.opcode, ctx.nip - 4, msr_ir); } else { printf(\"invalid/unsupported opcode: \" \"%02x - %02x - %02x (%08x) 0x%08x %d\\n\", opc1(ctx.opcode), opc2(ctx.opcode), opc3(ctx.opcode), ctx.opcode, ctx.nip - 4, msr_ir); } } else { if (unlikely((ctx.opcode & handler->inval) != 0)) { if (loglevel > 0) { fprintf(logfile, \"invalid bits: %08x for opcode: \" \"%02x -%02x - %02x (0x%08x) (0x%08x)\\n\", ctx.opcode & handler->inval, opc1(ctx.opcode), opc2(ctx.opcode), opc3(ctx.opcode), ctx.opcode, ctx.nip - 4); } else { printf(\"invalid bits: %08x for opcode: \" \"%02x -%02x - %02x (0x%08x) (0x%08x)\\n\", ctx.opcode & handler->inval, opc1(ctx.opcode), opc2(ctx.opcode), opc3(ctx.opcode), ctx.opcode, ctx.nip - 4); } RET_INVAL(ctxp); break; } } (*(handler->handler))(&ctx); #if defined(DO_PPC_STATISTICS) handler->count++; #endif /* Check trace mode exceptions */ if (unlikely((msr_be && ctx.exception == EXCP_BRANCH) || /* Check in single step trace mode * we need to stop except if: * - rfi, trap or syscall * - first instruction of an exception handler */ (msr_se && (ctx.nip < 0x100 || ctx.nip > 0xF00 || (ctx.nip & 0xFC) != 0x04) && ctx.exception != EXCP_SYSCALL && ctx.exception != EXCP_SYSCALL_USER && ctx.exception != EXCP_TRAP))) { RET_EXCP(ctxp, EXCP_TRACE, 0); } /* if we reach a page boundary or are single stepping, stop * generation */ if (unlikely(((ctx.nip & (TARGET_PAGE_SIZE - 1)) == 0) || (env->singlestep_enabled))) { break; } #if defined (DO_SINGLE_STEP) break; #endif } if (ctx.exception == EXCP_NONE) { gen_goto_tb(&ctx, 0, ctx.nip); } else if (ctx.exception != EXCP_BRANCH) { gen_op_reset_T0(); /* Generate the return instruction */ gen_op_exit_tb(); } *gen_opc_ptr = INDEX_op_end; if (unlikely(search_pc)) { j = gen_opc_ptr - gen_opc_buf; lj++; while (lj <= j) gen_opc_instr_start[lj++] = 0; tb->size = 0; } else { tb->size = ctx.nip - pc_start; } #ifdef DEBUG_DISAS if (loglevel & CPU_LOG_TB_CPU) { fprintf(logfile, \"---------------- excp: %04x\\n\", ctx.exception); cpu_dump_state(env, logfile, fprintf, 0); } if (loglevel & CPU_LOG_TB_IN_ASM) { int flags; flags = msr_le; fprintf(logfile, \"IN: %s\\n\", lookup_symbol(pc_start)); target_disas(logfile, pc_start, ctx.nip - pc_start, flags); fprintf(logfile, \"\\n\"); } if (loglevel & CPU_LOG_TB_OP) { fprintf(logfile, \"OP:\\n\"); dump_ops(gen_opc_buf, gen_opparam_buf); fprintf(logfile, \"\\n\"); } #endif return 0; }"} {"target": 1, "idx": 27171, "func": "static void test_ide_drive_cd_0(void) { char *argv[256]; int argc, ide_idx; Backend i; argc = setup_common(argv, ARRAY_SIZE(argv)); for (i = 0; i <= backend_empty; i++) { ide_idx = backend_empty - i; cur_ide[ide_idx] = &hd_chst[i][mbr_blank]; argc = setup_ide(argc, argv, ARRAY_SIZE(argv), ide_idx, NULL, i, mbr_blank, \"\"); } qtest_start(g_strjoinv(\" \", argv)); test_cmos(); qtest_end(); }"} {"target": 0, "idx": 27201, "func": "static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){ int width, height, vo_ver_id; /* vol header */ skip_bits(gb, 1); /* random access */ s->vo_type= get_bits(gb, 8); if (get_bits1(gb) != 0) { /* is_ol_id */ vo_ver_id = get_bits(gb, 4); /* vo_ver_id */ skip_bits(gb, 3); /* vo_priority */ } else { vo_ver_id = 1; } //printf(\"vo type:%d\\n\",s->vo_type); s->aspect_ratio_info= get_bits(gb, 4); if(s->aspect_ratio_info == FF_ASPECT_EXTENDED){ 
s->aspected_width = get_bits(gb, 8); // par_width s->aspected_height = get_bits(gb, 8); // par_height }else{ s->aspected_width = pixel_aspect[s->aspect_ratio_info][0]; s->aspected_height= pixel_aspect[s->aspect_ratio_info][1]; } if ((s->vol_control_parameters=get_bits1(gb))) { /* vol control parameter */ int chroma_format= get_bits(gb, 2); if(chroma_format!=1){ printf(\"illegal chroma format\\n\"); } s->low_delay= get_bits1(gb); if(get_bits1(gb)){ /* vbv parameters */ get_bits(gb, 15); /* first_half_bitrate */ skip_bits1(gb); /* marker */ get_bits(gb, 15); /* latter_half_bitrate */ skip_bits1(gb); /* marker */ get_bits(gb, 15); /* first_half_vbv_buffer_size */ skip_bits1(gb); /* marker */ get_bits(gb, 3); /* latter_half_vbv_buffer_size */ get_bits(gb, 11); /* first_half_vbv_occupancy */ skip_bits1(gb); /* marker */ get_bits(gb, 15); /* latter_half_vbv_occupancy */ skip_bits1(gb); /* marker */ } }else{ // set low delay flag only once so the smart? low delay detection wont be overriden if(s->picture_number==0) s->low_delay=0; } s->shape = get_bits(gb, 2); /* vol shape */ if(s->shape != RECT_SHAPE) printf(\"only rectangular vol supported\\n\"); if(s->shape == GRAY_SHAPE && vo_ver_id != 1){ printf(\"Gray shape not supported\\n\"); skip_bits(gb, 4); //video_object_layer_shape_extension } skip_bits1(gb); /* marker */ s->time_increment_resolution = get_bits(gb, 16); s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1; if (s->time_increment_bits < 1) s->time_increment_bits = 1; skip_bits1(gb); /* marker */ if (get_bits1(gb) != 0) { /* fixed_vop_rate */ skip_bits(gb, s->time_increment_bits); } if (s->shape != BIN_ONLY_SHAPE) { if (s->shape == RECT_SHAPE) { skip_bits1(gb); /* marker */ width = get_bits(gb, 13); skip_bits1(gb); /* marker */ height = get_bits(gb, 13); skip_bits1(gb); /* marker */ if(width && height){ /* they should be non zero but who knows ... 
*/ s->width = width; s->height = height; // printf(\"width/height: %d %d\\n\", width, height); } } s->progressive_sequence= get_bits1(gb)^1; if(!get_bits1(gb)) printf(\"OBMC not supported (very likely buggy encoder)\\n\"); /* OBMC Disable */ if (vo_ver_id == 1) { s->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */ } else { s->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */ } if(s->vol_sprite_usage==STATIC_SPRITE) printf(\"Static Sprites not supported\\n\"); if(s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE){ if(s->vol_sprite_usage==STATIC_SPRITE){ s->sprite_width = get_bits(gb, 13); skip_bits1(gb); /* marker */ s->sprite_height= get_bits(gb, 13); skip_bits1(gb); /* marker */ s->sprite_left = get_bits(gb, 13); skip_bits1(gb); /* marker */ s->sprite_top = get_bits(gb, 13); skip_bits1(gb); /* marker */ } s->num_sprite_warping_points= get_bits(gb, 6); s->sprite_warping_accuracy = get_bits(gb, 2); s->sprite_brightness_change= get_bits1(gb); if(s->vol_sprite_usage==STATIC_SPRITE) s->low_latency_sprite= get_bits1(gb); } // FIXME sadct disable bit if verid!=1 && shape not rect if (get_bits1(gb) == 1) { /* not_8_bit */ s->quant_precision = get_bits(gb, 4); /* quant_precision */ if(get_bits(gb, 4)!=8) printf(\"N-bit not supported\\n\"); /* bits_per_pixel */ if(s->quant_precision!=5) printf(\"quant precission %d\\n\", s->quant_precision); } else { s->quant_precision = 5; } // FIXME a bunch of grayscale shape things if((s->mpeg_quant=get_bits1(gb))){ /* vol_quant_type */ int i, v; /* load default matrixes */ for(i=0; i<64; i++){ int j= s->dsp.idct_permutation[i]; v= ff_mpeg4_default_intra_matrix[i]; s->intra_matrix[j]= v; s->chroma_intra_matrix[j]= v; v= ff_mpeg4_default_non_intra_matrix[i]; s->inter_matrix[j]= v; s->chroma_inter_matrix[j]= v; } /* load custom intra matrix */ if(get_bits1(gb)){ int last=0; for(i=0; i<64; i++){ int j; v= get_bits(gb, 8); if(v==0) break; last= v; j= s->dsp.idct_permutation[ ff_zigzag_direct[i] ]; s->intra_matrix[j]= v; s->chroma_intra_matrix[j]= v; } /* replicate last value */ for(; i<64; i++){ int j= s->dsp.idct_permutation[ ff_zigzag_direct[i] ]; s->intra_matrix[j]= v; s->chroma_intra_matrix[j]= v; } } /* load custom non intra matrix */ if(get_bits1(gb)){ int last=0; for(i=0; i<64; i++){ int j; v= get_bits(gb, 8); if(v==0) break; last= v; j= s->dsp.idct_permutation[ ff_zigzag_direct[i] ]; s->inter_matrix[j]= v; s->chroma_inter_matrix[j]= v; } /* replicate last value */ for(; i<64; i++){ int j= s->dsp.idct_permutation[ ff_zigzag_direct[i] ]; s->inter_matrix[j]= last; s->chroma_inter_matrix[j]= last; } } // FIXME a bunch of grayscale shape things } if(vo_ver_id != 1) s->quarter_sample= get_bits1(gb); else s->quarter_sample=0; if(!get_bits1(gb)) printf(\"Complexity estimation not supported\\n\"); s->resync_marker= !get_bits1(gb); /* resync_marker_disabled */ s->data_partitioning= get_bits1(gb); if(s->data_partitioning){ s->rvlc= get_bits1(gb); if(s->rvlc){ printf(\"reversible vlc not supported\\n\"); } } if(vo_ver_id != 1) { s->new_pred= get_bits1(gb); if(s->new_pred){ printf(\"new pred not supported\\n\"); skip_bits(gb, 2); /* requested upstream message type */ skip_bits1(gb); /* newpred segment type */ } s->reduced_res_vop= get_bits1(gb); if(s->reduced_res_vop) printf(\"reduced resolution VOP not supported\\n\"); } else{ s->new_pred=0; s->reduced_res_vop= 0; } s->scalability= get_bits1(gb); if (s->scalability) { GetBitContext bak= *gb; int ref_layer_id; int ref_layer_sampling_dir; int h_sampling_factor_n; int h_sampling_factor_m; int v_sampling_factor_n; int v_sampling_factor_m; s->hierachy_type= get_bits1(gb); ref_layer_id= get_bits(gb, 4); ref_layer_sampling_dir= get_bits1(gb); h_sampling_factor_n= get_bits(gb, 5); h_sampling_factor_m= get_bits(gb, 5); v_sampling_factor_n= get_bits(gb, 5); v_sampling_factor_m= get_bits(gb, 5); s->enhancement_type= get_bits1(gb); if( h_sampling_factor_n==0 || h_sampling_factor_m==0 || v_sampling_factor_n==0 || v_sampling_factor_m==0){ // fprintf(stderr, \"illegal scalability header (VERY broken encoder), trying to workaround\\n\"); s->scalability=0; *gb= bak; }else printf(\"scalability not supported\\n\"); // bin shape stuff FIXME } } return 0; }"}
{"target": 0, "idx": 27203, "func": "void ff_init_me(MpegEncContext *s){ MotionEstContext * const c= &s->me; c->avctx= s->avctx; ff_set_cmp(&s->dsp, s->dsp.me_pre_cmp, c->avctx->me_pre_cmp); ff_set_cmp(&s->dsp, s->dsp.me_cmp, c->avctx->me_cmp); ff_set_cmp(&s->dsp, s->dsp.me_sub_cmp, c->avctx->me_sub_cmp); ff_set_cmp(&s->dsp, s->dsp.mb_cmp, c->avctx->mb_cmp); c->flags = get_flags(c, 0, c->avctx->me_cmp &FF_CMP_CHROMA); c->sub_flags= get_flags(c, 0, c->avctx->me_sub_cmp&FF_CMP_CHROMA); c->mb_flags = get_flags(c, 0, c->avctx->mb_cmp &FF_CMP_CHROMA); /*FIXME s->no_rounding b_type*/ if(s->flags&CODEC_FLAG_QPEL){ c->sub_motion_search= qpel_motion_search; c->qpel_avg= s->dsp.avg_qpel_pixels_tab; if(s->no_rounding) c->qpel_put= s->dsp.put_no_rnd_qpel_pixels_tab; else c->qpel_put= s->dsp.put_qpel_pixels_tab; }else{ if(c->avctx->me_sub_cmp&FF_CMP_CHROMA) c->sub_motion_search= hpel_motion_search; else if( c->avctx->me_sub_cmp == FF_CMP_SAD && c->avctx-> me_cmp == FF_CMP_SAD && c->avctx-> mb_cmp == FF_CMP_SAD) c->sub_motion_search= sad_hpel_motion_search; // 2050 vs. 2450 cycles else c->sub_motion_search= hpel_motion_search; } c->hpel_avg= s->dsp.avg_pixels_tab; if(s->no_rounding) c->hpel_put= s->dsp.put_no_rnd_pixels_tab; else c->hpel_put= s->dsp.put_pixels_tab; if(s->linesize){ c->stride = s->linesize; c->uvstride= s->uvlinesize; }else{ c->stride = 16*s->mb_width + 32; c->uvstride= 8*s->mb_width + 16; } // 8x8 fullpel search would need a 4x4 chroma compare, which we dont have yet, and even if we had the motion estimation code doesnt expect it if((c->avctx->me_cmp&FF_CMP_CHROMA) && !s->dsp.me_cmp[2]){ s->dsp.me_cmp[2]= zero_cmp; } if((c->avctx->me_sub_cmp&FF_CMP_CHROMA) && !s->dsp.me_sub_cmp[2]){ s->dsp.me_sub_cmp[2]= zero_cmp; } c->hpel_put[2][0]= c->hpel_put[2][1]= c->hpel_put[2][2]= c->hpel_put[2][3]= zero_hpel; c->temp= c->scratchpad; }"}
{"target": 1, "idx": 27237, "func": "static void filter(MpegAudioContext *s, int ch, short *samples, int incr) { short *p, *q; int sum, offset, i, j, norm, n; short tmp[64]; int tmp1[32]; int *out; // print_pow1(samples, 1152); offset = s->samples_offset[ch]; out = &s->sb_samples[ch][0][0][0]; for(j=0;j<36;j++) { /* 32 samples at once */ for(i=0;i<32;i++) { s->samples_buf[ch][offset + (31 - i)] = samples[0]; samples += incr; } /* filter */ p = s->samples_buf[ch] + offset; q = filter_bank; /* maxsum = 23169 */ for(i=0;i<64;i++) { sum = p[0*64] * q[0*64]; sum += p[1*64] * q[1*64]; sum += p[2*64] * q[2*64]; sum += p[3*64] * q[3*64]; sum += p[4*64] * q[4*64]; sum += p[5*64] * q[5*64]; sum += p[6*64] * q[6*64]; sum += p[7*64] * q[7*64]; tmp[i] = sum >> 14; p++; q++; } tmp1[0] = tmp[16]; for( i=1; i<=16; i++ ) tmp1[i] = tmp[i+16]+tmp[16-i]; for( i=17; i<=31; i++ ) tmp1[i] = tmp[i+16]-tmp[80-i]; /* integer IDCT 32 with normalization. XXX: There may be some overflow left */ norm = 0; for(i=0;i<32;i++) { norm |= abs(tmp1[i]); } n = av_log2(norm) - 12; if (n > 0) { for(i=0;i<32;i++) tmp1[i] >>= n; } else { n = 0; } idct32(out, tmp1, s->sblimit, n); /* advance of 32 samples */ offset -= 32; out += 32; /* handle the wrap around */ if (offset < 0) { memmove(s->samples_buf[ch] + SAMPLES_BUF_SIZE - (512 - 32), s->samples_buf[ch], (512 - 32) * 2); offset = SAMPLES_BUF_SIZE - 512; } } s->samples_offset[ch] = offset; // print_pow(s->sb_samples, 1152); }"}
{"target": 0, "idx": 27254, "func": "int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) { S390CPU *cpu = S390_CPU(cs); int ret = 0; switch (run->exit_reason) { case KVM_EXIT_S390_SIEIC: ret = handle_intercept(cpu); break; case KVM_EXIT_S390_RESET: qemu_system_reset_request(); break; case KVM_EXIT_S390_TSCH: ret = handle_tsch(cpu); break; case KVM_EXIT_DEBUG: ret = kvm_arch_handle_debug_exit(cpu); break; default: fprintf(stderr, \"Unknown KVM exit: %d\\n\", run->exit_reason); break; } if (ret == 0) { ret = EXCP_INTERRUPT; } return ret; }"}
{"target": 0, "idx": 27256, "func": "AddressSpace *pci_device_iommu_address_space(PCIDevice *dev) { PCIBus *bus = PCI_BUS(dev->bus); PCIBus *iommu_bus = bus; while(iommu_bus && !iommu_bus->iommu_fn && iommu_bus->parent_dev) { iommu_bus = PCI_BUS(iommu_bus->parent_dev->bus); } if (iommu_bus && iommu_bus->iommu_fn) { return iommu_bus->iommu_fn(bus, iommu_bus->iommu_opaque, dev->devfn); } return &address_space_memory; }"}
{"target": 0, "idx": 27257, "func": "static inline void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset) { gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset); tcg_gen_and_tl(dst, dst, cpu_tmp0); tcg_gen_xori_tl(dst, dst, 0x1); }"}
{"target": 0, "idx": 27261, "func": "static int dvbsub_decode(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; DVBSubContext *ctx = avctx->priv_data; AVSubtitle *sub = data; const uint8_t *p, *p_end; int segment_type; int page_id; int segment_length; int i; av_dlog(avctx, \"DVB sub packet:\\n\"); for (i=0; i < buf_size; i++) { av_dlog(avctx, \"%02x \", buf[i]); if (i % 16 == 15) av_dlog(avctx, \"\\n\"); } if (i % 16) av_dlog(avctx, \"\\n\"); if (buf_size <= 6 || *buf != 0x0f) { av_dlog(avctx, \"incomplete or broken packet\"); return -1; } p = buf; p_end = buf + buf_size; while (p_end - p >= 6 && *p == 0x0f) { p += 1; segment_type = *p++; page_id = AV_RB16(p); p += 2; segment_length = AV_RB16(p); p += 2; if (p_end - p < segment_length) { av_dlog(avctx, \"incomplete or broken packet\"); return -1; } if (page_id == ctx->composition_id || page_id == ctx->ancillary_id || ctx->composition_id == -1 || ctx->ancillary_id == -1) { switch (segment_type) { case DVBSUB_PAGE_SEGMENT: dvbsub_parse_page_segment(avctx, p, segment_length); break; case DVBSUB_REGION_SEGMENT: dvbsub_parse_region_segment(avctx, p, segment_length); break; case DVBSUB_CLUT_SEGMENT: dvbsub_parse_clut_segment(avctx, p, segment_length); break; case DVBSUB_OBJECT_SEGMENT: dvbsub_parse_object_segment(avctx, p, segment_length); break; case DVBSUB_DISPLAYDEFINITION_SEGMENT: dvbsub_parse_display_definition_segment(avctx, p, segment_length); break; case DVBSUB_DISPLAY_SEGMENT: *data_size = dvbsub_display_end_segment(avctx, p, segment_length, sub); break; default: av_dlog(avctx, \"Subtitling segment type 0x%x, page id %d, length %d\\n\", segment_type, page_id, segment_length); break; } } p += segment_length; } return p - buf; }"}
{"target": 0, "idx": 27266, "func": "static void *mpc8544_load_device_tree(target_phys_addr_t addr, uint32_t ramsize, target_phys_addr_t initrd_base, target_phys_addr_t initrd_size, const char *kernel_cmdline) { void *fdt = NULL; #ifdef CONFIG_FDT uint32_t mem_reg_property[] = {0, ramsize}; char *filename; int fdt_size; int ret; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE); if (!filename) { goto out; } fdt = load_device_tree(filename, &fdt_size); qemu_free(filename); if (fdt == NULL) { goto out; } /* Manipulate device tree in memory. */ ret = qemu_devtree_setprop(fdt, \"/memory\", \"reg\", mem_reg_property, sizeof(mem_reg_property)); if (ret < 0) fprintf(stderr, \"couldn't set /memory/reg\\n\"); ret = qemu_devtree_setprop_cell(fdt, \"/chosen\", \"linux,initrd-start\", initrd_base); if (ret < 0) fprintf(stderr, \"couldn't set /chosen/linux,initrd-start\\n\"); ret = qemu_devtree_setprop_cell(fdt, \"/chosen\", \"linux,initrd-end\", (initrd_base + initrd_size)); if (ret < 0) fprintf(stderr, \"couldn't set /chosen/linux,initrd-end\\n\"); ret = qemu_devtree_setprop_string(fdt, \"/chosen\", \"bootargs\", kernel_cmdline); if (ret < 0) fprintf(stderr, \"couldn't set /chosen/bootargs\\n\"); if (kvm_enabled()) { struct dirent *dirp; DIR *dp; char buf[128]; if ((dp = opendir(\"/proc/device-tree/cpus/\")) == NULL) { printf(\"Can't open directory /proc/device-tree/cpus/\\n\"); goto out; } buf[0] = '\\0'; while ((dirp = readdir(dp)) != NULL) { if (strncmp(dirp->d_name, \"PowerPC\", 7) == 0) { snprintf(buf, 128, \"/cpus/%s\", dirp->d_name); break; } } closedir(dp); if (buf[0] == '\\0') { printf(\"Unknow host!\\n\"); goto out; } mpc8544_copy_soc_cell(fdt, buf, \"clock-frequency\"); mpc8544_copy_soc_cell(fdt, buf, \"timebase-frequency\"); } cpu_physical_memory_write (addr, (void *)fdt, fdt_size); out: #endif return fdt; }"}
{"target": 0, "idx": 27272, "func": "static int mov_read_trak(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom) { AVStream *st; MOVStreamContext *sc; int ret; st = av_new_stream(c->fc, c->fc->nb_streams); if (!st) return AVERROR(ENOMEM); sc = av_mallocz(sizeof(MOVStreamContext)); if (!sc) return AVERROR(ENOMEM); st->priv_data = sc; st->codec->codec_type = CODEC_TYPE_DATA; st->start_time = 0; /* XXX: check */ if ((ret = mov_read_default(c, pb, atom)) < 0) return ret; /* sanity checks */ if(sc->chunk_count && (!sc->stts_count || !sc->sample_to_chunk_sz || (!sc->sample_size && !sc->sample_count))){ av_log(c->fc, AV_LOG_ERROR, \"stream %d, missing mandatory atoms, broken header\\n\", st->index); sc->sample_count = 0; //ignore track return 0; } if(!sc->time_rate) sc->time_rate=1; if(!sc->time_scale) sc->time_scale= c->time_scale; av_set_pts_info(st, 64, sc->time_rate, sc->time_scale); if (st->codec->codec_type == CODEC_TYPE_AUDIO && !st->codec->frame_size && sc->stts_count == 1) st->codec->frame_size = av_rescale(sc->time_rate, st->codec->sample_rate, sc->time_scale); if(st->duration != AV_NOPTS_VALUE){ assert(st->duration % sc->time_rate == 0); st->duration /= sc->time_rate; } sc->ffindex = st->index; mov_build_index(c, st); if (sc->dref_id-1 < sc->drefs_count && sc->drefs[sc->dref_id-1].path) { if (url_fopen(&sc->pb, sc->drefs[sc->dref_id-1].path, URL_RDONLY) < 0) av_log(c->fc, AV_LOG_ERROR, \"stream %d, error opening file %s: %s\\n\", st->index, sc->drefs[sc->dref_id-1].path, strerror(errno)); } else sc->pb = c->fc->pb; switch (st->codec->codec_id) { #ifdef CONFIG_H261_DECODER case CODEC_ID_H261: #endif #ifdef CONFIG_H263_DECODER case CODEC_ID_H263: #endif #ifdef CONFIG_MPEG4_DECODER case CODEC_ID_MPEG4: #endif st->codec->width= 0; /* let decoder init width/height */ st->codec->height= 0; break; #ifdef CONFIG_VORBIS_DECODER case CODEC_ID_VORBIS: #endif st->codec->sample_rate= 0; /* let decoder init parameters properly */ break; } /* Do not need those anymore. */ av_freep(&sc->chunk_offsets); av_freep(&sc->sample_to_chunk); av_freep(&sc->sample_sizes); av_freep(&sc->keyframes); av_freep(&sc->stts_data); return 0; }"}
{"target": 0, "idx": 27296, "func": "static av_cold int libschroedinger_encode_init(AVCodecContext *avctx) { SchroEncoderParams *p_schro_params = avctx->priv_data; SchroVideoFormatEnum preset; /* Initialize the libraries that libschroedinger depends on. */ schro_init(); /* Create an encoder object. */ p_schro_params->encoder = schro_encoder_new(); if (!p_schro_params->encoder) { av_log(avctx, AV_LOG_ERROR, \"Unrecoverable Error: schro_encoder_new failed. \"); return -1; } /* Initialize the format. */ preset = ff_get_schro_video_format_preset(avctx); p_schro_params->format = schro_encoder_get_video_format(p_schro_params->encoder); schro_video_format_set_std_video_format(p_schro_params->format, preset); p_schro_params->format->width = avctx->width; p_schro_params->format->height = avctx->height; if (set_chroma_format(avctx) == -1) return -1; if (avctx->color_primaries == AVCOL_PRI_BT709) { p_schro_params->format->colour_primaries = SCHRO_COLOUR_PRIMARY_HDTV; } else if (avctx->color_primaries == AVCOL_PRI_BT470BG) { p_schro_params->format->colour_primaries = SCHRO_COLOUR_PRIMARY_SDTV_625; } else if (avctx->color_primaries == AVCOL_PRI_SMPTE170M) { p_schro_params->format->colour_primaries = SCHRO_COLOUR_PRIMARY_SDTV_525; } if (avctx->colorspace == AVCOL_SPC_BT709) { p_schro_params->format->colour_matrix = SCHRO_COLOUR_MATRIX_HDTV; } else if (avctx->colorspace == AVCOL_SPC_BT470BG) { p_schro_params->format->colour_matrix = SCHRO_COLOUR_MATRIX_SDTV; } if (avctx->color_trc == AVCOL_TRC_BT709) { p_schro_params->format->transfer_function = SCHRO_TRANSFER_CHAR_TV_GAMMA; } if (ff_get_schro_frame_format(p_schro_params->format->chroma_format, &p_schro_params->frame_format) == -1) { av_log(avctx, AV_LOG_ERROR, \"This codec currently supports only planar YUV 4:2:0, 4:2:2\" \" and 4:4:4 formats.\\n\"); return -1; } p_schro_params->format->frame_rate_numerator = avctx->time_base.den; p_schro_params->format->frame_rate_denominator = avctx->time_base.num; p_schro_params->frame_size = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height); avctx->coded_frame = av_frame_alloc(); if (!avctx->coded_frame) return AVERROR(ENOMEM); if (!avctx->gop_size) { schro_encoder_setting_set_double(p_schro_params->encoder, \"gop_structure\", SCHRO_ENCODER_GOP_INTRA_ONLY); if (avctx->coder_type == FF_CODER_TYPE_VLC) schro_encoder_setting_set_double(p_schro_params->encoder, \"enable_noarith\", 1); } else { schro_encoder_setting_set_double(p_schro_params->encoder, \"au_distance\", avctx->gop_size); avctx->has_b_frames = 1; p_schro_params->dts = -1; } /* FIXME - Need to handle SCHRO_ENCODER_RATE_CONTROL_LOW_DELAY. */ if (avctx->flags & CODEC_FLAG_QSCALE) { if (!avctx->global_quality) { /* lossless coding */ schro_encoder_setting_set_double(p_schro_params->encoder, \"rate_control\", SCHRO_ENCODER_RATE_CONTROL_LOSSLESS); } else { int quality; schro_encoder_setting_set_double(p_schro_params->encoder, \"rate_control\", SCHRO_ENCODER_RATE_CONTROL_CONSTANT_QUALITY); quality = avctx->global_quality / FF_QP2LAMBDA; if (quality > 10) quality = 10; schro_encoder_setting_set_double(p_schro_params->encoder, \"quality\", quality); } } else { schro_encoder_setting_set_double(p_schro_params->encoder, \"rate_control\", SCHRO_ENCODER_RATE_CONTROL_CONSTANT_BITRATE); schro_encoder_setting_set_double(p_schro_params->encoder, \"bitrate\", avctx->bit_rate); } if (avctx->flags & CODEC_FLAG_INTERLACED_ME) /* All material can be coded as interlaced or progressive irrespective of the type of source material. */ schro_encoder_setting_set_double(p_schro_params->encoder, \"interlaced_coding\", 1); schro_encoder_setting_set_double(p_schro_params->encoder, \"open_gop\", !(avctx->flags & CODEC_FLAG_CLOSED_GOP)); /* FIXME: Signal range hardcoded to 8-bit data until both libschroedinger * and libdirac support other bit-depth data. */ schro_video_format_set_std_signal_range(p_schro_params->format, SCHRO_SIGNAL_RANGE_8BIT_VIDEO); /* Set the encoder format. */ schro_encoder_set_video_format(p_schro_params->encoder, p_schro_params->format); /* Set the debug level. */ schro_debug_set_level(avctx->debug); schro_encoder_start(p_schro_params->encoder); /* Initialize the encoded frame queue. */ ff_schro_queue_init(&p_schro_params->enc_frame_queue); return 0; }"}
{"target": 0, "idx": 27311, "func": "static struct omap_mcbsp_s *omap_mcbsp_init(MemoryRegion *system_memory, target_phys_addr_t base, qemu_irq txirq, qemu_irq rxirq, qemu_irq *dma, omap_clk clk) { struct omap_mcbsp_s *s = (struct omap_mcbsp_s *) g_malloc0(sizeof(struct omap_mcbsp_s)); s->txirq = txirq; s->rxirq = rxirq; s->txdrq = dma[0]; s->rxdrq = dma[1]; s->sink_timer = qemu_new_timer_ns(vm_clock, omap_mcbsp_sink_tick, s); s->source_timer = qemu_new_timer_ns(vm_clock, omap_mcbsp_source_tick, s); omap_mcbsp_reset(s); memory_region_init_io(&s->iomem, &omap_mcbsp_ops, s, \"omap-mcbsp\", 0x800); memory_region_add_subregion(system_memory, base, &s->iomem); return s; }"}
{"target": 0, "idx": 27317, "func": "static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no) { MpegEncContext *s = &r->s; int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride; int A[2] = {0}, B[2], C[2]; int i, j; int mx, my; int avail_index = avail_indexes[subblock_no]; int c_off = part_sizes_w[block_type]; mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride; if(subblock_no == 3) c_off = -1; if(r->avail_cache[avail_index - 1]){ A[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-1][0]; A[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-1][1]; } if(r->avail_cache[avail_index - 4]){ B[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride][0]; B[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride][1]; }else{ B[0] = A[0]; B[1] = A[1]; } if(!r->avail_cache[avail_index - 4 + c_off]){ if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1] || r->rv30)){ C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride-1][0]; C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride-1][1]; }else{ C[0] = A[0]; C[1] = A[1]; } }else{ C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride+c_off][0]; C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride+c_off][1]; } mx = mid_pred(A[0], B[0], C[0]); my = mid_pred(A[1], B[1], C[1]); mx += r->dmv[dmv_no][0]; my += r->dmv[dmv_no][1]; for(j = 0; j < part_sizes_h[block_type]; j++){ for(i = 0; i < part_sizes_w[block_type]; i++){ s->current_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx; s->current_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][1] = my; } } }"}