Columns: target (int64, values 0-1), idx (int64, range 0-27.3k), func (string, 23-97k characters). Each row below gives the label and index, followed by the function source.
target: 1, idx: 242
static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { int n, i, ch, st, pkt_size, ret; const int16_t *samples; int16_t **samples_p; uint8_t *dst; ADPCMEncodeContext *c = avctx->priv_data; uint8_t *buf; samples = (const int16_t *)frame->data[0]; samples_p = (int16_t **)frame->extended_data; st = avctx->channels == 2; if (avctx->codec_id == AV_CODEC_ID_ADPCM_SWF) pkt_size = (2 + avctx->channels * (22 + 4 * (frame->nb_samples - 1)) + 7) / 8; else pkt_size = avctx->block_align; if ((ret = ff_alloc_packet(avpkt, pkt_size))) { av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n"); return ret; } dst = avpkt->data; switch(avctx->codec->id) { case AV_CODEC_ID_ADPCM_IMA_WAV: { int blocks, j; blocks = (frame->nb_samples - 1) / 8; for (ch = 0; ch < avctx->channels; ch++) { ADPCMChannelStatus *status = &c->status[ch]; status->prev_sample = samples_p[ch][0]; /* status->step_index = 0; XXX: not sure how to init the state machine */ bytestream_put_le16(&dst, status->prev_sample); *dst++ = status->step_index; *dst++ = 0; /* unknown */ } /* stereo: 4 bytes (8 samples) for left, 4 bytes for right */ if (avctx->trellis > 0) { FF_ALLOC_OR_GOTO(avctx, buf, avctx->channels * blocks * 8, error); for (ch = 0; ch < avctx->channels; ch++) { adpcm_compress_trellis(avctx, &samples_p[ch][1], buf + ch * blocks * 8, &c->status[ch], blocks * 8, 1); } for (i = 0; i < blocks; i++) { for (ch = 0; ch < avctx->channels; ch++) { uint8_t *buf1 = buf + ch * blocks * 8 + i * 8; for (j = 0; j < 8; j += 2) *dst++ = buf1[j] | (buf1[j + 1] << 4); } } av_free(buf); } else { for (i = 0; i < blocks; i++) { for (ch = 0; ch < avctx->channels; ch++) { ADPCMChannelStatus *status = &c->status[ch]; const int16_t *smp = &samples_p[ch][1 + i * 8]; for (j = 0; j < 8; j += 2) { uint8_t v = adpcm_ima_compress_sample(status, smp[j ]); v |= adpcm_ima_compress_sample(status, smp[j + 1]) << 4; *dst++ = v; } } } } break; } case AV_CODEC_ID_ADPCM_IMA_QT: { PutBitContext pb; init_put_bits(&pb, dst, pkt_size * 8); for (ch = 0; ch < avctx->channels; ch++) { ADPCMChannelStatus *status = &c->status[ch]; put_bits(&pb, 9, (status->prev_sample & 0xFFFF) >> 7); put_bits(&pb, 7, status->step_index); if (avctx->trellis > 0) { uint8_t buf[64]; adpcm_compress_trellis(avctx, &samples_p[ch][1], buf, status, 64, 1); for (i = 0; i < 64; i++) put_bits(&pb, 4, buf[i ^ 1]); } else { for (i = 0; i < 64; i += 2) { int t1, t2; t1 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i ]); t2 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i + 1]); put_bits(&pb, 4, t2); put_bits(&pb, 4, t1); } } } flush_put_bits(&pb); break; } case AV_CODEC_ID_ADPCM_SWF: { PutBitContext pb; init_put_bits(&pb, dst, pkt_size * 8); n = frame->nb_samples - 1; // store AdpcmCodeSize put_bits(&pb, 2, 2); // set 4-bit flash adpcm format // init the encoder state for (i = 0; i < avctx->channels; i++) { // clip step so it fits 6 bits c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); put_sbits(&pb, 16, samples[i]); put_bits(&pb, 6, c->status[i].step_index); c->status[i].prev_sample = samples[i]; } if (avctx->trellis > 0) { FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error); adpcm_compress_trellis(avctx, samples + avctx->channels, buf, &c->status[0], n, avctx->channels); if (avctx->channels == 2) adpcm_compress_trellis(avctx, samples + avctx->channels + 1, buf + n, &c->status[1], n, avctx->channels); for (i = 0; i < n; i++) { put_bits(&pb, 4, buf[i]); if (avctx->channels == 2) put_bits(&pb, 4, buf[n + i]); } av_free(buf); } 
else { for (i = 1; i < frame->nb_samples; i++) { put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * i])); if (avctx->channels == 2) put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2 * i + 1])); } } flush_put_bits(&pb); break; } case AV_CODEC_ID_ADPCM_MS: for (i = 0; i < avctx->channels; i++) { int predictor = 0; *dst++ = predictor; c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor]; c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor]; } for (i = 0; i < avctx->channels; i++) { if (c->status[i].idelta < 16) c->status[i].idelta = 16; bytestream_put_le16(&dst, c->status[i].idelta); } for (i = 0; i < avctx->channels; i++) c->status[i].sample2= *samples++; for (i = 0; i < avctx->channels; i++) { c->status[i].sample1 = *samples++; bytestream_put_le16(&dst, c->status[i].sample1); } for (i = 0; i < avctx->channels; i++) bytestream_put_le16(&dst, c->status[i].sample2); if (avctx->trellis > 0) { n = avctx->block_align - 7 * avctx->channels; FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error); if (avctx->channels == 1) { adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n, avctx->channels); for (i = 0; i < n; i += 2) *dst++ = (buf[i] << 4) | buf[i + 1]; } else { adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n, avctx->channels); adpcm_compress_trellis(avctx, samples + 1, buf + n, &c->status[1], n, avctx->channels); for (i = 0; i < n; i++) *dst++ = (buf[i] << 4) | buf[n + i]; } av_free(buf); } else { for (i = 7 * avctx->channels; i < avctx->block_align; i++) { int nibble; nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4; nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++); *dst++ = nibble; } } break; case AV_CODEC_ID_ADPCM_YAMAHA: n = frame->nb_samples / 2; if (avctx->trellis > 0) { FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 2, error); n *= 2; if (avctx->channels == 1) { adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n, avctx->channels); for (i = 0; i < n; i += 2) *dst++ = buf[i] | (buf[i + 1] << 4); } else { adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n, avctx->channels); adpcm_compress_trellis(avctx, samples + 1, buf + n, &c->status[1], n, avctx->channels); for (i = 0; i < n; i++) *dst++ = buf[i] | (buf[n + i] << 4); } av_free(buf); } else for (n *= avctx->channels; n > 0; n--) { int nibble; nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++); nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4; *dst++ = nibble; } break; default: return AVERROR(EINVAL); } avpkt->size = pkt_size; *got_packet_ptr = 1; return 0; error: return AVERROR(ENOMEM); }
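A note on the nibble packing used throughout the encoder above, with a minimal illustrative sketch (helper names are mine, not from the encoder): the IMA_WAV and Yamaha paths store the first code of each sample pair in the low nibble, while the MS path stores it in the high nibble.

    /* Two 4-bit ADPCM codes per byte, in the two orders seen above. */
    static unsigned char pack_lo_first(unsigned char a, unsigned char b)
    {
        return (unsigned char)((a & 0x0F) | ((b & 0x0F) << 4));
    }
    static unsigned char pack_hi_first(unsigned char a, unsigned char b)
    {
        return (unsigned char)(((a & 0x0F) << 4) | (b & 0x0F));
    }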
target: 1, idx: 243
yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha) { const int16_t *buf0 = buf[0], *buf1 = buf[1], *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], *vbuf0 = vbuf[0], *vbuf1 = vbuf[1], *abuf0 = hasAlpha ? abuf[0] : NULL, *abuf1 = hasAlpha ? abuf[1] : NULL; int yalpha1 = 4096 - yalpha; int uvalpha1 = 4096 - uvalpha; int i; int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4; int err[4] = {0}; if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8) step = 1; for (i = 0; i < dstW; i++) { int Y = ( buf0[i] * yalpha1 + buf1[i] * yalpha ) >> 10; //FIXME rounding int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha-(128 << 19)) >> 10; int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha-(128 << 19)) >> 10; int A; if (hasAlpha) { A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha + (1<<18)) >> 19; if (A & 0x100) A = av_clip_uint8(A); } yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err); dest += step; } c->dither_error[0][i] = err[0]; c->dither_error[1][i] = err[1]; c->dither_error[2][i] = err[2]; }
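The vertical blend in this template is 12-bit fixed point (yalpha1 + yalpha == 4096); shifting by 10 rather than 12 appears to keep two extra bits of headroom for the write stage. A minimal sketch of the arithmetic, with illustrative names:

    /* Blend one sample from two source rows; with yalpha = 1024
     * (a 25% blend toward b) this returns 3*a + b, i.e. four
     * times the blended value. */
    static int vblend(int a, int b, int yalpha)
    {
        int yalpha1 = 4096 - yalpha;
        return (a * yalpha1 + b * yalpha) >> 10;
    }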
target: 1, idx: 247
static void clone_slice(H264Context *dst, H264Context *src) { memcpy(dst->block_offset, src->block_offset, sizeof(dst->block_offset)); dst->s.current_picture_ptr = src->s.current_picture_ptr; dst->s.current_picture = src->s.current_picture; dst->s.linesize = src->s.linesize; dst->s.uvlinesize = src->s.uvlinesize; dst->s.first_field = src->s.first_field; dst->prev_poc_msb = src->prev_poc_msb; dst->prev_poc_lsb = src->prev_poc_lsb; dst->prev_frame_num_offset = src->prev_frame_num_offset; dst->prev_frame_num = src->prev_frame_num; dst->short_ref_count = src->short_ref_count; memcpy(dst->short_ref, src->short_ref, sizeof(dst->short_ref)); memcpy(dst->long_ref, src->long_ref, sizeof(dst->long_ref)); memcpy(dst->default_ref_list, src->default_ref_list, sizeof(dst->default_ref_list)); memcpy(dst->ref_list, src->ref_list, sizeof(dst->ref_list)); memcpy(dst->dequant4_coeff, src->dequant4_coeff, sizeof(src->dequant4_coeff)); memcpy(dst->dequant8_coeff, src->dequant8_coeff, sizeof(src->dequant8_coeff)); }
target: 1, idx: 249
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, int add_ca, int compute_ca, int compute_ov) { TCGv t0, t1; if ((!compute_ca && !compute_ov) || (!TCGV_EQUAL(ret, arg1) && !TCGV_EQUAL(ret, arg2))) { t0 = ret; } else { t0 = tcg_temp_local_new(); } if (add_ca) { t1 = tcg_temp_local_new(); tcg_gen_mov_tl(t1, cpu_ca); } else { TCGV_UNUSED(t1); } if (compute_ca) { /* Start with XER CA disabled, the most likely case */ tcg_gen_movi_tl(cpu_ca, 0); } if (compute_ov) { /* Start with XER OV disabled, the most likely case */ tcg_gen_movi_tl(cpu_ov, 0); } if (add_ca) { tcg_gen_not_tl(t0, arg1); tcg_gen_add_tl(t0, t0, arg2); gen_op_arith_compute_ca(ctx, t0, arg2, 0); tcg_gen_add_tl(t0, t0, t1); gen_op_arith_compute_ca(ctx, t0, t1, 0); tcg_temp_free(t1); } else { tcg_gen_sub_tl(t0, arg2, arg1); if (compute_ca) { gen_op_arith_compute_ca(ctx, t0, arg2, 1); } } if (compute_ov) { gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1); } if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, t0); if (!TCGV_EQUAL(t0, ret)) { tcg_gen_mov_tl(ret, t0); tcg_temp_free(t0); } }
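The add_ca branch relies on the two's-complement identity behind PowerPC subtract-from: arg2 - arg1 == ~arg1 + arg2 + 1, with the incoming carry substituted for the +1 in the carrying variants. A one-line sketch:

    /* result == a2 - a1 when ca == 1; the subfe-style ops feed
     * the previous CA in place of the +1. */
    static unsigned long subf_via_not(unsigned long a1, unsigned long a2,
                                      unsigned long ca)
    {
        return ~a1 + a2 + ca;
    }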
target: 1, idx: 250
static void mxf_packet_timestamps(MXFContext *mxf, AVPacket *pkt) { int64_t last_ofs = -1, next_ofs; MXFIndexTable *t = &mxf->index_tables[0]; /* this is called from the OP1a demuxing logic, which means there * may be no index tables */ if (mxf->nb_index_tables <= 0) return; /* find mxf->current_edit_unit so that the next edit unit starts ahead of pkt->pos */ for (;;) { if (mxf_edit_unit_absolute_offset(mxf, t, mxf->current_edit_unit + 1, NULL, &next_ofs, 0) < 0) break; if (next_ofs <= last_ofs) { /* large next_ofs didn't change or current_edit_unit wrapped * around this fixes the infinite loop on zzuf3.mxf */ av_log(mxf->fc, AV_LOG_ERROR, "next_ofs didn't change. not deriving packet timestamps\n"); return; } if (next_ofs > pkt->pos) break; last_ofs = next_ofs; mxf->current_edit_unit++; } if (mxf->current_edit_unit >= t->nb_ptses) return; pkt->dts = mxf->current_edit_unit + t->first_dts; pkt->pts = t->ptses[mxf->current_edit_unit]; }
target: 0, idx: 251
static void ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ int i; for(i=0; i<16; i++){ if(nnzc[ scan8[i] ] || block[i*16]) ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride); } }
target: 0, idx: 252
static void process_subpacket_9 (QDM2Context *q, QDM2SubPNode *node) { GetBitContext gb; int i, j, k, n, ch, run, level, diff; init_get_bits(&gb, node->packet->data, node->packet->size*8); n = coeff_per_sb_for_avg[q->coeff_per_sb_select][QDM2_SB_USED(q->sub_sampling) - 1] + 1; // same as averagesomething function for (i = 1; i < n; i++) for (ch=0; ch < q->nb_channels; ch++) { level = qdm2_get_vlc(&gb, &vlc_tab_level, 0, 2); q->quantized_coeffs[ch][i][0] = level; for (j = 0; j < (8 - 1); ) { run = qdm2_get_vlc(&gb, &vlc_tab_run, 0, 1) + 1; diff = qdm2_get_se_vlc(&vlc_tab_diff, &gb, 2); for (k = 1; k <= run; k++) q->quantized_coeffs[ch][i][j + k] = (level + ((k*diff) / run)); level += diff; j += run; } } for (ch = 0; ch < q->nb_channels; ch++) for (i = 0; i < 8; i++) q->quantized_coeffs[ch][0][i] = 0; }
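A worked example of the run interpolation in the inner loop, using made-up values:

    /* level = 10, diff = 6, run = 3:
     *   k=1: 10 + (1*6)/3 = 12
     *   k=2: 10 + (2*6)/3 = 14
     *   k=3: 10 + (3*6)/3 = 16
     * then level += diff -> 16 and j advances by run, so the run
     * ends exactly on the next stored level. */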
target: 0, idx: 254
static inline void h264_loop_filter_chroma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0) { int i, d; for( i = 0; i < 4; i++ ) { const int tc = tc0[i]; if( tc <= 0 ) { pix += 2*ystride; continue; } for( d = 0; d < 2; d++ ) { const int p0 = pix[-1*xstride]; const int p1 = pix[-2*xstride]; const int q0 = pix[0]; const int q1 = pix[1*xstride]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { int delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); pix[-xstride] = av_clip_uint8( p0 + delta ); /* p0' */ pix[0] = av_clip_uint8( q0 - delta ); /* q0' */ } pix += ystride; } } }
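A worked pass through the delta formula, with made-up pixel values p1 = 60, p0 = 62, q0 = 70, q1 = 68 and tc = 4, alpha = 10, beta = 4:

    /* |p0-q0| = 8 < 10, |p1-p0| = 2 < 4, |q1-q0| = 2 < 4, so:
     *   delta = clip((((70-62) << 2) + (60-68) + 4) >> 3, -4, 4)
     *         = clip((32 - 8 + 4) >> 3, -4, 4) = 3
     *   p0' = 62 + 3 = 65,  q0' = 70 - 3 = 67 */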
target: 1, idx: 256
static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst, const uint8_t *src, const uint8_t *src_end, int width, int esc_count) { int i = 0; int count; uint8_t zero_run = 0; const uint8_t *src_start = src; uint8_t mask1 = -(esc_count < 2); uint8_t mask2 = -(esc_count < 3); uint8_t *end = dst + (width - 2); output_zeros: if (l->zeros_rem) { count = FFMIN(l->zeros_rem, width - i); if (end - dst < count) { av_log(l->avctx, AV_LOG_ERROR, "Too many zeros remaining.\n"); return AVERROR_INVALIDDATA; } memset(dst, 0, count); l->zeros_rem -= count; dst += count; } while (dst < end) { i = 0; while (!zero_run && dst + i < end) { i++; if (i+2 >= src_end - src) return AVERROR_INVALIDDATA; zero_run = !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2)); } if (zero_run) { zero_run = 0; i += esc_count; memcpy(dst, src, i); dst += i; l->zeros_rem = lag_calc_zero_run(src[i]); src += i + 1; goto output_zeros; } else { memcpy(dst, src, i); src += i; dst += i; } } return src - src_start; }
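The mask1/mask2 initialisation is a branchless trick: negating a boolean in an 8-bit variable yields an all-ones or all-zero mask, so the extra bytes only participate in the zero-run test when esc_count is small. A minimal sketch:

    /* 0xFF when cond is true, 0x00 otherwise. */
    static unsigned char cond_mask(int cond)
    {
        return (unsigned char)-(cond != 0);
    }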
target: 1, idx: 258
static int ffmmal_read_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame) { MMALDecodeContext *ctx = avctx->priv_data; MMAL_BUFFER_HEADER_T *buffer = NULL; MMAL_STATUS_T status = 0; int ret = 0; if (ctx->eos_received) goto done; while (1) { // To ensure decoding in lockstep with a constant delay between fed packets // and output frames, we always wait until an output buffer is available. // Except during start we don't know after how many input packets the decoder // is going to return the first buffer, and we can't distinguish decoder // being busy from decoder waiting for input. So just poll at the start and // keep feeding new data to the buffer. // We are pretty sure the decoder will produce output if we sent more input // frames than what a h264 decoder could logically delay. This avoids too // excessive buffering. // We also wait if we sent eos, but didn't receive it yet (think of decoding // stream with a very low number of frames). if (ctx->frames_output || ctx->packets_sent > MAX_DELAYED_FRAMES || (ctx->packets_sent && ctx->eos_sent)) { // MMAL will ignore broken input packets, which means the frame we // expect here may never arrive. Dealing with this correctly is // complicated, so here's a hack to avoid that it freezes forever // in this unlikely situation. buffer = mmal_queue_timedwait(ctx->queue_decoded_frames, 100); if (!buffer) { av_log(avctx, AV_LOG_ERROR, "Did not get output frame from MMAL.\n"); ret = AVERROR_UNKNOWN; goto done; } } else { buffer = mmal_queue_get(ctx->queue_decoded_frames); if (!buffer) goto done; } ctx->eos_received |= !!(buffer->flags & MMAL_BUFFER_HEADER_FLAG_EOS); if (ctx->eos_received) goto done; if (buffer->cmd == MMAL_EVENT_FORMAT_CHANGED) { MMAL_COMPONENT_T *decoder = ctx->decoder; MMAL_EVENT_FORMAT_CHANGED_T *ev = mmal_event_format_changed_get(buffer); MMAL_BUFFER_HEADER_T *stale_buffer; av_log(avctx, AV_LOG_INFO, "Changing output format.\n"); if ((status = mmal_port_disable(decoder->output[0]))) goto done; while ((stale_buffer = mmal_queue_get(ctx->queue_decoded_frames))) mmal_buffer_header_release(stale_buffer); mmal_format_copy(decoder->output[0]->format, ev->format); if ((ret = ffmal_update_format(avctx)) < 0) goto done; if ((status = mmal_port_enable(decoder->output[0], output_callback))) goto done; if ((ret = ffmmal_fill_output_port(avctx)) < 0) goto done; if ((ret = ffmmal_fill_input_port(avctx)) < 0) goto done; mmal_buffer_header_release(buffer); continue; } else if (buffer->cmd) { char s[20]; av_get_codec_tag_string(s, sizeof(s), buffer->cmd); av_log(avctx, AV_LOG_WARNING, "Unknown MMAL event %s on output port\n", s); goto done; } else if (buffer->length == 0) { // Unused output buffer that got drained after format change. mmal_buffer_header_release(buffer); continue; } ctx->frames_output++; if ((ret = ffmal_copy_frame(avctx, frame, buffer)) < 0) goto done; *got_frame = 1; break; } done: if (buffer) mmal_buffer_header_release(buffer); if (status && ret >= 0) ret = AVERROR_UNKNOWN; return ret; }
target: 1, idx: 259
static int send_dma_request(int cmd, uint64_t sector, int nb_sectors, PrdtEntry *prdt, int prdt_entries, void(*post_exec)(QPCIDevice *dev, void *ide_base, uint64_t sector, int nb_sectors)) { QPCIDevice *dev; void *bmdma_base; void *ide_base; uintptr_t guest_prdt; size_t len; bool from_dev; uint8_t status; int flags; dev = get_pci_device(&bmdma_base, &ide_base); flags = cmd & ~0xff; cmd &= 0xff; switch (cmd) { case CMD_READ_DMA: case CMD_PACKET: /* Assuming we only test data reads w/ ATAPI, otherwise we need to know * the SCSI command being sent in the packet, too. */ from_dev = true; break; case CMD_WRITE_DMA: from_dev = false; break; default: g_assert_not_reached(); } if (flags & CMDF_NO_BM) { qpci_config_writew(dev, PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY); } /* Select device 0 */ qpci_io_writeb(dev, ide_base + reg_device, 0 | LBA); /* Stop any running transfer, clear any pending interrupt */ qpci_io_writeb(dev, bmdma_base + bmreg_cmd, 0); qpci_io_writeb(dev, bmdma_base + bmreg_status, BM_STS_INTR); /* Setup PRDT */ len = sizeof(*prdt) * prdt_entries; guest_prdt = guest_alloc(guest_malloc, len); memwrite(guest_prdt, prdt, len); qpci_io_writel(dev, bmdma_base + bmreg_prdt, guest_prdt); /* ATA DMA command */ if (cmd == CMD_PACKET) { /* Enables ATAPI DMA; otherwise PIO is attempted */ qpci_io_writeb(dev, ide_base + reg_feature, 0x01); } else { qpci_io_writeb(dev, ide_base + reg_nsectors, nb_sectors); qpci_io_writeb(dev, ide_base + reg_lba_low, sector & 0xff); qpci_io_writeb(dev, ide_base + reg_lba_middle, (sector >> 8) & 0xff); qpci_io_writeb(dev, ide_base + reg_lba_high, (sector >> 16) & 0xff); } qpci_io_writeb(dev, ide_base + reg_command, cmd); if (post_exec) { post_exec(dev, ide_base, sector, nb_sectors); } /* Start DMA transfer */ qpci_io_writeb(dev, bmdma_base + bmreg_cmd, BM_CMD_START | (from_dev ? BM_CMD_WRITE : 0)); if (flags & CMDF_ABORT) { qpci_io_writeb(dev, bmdma_base + bmreg_cmd, 0); } /* Wait for the DMA transfer to complete */ do { status = qpci_io_readb(dev, bmdma_base + bmreg_status); } while ((status & (BM_STS_ACTIVE | BM_STS_INTR)) == BM_STS_ACTIVE); g_assert_cmpint(get_irq(IDE_PRIMARY_IRQ), ==, !!(status & BM_STS_INTR)); /* Check IDE status code */ assert_bit_set(qpci_io_readb(dev, ide_base + reg_status), DRDY); assert_bit_clear(qpci_io_readb(dev, ide_base + reg_status), BSY | DRQ); /* Reading the status register clears the IRQ */ g_assert(!get_irq(IDE_PRIMARY_IRQ)); /* Stop DMA transfer if still active */ if (status & BM_STS_ACTIVE) { qpci_io_writeb(dev, bmdma_base + bmreg_cmd, 0); } free_pci_device(dev); return status; }
target: 1, idx: 260
static int mxf_read_close(AVFormatContext *s) { MXFContext *mxf = s->priv_data; MXFIndexTableSegment *seg; int i; av_freep(&mxf->packages_refs); for (i = 0; i < s->nb_streams; i++) s->streams[i]->priv_data = NULL; for (i = 0; i < mxf->metadata_sets_count; i++) { switch (mxf->metadata_sets[i]->type) { case MultipleDescriptor: av_freep(&((MXFDescriptor *)mxf->metadata_sets[i])->sub_descriptors_refs); break; case Sequence: av_freep(&((MXFSequence *)mxf->metadata_sets[i])->structural_components_refs); break; case SourcePackage: case MaterialPackage: av_freep(&((MXFPackage *)mxf->metadata_sets[i])->tracks_refs); break; case IndexTableSegment: seg = (MXFIndexTableSegment *)mxf->metadata_sets[i]; av_freep(&seg->temporal_offset_entries); av_freep(&seg->flag_entries); av_freep(&seg->stream_offset_entries); break; default: break; } av_freep(&mxf->metadata_sets[i]); } av_freep(&mxf->partitions); av_freep(&mxf->metadata_sets); av_freep(&mxf->aesc); av_freep(&mxf->local_tags); for (i = 0; i < mxf->nb_index_tables; i++) { av_freep(&mxf->index_tables[i].segments); av_freep(&mxf->index_tables[i].ptses); av_freep(&mxf->index_tables[i].fake_index); } av_freep(&mxf->index_tables); return 0; }
target: 1, idx: 261
static int adts_aac_read_packet(AVFormatContext *s, AVPacket *pkt) { int ret, fsize; ret = av_get_packet(s->pb, pkt, ADTS_HEADER_SIZE); if (ret < 0) return ret; if (ret < ADTS_HEADER_SIZE) { av_packet_unref(pkt); return AVERROR(EIO); } if ((AV_RB16(pkt->data) >> 4) != 0xfff) { av_packet_unref(pkt); return AVERROR_INVALIDDATA; } fsize = (AV_RB32(pkt->data + 3) >> 13) & 0x1FFF; if (fsize < ADTS_HEADER_SIZE) { av_packet_unref(pkt); return AVERROR_INVALIDDATA; } return av_append_packet(s->pb, pkt, fsize - ADTS_HEADER_SIZE); }
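The fsize extraction reads the 13-bit ADTS frame_length field, which starts at bit 30 of the header and so straddles bytes 3..5; AV_RB32(pkt->data + 3) >> 13 isolates the same bits in one read. An equivalent byte-wise sketch:

    /* frame_length = ADTS header bits 30..42 (13 bits). */
    static unsigned adts_frame_length(const unsigned char *h)
    {
        return ((h[3] & 0x03) << 11) | (h[4] << 3) | (h[5] >> 5);
    }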
target: 1, idx: 263
static int bgr24ToYv12Wrapper(SwsContext *c, const uint8_t *src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t *dst[], int dstStride[]) { rgb24toyv12( src[0], dst[0] + srcSliceY * dstStride[0], dst[1] + (srcSliceY >> 1) * dstStride[1], dst[2] + (srcSliceY >> 1) * dstStride[2], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]); if (dst[3]) fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255); return srcSliceH; }
target: 1, idx: 264
void rgb15tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size) { const uint16_t *end; uint8_t *d = (uint8_t *)dst; const uint16_t *s = (const uint16_t *)src; end = s + src_size/2; while(s < end) { register uint16_t bgr; bgr = *s++; *d++ = (bgr&0x7C00)>>7; *d++ = (bgr&0x3E0)>>2; *d++ = (bgr&0x1F)<<3; *d++ = 0; } }
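The masks pick apart the RGB555 layout (bit 15 unused, then 5 bits each of R, G, B), and the << 3 widens 5 bits to 8 without replication, so full scale maps to 248 rather than 255; note also that dst must hold src_size * 2 bytes since each 16-bit pixel expands to 4. The common variant that reaches full scale replicates the top bits, sketched below:

    /* 5-bit -> 8-bit with bit replication: 31 -> 255. */
    static unsigned char expand5(unsigned char v5)
    {
        return (unsigned char)((v5 << 3) | (v5 >> 2));
    }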
target: 1, idx: 266
static int vp5_parse_header(VP56Context *s, const uint8_t *buf, int buf_size) { VP56RangeCoder *c = &s->c; int rows, cols; ff_vp56_init_range_decoder(&s->c, buf, buf_size); s->frames[VP56_FRAME_CURRENT]->key_frame = !vp56_rac_get(c); vp56_rac_get(c); ff_vp56_init_dequant(s, vp56_rac_gets(c, 6)); if (s->frames[VP56_FRAME_CURRENT]->key_frame) { vp56_rac_gets(c, 8); if(vp56_rac_gets(c, 5) > 5) return AVERROR_INVALIDDATA; vp56_rac_gets(c, 2); if (vp56_rac_get(c)) { av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n"); return AVERROR_PATCHWELCOME; } rows = vp56_rac_gets(c, 8); /* number of stored macroblock rows */ cols = vp56_rac_gets(c, 8); /* number of stored macroblock cols */ if (!rows || !cols) { av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n", cols << 4, rows << 4); return AVERROR_INVALIDDATA; } vp56_rac_gets(c, 8); /* number of displayed macroblock rows */ vp56_rac_gets(c, 8); /* number of displayed macroblock cols */ vp56_rac_gets(c, 2); if (!s->macroblocks || /* first frame */ 16*cols != s->avctx->coded_width || 16*rows != s->avctx->coded_height) { int ret = ff_set_dimensions(s->avctx, 16 * cols, 16 * rows); if (ret < 0) return ret; return VP56_SIZE_CHANGE; } } else if (!s->macroblocks) return AVERROR_INVALIDDATA; return 0; }
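Note that rows and cols here are macroblock counts, so the coded dimensions are 16*cols by 16*rows; for example rows = 45, cols = 80 gives 1280x720, and any mismatch with the current coded size triggers ff_set_dimensions and a VP56_SIZE_CHANGE return.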
target: 1, idx: 267
static inline int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset) { int coeff = dirac_get_se_golomb(gb); const int sign = FFSIGN(coeff); if (coeff) coeff = sign*((sign * coeff * qfactor + qoffset) >> 2); return coeff; }
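The dequantisation is fixed point with two fractional bits: coeff = sign * ((|coeff| * qfactor + qoffset) >> 2). Worked example with made-up values:

    /* coeff = -3, qfactor = 8, qoffset = 2:
     *   sign = -1, |coeff| = 3
     *   (3*8 + 2) >> 2 = 26 >> 2 = 6   ->  result -6 */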
target: 1, idx: 269
int ff_h264_frame_start(H264Context *h) { Picture *pic; int i, ret; const int pixel_shift = h->pixel_shift; int c[4] = { 1<<(h->sps.bit_depth_luma-1), 1<<(h->sps.bit_depth_chroma-1), 1<<(h->sps.bit_depth_chroma-1), -1 }; if (!ff_thread_can_start_frame(h->avctx)) { av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n"); return -1; } release_unused_pictures(h, 1); h->cur_pic_ptr = NULL; i = find_unused_picture(h); if (i < 0) { av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n"); return i; } pic = &h->DPB[i]; pic->f.reference = h->droppable ? 0 : h->picture_structure; pic->f.coded_picture_number = h->coded_picture_number++; pic->field_picture = h->picture_structure != PICT_FRAME; /* * Zero key_frame here; IDR markings per slice in frame or fields are ORed * in later. * See decode_nal_units(). */ pic->f.key_frame = 0; pic->sync = 0; pic->mmco_reset = 0; if ((ret = alloc_picture(h, pic)) < 0) return ret; if(!h->sync && !h->avctx->hwaccel) avpriv_color_frame(&pic->f, c); h->cur_pic_ptr = pic; h->cur_pic = *h->cur_pic_ptr; h->cur_pic.f.extended_data = h->cur_pic.f.data; ff_er_frame_start(&h->er); assert(h->linesize && h->uvlinesize); for (i = 0; i < 16; i++) { h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3); h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3); } for (i = 0; i < 16; i++) { h->block_offset[16 + i] = h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3); h->block_offset[48 + 16 + i] = h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3); } /* can't be in alloc_tables because linesize isn't known there. * FIXME: redo bipred weight to not require extra buffer? */ for (i = 0; i < h->slice_context_count; i++) if (h->thread_context[i]) { ret = alloc_scratch_buffers(h->thread_context[i], h->linesize); if (ret < 0) return ret; } /* Some macroblocks can be accessed before they're available in case * of lost slices, MBAFF or threading. */ memset(h->slice_table, -1, (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table)); // s->decode = (h->flags & CODEC_FLAG_PSNR) || !s->encoding || // h->cur_pic.f.reference /* || h->contains_intra */ || 1; /* We mark the current picture as non-reference after allocating it, so * that if we break out due to an error it can be released automatically * in the next ff_MPV_frame_start(). * SVQ3 as well as most other codecs have only last/next/current and thus * get released even with set reference, besides SVQ3 and others do not * mark frames as reference later "naturally". */ if (h->avctx->codec_id != AV_CODEC_ID_SVQ3) h->cur_pic_ptr->f.reference = 0; h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX; h->next_output_pic = NULL; assert(h->cur_pic_ptr->long_ref == 0); return 0; }
target: 1, idx: 270
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr) { gen_set_cpsr(cpsr, CPSR_ERET_MASK); tcg_temp_free_i32(cpsr); store_reg(s, 15, pc); s->is_jmp = DISAS_UPDATE; }
target: 1, idx: 271
static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) { const char *rn = "invalid"; if (sel != 0) check_insn(ctx, ISA_MIPS32); switch (reg) { case 0: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Index)); rn = "Index"; break; case 1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_mvpcontrol(arg, cpu_env); rn = "MVPControl"; break; case 2: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_mvpconf0(arg, cpu_env); rn = "MVPConf0"; break; case 3: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_mvpconf1(arg, cpu_env); rn = "MVPConf1"; break; case 4: CP0_CHECK(ctx->vp); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPControl)); rn = "VPControl"; break; default: goto cp0_unimplemented; } break; case 1: switch (sel) { case 0: CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); gen_helper_mfc0_random(arg, cpu_env); rn = "Random"; break; case 1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl)); rn = "VPEControl"; break; case 2: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0)); rn = "VPEConf0"; break; case 3: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1)); rn = "VPEConf1"; break; case 4: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_YQMask)); rn = "YQMask"; break; case 5: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPESchedule)); rn = "VPESchedule"; break; case 6: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPEScheFBack)); rn = "VPEScheFBack"; break; case 7: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt)); rn = "VPEOpt"; break; default: goto cp0_unimplemented; } break; case 2: switch (sel) { case 0: { TCGv_i64 tmp = tcg_temp_new_i64(); tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo0)); #if defined(TARGET_MIPS64) if (ctx->rxi) { /* Move RI/XI fields to bits 31:30 */ tcg_gen_shri_tl(arg, tmp, CP0EnLo_XI); tcg_gen_deposit_tl(tmp, tmp, arg, 30, 2); } #endif gen_move_low32(arg, tmp); tcg_temp_free_i64(tmp); } rn = "EntryLo0"; break; case 1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tcstatus(arg, cpu_env); rn = "TCStatus"; break; case 2: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tcbind(arg, cpu_env); rn = "TCBind"; break; case 3: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tcrestart(arg, cpu_env); rn = "TCRestart"; break; case 4: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tchalt(arg, cpu_env); rn = "TCHalt"; break; case 5: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tccontext(arg, cpu_env); rn = "TCContext"; break; case 6: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tcschedule(arg, cpu_env); rn = "TCSchedule"; break; case 7: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mfc0_tcschefback(arg, cpu_env); rn = "TCScheFBack"; break; default: goto cp0_unimplemented; } break; case 3: switch (sel) { case 0: { TCGv_i64 tmp = tcg_temp_new_i64(); tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1)); #if defined(TARGET_MIPS64) if (ctx->rxi) { /* Move RI/XI fields to bits 31:30 */ tcg_gen_shri_tl(arg, tmp, CP0EnLo_XI); tcg_gen_deposit_tl(tmp, tmp, arg, 30, 2); } #endif gen_move_low32(arg, tmp); tcg_temp_free_i64(tmp); } rn = "EntryLo1"; break; case 1: CP0_CHECK(ctx->vp); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_GlobalNumber)); rn = "GlobalNumber"; break; default: goto 
cp0_unimplemented; } break; case 4: switch (sel) { case 0: tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_Context)); tcg_gen_ext32s_tl(arg, arg); rn = "Context"; break; case 1: // gen_helper_mfc0_contextconfig(arg); /* SmartMIPS ASE */ rn = "ContextConfig"; goto cp0_unimplemented; case 2: CP0_CHECK(ctx->ulri); tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); tcg_gen_ext32s_tl(arg, arg); rn = "UserLocal"; break; default: goto cp0_unimplemented; } break; case 5: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageMask)); rn = "PageMask"; break; case 1: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageGrain)); rn = "PageGrain"; break; case 2: CP0_CHECK(ctx->sc); tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl0)); tcg_gen_ext32s_tl(arg, arg); rn = "SegCtl0"; break; case 3: CP0_CHECK(ctx->sc); tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl1)); tcg_gen_ext32s_tl(arg, arg); rn = "SegCtl1"; break; case 4: CP0_CHECK(ctx->sc); tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl2)); tcg_gen_ext32s_tl(arg, arg); rn = "SegCtl2"; break; default: goto cp0_unimplemented; } break; case 6: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Wired)); rn = "Wired"; break; case 1: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf0)); rn = "SRSConf0"; break; case 2: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf1)); rn = "SRSConf1"; break; case 3: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf2)); rn = "SRSConf2"; break; case 4: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf3)); rn = "SRSConf3"; break; case 5: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf4)); rn = "SRSConf4"; break; default: goto cp0_unimplemented; } break; case 7: switch (sel) { case 0: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_HWREna)); rn = "HWREna"; break; default: goto cp0_unimplemented; } break; case 8: switch (sel) { case 0: tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); tcg_gen_ext32s_tl(arg, arg); rn = "BadVAddr"; break; case 1: CP0_CHECK(ctx->bi); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstr)); rn = "BadInstr"; break; case 2: CP0_CHECK(ctx->bp); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrP)); rn = "BadInstrP"; break; default: goto cp0_unimplemented; } break; case 9: switch (sel) { case 0: /* Mark as an IO operation because we read the time. */ if (ctx->tb->cflags & CF_USE_ICOUNT) { gen_io_start(); } gen_helper_mfc0_count(arg, cpu_env); if (ctx->tb->cflags & CF_USE_ICOUNT) { gen_io_end(); } /* Break the TB to be able to take timer interrupts immediately after reading count. BS_STOP isn't sufficient, we need to ensure we break completely out of translated code. 
*/ gen_save_pc(ctx->pc + 4); ctx->bstate = BS_EXCP; rn = "Count"; break; /* 6,7 are implementation dependent */ default: goto cp0_unimplemented; } break; case 10: switch (sel) { case 0: tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryHi)); tcg_gen_ext32s_tl(arg, arg); rn = "EntryHi"; break; default: goto cp0_unimplemented; } break; case 11: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Compare)); rn = "Compare"; break; /* 6,7 are implementation dependent */ default: goto cp0_unimplemented; } break; case 12: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Status)); rn = "Status"; break; case 1: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_IntCtl)); rn = "IntCtl"; break; case 2: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSCtl)); rn = "SRSCtl"; break; case 3: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSMap)); rn = "SRSMap"; break; default: goto cp0_unimplemented; } break; case 13: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Cause)); rn = "Cause"; break; default: goto cp0_unimplemented; } break; case 14: switch (sel) { case 0: tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC)); tcg_gen_ext32s_tl(arg, arg); rn = "EPC"; break; default: goto cp0_unimplemented; } break; case 15: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PRid)); rn = "PRid"; break; case 1: check_insn(ctx, ISA_MIPS32R2); tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EBase)); tcg_gen_ext32s_tl(arg, arg); rn = "EBase"; break; case 3: check_insn(ctx, ISA_MIPS32R2); CP0_CHECK(ctx->cmgcr); tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_CMGCRBase)); tcg_gen_ext32s_tl(arg, arg); rn = "CMGCRBase"; break; default: goto cp0_unimplemented; } break; case 16: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config0)); rn = "Config"; break; case 1: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config1)); rn = "Config1"; break; case 2: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config2)); rn = "Config2"; break; case 3: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config3)); rn = "Config3"; break; case 4: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config4)); rn = "Config4"; break; case 5: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config5)); rn = "Config5"; break; /* 6,7 are implementation dependent */ case 6: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config6)); rn = "Config6"; break; case 7: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config7)); rn = "Config7"; break; default: goto cp0_unimplemented; } break; case 17: switch (sel) { case 0: gen_helper_mfc0_lladdr(arg, cpu_env); rn = "LLAddr"; break; case 1: CP0_CHECK(ctx->mrp); gen_helper_mfc0_maar(arg, cpu_env); rn = "MAAR"; break; case 2: CP0_CHECK(ctx->mrp); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_MAARI)); rn = "MAARI"; break; default: goto cp0_unimplemented; } break; case 18: switch (sel) { case 0 ... 
7: gen_helper_1e0i(mfc0_watchlo, arg, sel); rn = "WatchLo"; break; default: goto cp0_unimplemented; } break; case 19: switch (sel) { case 0 ...7: gen_helper_1e0i(mfc0_watchhi, arg, sel); rn = "WatchHi"; break; default: goto cp0_unimplemented; } break; case 20: switch (sel) { case 0: #if defined(TARGET_MIPS64) check_insn(ctx, ISA_MIPS3); tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_XContext)); tcg_gen_ext32s_tl(arg, arg); rn = "XContext"; break; #endif default: goto cp0_unimplemented; } break; case 21: /* Officially reserved, but sel 0 is used for R1x000 framemask */ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Framemask)); rn = "Framemask"; break; default: goto cp0_unimplemented; } break; case 22: tcg_gen_movi_tl(arg, 0); /* unimplemented */ rn = "'Diagnostic"; /* implementation dependent */ break; case 23: switch (sel) { case 0: gen_helper_mfc0_debug(arg, cpu_env); /* EJTAG support */ rn = "Debug"; break; case 1: // gen_helper_mfc0_tracecontrol(arg); /* PDtrace support */ rn = "TraceControl"; goto cp0_unimplemented; case 2: // gen_helper_mfc0_tracecontrol2(arg); /* PDtrace support */ rn = "TraceControl2"; goto cp0_unimplemented; case 3: // gen_helper_mfc0_usertracedata(arg); /* PDtrace support */ rn = "UserTraceData"; goto cp0_unimplemented; case 4: // gen_helper_mfc0_tracebpc(arg); /* PDtrace support */ rn = "TraceBPC"; goto cp0_unimplemented; default: goto cp0_unimplemented; } break; case 24: switch (sel) { case 0: /* EJTAG support */ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); tcg_gen_ext32s_tl(arg, arg); rn = "DEPC"; break; default: goto cp0_unimplemented; } break; case 25: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Performance0)); rn = "Performance0"; break; case 1: // gen_helper_mfc0_performance1(arg); rn = "Performance1"; goto cp0_unimplemented; case 2: // gen_helper_mfc0_performance2(arg); rn = "Performance2"; goto cp0_unimplemented; case 3: // gen_helper_mfc0_performance3(arg); rn = "Performance3"; goto cp0_unimplemented; case 4: // gen_helper_mfc0_performance4(arg); rn = "Performance4"; goto cp0_unimplemented; case 5: // gen_helper_mfc0_performance5(arg); rn = "Performance5"; goto cp0_unimplemented; case 6: // gen_helper_mfc0_performance6(arg); rn = "Performance6"; goto cp0_unimplemented; case 7: // gen_helper_mfc0_performance7(arg); rn = "Performance7"; goto cp0_unimplemented; default: goto cp0_unimplemented; } break; case 26: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_ErrCtl)); rn = "ErrCtl"; break; default: goto cp0_unimplemented; } break; case 27: switch (sel) { case 0 ... 
3: tcg_gen_movi_tl(arg, 0); /* unimplemented */ rn = "CacheErr"; break; default: goto cp0_unimplemented; } break; case 28: switch (sel) { case 0: case 2: case 4: case 6: { TCGv_i64 tmp = tcg_temp_new_i64(); tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUMIPSState, CP0_TagLo)); gen_move_low32(arg, tmp); tcg_temp_free_i64(tmp); } rn = "TagLo"; break; case 1: case 3: case 5: case 7: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataLo)); rn = "DataLo"; break; default: goto cp0_unimplemented; } break; case 29: switch (sel) { case 0: case 2: case 4: case 6: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagHi)); rn = "TagHi"; break; case 1: case 3: case 5: case 7: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataHi)); rn = "DataHi"; break; default: goto cp0_unimplemented; } break; case 30: switch (sel) { case 0: tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); tcg_gen_ext32s_tl(arg, arg); rn = "ErrorEPC"; break; default: goto cp0_unimplemented; } break; case 31: switch (sel) { case 0: /* EJTAG support */ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DESAVE)); rn = "DESAVE"; break; case 2 ... 7: CP0_CHECK(ctx->kscrexist & (1 << sel)); tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_KScratch[sel-2])); tcg_gen_ext32s_tl(arg, arg); rn = "KScratch"; break; default: goto cp0_unimplemented; } break; default: goto cp0_unimplemented; } trace_mips_translate_c0("mfc0", rn, reg, sel); return; cp0_unimplemented: qemu_log_mask(LOG_UNIMP, "mfc0 %s (reg %d sel %d)\n", rn, reg, sel); gen_mfc0_unimplemented(ctx, arg); }
target: 1, idx: 273
static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf) { const char *p; int ch, reg_size, type; char buf[4096]; uint8_t mem_buf[4096]; uint32_t *registers; target_ulong addr, len; #ifdef DEBUG_GDB printf("command='%s'\n", line_buf); #endif p = line_buf; ch = *p++; switch(ch) { case '?': /* TODO: Make this return the correct value for user-mode. */ snprintf(buf, sizeof(buf), "S%02x", SIGTRAP); put_packet(s, buf); /* Remove all the breakpoints when this query is issued, * because gdb is doing and initial connect and the state * should be cleaned up. */ case 'c': if (*p != '\0') { addr = strtoull(p, (char **)&p, 16); #if defined(TARGET_I386) env->eip = addr; #elif defined (TARGET_PPC) env->nip = addr; #elif defined (TARGET_SPARC) env->pc = addr; env->npc = addr + 4; #elif defined (TARGET_ARM) env->regs[15] = addr; #elif defined (TARGET_SH4) env->pc = addr; #elif defined (TARGET_MIPS) env->PC[env->current_tc] = addr; #elif defined (TARGET_CRIS) env->pc = addr; #endif } return RS_IDLE; case 's': if (*p != '\0') { addr = strtoull(p, (char **)&p, 16); #if defined(TARGET_I386) env->eip = addr; #elif defined (TARGET_PPC) env->nip = addr; #elif defined (TARGET_SPARC) env->pc = addr; env->npc = addr + 4; #elif defined (TARGET_ARM) env->regs[15] = addr; #elif defined (TARGET_SH4) env->pc = addr; #elif defined (TARGET_MIPS) env->PC[env->current_tc] = addr; #elif defined (TARGET_CRIS) env->pc = addr; #endif } cpu_single_step(env, sstep_flags); return RS_IDLE; case 'F': { target_ulong ret; target_ulong err; ret = strtoull(p, (char **)&p, 16); if (*p == ',') { p++; err = strtoull(p, (char **)&p, 16); } else { err = 0; } if (*p == ',') p++; type = *p; if (gdb_current_syscall_cb) gdb_current_syscall_cb(s->env, ret, err); if (type == 'C') { put_packet(s, "T02"); } else { } } case 'g': reg_size = cpu_gdb_read_registers(env, mem_buf); memtohex(buf, mem_buf, reg_size); put_packet(s, buf); case 'G': registers = (void *)mem_buf; len = strlen(p) / 2; hextomem((uint8_t *)registers, p, len); cpu_gdb_write_registers(env, mem_buf, len); case 'm': addr = strtoull(p, (char **)&p, 16); if (*p == ',') p++; len = strtoull(p, NULL, 16); if (cpu_memory_rw_debug(env, addr, mem_buf, len, 0) != 0) { put_packet (s, "E14"); } else { memtohex(buf, mem_buf, len); put_packet(s, buf); } case 'M': addr = strtoull(p, (char **)&p, 16); if (*p == ',') p++; len = strtoull(p, (char **)&p, 16); if (*p == ':') p++; hextomem(mem_buf, p, len); if (cpu_memory_rw_debug(env, addr, mem_buf, len, 1) != 0) put_packet(s, "E14"); else case 'Z': type = strtoul(p, (char **)&p, 16); if (*p == ',') p++; addr = strtoull(p, (char **)&p, 16); if (*p == ',') p++; len = strtoull(p, (char **)&p, 16); if (type == 0 || type == 1) { if (cpu_breakpoint_insert(env, addr) < 0) goto breakpoint_error; #ifndef CONFIG_USER_ONLY } else if (type == 2) { if (cpu_watchpoint_insert(env, addr) < 0) goto breakpoint_error; #endif } else { breakpoint_error: put_packet(s, "E22"); } case 'z': type = strtoul(p, (char **)&p, 16); if (*p == ',') p++; addr = strtoull(p, (char **)&p, 16); if (*p == ',') p++; len = strtoull(p, (char **)&p, 16); if (type == 0 || type == 1) { cpu_breakpoint_remove(env, addr); #ifndef CONFIG_USER_ONLY } else if (type == 2) { cpu_watchpoint_remove(env, addr); #endif } else { goto breakpoint_error; } case 'q': case 'Q': /* parse any 'q' packets here */ if (!strcmp(p,"qemu.sstepbits")) { /* Query Breakpoint bit definitions */ sprintf(buf,"ENABLE=%x,NOIRQ=%x,NOTIMER=%x", SSTEP_ENABLE, SSTEP_NOIRQ, SSTEP_NOTIMER); put_packet(s, buf); } 
else if (strncmp(p,"qemu.sstep",10) == 0) { /* Display or change the sstep_flags */ p += 10; if (*p != '=') { /* Display current setting */ sprintf(buf,"0x%x", sstep_flags); put_packet(s, buf); } p++; type = strtoul(p, (char **)&p, 16); sstep_flags = type; } #ifdef CONFIG_LINUX_USER else if (strncmp(p, "Offsets", 7) == 0) { TaskState *ts = env->opaque; sprintf(buf, "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx ";Bss=" TARGET_ABI_FMT_lx, ts->info->code_offset, ts->info->data_offset, ts->info->data_offset); put_packet(s, buf); } #endif /* Fall through. */ default: /* put empty packet */ buf[0] = '\0'; put_packet(s, buf); } return RS_IDLE; }
target: 1, idx: 275
static void decode_ac_filter(WmallDecodeCtx *s) { int i; s->acfilter_order = get_bits(&s->gb, 4) + 1; s->acfilter_scaling = get_bits(&s->gb, 4); for(i = 0; i < s->acfilter_order; i++) { s->acfilter_coeffs[i] = get_bits(&s->gb, s->acfilter_scaling) + 1; } }
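Bit layout read above, for reference: 4 bits give the filter order (stored minus one, so 1..16), 4 bits give the coefficient width, then each coefficient is read as a raw value plus one; e.g. acfilter_scaling = 3 and a raw value of 5 yield a coefficient of 6.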
target: 1, idx: 276
static void check_loopfilter(void) { LOCAL_ALIGNED_32(uint8_t, base0, [32 + 16 * 16 * 2]); LOCAL_ALIGNED_32(uint8_t, base1, [32 + 16 * 16 * 2]); VP9DSPContext dsp; int dir, wd, wd2, bit_depth; static const char *const dir_name[2] = { "h", "v" }; static const int E[2] = { 20, 28 }, I[2] = { 10, 16 }; static const int H[2] = { 7, 11 }, F[2] = { 1, 1 }; declare_func(void, uint8_t *dst, ptrdiff_t stride, int E, int I, int H); for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) { ff_vp9dsp_init(&dsp, bit_depth, 0); for (dir = 0; dir < 2; dir++) { int midoff = (dir ? 8 * 8 : 8) * SIZEOF_PIXEL; int midoff_aligned = (dir ? 8 * 8 : 16) * SIZEOF_PIXEL; uint8_t *buf0 = base0 + midoff_aligned; uint8_t *buf1 = base1 + midoff_aligned; for (wd = 0; wd < 3; wd++) { // 4/8/16wd_8px if (check_func(dsp.loop_filter_8[wd][dir], "vp9_loop_filter_%s_%d_8_%dbpp", dir_name[dir], 4 << wd, bit_depth)) { randomize_buffers(0, 0, 8); memcpy(buf1 - midoff, buf0 - midoff, 16 * 8 * SIZEOF_PIXEL); call_ref(buf0, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]); call_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]); if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 8 * SIZEOF_PIXEL)) fail(); bench_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]); } } midoff = (dir ? 16 * 8 : 8) * SIZEOF_PIXEL; midoff_aligned = (dir ? 16 * 8 : 16) * SIZEOF_PIXEL; buf0 = base0 + midoff_aligned; buf1 = base1 + midoff_aligned; // 16wd_16px loopfilter if (check_func(dsp.loop_filter_16[dir], "vp9_loop_filter_%s_16_16_%dbpp", dir_name[dir], bit_depth)) { randomize_buffers(0, 0, 16); randomize_buffers(0, 8, 16); memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL); call_ref(buf0, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]); call_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]); if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL)) fail(); bench_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]); } for (wd = 0; wd < 2; wd++) { for (wd2 = 0; wd2 < 2; wd2++) { // mix2 loopfilter if (check_func(dsp.loop_filter_mix2[wd][wd2][dir], "vp9_loop_filter_mix2_%s_%d%d_16_%dbpp", dir_name[dir], 4 << wd, 4 << wd2, bit_depth)) { randomize_buffers(0, 0, 16); randomize_buffers(1, 8, 16); memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL); #define M(a) (((a)[1] << 8) | (a)[0]) call_ref(buf0, 16 * SIZEOF_PIXEL, M(E), M(I), M(H)); call_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H)); if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL)) fail(); bench_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H)); #undef M } } } } } report("loopfilter"); }
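The M() macro packs the two per-size thresholds into a single argument for the mix2 filters: with E = {20, 28}, M(E) evaluates to (28 << 8) | 20 == 0x1C14.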
target: 1, idx: 277
av_cold void ff_dsputil_init_x86(DSPContext *c, AVCodecContext *avctx) { int cpu_flags = av_get_cpu_flags(); #if HAVE_7REGS && HAVE_INLINE_ASM if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_CMOV) c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_cmov; #endif if (X86_MMX(cpu_flags)) { #if HAVE_INLINE_ASM const int idct_algo = avctx->idct_algo; if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) { if (idct_algo == FF_IDCT_AUTO || idct_algo == FF_IDCT_SIMPLEMMX) { c->idct_put = ff_simple_idct_put_mmx; c->idct_add = ff_simple_idct_add_mmx; c->idct = ff_simple_idct_mmx; c->idct_permutation_type = FF_SIMPLE_IDCT_PERM; } else if (idct_algo == FF_IDCT_XVIDMMX) { if (cpu_flags & AV_CPU_FLAG_SSE2) { c->idct_put = ff_idct_xvid_sse2_put; c->idct_add = ff_idct_xvid_sse2_add; c->idct = ff_idct_xvid_sse2; c->idct_permutation_type = FF_SSE2_IDCT_PERM; } else if (cpu_flags & AV_CPU_FLAG_MMXEXT) { c->idct_put = ff_idct_xvid_mmxext_put; c->idct_add = ff_idct_xvid_mmxext_add; c->idct = ff_idct_xvid_mmxext; } else { c->idct_put = ff_idct_xvid_mmx_put; c->idct_add = ff_idct_xvid_mmx_add; c->idct = ff_idct_xvid_mmx; } } } #endif /* HAVE_INLINE_ASM */ dsputil_init_mmx(c, avctx, cpu_flags); } if (X86_MMXEXT(cpu_flags)) dsputil_init_mmxext(c, avctx, cpu_flags); if (X86_SSE(cpu_flags)) dsputil_init_sse(c, avctx, cpu_flags); if (X86_SSE2(cpu_flags)) dsputil_init_sse2(c, avctx, cpu_flags); if (EXTERNAL_SSSE3(cpu_flags)) dsputil_init_ssse3(c, avctx, cpu_flags); if (EXTERNAL_SSE4(cpu_flags)) dsputil_init_sse4(c, avctx, cpu_flags); if (CONFIG_ENCODERS) ff_dsputilenc_init_mmx(c, avctx); }
target: 1, idx: 278
static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce, const float lambda) { int q, w, w2, g, start = 0; int i, j; int idx; TrellisPath paths[TRELLIS_STAGES][TRELLIS_STATES]; int bandaddr[TRELLIS_STAGES]; int minq; float mincost; float q0f = FLT_MAX, q1f = 0.0f, qnrgf = 0.0f; int q0, q1, qcnt = 0; for (i = 0; i < 1024; i++) { float t = fabsf(sce->coeffs[i]); if (t > 0.0f) { q0f = FFMIN(q0f, t); q1f = FFMAX(q1f, t); qnrgf += t*t; qcnt++; } } if (!qcnt) { memset(sce->sf_idx, 0, sizeof(sce->sf_idx)); memset(sce->zeroes, 1, sizeof(sce->zeroes)); return; } //minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped q0 = coef2minsf(q0f); //maximum scalefactor index is when maximum coefficient after quantizing is still not zero q1 = coef2maxsf(q1f); if (q1 - q0 > 60) { int q0low = q0; int q1high = q1; //minimum scalefactor index is when maximum nonzero coefficient after quantizing is not clipped int qnrg = av_clip_uint8(log2f(sqrtf(qnrgf/qcnt))*4 - 31 + SCALE_ONE_POS - SCALE_DIV_512); q1 = qnrg + 30; q0 = qnrg - 30; if (q0 < q0low) { q1 += q0low - q0; q0 = q0low; } else if (q1 > q1high) { q0 -= q1 - q1high; q1 = q1high; } } for (i = 0; i < TRELLIS_STATES; i++) { paths[0][i].cost = 0.0f; paths[0][i].prev = -1; } for (j = 1; j < TRELLIS_STAGES; j++) { for (i = 0; i < TRELLIS_STATES; i++) { paths[j][i].cost = INFINITY; paths[j][i].prev = -2; } } idx = 1; abs_pow34_v(s->scoefs, sce->coeffs, 1024); for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = w*128; for (g = 0; g < sce->ics.num_swb; g++) { const float *coefs = &sce->coeffs[start]; float qmin, qmax; int nz = 0; bandaddr[idx] = w * 16 + g; qmin = INT_MAX; qmax = 0.0f; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g]; if (band->energy <= band->threshold || band->threshold == 0.0f) { sce->zeroes[(w+w2)*16+g] = 1; continue; } sce->zeroes[(w+w2)*16+g] = 0; nz = 1; for (i = 0; i < sce->ics.swb_sizes[g]; i++) { float t = fabsf(coefs[w2*128+i]); if (t > 0.0f) qmin = FFMIN(qmin, t); qmax = FFMAX(qmax, t); } } if (nz) { int minscale, maxscale; float minrd = INFINITY; float maxval; //minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped minscale = coef2minsf(qmin); //maximum scalefactor index is when maximum coefficient after quantizing is still not zero maxscale = coef2maxsf(qmax); minscale = av_clip(minscale - q0, 0, TRELLIS_STATES - 1); maxscale = av_clip(maxscale - q0, 0, TRELLIS_STATES); maxval = find_max_val(sce->ics.group_len[w], sce->ics.swb_sizes[g], s->scoefs+start); for (q = minscale; q < maxscale; q++) { float dist = 0; int cb = find_min_book(maxval, sce->sf_idx[w*16+g]); for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g]; dist += quantize_band_cost(s, coefs + w2*128, s->scoefs + start + w2*128, sce->ics.swb_sizes[g], q + q0, cb, lambda / band->threshold, INFINITY, NULL, NULL, 0); } minrd = FFMIN(minrd, dist); for (i = 0; i < q1 - q0; i++) { float cost; cost = paths[idx - 1][i].cost + dist + ff_aac_scalefactor_bits[q - i + SCALE_DIFF_ZERO]; if (cost < paths[idx][q].cost) { paths[idx][q].cost = cost; paths[idx][q].prev = i; } } } } else { for (q = 0; q < q1 - q0; q++) { paths[idx][q].cost = paths[idx - 1][q].cost + 1; paths[idx][q].prev = q; } } sce->zeroes[w*16+g] = !nz; start += sce->ics.swb_sizes[g]; idx++; } } idx--; mincost = paths[idx][0].cost; minq = 0; for 
(i = 1; i < TRELLIS_STATES; i++) { if (paths[idx][i].cost < mincost) { mincost = paths[idx][i].cost; minq = i; } } while (idx) { sce->sf_idx[bandaddr[idx]] = minq + q0; minq = paths[idx][minq].prev; idx--; } //set the same quantizers inside window groups for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) for (g = 0; g < sce->ics.num_swb; g++) for (w2 = 1; w2 < sce->ics.group_len[w]; w2++) sce->sf_idx[(w+w2)*16+g] = sce->sf_idx[w*16+g]; }
target: 1, idx: 279
QDict *qdict_get_qdict(const QDict *qdict, const char *key) { return qobject_to_qdict(qdict_get_obj(qdict, key, QTYPE_QDICT)); }
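A hypothetical usage sketch (the key name is illustrative); note that behaviour on a missing key depends on qdict_get_obj, which in some QEMU versions asserts that the key exists and has the expected type rather than returning NULL.

    /* assumes "options" is known to be present and a dict */
    QDict *opts = qdict_get_qdict(args, "options");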
target: 1, idx: 281
static int guess_ni_flag(AVFormatContext *s){ int i; int64_t last_start=0; int64_t first_end= INT64_MAX; int64_t oldpos= avio_tell(s->pb); int *idx; int64_t min_pos, pos; for(i=0; i<s->nb_streams; i++){ AVStream *st = s->streams[i]; int n= st->nb_index_entries; unsigned int size; if(n <= 0) continue; if(n >= 2){ int64_t pos= st->index_entries[0].pos; avio_seek(s->pb, pos + 4, SEEK_SET); size= avio_rl32(s->pb); if(pos + size > st->index_entries[1].pos) last_start= INT64_MAX; } if(st->index_entries[0].pos > last_start) last_start= st->index_entries[0].pos; if(st->index_entries[n-1].pos < first_end) first_end= st->index_entries[n-1].pos; } avio_seek(s->pb, oldpos, SEEK_SET); if (last_start > first_end) return 1; idx= av_mallocz(sizeof(*idx) * s->nb_streams); for (min_pos=pos=0; min_pos!=INT64_MAX; pos= min_pos+1) { int64_t max_dts = INT64_MIN/2, min_dts= INT64_MAX/2; min_pos = INT64_MAX; for (i=0; i<s->nb_streams; i++) { AVStream *st = s->streams[i]; int n= st->nb_index_entries; while (idx[i]<n && st->index_entries[idx[i]].pos < pos) idx[i]++; if (idx[i] < n) { min_dts = FFMIN(min_dts, av_rescale_q(st->index_entries[idx[i]].timestamp, st->time_base, AV_TIME_BASE_Q)); min_pos = FFMIN(min_pos, st->index_entries[idx[i]].pos); } if (idx[i]) max_dts = FFMAX(max_dts, av_rescale_q(st->index_entries[idx[i]-1].timestamp, st->time_base, AV_TIME_BASE_Q)); } if(max_dts - min_dts > 2*AV_TIME_BASE) { av_free(idx); return 1; } } av_free(idx); return 0; }
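What the two passes establish: if the earliest last-chunk position across streams (first_end) precedes the latest first-chunk position (last_start), the chunks of different streams occupy disjoint file regions and the AVI is non-interleaved; otherwise a walk in file-offset order flags the file as non-interleaved when the DTS spread at any point exceeds two seconds (2*AV_TIME_BASE).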
target: 1, idx: 282
static int nut_write_trailer(AVFormatContext *s) { NUTContext *nut = s->priv_data; AVIOContext *bc = s->pb, *dyn_bc; int i, ret; while (nut->header_count < 3) write_headers(s, bc); ret = avio_open_dyn_buf(&dyn_bc); if (ret >= 0 && nut->sp_count) { av_assert1(nut->write_index); write_index(nut, dyn_bc); put_packet(nut, bc, dyn_bc, 1, INDEX_STARTCODE); } ff_nut_free_sp(nut); for (i=0; i<s->nb_streams; i++) av_freep(&nut->stream[i].keyframe_pts); av_freep(&nut->stream); av_freep(&nut->chapter); av_freep(&nut->time_base); return 0; }
target: 1, idx: 283
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; MemTxAttrs attrs = {}; AddressSpace *as; attrs.secure = is_secure; as = arm_addressspace(cs, attrs); addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); if (fi->s1ptw) { return 0; } if (regime_translation_big_endian(env, mmu_idx)) { return address_space_ldl_be(as, addr, attrs, NULL); } else { return address_space_ldl_le(as, addr, attrs, NULL); } }
target: 0, idx: 284
void aio_context_acquire(AioContext *ctx) { qemu_rec_mutex_lock(&ctx->lock); }
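A minimal usage sketch: the lock is a recursive mutex, so nested acquisition from the same thread is safe, and each acquire is paired with aio_context_release.

    aio_context_acquire(ctx);
    /* operate on objects tied to this AioContext */
    aio_context_release(ctx);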
target: 0, idx: 285
static void qjson_finalizefn(Object *obj) { QJSON *json = QJSON(obj); qobject_decref(QOBJECT(json->str)); }
target: 0, idx: 286
static void cchip_write(void *opaque, hwaddr addr, uint64_t v32, unsigned size) { TyphoonState *s = opaque; uint64_t val, oldval, newval; if (addr & 4) { val = v32 << 32 | s->latch_tmp; addr ^= 4; } else { s->latch_tmp = v32; return; } switch (addr) { case 0x0000: /* CSC: Cchip System Configuration Register. */ /* All sorts of data here; nothing relevant RW. */ break; case 0x0040: /* MTR: Memory Timing Register. */ /* All sorts of stuff related to real DRAM. */ break; case 0x0080: /* MISC: Miscellaneous Register. */ newval = oldval = s->cchip.misc; newval &= ~(val & 0x10000ff0); /* W1C fields */ if (val & 0x100000) { newval &= ~0xff0000ull; /* ACL clears ABT and ABW */ } else { newval |= val & 0x00f00000; /* ABT field is W1S */ if ((newval & 0xf0000) == 0) { newval |= val & 0xf0000; /* ABW field is W1S iff zero */ } } newval |= (val & 0xf000) >> 4; /* IPREQ field sets IPINTR. */ newval &= ~0xf0000000000ull; /* WO and RW fields */ newval |= val & 0xf0000000000ull; s->cchip.misc = newval; /* Pass on changes to IPI and ITI state. */ if ((newval ^ oldval) & 0xff0) { int i; for (i = 0; i < 4; ++i) { AlphaCPU *cpu = s->cchip.cpu[i]; if (cpu != NULL) { CPUState *cs = CPU(cpu); /* IPI can be either cleared or set by the write. */ if (newval & (1 << (i + 8))) { cpu_interrupt(cs, CPU_INTERRUPT_SMP); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_SMP); } /* ITI can only be cleared by the write. */ if ((newval & (1 << (i + 4))) == 0) { cpu_reset_interrupt(cs, CPU_INTERRUPT_TIMER); } } } } break; case 0x00c0: /* MPD: Memory Presence Detect Register. */ break; case 0x0100: /* AAR0 */ case 0x0140: /* AAR1 */ case 0x0180: /* AAR2 */ case 0x01c0: /* AAR3 */ /* AAR: Array Address Register. */ /* All sorts of information about DRAM. */ break; case 0x0200: /* DIM0 */ /* DIM: Device Interrupt Mask Register, CPU0. */ s->cchip.dim[0] = val; cpu_irq_change(s->cchip.cpu[0], val & s->cchip.drir); break; case 0x0240: /* DIM1 */ /* DIM: Device Interrupt Mask Register, CPU1. */ s->cchip.dim[0] = val; cpu_irq_change(s->cchip.cpu[1], val & s->cchip.drir); break; case 0x0280: /* DIR0 (RO) */ case 0x02c0: /* DIR1 (RO) */ case 0x0300: /* DRIR (RO) */ break; case 0x0340: /* PRBEN: Probe Enable Register. */ break; case 0x0380: /* IIC0 */ s->cchip.iic[0] = val & 0xffffff; break; case 0x03c0: /* IIC1 */ s->cchip.iic[1] = val & 0xffffff; break; case 0x0400: /* MPR0 */ case 0x0440: /* MPR1 */ case 0x0480: /* MPR2 */ case 0x04c0: /* MPR3 */ /* MPR: Memory Programming Register. */ break; case 0x0580: /* TTR: TIGbus Timing Register. */ /* All sorts of stuff related to interrupt delivery timings. */ break; case 0x05c0: /* TDR: TIGbug Device Timing Register. */ break; case 0x0600: /* DIM2: Device Interrupt Mask Register, CPU2. */ s->cchip.dim[2] = val; cpu_irq_change(s->cchip.cpu[2], val & s->cchip.drir); break; case 0x0640: /* DIM3: Device Interrupt Mask Register, CPU3. */ s->cchip.dim[3] = val; cpu_irq_change(s->cchip.cpu[3], val & s->cchip.drir); break; case 0x0680: /* DIR2 (RO) */ case 0x06c0: /* DIR3 (RO) */ break; case 0x0700: /* IIC2 */ s->cchip.iic[2] = val & 0xffffff; break; case 0x0740: /* IIC3 */ s->cchip.iic[3] = val & 0xffffff; break; case 0x0780: /* PWR: Power Management Control. */ break; case 0x0c00: /* CMONCTLA */ case 0x0c40: /* CMONCTLB */ case 0x0c80: /* CMONCNT01 */ case 0x0cc0: /* CMONCNT23 */ break; default: cpu_unassigned_access(current_cpu, addr, true, false, 0, size); return; } }
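The latch at the top implements 64-bit registers written as two 32-bit halves: a write to the low word (addr bit 2 clear) is only stored in latch_tmp, and the register update happens when the high word arrives, with addr ^= 4 mapping it back to the register base; e.g. writing at 0x80 latches the low word, and the following write at 0x84 applies val = (high32 << 32) | latched_low to MISC.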
0
287
static void visitor_output_setup(TestOutputVisitorData *data, const void *unused) { data->qov = qmp_output_visitor_new(); g_assert(data->qov != NULL); data->ov = qmp_output_get_visitor(data->qov); g_assert(data->ov != NULL); }
0
288
static void group_exponents(AC3EncodeContext *s) { int blk, ch, i; int group_size, nb_groups, bit_count; uint8_t *p; int delta0, delta1, delta2; int exp0, exp1; bit_count = 0; for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { AC3Block *block = &s->blocks[blk]; for (ch = 0; ch < s->channels; ch++) { if (s->exp_strategy[ch][blk] == EXP_REUSE) continue; group_size = s->exp_strategy[ch][blk] + (s->exp_strategy[ch][blk] == EXP_D45); nb_groups = exponent_group_tab[s->exp_strategy[ch][blk]-1][s->nb_coefs[ch]]; bit_count += 4 + (nb_groups * 7); p = block->exp[ch]; /* DC exponent */ exp1 = *p++; block->grouped_exp[ch][0] = exp1; /* remaining exponents are delta encoded */ for (i = 1; i <= nb_groups; i++) { /* merge three delta in one code */ exp0 = exp1; exp1 = p[0]; p += group_size; delta0 = exp1 - exp0 + 2; exp0 = exp1; exp1 = p[0]; p += group_size; delta1 = exp1 - exp0 + 2; exp0 = exp1; exp1 = p[0]; p += group_size; delta2 = exp1 - exp0 + 2; block->grouped_exp[ch][i] = ((delta0 * 5 + delta1) * 5) + delta2; } } } s->exponent_bits = bit_count; }
0
290
void sd_write_data(SDState *sd, uint8_t value) { int i; if (!sd->bdrv || !bdrv_is_inserted(sd->bdrv) || !sd->enable) return; if (sd->state != sd_receivingdata_state) { fprintf(stderr, "sd_write_data: not in Receiving-Data state\n"); return; } if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION)) return; switch (sd->current_cmd) { case 24: /* CMD24: WRITE_SINGLE_BLOCK */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sd->blk_len) { /* TODO: Check CRC before committing */ sd->state = sd_programming_state; BLK_WRITE_BLOCK(sd->data_start, sd->data_offset); sd->blk_written ++; sd->csd[14] |= 0x40; /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; } break; case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */ if (sd->data_offset == 0) { /* Start of the block - let's check the address is valid */ if (sd->data_start + sd->blk_len > sd->size) { sd->card_status |= ADDRESS_ERROR; break; } if (sd_wp_addr(sd, sd->data_start)) { sd->card_status |= WP_VIOLATION; break; } } sd->data[sd->data_offset++] = value; if (sd->data_offset >= sd->blk_len) { /* TODO: Check CRC before committing */ sd->state = sd_programming_state; BLK_WRITE_BLOCK(sd->data_start, sd->data_offset); sd->blk_written++; sd->data_start += sd->blk_len; sd->data_offset = 0; sd->csd[14] |= 0x40; /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_receivingdata_state; } break; case 26: /* CMD26: PROGRAM_CID */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sizeof(sd->cid)) { /* TODO: Check CRC before committing */ sd->state = sd_programming_state; for (i = 0; i < sizeof(sd->cid); i ++) if ((sd->cid[i] | 0x00) != sd->data[i]) sd->card_status |= CID_CSD_OVERWRITE; if (!(sd->card_status & CID_CSD_OVERWRITE)) for (i = 0; i < sizeof(sd->cid); i ++) { sd->cid[i] |= 0x00; sd->cid[i] &= sd->data[i]; } /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; } break; case 27: /* CMD27: PROGRAM_CSD */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sizeof(sd->csd)) { /* TODO: Check CRC before committing */ sd->state = sd_programming_state; for (i = 0; i < sizeof(sd->csd); i ++) if ((sd->csd[i] | sd_csd_rw_mask[i]) != (sd->data[i] | sd_csd_rw_mask[i])) sd->card_status |= CID_CSD_OVERWRITE; /* Copy flag (OTP) & Permanent write protect */ if (sd->csd[14] & ~sd->data[14] & 0x60) sd->card_status |= CID_CSD_OVERWRITE; if (!(sd->card_status & CID_CSD_OVERWRITE)) for (i = 0; i < sizeof(sd->csd); i ++) { sd->csd[i] |= sd_csd_rw_mask[i]; sd->csd[i] &= sd->data[i]; } /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; } break; case 42: /* CMD42: LOCK_UNLOCK */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sd->blk_len) { /* TODO: Check CRC before committing */ sd->state = sd_programming_state; sd_lock_command(sd); /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; } break; case 56: /* CMD56: GEN_CMD */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sd->blk_len) { APP_WRITE_BLOCK(sd->data_start, sd->data_offset); sd->state = sd_transfer_state; } break; default: fprintf(stderr, "sd_write_data: unknown command\n"); break; } }
0
291
int event_notifier_get_fd(EventNotifier *e) { return e->fd; }
0
293
opts_type_size(Visitor *v, const char *name, uint64_t *obj, Error **errp) { OptsVisitor *ov = to_ov(v); const QemuOpt *opt; int64_t val; opt = lookup_scalar(ov, name, errp); if (!opt) { return; } val = qemu_strtosz(opt->str ? opt->str : "", NULL); if (val < 0) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name, "a size value representable as a non-negative int64"); return; } *obj = val; processed(ov, name); }
0
294
static void handle_mousemotion(SDL_Event *ev) { int max_x, max_y; struct sdl2_console *scon = get_scon_from_window(ev->motion.windowID); if (qemu_input_is_absolute() || absolute_enabled) { int scr_w, scr_h; SDL_GetWindowSize(scon->real_window, &scr_w, &scr_h); max_x = scr_w - 1; max_y = scr_h - 1; if (gui_grab && (ev->motion.x == 0 || ev->motion.y == 0 || ev->motion.x == max_x || ev->motion.y == max_y)) { sdl_grab_end(scon); } if (!gui_grab && (ev->motion.x > 0 && ev->motion.x < max_x && ev->motion.y > 0 && ev->motion.y < max_y)) { sdl_grab_start(scon); } } if (gui_grab || qemu_input_is_absolute() || absolute_enabled) { sdl_send_mouse_event(scon, ev->motion.xrel, ev->motion.yrel, ev->motion.x, ev->motion.y, ev->motion.state); } }
0
295
static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque, uint64_t flags) { Error *local_err = NULL, **errp = &local_err; QEMUFileRDMA *rfile = opaque; RDMAContext *rdma = rfile->rdma; RDMAControlHeader head = { .len = 0, .repeat = 1 }; int ret = 0; CHECK_ERROR_STATE(); qemu_fflush(f); ret = qemu_rdma_drain_cq(f, rdma); if (ret < 0) { goto err; } if (flags == RAM_CONTROL_SETUP) { RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT }; RDMALocalBlocks *local = &rdma->local_ram_blocks; int reg_result_idx, i, j, nb_remote_blocks; head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST; DPRINTF("Sending registration setup for ram blocks...\n"); /* * Make sure that we parallelize the pinning on both sides. * For very large guests, doing this serially takes a really * long time, so we have to 'interleave' the pinning locally * with the control messages by performing the pinning on this * side before we receive the control response from the other * side that the pinning has completed. */ ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp, &reg_result_idx, rdma->pin_all ? qemu_rdma_reg_whole_ram_blocks : NULL); if (ret < 0) { ERROR(errp, "receiving remote info!"); return ret; } qemu_rdma_move_header(rdma, reg_result_idx, &resp); memcpy(rdma->block, rdma->wr_data[reg_result_idx].control_curr, resp.len); nb_remote_blocks = resp.len / sizeof(RDMARemoteBlock); /* * The protocol uses two different sets of rkeys (mutually exclusive): * 1. One key to represent the virtual address of the entire ram block. * (dynamic chunk registration disabled - pin everything with one rkey.) * 2. One to represent individual chunks within a ram block. * (dynamic chunk registration enabled - pin individual chunks.) * * Once the capability is successfully negotiated, the destination transmits * the keys to use (or sends them later) including the virtual addresses * and then propagates the remote ram block descriptions to his local copy. */ if (local->nb_blocks != nb_remote_blocks) { ERROR(errp, "ram blocks mismatch #1! " "Your QEMU command line parameters are probably " "not identical on both the source and destination."); return -EINVAL; } for (i = 0; i < nb_remote_blocks; i++) { network_to_remote_block(&rdma->block[i]); /* search local ram blocks */ for (j = 0; j < local->nb_blocks; j++) { if (rdma->block[i].offset != local->block[j].offset) { continue; } if (rdma->block[i].length != local->block[j].length) { ERROR(errp, "ram blocks mismatch #2! " "Your QEMU command line parameters are probably " "not identical on both the source and destination."); return -EINVAL; } local->block[j].remote_host_addr = rdma->block[i].remote_host_addr; local->block[j].remote_rkey = rdma->block[i].remote_rkey; break; } if (j >= local->nb_blocks) { ERROR(errp, "ram blocks mismatch #3! " "Your QEMU command line parameters are probably " "not identical on both the source and destination."); return -EINVAL; } } } DDDPRINTF("Sending registration finish %" PRIu64 "...\n", flags); head.type = RDMA_CONTROL_REGISTER_FINISHED; ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL); if (ret < 0) { goto err; } return 0; err: rdma->error_state = ret; return ret; }
0
296
static void xen_platform_ioport_writeb(void *opaque, uint32_t addr, uint32_t val) { PCIXenPlatformState *s = opaque; addr &= 0xff; val &= 0xff; switch (addr) { case 0: /* Platform flags */ platform_fixed_ioport_writeb(opaque, XEN_PLATFORM_IOPORT, val); break; case 8: log_writeb(s, val); break; default: break; } }
0
297
static uint32_t nabm_readb (void *opaque, uint32_t addr) { PCIAC97LinkState *d = opaque; AC97LinkState *s = &d->ac97; AC97BusMasterRegs *r = NULL; uint32_t index = addr - s->base[1]; uint32_t val = ~0U; switch (index) { case CAS: dolog ("CAS %d\n", s->cas); val = s->cas; s->cas = 1; break; case PI_CIV: case PO_CIV: case MC_CIV: r = &s->bm_regs[GET_BM (index)]; val = r->civ; dolog ("CIV[%d] -> %#x\n", GET_BM (index), val); break; case PI_LVI: case PO_LVI: case MC_LVI: r = &s->bm_regs[GET_BM (index)]; val = r->lvi; dolog ("LVI[%d] -> %#x\n", GET_BM (index), val); break; case PI_PIV: case PO_PIV: case MC_PIV: r = &s->bm_regs[GET_BM (index)]; val = r->piv; dolog ("PIV[%d] -> %#x\n", GET_BM (index), val); break; case PI_CR: case PO_CR: case MC_CR: r = &s->bm_regs[GET_BM (index)]; val = r->cr; dolog ("CR[%d] -> %#x\n", GET_BM (index), val); break; case PI_SR: case PO_SR: case MC_SR: r = &s->bm_regs[GET_BM (index)]; val = r->sr & 0xff; dolog ("SRb[%d] -> %#x\n", GET_BM (index), val); break; default: dolog ("U nabm readb %#x -> %#x\n", addr, val); break; } return val; }
0
299
static int ac3_encode_frame(AVCodecContext *avctx, unsigned char *frame, int buf_size, void *data) { AC3EncodeContext *s = avctx->priv_data; const int16_t *samples = data; int16_t planar_samples[AC3_MAX_CHANNELS][AC3_BLOCK_SIZE+AC3_FRAME_SIZE]; int32_t mdct_coef[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; uint8_t exp[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; uint8_t exp_strategy[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS]; uint8_t encoded_exp[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; uint8_t num_exp_groups[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS]; uint8_t grouped_exp[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_EXP_GROUPS]; uint8_t bap[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; int8_t exp_shift[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS]; uint16_t qmant[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; int frame_bits; if (s->bit_alloc.sr_code == 1) adjust_frame_size(s); deinterleave_input_samples(s, samples, planar_samples); apply_mdct(s, planar_samples, exp_shift, mdct_coef); frame_bits = process_exponents(s, mdct_coef, exp_shift, exp, exp_strategy, encoded_exp, num_exp_groups, grouped_exp); compute_bit_allocation(s, bap, encoded_exp, exp_strategy, frame_bits); quantize_mantissas(s, mdct_coef, exp_shift, encoded_exp, bap, qmant); output_frame(s, frame, exp_strategy, num_exp_groups, grouped_exp, bap, qmant); return s->frame_size; }
0
302
void cpu_physical_memory_write_rom(target_phys_addr_t addr, const uint8_t *buf, int len) { AddressSpaceDispatch *d = address_space_memory.dispatch; int l; uint8_t *ptr; target_phys_addr_t page; MemoryRegionSection *section; while (len > 0) { page = addr & TARGET_PAGE_MASK; l = (page + TARGET_PAGE_SIZE) - addr; if (l > len) l = len; section = phys_page_find(d, page >> TARGET_PAGE_BITS); if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { /* do nothing */ } else { unsigned long addr1; addr1 = memory_region_get_ram_addr(section->mr) + memory_region_section_addr(section, addr); /* ROM/RAM case */ ptr = qemu_get_ram_ptr(addr1); memcpy(ptr, buf, l); invalidate_and_set_dirty(addr1, l); qemu_put_ram_ptr(ptr); } len -= l; buf += l; addr += l; } }
1
303
av_cold int ff_lpc_init(LPCContext *s, int blocksize, int max_order, enum FFLPCType lpc_type) { s->blocksize = blocksize; s->max_order = max_order; s->lpc_type = lpc_type; if (lpc_type == FF_LPC_TYPE_LEVINSON) { s->windowed_samples = av_mallocz((blocksize + max_order + 2) * sizeof(*s->windowed_samples)); if (!s->windowed_samples) return AVERROR(ENOMEM); } else { s->windowed_samples = NULL; } s->lpc_apply_welch_window = lpc_apply_welch_window_c; s->lpc_compute_autocorr = lpc_compute_autocorr_c; if (HAVE_MMX) ff_lpc_init_x86(s); return 0; }
0
306
static int audio_decode_frame(VideoState *is, double *pts_ptr) { AVPacket *pkt_temp = &is->audio_pkt_temp; AVPacket *pkt = &is->audio_pkt; AVCodecContext *dec = is->audio_st->codec; int len1, len2, data_size, resampled_data_size; int64_t dec_channel_layout; int got_frame; double pts; int new_packet = 0; int flush_complete = 0; int wanted_nb_samples; for (;;) { /* NOTE: the audio packet can contain several frames */ while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) { if (!is->frame) { if (!(is->frame = avcodec_alloc_frame())) return AVERROR(ENOMEM); } else avcodec_get_frame_defaults(is->frame); if (is->paused) return -1; if (flush_complete) break; new_packet = 0; len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp); if (len1 < 0) { /* if error, we skip the frame */ pkt_temp->size = 0; break; } pkt_temp->data += len1; pkt_temp->size -= len1; if (!got_frame) { /* stop sending empty packets if the decoder is finished */ if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY) flush_complete = 1; continue; } data_size = av_samples_get_buffer_size(NULL, dec->channels, is->frame->nb_samples, dec->sample_fmt, 1); dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels); wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples); if (dec->sample_fmt != is->audio_src.fmt || dec_channel_layout != is->audio_src.channel_layout || dec->sample_rate != is->audio_src.freq || (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) { if (is->swr_ctx) swr_free(&is->swr_ctx); is->swr_ctx = swr_alloc_set_opts(NULL, is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq, dec_channel_layout, dec->sample_fmt, dec->sample_rate, 0, NULL); if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) { fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n", dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt), dec->channels, is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels); break; } is->audio_src.channel_layout = dec_channel_layout; is->audio_src.channels = dec->channels; is->audio_src.freq = dec->sample_rate; is->audio_src.fmt = dec->sample_fmt; } resampled_data_size = data_size; if (is->swr_ctx) { const uint8_t *in[] = { is->frame->data[0] }; uint8_t *out[] = {is->audio_buf2}; if (wanted_nb_samples != is->frame->nb_samples) { if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / dec->sample_rate, wanted_nb_samples * is->audio_tgt.freq / dec->sample_rate) < 0) { fprintf(stderr, "swr_set_compensation() failed\n"); break; } } len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt), in, is->frame->nb_samples); if (len2 < 0) { fprintf(stderr, "swr_convert() failed\n"); break; } if (len2 == sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt)) { fprintf(stderr, "warning: audio buffer is probably too small\n"); swr_init(is->swr_ctx); } is->audio_buf = is->audio_buf2; resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt); } else { is->audio_buf = is->frame->data[0]; } /* if no pts, then compute it */ pts = is->audio_clock; *pts_ptr = pts; is->audio_clock += (double)data_size / (dec->channels * dec->sample_rate * 
av_get_bytes_per_sample(dec->sample_fmt)); #ifdef DEBUG { static double last_clock; printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n", is->audio_clock - last_clock, is->audio_clock, pts); last_clock = is->audio_clock; } #endif return resampled_data_size; } /* free the current packet */ if (pkt->data) av_free_packet(pkt); memset(pkt_temp, 0, sizeof(*pkt_temp)); if (is->paused || is->audioq.abort_request) { return -1; } /* read next packet */ if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0) return -1; if (pkt->data == flush_pkt.data) { avcodec_flush_buffers(dec); flush_complete = 0; } *pkt_temp = *pkt; /* if update the audio clock with the pts */ if (pkt->pts != AV_NOPTS_VALUE) { is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts; } } }
0
307
void YM3812UpdateOne(FM_OPL *OPL, INT16 *buffer, int length) { int i; int data; OPLSAMPLE *buf = buffer; UINT32 amsCnt = OPL->amsCnt; UINT32 vibCnt = OPL->vibCnt; UINT8 rythm = OPL->rythm&0x20; OPL_CH *CH,*R_CH; if( (void *)OPL != cur_chip ){ cur_chip = (void *)OPL; /* channel pointers */ S_CH = OPL->P_CH; E_CH = &S_CH[9]; /* rythm slot */ SLOT7_1 = &S_CH[7].SLOT[SLOT1]; SLOT7_2 = &S_CH[7].SLOT[SLOT2]; SLOT8_1 = &S_CH[8].SLOT[SLOT1]; SLOT8_2 = &S_CH[8].SLOT[SLOT2]; /* LFO state */ amsIncr = OPL->amsIncr; vibIncr = OPL->vibIncr; ams_table = OPL->ams_table; vib_table = OPL->vib_table; } R_CH = rythm ? &S_CH[6] : E_CH; for( i=0; i < length ; i++ ) { /* channel A channel B channel C */ /* LFO */ ams = ams_table[(amsCnt+=amsIncr)>>AMS_SHIFT]; vib = vib_table[(vibCnt+=vibIncr)>>VIB_SHIFT]; outd[0] = 0; /* FM part */ for(CH=S_CH ; CH < R_CH ; CH++) OPL_CALC_CH(CH); /* Rythm part */ if(rythm) OPL_CALC_RH(S_CH); /* limit check */ data = Limit( outd[0] , OPL_MAXOUT, OPL_MINOUT ); /* store to sound buffer */ buf[i] = data >> OPL_OUTSB; } OPL->amsCnt = amsCnt; OPL->vibCnt = vibCnt; #ifdef OPL_OUTPUT_LOG if(opl_dbg_fp) { for(opl_dbg_chip=0;opl_dbg_chip<opl_dbg_maxchip;opl_dbg_chip++) if( opl_dbg_opl[opl_dbg_chip] == OPL) break; fprintf(opl_dbg_fp,"%c%c%c",0x20+opl_dbg_chip,length&0xff,length/256); } #endif }
0
308
static void omap_tipb_bridge_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { struct omap_tipb_bridge_s *s = (struct omap_tipb_bridge_s *) opaque; if (size < 2) { return omap_badwidth_write16(opaque, addr, value); } switch (addr) { case 0x00: /* TIPB_CNTL */ s->control = value & 0xffff; break; case 0x04: /* TIPB_BUS_ALLOC */ s->alloc = value & 0x003f; break; case 0x08: /* MPU_TIPB_CNTL */ s->buffer = value & 0x0003; break; case 0x0c: /* ENHANCED_TIPB_CNTL */ s->width_intr = !(value & 2); s->enh_control = value & 0x000f; break; case 0x10: /* ADDRESS_DBG */ case 0x14: /* DATA_DEBUG_LOW */ case 0x18: /* DATA_DEBUG_HIGH */ case 0x1c: /* DEBUG_CNTR_SIG */ OMAP_RO_REG(addr); break; default: OMAP_BAD_REG(addr); } }
0
309
PCIDevice *pci_register_device(PCIBus *bus, const char *name, int instance_size, int devfn, PCIConfigReadFunc *config_read, PCIConfigWriteFunc *config_write) { PCIDevice *pci_dev; pci_dev = qemu_mallocz(instance_size); pci_dev = do_pci_register_device(pci_dev, bus, name, devfn, config_read, config_write); return pci_dev; }
0
310
int64 float64_to_int64_round_to_zero( float64 a STATUS_PARAM ) { flag aSign; int16 aExp, shiftCount; bits64 aSig; int64 z; aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); aSign = extractFloat64Sign( a ); if ( aExp ) aSig |= LIT64( 0x0010000000000000 ); shiftCount = aExp - 0x433; if ( 0 <= shiftCount ) { if ( 0x43E <= aExp ) { if ( a != LIT64( 0xC3E0000000000000 ) ) { float_raise( float_flag_invalid STATUS_VAR); if ( ! aSign || ( ( aExp == 0x7FF ) && ( aSig != LIT64( 0x0010000000000000 ) ) ) ) { return LIT64( 0x7FFFFFFFFFFFFFFF ); } } return (sbits64) LIT64( 0x8000000000000000 ); } z = aSig<<shiftCount; } else { if ( aExp < 0x3FE ) { if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact; return 0; } z = aSig>>( - shiftCount ); if ( (bits64) ( aSig<<( shiftCount & 63 ) ) ) { STATUS(float_exception_flags) |= float_flag_inexact; } } if ( aSign ) z = - z; return z; }
0
312
static void icount_warp_rt(void) { unsigned seq; int64_t warp_start; /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start * changes from -1 to another value, so the race here is okay. */ do { seq = seqlock_read_begin(&timers_state.vm_clock_seqlock); warp_start = vm_clock_warp_start; } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq)); if (warp_start == -1) { return; } seqlock_write_begin(&timers_state.vm_clock_seqlock); if (runstate_is_running()) { int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT, cpu_get_clock_locked()); int64_t warp_delta; warp_delta = clock - vm_clock_warp_start; if (use_icount == 2) { /* * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too * far ahead of real time. */ int64_t cur_icount = cpu_get_icount_locked(); int64_t delta = clock - cur_icount; warp_delta = MIN(warp_delta, delta); } timers_state.qemu_icount_bias += warp_delta; } vm_clock_warp_start = -1; seqlock_write_end(&timers_state.vm_clock_seqlock); if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) { qemu_clock_notify(QEMU_CLOCK_VIRTUAL); } }
0
313
void bdrv_aio_cancel(BlockAIOCB *acb) { qemu_aio_ref(acb); bdrv_aio_cancel_async(acb); while (acb->refcnt > 1) { if (acb->aiocb_info->get_aio_context) { aio_poll(acb->aiocb_info->get_aio_context(acb), true); } else if (acb->bs) { aio_poll(bdrv_get_aio_context(acb->bs), true); } else { abort(); } } qemu_aio_unref(acb); }
0
314
static void vnc_listen_read(void *opaque, bool websocket) { VncDisplay *vs = opaque; struct sockaddr_in addr; socklen_t addrlen = sizeof(addr); int csock; /* Catch-up */ graphic_hw_update(vs->dcl.con); #ifdef CONFIG_VNC_WS if (websocket) { csock = qemu_accept(vs->lwebsock, (struct sockaddr *)&addr, &addrlen); } else #endif /* CONFIG_VNC_WS */ { csock = qemu_accept(vs->lsock, (struct sockaddr *)&addr, &addrlen); } if (csock != -1) { socket_set_nodelay(csock); vnc_connect(vs, csock, false, websocket); } }
0
315
static void gen_sraq(DisasContext *ctx) { int l1 = gen_new_label(); int l2 = gen_new_label(); TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_local_new(); TCGv t2 = tcg_temp_local_new(); tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2); tcg_gen_sar_tl(t1, cpu_gpr[rS(ctx->opcode)], t2); tcg_gen_subfi_tl(t2, 32, t2); tcg_gen_shl_tl(t2, cpu_gpr[rS(ctx->opcode)], t2); tcg_gen_or_tl(t0, t0, t2); gen_store_spr(SPR_MQ, t0); tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20); tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l1); tcg_gen_mov_tl(t2, cpu_gpr[rS(ctx->opcode)]); tcg_gen_sari_tl(t1, cpu_gpr[rS(ctx->opcode)], 31); gen_set_label(l1); tcg_temp_free(t0); tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t1); tcg_gen_movi_tl(cpu_ca, 0); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2); tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l2); tcg_gen_movi_tl(cpu_ca, 1); gen_set_label(l2); tcg_temp_free(t1); tcg_temp_free(t2); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); }
0
316
static void reverse_dc_prediction(Vp3DecodeContext *s, int first_fragment, int fragment_width, int fragment_height) { #define PUL 8 #define PU 4 #define PUR 2 #define PL 1 int x, y; int i = first_fragment; int predicted_dc; /* DC values for the left, up-left, up, and up-right fragments */ int vl, vul, vu, vur; /* indexes for the left, up-left, up, and up-right fragments */ int l, ul, u, ur; /* * The 6 fields mean: * 0: up-left multiplier * 1: up multiplier * 2: up-right multiplier * 3: left multiplier */ static const int predictor_transform[16][4] = { { 0, 0, 0, 0}, { 0, 0, 0,128}, // PL { 0, 0,128, 0}, // PUR { 0, 0, 53, 75}, // PUR|PL { 0,128, 0, 0}, // PU { 0, 64, 0, 64}, // PU|PL { 0,128, 0, 0}, // PU|PUR { 0, 0, 53, 75}, // PU|PUR|PL {128, 0, 0, 0}, // PUL { 0, 0, 0,128}, // PUL|PL { 64, 0, 64, 0}, // PUL|PUR { 0, 0, 53, 75}, // PUL|PUR|PL { 0,128, 0, 0}, // PUL|PU {-104,116, 0,116}, // PUL|PU|PL { 24, 80, 24, 0}, // PUL|PU|PUR {-104,116, 0,116} // PUL|PU|PUR|PL }; /* This table shows which types of blocks can use other blocks for * prediction. For example, INTRA is the only mode in this table to * have a frame number of 0. That means INTRA blocks can only predict * from other INTRA blocks. There are 2 golden frame coding types; * blocks encoding in these modes can only predict from other blocks * that were encoded with these 1 of these 2 modes. */ static const unsigned char compatible_frame[8] = { 1, /* MODE_INTER_NO_MV */ 0, /* MODE_INTRA */ 1, /* MODE_INTER_PLUS_MV */ 1, /* MODE_INTER_LAST_MV */ 1, /* MODE_INTER_PRIOR_MV */ 2, /* MODE_USING_GOLDEN */ 2, /* MODE_GOLDEN_MV */ 1 /* MODE_INTER_FOUR_MV */ }; int current_frame_type; /* there is a last DC predictor for each of the 3 frame types */ short last_dc[3]; int transform = 0; vul = vu = vur = vl = 0; last_dc[0] = last_dc[1] = last_dc[2] = 0; /* for each fragment row... */ for (y = 0; y < fragment_height; y++) { /* for each fragment in a row... 
*/ for (x = 0; x < fragment_width; x++, i++) { /* reverse prediction if this block was coded */ if (s->all_fragments[i].coding_method != MODE_COPY) { current_frame_type = compatible_frame[s->all_fragments[i].coding_method]; transform= 0; if(x){ l= i-1; vl = DC_COEFF(l); if(FRAME_CODED(l) && COMPATIBLE_FRAME(l)) transform |= PL; } if(y){ u= i-fragment_width; vu = DC_COEFF(u); if(FRAME_CODED(u) && COMPATIBLE_FRAME(u)) transform |= PU; if(x){ ul= i-fragment_width-1; vul = DC_COEFF(ul); if(FRAME_CODED(ul) && COMPATIBLE_FRAME(ul)) transform |= PUL; } if(x + 1 < fragment_width){ ur= i-fragment_width+1; vur = DC_COEFF(ur); if(FRAME_CODED(ur) && COMPATIBLE_FRAME(ur)) transform |= PUR; } } if (transform == 0) { /* if there were no fragments to predict from, use last * DC saved */ predicted_dc = last_dc[current_frame_type]; } else { /* apply the appropriate predictor transform */ predicted_dc = (predictor_transform[transform][0] * vul) + (predictor_transform[transform][1] * vu) + (predictor_transform[transform][2] * vur) + (predictor_transform[transform][3] * vl); predicted_dc /= 128; /* check for outranging on the [ul u l] and * [ul u ur l] predictors */ if ((transform == 13) || (transform == 15)) { if (FFABS(predicted_dc - vu) > 128) predicted_dc = vu; else if (FFABS(predicted_dc - vl) > 128) predicted_dc = vl; else if (FFABS(predicted_dc - vul) > 128) predicted_dc = vul; } } /* at long last, apply the predictor */ if(s->coeffs[i].index){ *s->next_coeff= s->coeffs[i]; s->coeffs[i].index=0; s->coeffs[i].coeff=0; s->coeffs[i].next= s->next_coeff++; } s->coeffs[i].coeff += predicted_dc; /* save the DC */ last_dc[current_frame_type] = DC_COEFF(i); if(DC_COEFF(i) && !(s->coeff_counts[i]&127)){ s->coeff_counts[i]= 129; // s->all_fragments[i].next_coeff= s->next_coeff; s->coeffs[i].next= s->next_coeff; (s->next_coeff++)->next=NULL; } } } } }
0
317
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc) { void **lp, **p; p = (void **)l1_phys_map; #if TARGET_PHYS_ADDR_SPACE_BITS > 32 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS) #error unsupported TARGET_PHYS_ADDR_SPACE_BITS #endif lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1)); p = *lp; if (!p) { /* allocate if not found */ if (!alloc) return NULL; p = qemu_vmalloc(sizeof(void *) * L1_SIZE); memset(p, 0, sizeof(void *) * L1_SIZE); *lp = p; } #endif lp = p + ((index >> L2_BITS) & (L1_SIZE - 1)); p = *lp; if (!p) { /* allocate if not found */ if (!alloc) return NULL; p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE); memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE); *lp = p; } return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1)); }
0
318
CPUState *cpu_mb_init (const char *cpu_model) { CPUState *env; static int tcg_initialized = 0; int i; env = qemu_mallocz(sizeof(CPUState)); cpu_exec_init(env); cpu_reset(env); env->pvr.regs[0] = PVR0_PVR_FULL_MASK \ | PVR0_USE_BARREL_MASK \ | PVR0_USE_DIV_MASK \ | PVR0_USE_HW_MUL_MASK \ | PVR0_USE_EXC_MASK \ | PVR0_USE_ICACHE_MASK \ | PVR0_USE_DCACHE_MASK \ | PVR0_USE_MMU \ | (0xb << 8); env->pvr.regs[2] = PVR2_D_OPB_MASK \ | PVR2_D_LMB_MASK \ | PVR2_I_OPB_MASK \ | PVR2_I_LMB_MASK \ | PVR2_USE_MSR_INSTR \ | PVR2_USE_PCMP_INSTR \ | PVR2_USE_BARREL_MASK \ | PVR2_USE_DIV_MASK \ | PVR2_USE_HW_MUL_MASK \ | PVR2_USE_MUL64_MASK \ | 0; env->pvr.regs[10] = 0x0c000000; /* Default to spartan 3a dsp family. */ env->pvr.regs[11] = PVR11_USE_MMU | (16 << 17); #if !defined(CONFIG_USER_ONLY) env->mmu.c_mmu = 3; env->mmu.c_mmu_tlb_access = 3; env->mmu.c_mmu_zones = 16; #endif if (tcg_initialized) return env; tcg_initialized = 1; cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); env_debug = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, debug), "debug0"); env_iflags = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, iflags), "iflags"); env_imm = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, imm), "imm"); env_btarget = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, btarget), "btarget"); env_btaken = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, btaken), "btaken"); for (i = 0; i < ARRAY_SIZE(cpu_R); i++) { cpu_R[i] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, regs[i]), regnames[i]); } for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) { cpu_SR[i] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, sregs[i]), special_regnames[i]); } #define GEN_HELPER 2 #include "helper.h" return env; }
0
320
int cpu_x86_exec(CPUX86State *env1) { int saved_T0, saved_T1, saved_A0; CPUX86State *saved_env; #ifdef reg_EAX int saved_EAX; #endif #ifdef reg_ECX int saved_ECX; #endif #ifdef reg_EDX int saved_EDX; #endif #ifdef reg_EBX int saved_EBX; #endif #ifdef reg_ESP int saved_ESP; #endif #ifdef reg_EBP int saved_EBP; #endif #ifdef reg_ESI int saved_ESI; #endif #ifdef reg_EDI int saved_EDI; #endif #ifdef __sparc__ int saved_i7, tmp_T0; #endif int code_gen_size, ret; void (*gen_func)(void); TranslationBlock *tb, **ptb; uint8_t *tc_ptr, *cs_base, *pc; unsigned int flags; /* first we save global registers */ saved_T0 = T0; saved_T1 = T1; saved_A0 = A0; saved_env = env; env = env1; #ifdef reg_EAX saved_EAX = EAX; EAX = env->regs[R_EAX]; #endif #ifdef reg_ECX saved_ECX = ECX; ECX = env->regs[R_ECX]; #endif #ifdef reg_EDX saved_EDX = EDX; EDX = env->regs[R_EDX]; #endif #ifdef reg_EBX saved_EBX = EBX; EBX = env->regs[R_EBX]; #endif #ifdef reg_ESP saved_ESP = ESP; ESP = env->regs[R_ESP]; #endif #ifdef reg_EBP saved_EBP = EBP; EBP = env->regs[R_EBP]; #endif #ifdef reg_ESI saved_ESI = ESI; ESI = env->regs[R_ESI]; #endif #ifdef reg_EDI saved_EDI = EDI; EDI = env->regs[R_EDI]; #endif #ifdef __sparc__ /* we also save i7 because longjmp may not restore it */ asm volatile ("mov %%i7, %0" : "=r" (saved_i7)); #endif /* put eflags in CPU temporary format */ CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); DF = 1 - (2 * ((env->eflags >> 10) & 1)); CC_OP = CC_OP_EFLAGS; env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); env->interrupt_request = 0; /* prepare setjmp context for exception handling */ if (setjmp(env->jmp_env) == 0) { T0 = 0; /* force lookup of first TB */ for(;;) { #ifdef __sparc__ /* g1 can be modified by some libc? functions */ tmp_T0 = T0; #endif if (env->interrupt_request) { env->exception_index = EXCP_INTERRUPT; cpu_loop_exit(); } #ifdef DEBUG_EXEC if (loglevel) { /* XXX: save all volatile state in cpu state */ /* restore flags in standard format */ env->regs[R_EAX] = EAX; env->regs[R_EBX] = EBX; env->regs[R_ECX] = ECX; env->regs[R_EDX] = EDX; env->regs[R_ESI] = ESI; env->regs[R_EDI] = EDI; env->regs[R_EBP] = EBP; env->regs[R_ESP] = ESP; env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK); cpu_x86_dump_state(env, logfile, 0); env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); } #endif /* we compute the CPU state. We assume it will not change during the whole generated block. 
*/ flags = env->seg_cache[R_CS].seg_32bit << GEN_FLAG_CODE32_SHIFT; flags |= env->seg_cache[R_SS].seg_32bit << GEN_FLAG_SS32_SHIFT; flags |= (((unsigned long)env->seg_cache[R_DS].base | (unsigned long)env->seg_cache[R_ES].base | (unsigned long)env->seg_cache[R_SS].base) != 0) << GEN_FLAG_ADDSEG_SHIFT; if (!(env->eflags & VM_MASK)) { flags |= (env->segs[R_CS] & 3) << GEN_FLAG_CPL_SHIFT; } else { /* NOTE: a dummy CPL is kept */ flags |= (1 << GEN_FLAG_VM_SHIFT); flags |= (3 << GEN_FLAG_CPL_SHIFT); } flags |= (env->eflags & (IOPL_MASK | TF_MASK)); cs_base = env->seg_cache[R_CS].base; pc = cs_base + env->eip; tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base, flags); if (!tb) { spin_lock(&tb_lock); /* if no translated code available, then translate it now */ tb = tb_alloc((unsigned long)pc); if (!tb) { /* flush must be done */ tb_flush(); /* cannot fail at this point */ tb = tb_alloc((unsigned long)pc); /* don't forget to invalidate previous TB info */ ptb = &tb_hash[tb_hash_func((unsigned long)pc)]; T0 = 0; } tc_ptr = code_gen_ptr; tb->tc_ptr = tc_ptr; tb->cs_base = (unsigned long)cs_base; tb->flags = flags; ret = cpu_x86_gen_code(tb, CODE_GEN_MAX_SIZE, &code_gen_size); /* if invalid instruction, signal it */ if (ret != 0) { /* NOTE: the tb is allocated but not linked, so we can leave it */ spin_unlock(&tb_lock); raise_exception(EXCP06_ILLOP); } *ptb = tb; tb->hash_next = NULL; tb_link(tb); code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); spin_unlock(&tb_lock); } #ifdef DEBUG_EXEC if (loglevel) { fprintf(logfile, "Trace 0x%08lx [0x%08lx] %s\n", (long)tb->tc_ptr, (long)tb->pc, lookup_symbol((void *)tb->pc)); } #endif #ifdef __sparc__ T0 = tmp_T0; #endif /* see if we can patch the calling TB */ if (T0 != 0 && !(env->eflags & TF_MASK)) { spin_lock(&tb_lock); tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb); spin_unlock(&tb_lock); } tc_ptr = tb->tc_ptr; /* execute the generated code */ gen_func = (void *)tc_ptr; #if defined(__sparc__) __asm__ __volatile__("call %0\n\t" "mov %%o7,%%i0" : /* no outputs */ : "r" (gen_func) : "i0", "i1", "i2", "i3", "i4", "i5"); #elif defined(__arm__) asm volatile ("mov pc, %0\n\t" ".global exec_loop\n\t" "exec_loop:\n\t" : /* no outputs */ : "r" (gen_func) : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14"); #else gen_func(); #endif } } ret = env->exception_index; /* restore flags in standard format */ env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK); /* restore global registers */ #ifdef reg_EAX EAX = saved_EAX; #endif #ifdef reg_ECX ECX = saved_ECX; #endif #ifdef reg_EDX EDX = saved_EDX; #endif #ifdef reg_EBX EBX = saved_EBX; #endif #ifdef reg_ESP ESP = saved_ESP; #endif #ifdef reg_EBP EBP = saved_EBP; #endif #ifdef reg_ESI ESI = saved_ESI; #endif #ifdef reg_EDI EDI = saved_EDI; #endif #ifdef __sparc__ asm volatile ("mov %0, %%i7" : : "r" (saved_i7)); #endif T0 = saved_T0; T1 = saved_T1; A0 = saved_A0; env = saved_env; return ret; }
0
322
void s390_pci_sclp_configure(SCCB *sccb) { PciCfgSccb *psccb = (PciCfgSccb *)sccb; S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(be32_to_cpu(psccb->aid)); uint16_t rc; if (be16_to_cpu(sccb->h.length) < 16) { rc = SCLP_RC_INSUFFICIENT_SCCB_LENGTH; goto out; } if (pbdev) { if (pbdev->configured) { rc = SCLP_RC_NO_ACTION_REQUIRED; } else { pbdev->configured = true; rc = SCLP_RC_NORMAL_COMPLETION; } } else { DPRINTF("sclp config no dev found\n"); rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED; } out: psccb->header.response_code = cpu_to_be16(rc); }
0
323
static BlockJob *find_block_job(const char *device) { BlockDriverState *bs; bs = bdrv_find(device); if (!bs || !bs->job) { return NULL; } return bs->job; }
0
324
static void usb_host_realize(USBDevice *udev, Error **errp) { USBHostDevice *s = USB_HOST_DEVICE(udev); if (s->match.vendor_id > 0xffff) { error_setg(errp, "vendorid out of range"); return; } if (s->match.product_id > 0xffff) { error_setg(errp, "productid out of range"); return; } if (s->match.addr > 127) { error_setg(errp, "hostaddr out of range"); return; } loglevel = s->loglevel; udev->flags |= (1 << USB_DEV_FLAG_IS_HOST); udev->auto_attach = 0; QTAILQ_INIT(&s->requests); QTAILQ_INIT(&s->isorings); s->exit.notify = usb_host_exit_notifier; qemu_add_exit_notifier(&s->exit); QTAILQ_INSERT_TAIL(&hostdevs, s, next); usb_host_auto_check(NULL); }
0
326
static void rtas_int_off(sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { struct ics_state *ics = spapr->icp->ics; uint32_t nr; if ((nargs != 1) || (nret != 1)) { rtas_st(rets, 0, -3); return; } nr = rtas_ld(args, 0); if (!ics_valid_irq(ics, nr)) { rtas_st(rets, 0, -3); return; } ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff, ics->irqs[nr - ics->offset].priority); rtas_st(rets, 0, 0); /* Success */ }
0
327
static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int16_t bS[7], int bsi, int qp ) { int index_a = qp + h->slice_alpha_c0_offset; int alpha = alpha_table[index_a]; int beta = beta_table[qp + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0*bsi]] + 1; tc[1] = tc0_table[index_a][bS[1*bsi]] + 1; tc[2] = tc0_table[index_a][bS[2*bsi]] + 1; tc[3] = tc0_table[index_a][bS[3*bsi]] + 1; h->h264dsp.h264_h_loop_filter_chroma_mbaff(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_h_loop_filter_chroma_mbaff_intra(pix, stride, alpha, beta); } }
0
328
static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset) { GICState *s = (GICState *)opaque; uint32_t res; int irq; int i; int cpu; int cm; int mask; cpu = gic_get_current_cpu(s); cm = 1 << cpu; if (offset < 0x100) { if (offset == 0) return s->enabled; if (offset == 4) return ((s->num_irq / 32) - 1) | ((NUM_CPU(s) - 1) << 5); if (offset < 0x08) return 0; if (offset >= 0x80) { /* Interrupt Security , RAZ/WI */ return 0; } goto bad_reg; } else if (offset < 0x200) { /* Interrupt Set/Clear Enable. */ if (offset < 0x180) irq = (offset - 0x100) * 8; else irq = (offset - 0x180) * 8; irq += GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; for (i = 0; i < 8; i++) { if (GIC_TEST_ENABLED(irq + i, cm)) { res |= (1 << i); } } } else if (offset < 0x300) { /* Interrupt Set/Clear Pending. */ if (offset < 0x280) irq = (offset - 0x200) * 8; else irq = (offset - 0x280) * 8; irq += GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; for (i = 0; i < 8; i++) { if (GIC_TEST_PENDING(irq + i, mask)) { res |= (1 << i); } } } else if (offset < 0x400) { /* Interrupt Active. */ irq = (offset - 0x300) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; for (i = 0; i < 8; i++) { if (GIC_TEST_ACTIVE(irq + i, mask)) { res |= (1 << i); } } } else if (offset < 0x800) { /* Interrupt Priority. */ irq = (offset - 0x400) + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = GIC_GET_PRIORITY(irq, cpu); } else if (offset < 0xc00) { /* Interrupt CPU Target. */ if (s->num_cpu == 1 && s->revision != REV_11MPCORE) { /* For uniprocessor GICs these RAZ/WI */ res = 0; } else { irq = (offset - 0x800) + GIC_BASE_IRQ; if (irq >= s->num_irq) { goto bad_reg; } if (irq >= 29 && irq <= 31) { res = cm; } else { res = GIC_TARGET(irq); } } } else if (offset < 0xf00) { /* Interrupt Configuration. */ irq = (offset - 0xc00) * 2 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; for (i = 0; i < 4; i++) { if (GIC_TEST_MODEL(irq + i)) res |= (1 << (i * 2)); if (GIC_TEST_TRIGGER(irq + i)) res |= (2 << (i * 2)); } } else if (offset < 0xfe0) { goto bad_reg; } else /* offset >= 0xfe0 */ { if (offset & 3) { res = 0; } else { res = gic_id[(offset - 0xfe0) >> 2]; } } return res; bad_reg: hw_error("gic_dist_readb: Bad offset %x\n", (int)offset); return 0; }
0
329
static void qmp_serialize(void *native_in, void **datap, VisitorFunc visit, Error **errp) { QmpSerializeData *d = g_malloc0(sizeof(*d)); d->qov = qmp_output_visitor_new(&d->obj); visit(d->qov, &native_in, errp); *datap = d; }
1
330
static void tcg_target_init(TCGContext *s) { tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); tcg_regset_set32(tcg_target_call_clobber_regs, 0, (1 << TCG_REG_R0) | #ifdef _CALL_DARWIN (1 << TCG_REG_R2) | #endif (1 << TCG_REG_R3) | (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R12) ); tcg_regset_clear(s->reserved_regs); tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); #ifndef _CALL_DARWIN tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); #endif #ifdef _CALL_SYSV tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); #endif tcg_add_target_add_op_defs(ppc_op_defs); }
1
331
static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; if (msr_pr) { hcall_dprintf("Hypercall made with MSR[PR]=1\n"); env->gpr[3] = H_PRIVILEGE; } else { env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]); } }
1
332
test_opts_range_unvisited(void) { intList *list = NULL; intList *tail; QemuOpts *opts; Visitor *v; opts = qemu_opts_parse(qemu_find_opts("userdef"), "ilist=0-2", false, &error_abort); v = opts_visitor_new(opts); visit_start_struct(v, NULL, NULL, 0, &error_abort); /* Would be simpler if the visitor genuinely supported virtual walks */ visit_start_list(v, "ilist", (GenericList **)&list, sizeof(*list), &error_abort); tail = list; visit_type_int(v, NULL, &tail->value, &error_abort); g_assert_cmpint(tail->value, ==, 0); tail = (intList *)visit_next_list(v, (GenericList *)tail, sizeof(*list)); g_assert(tail); visit_type_int(v, NULL, &tail->value, &error_abort); g_assert_cmpint(tail->value, ==, 1); tail = (intList *)visit_next_list(v, (GenericList *)tail, sizeof(*list)); g_assert(tail); visit_check_list(v, &error_abort); /* BUG: unvisited tail not reported */ visit_end_list(v, (void **)&list); visit_check_struct(v, &error_abort); visit_end_struct(v, NULL); qapi_free_intList(list); visit_free(v); qemu_opts_del(opts); }
1
333
static int decode_subframe(WMAProDecodeCtx *s) { int offset = s->samples_per_frame; int subframe_len = s->samples_per_frame; int i; int total_samples = s->samples_per_frame * s->avctx->channels; int transmit_coeffs = 0; int cur_subwoofer_cutoff; s->subframe_offset = get_bits_count(&s->gb); /** reset channel context and find the next block offset and size == the next block of the channel with the smallest number of decoded samples */ for (i = 0; i < s->avctx->channels; i++) { s->channel[i].grouped = 0; if (offset > s->channel[i].decoded_samples) { offset = s->channel[i].decoded_samples; subframe_len = s->channel[i].subframe_len[s->channel[i].cur_subframe]; } } av_dlog(s->avctx, "processing subframe with offset %i len %i\n", offset, subframe_len); /** get a list of all channels that contain the estimated block */ s->channels_for_cur_subframe = 0; for (i = 0; i < s->avctx->channels; i++) { const int cur_subframe = s->channel[i].cur_subframe; /** subtract already processed samples */ total_samples -= s->channel[i].decoded_samples; /** and count if there are multiple subframes that match our profile */ if (offset == s->channel[i].decoded_samples && subframe_len == s->channel[i].subframe_len[cur_subframe]) { total_samples -= s->channel[i].subframe_len[cur_subframe]; s->channel[i].decoded_samples += s->channel[i].subframe_len[cur_subframe]; s->channel_indexes_for_cur_subframe[s->channels_for_cur_subframe] = i; ++s->channels_for_cur_subframe; } } /** check if the frame will be complete after processing the estimated block */ if (!total_samples) s->parsed_all_subframes = 1; av_dlog(s->avctx, "subframe is part of %i channels\n", s->channels_for_cur_subframe); /** calculate number of scale factor bands and their offsets */ s->table_idx = av_log2(s->samples_per_frame/subframe_len); s->num_bands = s->num_sfb[s->table_idx]; s->cur_sfb_offsets = s->sfb_offsets[s->table_idx]; cur_subwoofer_cutoff = s->subwoofer_cutoffs[s->table_idx]; /** configure the decoder for the current subframe */ for (i = 0; i < s->channels_for_cur_subframe; i++) { int c = s->channel_indexes_for_cur_subframe[i]; s->channel[c].coeffs = &s->channel[c].out[(s->samples_per_frame >> 1) + offset]; } s->subframe_len = subframe_len; s->esc_len = av_log2(s->subframe_len - 1) + 1; /** skip extended header if any */ if (get_bits1(&s->gb)) { int num_fill_bits; if (!(num_fill_bits = get_bits(&s->gb, 2))) { int len = get_bits(&s->gb, 4); num_fill_bits = get_bits(&s->gb, len) + 1; } if (num_fill_bits >= 0) { if (get_bits_count(&s->gb) + num_fill_bits > s->num_saved_bits) { av_log(s->avctx, AV_LOG_ERROR, "invalid number of fill bits\n"); return AVERROR_INVALIDDATA; } skip_bits_long(&s->gb, num_fill_bits); } } /** no idea for what the following bit is used */ if (get_bits1(&s->gb)) { avpriv_request_sample(s->avctx, "Reserved bit"); return AVERROR_PATCHWELCOME; } if (decode_channel_transform(s) < 0) return AVERROR_INVALIDDATA; for (i = 0; i < s->channels_for_cur_subframe; i++) { int c = s->channel_indexes_for_cur_subframe[i]; if ((s->channel[c].transmit_coefs = get_bits1(&s->gb))) transmit_coeffs = 1; } if (transmit_coeffs) { int step; int quant_step = 90 * s->bits_per_sample >> 4; /** decode number of vector coded coefficients */ if ((s->transmit_num_vec_coeffs = get_bits1(&s->gb))) { int num_bits = av_log2((s->subframe_len + 3)/4) + 1; for (i = 0; i < s->channels_for_cur_subframe; i++) { int c = s->channel_indexes_for_cur_subframe[i]; int num_vec_coeffs = get_bits(&s->gb, num_bits) << 2; if (num_vec_coeffs > WMAPRO_BLOCK_MAX_SIZE) { 
av_log(s->avctx, AV_LOG_ERROR, "num_vec_coeffs %d is too large\n", num_vec_coeffs); return AVERROR_INVALIDDATA; } s->channel[c].num_vec_coeffs = num_vec_coeffs; } } else { for (i = 0; i < s->channels_for_cur_subframe; i++) { int c = s->channel_indexes_for_cur_subframe[i]; s->channel[c].num_vec_coeffs = s->subframe_len; } } /** decode quantization step */ step = get_sbits(&s->gb, 6); quant_step += step; if (step == -32 || step == 31) { const int sign = (step == 31) - 1; int quant = 0; while (get_bits_count(&s->gb) + 5 < s->num_saved_bits && (step = get_bits(&s->gb, 5)) == 31) { quant += 31; } quant_step += ((quant + step) ^ sign) - sign; } if (quant_step < 0) { av_log(s->avctx, AV_LOG_DEBUG, "negative quant step\n"); } /** decode quantization step modifiers for every channel */ if (s->channels_for_cur_subframe == 1) { s->channel[s->channel_indexes_for_cur_subframe[0]].quant_step = quant_step; } else { int modifier_len = get_bits(&s->gb, 3); for (i = 0; i < s->channels_for_cur_subframe; i++) { int c = s->channel_indexes_for_cur_subframe[i]; s->channel[c].quant_step = quant_step; if (get_bits1(&s->gb)) { if (modifier_len) { s->channel[c].quant_step += get_bits(&s->gb, modifier_len) + 1; } else ++s->channel[c].quant_step; } } } /** decode scale factors */ if (decode_scale_factors(s) < 0) return AVERROR_INVALIDDATA; } av_dlog(s->avctx, "BITSTREAM: subframe header length was %i\n", get_bits_count(&s->gb) - s->subframe_offset); /** parse coefficients */ for (i = 0; i < s->channels_for_cur_subframe; i++) { int c = s->channel_indexes_for_cur_subframe[i]; if (s->channel[c].transmit_coefs && get_bits_count(&s->gb) < s->num_saved_bits) { decode_coeffs(s, c); } else memset(s->channel[c].coeffs, 0, sizeof(*s->channel[c].coeffs) * subframe_len); } av_dlog(s->avctx, "BITSTREAM: subframe length was %i\n", get_bits_count(&s->gb) - s->subframe_offset); if (transmit_coeffs) { FFTContext *mdct = &s->mdct_ctx[av_log2(subframe_len) - WMAPRO_BLOCK_MIN_BITS]; /** reconstruct the per channel data */ inverse_channel_transform(s); for (i = 0; i < s->channels_for_cur_subframe; i++) { int c = s->channel_indexes_for_cur_subframe[i]; const int* sf = s->channel[c].scale_factors; int b; if (c == s->lfe_channel) memset(&s->tmp[cur_subwoofer_cutoff], 0, sizeof(*s->tmp) * (subframe_len - cur_subwoofer_cutoff)); /** inverse quantization and rescaling */ for (b = 0; b < s->num_bands; b++) { const int end = FFMIN(s->cur_sfb_offsets[b+1], s->subframe_len); const int exp = s->channel[c].quant_step - (s->channel[c].max_scale_factor - *sf++) * s->channel[c].scale_factor_step; const float quant = pow(10.0, exp / 20.0); int start = s->cur_sfb_offsets[b]; s->fdsp.vector_fmul_scalar(s->tmp + start, s->channel[c].coeffs + start, quant, end - start); } /** apply imdct (imdct_half == DCTIV with reverse) */ mdct->imdct_half(mdct, s->channel[c].coeffs, s->tmp); } } /** window and overlapp-add */ wmapro_window(s); /** handled one subframe */ for (i = 0; i < s->channels_for_cur_subframe; i++) { int c = s->channel_indexes_for_cur_subframe[i]; if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) { av_log(s->avctx, AV_LOG_ERROR, "broken subframe\n"); return AVERROR_INVALIDDATA; } ++s->channel[c].cur_subframe; } return 0; }
1
334
static int find_vdi_name(BDRVSheepdogState *s, char *filename, uint32_t snapid, char *tag, uint32_t *vid, int for_snapshot) { int ret, fd; SheepdogVdiReq hdr; SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr; unsigned int wlen, rlen = 0; char buf[SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN]; fd = connect_to_sdog(s->addr, s->port); if (fd < 0) { return fd; } memset(buf, 0, sizeof(buf)); strncpy(buf, filename, SD_MAX_VDI_LEN); strncpy(buf + SD_MAX_VDI_LEN, tag, SD_MAX_VDI_TAG_LEN); memset(&hdr, 0, sizeof(hdr)); if (for_snapshot) { hdr.opcode = SD_OP_GET_VDI_INFO; } else { hdr.opcode = SD_OP_LOCK_VDI; } wlen = SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN; hdr.proto_ver = SD_PROTO_VER; hdr.data_length = wlen; hdr.snapid = snapid; hdr.flags = SD_FLAG_CMD_WRITE; ret = do_req(fd, (SheepdogReq *)&hdr, buf, &wlen, &rlen); if (ret) { goto out; } if (rsp->result != SD_RES_SUCCESS) { error_report("cannot get vdi info, %s, %s %d %s", sd_strerror(rsp->result), filename, snapid, tag); if (rsp->result == SD_RES_NO_VDI) { ret = -ENOENT; } else { ret = -EIO; } goto out; } *vid = rsp->vdi_id; ret = 0; out: closesocket(fd); return ret; }
1
335
static void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUX86State *env) { abi_ulong frame_addr; struct sigframe *frame; int i, err = 0; frame_addr = get_sigframe(ka, env, sizeof(*frame)); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) goto give_sigsegv; __put_user(current_exec_domain_sig(sig), &frame->sig); if (err) goto give_sigsegv; setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0], frame_addr + offsetof(struct sigframe, fpstate)); if (err) goto give_sigsegv; for(i = 1; i < TARGET_NSIG_WORDS; i++) { if (__put_user(set->sig[i], &frame->extramask[i - 1])) goto give_sigsegv; } /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa_flags & TARGET_SA_RESTORER) { __put_user(ka->sa_restorer, &frame->pretcode); } else { uint16_t val16; abi_ulong retcode_addr; retcode_addr = frame_addr + offsetof(struct sigframe, retcode); __put_user(retcode_addr, &frame->pretcode); /* This is popl %eax ; movl $,%eax ; int $0x80 */ val16 = 0xb858; __put_user(val16, (uint16_t *)(frame->retcode+0)); __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2)); val16 = 0x80cd; __put_user(val16, (uint16_t *)(frame->retcode+6)); } if (err) goto give_sigsegv; /* Set up registers for signal handler */ env->regs[R_ESP] = frame_addr; env->eip = ka->_sa_handler; cpu_x86_load_seg(env, R_DS, __USER_DS); cpu_x86_load_seg(env, R_ES, __USER_DS); cpu_x86_load_seg(env, R_SS, __USER_DS); cpu_x86_load_seg(env, R_CS, __USER_CS); env->eflags &= ~TF_MASK; unlock_user_struct(frame, frame_addr, 1); return; give_sigsegv: unlock_user_struct(frame, frame_addr, 1); if (sig == TARGET_SIGSEGV) ka->_sa_handler = TARGET_SIG_DFL; force_sig(TARGET_SIGSEGV /* , current */); }
1
336
static void ahci_pci_enable(AHCIQState *ahci) { uint8_t reg; start_ahci_device(ahci); switch (ahci->fingerprint) { case AHCI_INTEL_ICH9: /* ICH9 has a register at PCI 0x92 that * acts as a master port enabler mask. */ reg = qpci_config_readb(ahci->dev, 0x92); reg |= 0x3F; qpci_config_writeb(ahci->dev, 0x92, reg); /* 0...0111111b -- bit significant, ports 0-5 enabled. */ ASSERT_BIT_SET(qpci_config_readb(ahci->dev, 0x92), 0x3F); break; } }
1
337
main( int argc, char *argv[] ) { GMainLoop *loop; GIOChannel *channel_stdin; char *qemu_host; char *qemu_port; VSCMsgHeader mhHeader; VCardEmulOptions *command_line_options = NULL; char *cert_names[MAX_CERTS]; char *emul_args = NULL; int cert_count = 0; int c, sock; if (socket_init() != 0) return 1; while ((c = getopt(argc, argv, "c:e:pd:")) != -1) { switch (c) { case 'c': if (cert_count >= MAX_CERTS) { printf("too many certificates (max = %d)\n", MAX_CERTS); exit(5); } cert_names[cert_count++] = optarg; break; case 'e': emul_args = optarg; break; case 'p': print_usage(); exit(4); break; case 'd': verbose = get_id_from_string(optarg, 1); break; } } if (argc - optind != 2) { print_usage(); exit(4); } if (cert_count > 0) { char *new_args; int len, i; /* if we've given some -c options, we clearly we want do so some * software emulation. add that emulation now. this is NSS Emulator * specific */ if (emul_args == NULL) { emul_args = (char *)"db=\"/etc/pki/nssdb\""; } #define SOFT_STRING ",soft=(,Virtual Reader,CAC,," /* 2 == close paren & null */ len = strlen(emul_args) + strlen(SOFT_STRING) + 2; for (i = 0; i < cert_count; i++) { len += strlen(cert_names[i])+1; /* 1 == comma */ } new_args = g_malloc(len); strcpy(new_args, emul_args); strcat(new_args, SOFT_STRING); for (i = 0; i < cert_count; i++) { strcat(new_args, cert_names[i]); strcat(new_args, ","); } strcat(new_args, ")"); emul_args = new_args; } if (emul_args) { command_line_options = vcard_emul_options(emul_args); } qemu_host = g_strdup(argv[argc - 2]); qemu_port = g_strdup(argv[argc - 1]); sock = connect_to_qemu(qemu_host, qemu_port); if (sock == -1) { fprintf(stderr, "error opening socket, exiting.\n"); exit(5); } socket_to_send = g_byte_array_new(); qemu_mutex_init(&socket_to_send_lock); qemu_mutex_init(&pending_reader_lock); qemu_cond_init(&pending_reader_condition); vcard_emul_init(command_line_options); loop = g_main_loop_new(NULL, true); printf("> "); fflush(stdout); #ifdef _WIN32 channel_stdin = g_io_channel_win32_new_fd(STDIN_FILENO); #else channel_stdin = g_io_channel_unix_new(STDIN_FILENO); #endif g_io_add_watch(channel_stdin, G_IO_IN, do_command, NULL); #ifdef _WIN32 channel_socket = g_io_channel_win32_new_socket(sock); #else channel_socket = g_io_channel_unix_new(sock); #endif g_io_channel_set_encoding(channel_socket, NULL, NULL); /* we buffer ourself for thread safety reasons */ g_io_channel_set_buffered(channel_socket, FALSE); /* Send init message, Host responds (and then we send reader attachments) */ VSCMsgInit init = { .version = htonl(VSCARD_VERSION), .magic = VSCARD_MAGIC, .capabilities = {0} }; send_msg(VSC_Init, mhHeader.reader_id, &init, sizeof(init)); g_main_loop_run(loop); g_main_loop_unref(loop); g_io_channel_unref(channel_stdin); g_io_channel_unref(channel_socket); g_byte_array_unref(socket_to_send); closesocket(sock); return 0; }
1
339
static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev, struct vfio_irq_info info) { int ret; VFIOPlatformDevice *vdev = container_of(vbasedev, VFIOPlatformDevice, vbasedev); SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev); VFIOINTp *intp; intp = g_malloc0(sizeof(*intp)); intp->vdev = vdev; intp->pin = info.index; intp->flags = info.flags; intp->state = VFIO_IRQ_INACTIVE; intp->kvm_accel = false; sysbus_init_irq(sbdev, &intp->qemuirq); /* Get an eventfd for trigger */ intp->interrupt = g_malloc0(sizeof(EventNotifier)); ret = event_notifier_init(intp->interrupt, 0); if (ret) { g_free(intp->interrupt); g_free(intp); error_report("vfio: Error: trigger event_notifier_init failed "); return NULL; } /* Get an eventfd for resample/unmask */ intp->unmask = g_malloc0(sizeof(EventNotifier)); ret = event_notifier_init(intp->unmask, 0); if (ret) { g_free(intp->interrupt); g_free(intp->unmask); g_free(intp); error_report("vfio: Error: resamplefd event_notifier_init failed"); return NULL; } QLIST_INSERT_HEAD(&vdev->intp_list, intp, next); return intp; }
1
340
void OPPROTO op_udivx_T1_T0(void) { T0 /= T1; FORCE_RET(); }
1
341
void address_space_init(AddressSpace *as, MemoryRegion *root) { memory_region_transaction_begin(); as->root = root; as->current_map = g_new(FlatView, 1); flatview_init(as->current_map); QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); as->name = NULL; memory_region_transaction_commit(); address_space_init_dispatch(as); }
1
343
static int ebml_read_ascii (MatroskaDemuxContext *matroska, uint32_t *id, char **str) { ByteIOContext *pb = matroska->ctx->pb; int size, res; uint64_t rlength; if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 || (res = ebml_read_element_length(matroska, &rlength)) < 0) return res; size = rlength; /* ebml strings are usually not 0-terminated, so we allocate one * byte more, read the string and NULL-terminate it ourselves. */ if (size < 0 || !(*str = av_malloc(size + 1))) { av_log(matroska->ctx, AV_LOG_ERROR, "Memory allocation failed\n"); return AVERROR(ENOMEM); } if (get_buffer(pb, (uint8_t *) *str, size) != size) { offset_t pos = url_ftell(pb); av_log(matroska->ctx, AV_LOG_ERROR, "Read error at pos. %"PRIu64" (0x%"PRIx64")\n", pos, pos); return AVERROR(EIO); } (*str)[size] = '\0'; return 0; }
1
344
static void memory_dump(Monitor *mon, int count, int format, int wsize, target_phys_addr_t addr, int is_physical) { CPUState *env; int l, line_size, i, max_digits, len; uint8_t buf[16]; uint64_t v; if (format == 'i') { int flags; flags = 0; env = mon_get_cpu(); if (!env && !is_physical) return; #ifdef TARGET_I386 if (wsize == 2) { flags = 1; } else if (wsize == 4) { flags = 0; } else { /* as default we use the current CS size */ flags = 0; if (env) { #ifdef TARGET_X86_64 if ((env->efer & MSR_EFER_LMA) && (env->segs[R_CS].flags & DESC_L_MASK)) flags = 2; else #endif if (!(env->segs[R_CS].flags & DESC_B_MASK)) flags = 1; } } #endif monitor_disas(mon, env, addr, count, is_physical, flags); return; } len = wsize * count; if (wsize == 1) line_size = 8; else line_size = 16; max_digits = 0; switch(format) { case 'o': max_digits = (wsize * 8 + 2) / 3; break; default: case 'x': max_digits = (wsize * 8) / 4; break; case 'u': case 'd': max_digits = (wsize * 8 * 10 + 32) / 33; break; case 'c': wsize = 1; break; } while (len > 0) { if (is_physical) monitor_printf(mon, TARGET_FMT_plx ":", addr); else monitor_printf(mon, TARGET_FMT_lx ":", (target_ulong)addr); l = len; if (l > line_size) l = line_size; if (is_physical) { cpu_physical_memory_rw(addr, buf, l, 0); } else { env = mon_get_cpu(); if (!env) break; if (cpu_memory_rw_debug(env, addr, buf, l, 0) < 0) { monitor_printf(mon, " Cannot access memory\n"); break; } } i = 0; while (i < l) { switch(wsize) { default: case 1: v = ldub_raw(buf + i); break; case 2: v = lduw_raw(buf + i); break; case 4: v = (uint32_t)ldl_raw(buf + i); break; case 8: v = ldq_raw(buf + i); break; } monitor_printf(mon, " "); switch(format) { case 'o': monitor_printf(mon, "%#*" PRIo64, max_digits, v); break; case 'x': monitor_printf(mon, "0x%0*" PRIx64, max_digits, v); break; case 'u': monitor_printf(mon, "%*" PRIu64, max_digits, v); break; case 'd': monitor_printf(mon, "%*" PRId64, max_digits, v); break; case 'c': monitor_printc(mon, v); break; } i += wsize; } monitor_printf(mon, "\n"); addr += l; len -= l; } }
0
347
RGB_FUNCTIONS(rgba32) #undef RGB_IN #undef RGB_OUT #undef BPP static void rgb24_to_rgb565(AVPicture *dst, AVPicture *src, int width, int height) { const unsigned char *p; unsigned char *q; int r, g, b, dst_wrap, src_wrap; int x, y; p = src->data[0]; src_wrap = src->linesize[0] - 3 * width; q = dst->data[0]; dst_wrap = dst->linesize[0] - 2 * width; for(y=0;y<height;y++) { for(x=0;x<width;x++) { r = p[0]; g = p[1]; b = p[2]; ((unsigned short *)q)[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3); q += 2; p += 3; } p += src_wrap; q += dst_wrap; } }
0
348
static void avc_luma_mid_and_aver_dst_16x16_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride) { avc_luma_mid_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride, 16); avc_luma_mid_and_aver_dst_8w_msa(src + 8, src_stride, dst + 8, dst_stride, 16); }
0
349
void MPV_common_init_armv4l(MpegEncContext *s) { int i; const int idct_algo= s->avctx->idct_algo; ff_put_pixels_clamped = s->avctx->dsp.put_pixels_clamped; ff_add_pixels_clamped = s->avctx->dsp.add_pixels_clamped; if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_ARM){ s->idct_put= arm_idct_put; s->idct_add= arm_idct_add; s->idct_permutation_type= FF_NO_IDCT_PERM; } }
0
350
static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr) { int i, k, sb = 0; int msb = sbr->k[0]; int usb = sbr->kx[1]; int goal_sb = ((1000 << 11) + (sbr->sample_rate >> 1)) / sbr->sample_rate; sbr->num_patches = 0; if (goal_sb < sbr->kx[1] + sbr->m[1]) { for (k = 0; sbr->f_master[k] < goal_sb; k++) ; } else k = sbr->n_master; do { int odd = 0; for (i = k; i == k || sb > (sbr->k[0] - 1 + msb - odd); i--) { sb = sbr->f_master[i]; odd = (sb + sbr->k[0]) & 1; } /* The requirements (14496-3 sp04 p205) set the maximum number of patches to 5. After this check the final number of patches can still be six, which is illegal; however, the Coding Technologies decoder check stream has a final count of 6 patches. */ if (sbr->num_patches > 5) { av_log(ac->avctx, AV_LOG_ERROR, "Too many patches: %d\n", sbr->num_patches); return -1; } sbr->patch_num_subbands[sbr->num_patches] = FFMAX(sb - usb, 0); sbr->patch_start_subband[sbr->num_patches] = sbr->k[0] - odd - sbr->patch_num_subbands[sbr->num_patches]; if (sbr->patch_num_subbands[sbr->num_patches] > 0) { usb = sb; msb = sb; sbr->num_patches++; } else msb = sbr->kx[1]; if (sbr->f_master[k] - sb < 3) k = sbr->n_master; } while (sb != sbr->kx[1] + sbr->m[1]); if (sbr->patch_num_subbands[sbr->num_patches-1] < 3 && sbr->num_patches > 1) sbr->num_patches--; return 0; }
0
351
static void qdm2_fft_decode_tones (QDM2Context *q, int duration, GetBitContext *gb, int b) { int channel, stereo, phase, exp; int local_int_4, local_int_8, stereo_phase, local_int_10; int local_int_14, stereo_exp, local_int_20, local_int_28; int n, offset; local_int_4 = 0; local_int_28 = 0; local_int_20 = 2; local_int_8 = (4 - duration); local_int_10 = 1 << (q->group_order - duration - 1); offset = 1; while (1) { if (q->superblocktype_2_3) { while ((n = qdm2_get_vlc(gb, &vlc_tab_fft_tone_offset[local_int_8], 1, 2)) < 2) { offset = 1; if (n == 0) { local_int_4 += local_int_10; local_int_28 += (1 << local_int_8); } else { local_int_4 += 8*local_int_10; local_int_28 += (8 << local_int_8); } } offset += (n - 2); } else { offset += qdm2_get_vlc(gb, &vlc_tab_fft_tone_offset[local_int_8], 1, 2); while (offset >= (local_int_10 - 1)) { offset += (1 - (local_int_10 - 1)); local_int_4 += local_int_10; local_int_28 += (1 << local_int_8); } } if (local_int_4 >= q->group_size) return; local_int_14 = (offset >> local_int_8); if (local_int_14 >= FF_ARRAY_ELEMS(fft_level_index_table)) return; if (q->nb_channels > 1) { channel = get_bits1(gb); stereo = get_bits1(gb); } else { channel = 0; stereo = 0; } exp = qdm2_get_vlc(gb, (b ? &fft_level_exp_vlc : &fft_level_exp_alt_vlc), 0, 2); exp += q->fft_level_exp[fft_level_index_table[local_int_14]]; exp = (exp < 0) ? 0 : exp; phase = get_bits(gb, 3); stereo_exp = 0; stereo_phase = 0; if (stereo) { stereo_exp = (exp - qdm2_get_vlc(gb, &fft_stereo_exp_vlc, 0, 1)); stereo_phase = (phase - qdm2_get_vlc(gb, &fft_stereo_phase_vlc, 0, 1)); if (stereo_phase < 0) stereo_phase += 8; } if (q->frequency_range > (local_int_14 + 1)) { int sub_packet = (local_int_20 + local_int_28); qdm2_fft_init_coefficient(q, sub_packet, offset, duration, channel, exp, phase); if (stereo) qdm2_fft_init_coefficient(q, sub_packet, offset, duration, (1 - channel), stereo_exp, stereo_phase); } offset++; } }
1
352
void ff_rtp_send_h263(AVFormatContext *s1, const uint8_t *buf1, int size) { RTPMuxContext *s = s1->priv_data; int len, max_packet_size; uint8_t *q; max_packet_size = s->max_payload_size; while (size > 0) { q = s->buf; if ((buf1[0] == 0) && (buf1[1] == 0)) { *q++ = 0x04; buf1 += 2; size -= 2; } else { *q++ = 0; } *q++ = 0; len = FFMIN(max_packet_size - 2, size); /* Look for a better place to split the frame into packets. */ if (len < size) { const uint8_t *end = find_resync_marker_reverse(buf1, buf1 + len); len = end - buf1; } memcpy(q, buf1, len); q += len; /* 90 KHz time stamp */ s->timestamp = s->cur_timestamp; ff_rtp_send_data(s1, s->buf, q - s->buf, (len == size)); buf1 += len; size -= len; } }
1
353
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) { TCGContext *s = tcg_ctx; int i, real_args, nb_rets, pi; unsigned sizemask, flags; TCGHelperInfo *info; TCGOp *op; info = g_hash_table_lookup(helper_table, (gpointer)func); flags = info->flags; sizemask = info->sizemask; #if defined(__sparc__) && !defined(__arch64__) \ && !defined(CONFIG_TCG_INTERPRETER) /* We have 64-bit values in one register, but need to pass as two separate parameters. Split them. */ int orig_sizemask = sizemask; int orig_nargs = nargs; TCGv_i64 retl, reth; TCGTemp *split_args[MAX_OPC_PARAM]; retl = NULL; reth = NULL; if (sizemask != 0) { for (i = real_args = 0; i < nargs; ++i) { int is_64bit = sizemask & (1 << (i+1)*2); if (is_64bit) { TCGv_i64 orig = temp_tcgv_i64(args[i]); TCGv_i32 h = tcg_temp_new_i32(); TCGv_i32 l = tcg_temp_new_i32(); tcg_gen_extr_i64_i32(l, h, orig); split_args[real_args++] = tcgv_i32_temp(h); split_args[real_args++] = tcgv_i32_temp(l); } else { split_args[real_args++] = args[i]; } } nargs = real_args; args = split_args; sizemask = 0; } #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 for (i = 0; i < nargs; ++i) { int is_64bit = sizemask & (1 << (i+1)*2); int is_signed = sizemask & (2 << (i+1)*2); if (!is_64bit) { TCGv_i64 temp = tcg_temp_new_i64(); TCGv_i64 orig = temp_tcgv_i64(args[i]); if (is_signed) { tcg_gen_ext32s_i64(temp, orig); } else { tcg_gen_ext32u_i64(temp, orig); } args[i] = tcgv_i64_temp(temp); } } #endif /* TCG_TARGET_EXTEND_ARGS */ i = s->gen_next_op_idx; tcg_debug_assert(i < OPC_BUF_SIZE); s->gen_op_buf[0].prev = i; s->gen_next_op_idx = i + 1; op = &s->gen_op_buf[i]; /* Set links for sequential allocation during translation. */ memset(op, 0, offsetof(TCGOp, args)); op->opc = INDEX_op_call; op->prev = i - 1; op->next = i + 1; pi = 0; if (ret != NULL) { #if defined(__sparc__) && !defined(__arch64__) \ && !defined(CONFIG_TCG_INTERPRETER) if (orig_sizemask & 1) { /* The 32-bit ABI is going to return the 64-bit value in the %o0/%o1 register pair. Prepare for this by using two return temporaries, and reassemble below. */ retl = tcg_temp_new_i64(); reth = tcg_temp_new_i64(); op->args[pi++] = tcgv_i64_arg(reth); op->args[pi++] = tcgv_i64_arg(retl); nb_rets = 2; } else { op->args[pi++] = temp_arg(ret); nb_rets = 1; } #else if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) { #ifdef HOST_WORDS_BIGENDIAN op->args[pi++] = temp_arg(ret + 1); op->args[pi++] = temp_arg(ret); #else op->args[pi++] = temp_arg(ret); op->args[pi++] = temp_arg(ret + 1); #endif nb_rets = 2; } else { op->args[pi++] = temp_arg(ret); nb_rets = 1; } #endif } else { nb_rets = 0; } op->callo = nb_rets; real_args = 0; for (i = 0; i < nargs; i++) { int is_64bit = sizemask & (1 << (i+1)*2); if (TCG_TARGET_REG_BITS < 64 && is_64bit) { #ifdef TCG_TARGET_CALL_ALIGN_ARGS /* some targets want aligned 64 bit args */ if (real_args & 1) { op->args[pi++] = TCG_CALL_DUMMY_ARG; real_args++; } #endif /* If stack grows up, then we will be placing successive arguments at lower addresses, which means we need to reverse the order compared to how we would normally treat either big or little-endian. For those arguments that will wind up in registers, this still works for HPPA (the only current STACK_GROWSUP target) since the argument registers are *also* allocated in decreasing order. If another such target is added, this logic may have to get more complicated to differentiate between stack arguments and register arguments. 
*/ #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) op->args[pi++] = temp_arg(args[i] + 1); op->args[pi++] = temp_arg(args[i]); #else op->args[pi++] = temp_arg(args[i]); op->args[pi++] = temp_arg(args[i] + 1); #endif real_args += 2; continue; } op->args[pi++] = temp_arg(args[i]); real_args++; } op->args[pi++] = (uintptr_t)func; op->args[pi++] = flags; op->calli = real_args; /* Make sure the fields didn't overflow. */ tcg_debug_assert(op->calli == real_args); tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); #if defined(__sparc__) && !defined(__arch64__) \ && !defined(CONFIG_TCG_INTERPRETER) /* Free all of the parts we allocated above. */ for (i = real_args = 0; i < orig_nargs; ++i) { int is_64bit = orig_sizemask & (1 << (i+1)*2); if (is_64bit) { tcg_temp_free_internal(args[real_args++]); tcg_temp_free_internal(args[real_args++]); } else { real_args++; } } if (orig_sizemask & 1) { /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. Note that describing these as TCGv_i64 eliminates an unnecessary zero-extension that tcg_gen_concat_i32_i64 would create. */ tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth); tcg_temp_free_i64(retl); tcg_temp_free_i64(reth); } #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 for (i = 0; i < nargs; ++i) { int is_64bit = sizemask & (1 << (i+1)*2); if (!is_64bit) { tcg_temp_free_internal(args[i]); } } #endif /* TCG_TARGET_EXTEND_ARGS */ }
1
356
void block_job_enter(BlockJob *job) { if (!block_job_started(job)) { return; } if (job->deferred_to_main_loop) { return; } if (!job->busy) { bdrv_coroutine_enter(blk_bs(job->blk), job->co); } }
0
357
static int vorbis_decode_frame(AVCodecContext *avccontext, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; vorbis_context *vc = avccontext->priv_data ; GetBitContext *gb = &(vc->gb); const float *channel_ptrs[255]; int i, len; if (!buf_size) return 0; av_dlog(NULL, "packet length %d \n", buf_size); init_get_bits(gb, buf, buf_size*8); len = vorbis_parse_audio_packet(vc); if (len <= 0) { *data_size = 0; return buf_size; } if (!vc->first_frame) { vc->first_frame = 1; *data_size = 0; return buf_size ; } av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n", get_bits_count(gb) / 8, get_bits_count(gb) % 8, len); if (vc->audio_channels > 8) { for (i = 0; i < vc->audio_channels; i++) channel_ptrs[i] = vc->channel_floors + i * len; } else { for (i = 0; i < vc->audio_channels; i++) channel_ptrs[i] = vc->channel_floors + len * ff_vorbis_channel_layout_offsets[vc->audio_channels - 1][i]; } if (avccontext->sample_fmt == AV_SAMPLE_FMT_FLT) vc->fmt_conv.float_interleave(data, channel_ptrs, len, vc->audio_channels); else vc->fmt_conv.float_to_int16_interleave(data, channel_ptrs, len, vc->audio_channels); *data_size = len * vc->audio_channels * av_get_bytes_per_sample(avccontext->sample_fmt); return buf_size ; }
1
358
void qmp_guest_shutdown(bool has_mode, const char *mode, Error **err) { UINT shutdown_flag = EWX_FORCE; slog("guest-shutdown called, mode: %s", mode); if (!has_mode || strcmp(mode, "powerdown") == 0) { shutdown_flag |= EWX_POWEROFF; } else if (strcmp(mode, "halt") == 0) { shutdown_flag |= EWX_SHUTDOWN; } else if (strcmp(mode, "reboot") == 0) { shutdown_flag |= EWX_REBOOT; } else { error_set(err, QERR_INVALID_PARAMETER_VALUE, "mode", "halt|powerdown|reboot"); return; } /* Request a shutdown privilege, but try to shut down the system anyway. */ acquire_privilege(SE_SHUTDOWN_NAME, err); if (error_is_set(err)) { return; } if (!ExitWindowsEx(shutdown_flag, SHTDN_REASON_FLAG_PLANNED)) { slog("guest-shutdown failed: %d", GetLastError()); error_set(err, QERR_UNDEFINED_ERROR); } }
1
359
void helper_rdmsr(void) { uint64_t val; helper_svm_check_intercept_param(SVM_EXIT_MSR, 0); switch((uint32_t)ECX) { case MSR_IA32_SYSENTER_CS: val = env->sysenter_cs; break; case MSR_IA32_SYSENTER_ESP: val = env->sysenter_esp; break; case MSR_IA32_SYSENTER_EIP: val = env->sysenter_eip; break; case MSR_IA32_APICBASE: val = cpu_get_apic_base(env); break; case MSR_EFER: val = env->efer; break; case MSR_STAR: val = env->star; break; case MSR_PAT: val = env->pat; break; case MSR_VM_HSAVE_PA: val = env->vm_hsave; break; case MSR_IA32_PERF_STATUS: /* tsc_increment_by_tick */ val = 1000ULL; /* CPU multiplier */ val |= (((uint64_t)4ULL) << 40); break; #ifdef TARGET_X86_64 case MSR_LSTAR: val = env->lstar; break; case MSR_CSTAR: val = env->cstar; break; case MSR_FMASK: val = env->fmask; break; case MSR_FSBASE: val = env->segs[R_FS].base; break; case MSR_GSBASE: val = env->segs[R_GS].base; break; case MSR_KERNELGSBASE: val = env->kernelgsbase; break; #endif #ifdef USE_KQEMU case MSR_QPI_COMMBASE: if (env->kqemu_enabled) { val = kqemu_comm_base; } else { val = 0; } break; #endif default: /* XXX: exception ? */ val = 0; break; } EAX = (uint32_t)(val); EDX = (uint32_t)(val >> 32); }
1
360
BlockJobInfoList *qmp_query_block_jobs(Error **errp) { BlockJobInfoList *head = NULL, **p_next = &head; BlockDriverState *bs; BdrvNextIterator *it = NULL; while ((it = bdrv_next(it, &bs))) { AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); if (bs->job) { BlockJobInfoList *elem = g_new0(BlockJobInfoList, 1); elem->value = block_job_query(bs->job); *p_next = elem; p_next = &elem->next; } aio_context_release(aio_context); } return head; }
1
361
DeviceState *ssi_create_slave(SSIBus *bus, const char *name) { DeviceState *dev; dev = qdev_create(&bus->qbus, name); qdev_init(dev); return dev; }
1
362
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, const AVFrame *reference) { int len, nb_components, i, h, v, predictor, point_transform; int index, id, ret; const int block_size = s->lossless ? 1 : 8; int ilv, prev_shift; if (!s->got_picture) { av_log(s->avctx, AV_LOG_WARNING, "Can not process SOS before SOF, skipping\n"); return -1; } av_assert0(s->picture_ptr->data[0]); /* XXX: verify len field validity */ len = get_bits(&s->gb, 16); nb_components = get_bits(&s->gb, 8); if (nb_components == 0 || nb_components > MAX_COMPONENTS) { av_log(s->avctx, AV_LOG_ERROR, "decode_sos: nb_components (%d) unsupported\n", nb_components); return AVERROR_PATCHWELCOME; } if (len != 6 + 2 * nb_components) { av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len); return AVERROR_INVALIDDATA; } for (i = 0; i < nb_components; i++) { id = get_bits(&s->gb, 8) - 1; av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id); /* find component index */ for (index = 0; index < s->nb_components; index++) if (id == s->component_id[index]) break; if (index == s->nb_components) { av_log(s->avctx, AV_LOG_ERROR, "decode_sos: index(%d) out of components\n", index); return AVERROR_INVALIDDATA; } /* Metasoft MJPEG codec has Cb and Cr swapped */ if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J') && nb_components == 3 && s->nb_components == 3 && i) index = 3 - i; s->quant_sindex[i] = s->quant_index[index]; s->nb_blocks[i] = s->h_count[index] * s->v_count[index]; s->h_scount[i] = s->h_count[index]; s->v_scount[i] = s->v_count[index]; if(nb_components == 3 && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P) index = (i+2)%3; if(nb_components == 1 && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P) index = (index+2)%3; s->comp_index[i] = index; s->dc_index[i] = get_bits(&s->gb, 4); s->ac_index[i] = get_bits(&s->gb, 4); if (s->dc_index[i] < 0 || s->ac_index[i] < 0 || s->dc_index[i] >= 4 || s->ac_index[i] >= 4) goto out_of_range; if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table)) goto out_of_range; } predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */ ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */ if(s->avctx->codec_tag != AV_RL32("CJPG")){ prev_shift = get_bits(&s->gb, 4); /* Ah */ point_transform = get_bits(&s->gb, 4); /* Al */ }else prev_shift = point_transform = 0; if (nb_components > 1) { /* interleaved stream */ s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size); s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size); } else if (!s->ls) { /* skip this for JPEG-LS */ h = s->h_max / s->h_scount[0]; v = s->v_max / s->v_scount[0]; s->mb_width = (s->width + h * block_size - 1) / (h * block_size); s->mb_height = (s->height + v * block_size - 1) / (v * block_size); s->nb_blocks[0] = 1; s->h_scount[0] = 1; s->v_scount[0] = 1; } if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n", s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "", predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod, s->pegasus_rct ? "PRCT" : (s->rct ? 
"RCT" : ""), nb_components); /* mjpeg-b can have padding bytes between sos and image data, skip them */ for (i = s->mjpb_skiptosod; i > 0; i--) skip_bits(&s->gb, 8); next_field: for (i = 0; i < nb_components; i++) s->last_dc[i] = (4 << s->bits); if (s->lossless) { av_assert0(s->picture_ptr == s->picture); if (CONFIG_JPEGLS_DECODER && s->ls) { // for () { // reset_ls_coding_parameters(s, 0); if ((ret = ff_jpegls_decode_picture(s, predictor, point_transform, ilv)) < 0) return ret; } else { if (s->rgb) { if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0) return ret; } else { if ((ret = ljpeg_decode_yuv_scan(s, predictor, point_transform, nb_components)) < 0) return ret; } } } else { if (s->progressive && predictor) { av_assert0(s->picture_ptr == s->picture); if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor, ilv, prev_shift, point_transform)) < 0) return ret; } else { if ((ret = mjpeg_decode_scan(s, nb_components, prev_shift, point_transform, mb_bitmask, reference)) < 0) return ret; } } if (s->interlaced && get_bits_left(&s->gb) > 32 && show_bits(&s->gb, 8) == 0xFF) { GetBitContext bak = s->gb; align_get_bits(&bak); if (show_bits(&bak, 16) == 0xFFD1) { av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n"); s->gb = bak; skip_bits(&s->gb, 16); s->bottom_field ^= 1; goto next_field; } } emms_c(); return 0; out_of_range: av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n"); return AVERROR_INVALIDDATA; }
1
363
static void v9fs_renameat(void *opaque) { ssize_t err = 0; size_t offset = 7; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; int32_t olddirfid, newdirfid; V9fsString old_name, new_name; v9fs_string_init(&old_name); v9fs_string_init(&new_name); err = pdu_unmarshal(pdu, offset, "dsds", &olddirfid, &old_name, &newdirfid, &new_name); if (err < 0) { goto out_err; } if (name_is_illegal(old_name.data) || name_is_illegal(new_name.data)) { err = -ENOENT; goto out_err; } v9fs_path_write_lock(s); err = v9fs_complete_renameat(pdu, olddirfid, &old_name, newdirfid, &new_name); v9fs_path_unlock(s); if (!err) { err = offset; } out_err: pdu_complete(pdu, err); v9fs_string_free(&old_name); v9fs_string_free(&new_name); }
1
364
void if_start(void) { struct mbuf *ifm, *ifqt; DEBUG_CALL("if_start"); if (if_queued == 0) return; /* Nothing to do */ again: /* check if we can really output */ if (!slirp_can_output()) return; /* * See which queue to get next packet from * If there's something in the fastq, select it immediately */ if (if_fastq.ifq_next != &if_fastq) { ifm = if_fastq.ifq_next; } else { /* Nothing on fastq, see if next_m is valid */ if (next_m != &if_batchq) ifm = next_m; else ifm = if_batchq.ifq_next; /* Set which packet to send on next iteration */ next_m = ifm->ifq_next; } /* Remove it from the queue */ ifqt = ifm->ifq_prev; remque(ifm); --if_queued; /* If there are more packets for this session, re-queue them */ if (ifm->ifs_next != /* ifm->ifs_prev != */ ifm) { insque(ifm->ifs_next, ifqt); ifs_remque(ifm); } /* Update so_queued */ if (ifm->ifq_so) { if (--ifm->ifq_so->so_queued == 0) /* If there's no more queued, reset nqueued */ ifm->ifq_so->so_nqueued = 0; } /* Encapsulate the packet for sending */ if_encap(ifm->m_data, ifm->m_len); if (if_queued) goto again; }
1
365
static void mips_cpu_realizefn(DeviceState *dev, Error **errp) { MIPSCPU *cpu = MIPS_CPU(dev); MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(dev); cpu_reset(CPU(cpu)); mcc->parent_realize(dev, errp); }
0
366
int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p) { int ret = 0; if (av_strstart(p, "pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,", &p)) { AVIOContext pb; RTSPState *rt = s->priv_data; AVDictionary *opts = NULL; int len = strlen(p) * 6 / 8; char *buf = av_mallocz(len); av_base64_decode(buf, p, len); if (rtp_asf_fix_header(buf, len) < 0) av_log(s, AV_LOG_ERROR, "Failed to fix invalid RTSP-MS/ASF min_pktsize\n"); init_packetizer(&pb, buf, len); if (rt->asf_ctx) { avformat_close_input(&rt->asf_ctx); } if (!(rt->asf_ctx = avformat_alloc_context())) return AVERROR(ENOMEM); rt->asf_ctx->pb = &pb; av_dict_set(&opts, "no_resync_search", "1", 0); ret = avformat_open_input(&rt->asf_ctx, "", &ff_asf_demuxer, &opts); av_dict_free(&opts); if (ret < 0) return ret; av_dict_copy(&s->metadata, rt->asf_ctx->metadata, 0); rt->asf_pb_pos = avio_tell(&pb); av_free(buf); rt->asf_ctx->pb = NULL; } return ret; }
0
367
static void alloc_picture(VideoState *is) { VideoPicture *vp; int64_t bufferdiff; vp = &is->pictq[is->pictq_windex]; if (vp->bmp) SDL_FreeYUVOverlay(vp->bmp); video_open(is, 0, vp); vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height, SDL_YV12_OVERLAY, screen); bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0; if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < vp->height * vp->bmp->pitches[0]) { /* SDL allocates a buffer smaller than requested if the video * overlay hardware is unable to support the requested size. */ av_log(NULL, AV_LOG_FATAL, "Error: the video system does not support an image\n" "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n" "to reduce the image size.\n", vp->width, vp->height ); do_exit(is); } SDL_LockMutex(is->pictq_mutex); vp->allocated = 1; SDL_CondSignal(is->pictq_cond); SDL_UnlockMutex(is->pictq_mutex); }
1
370
void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len) { vq->last_avail_idx--; virtqueue_unmap_sg(vq, elem, len); }
1
371
static void arm_gic_common_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->reset = arm_gic_common_reset; dc->realize = arm_gic_common_realize; dc->props = arm_gic_common_properties; dc->vmsd = &vmstate_gic; dc->no_user = 1; }
1
374
static CharDriverState *qemu_chr_open_tty(QemuOpts *opts) { const char *filename = qemu_opt_get(opts, "path"); CharDriverState *chr; int fd; TFR(fd = open(filename, O_RDWR | O_NONBLOCK)); if (fd < 0) { return NULL; } tty_serial_init(fd, 115200, 'N', 8, 1); chr = qemu_chr_open_fd(fd, fd); if (!chr) { close(fd); return NULL; } chr->chr_ioctl = tty_serial_ioctl; chr->chr_close = qemu_chr_close_tty; return chr; }
1
375
void object_property_set_qobject(Object *obj, QObject *value, const char *name, Error **errp) { Visitor *v; /* TODO: Should we reject, rather than ignore, excess input? */ v = qobject_input_visitor_new(value, false); object_property_set(obj, v, name, errp); visit_free(v); }