label: int64 (range 0–1)
func1: string (lengths 23–97k)
id: int64 (range 0–27.3k)
1
static void select_vgahw (const char *p) { const char *opts; vga_interface_type = VGA_NONE; if (strstart(p, "std", &opts)) { if (vga_available()) { vga_interface_type = VGA_STD; } else { fprintf(stderr, "Error: standard VGA not available\n"); exit(0); } } else if (strstart(p, "cirrus", &opts)) { if (cirrus_vga_available()) { vga_interface_type = VGA_CIRRUS; } else { fprintf(stderr, "Error: Cirrus VGA not available\n"); exit(0); } } else if (strstart(p, "vmware", &opts)) { if (vmware_vga_available()) { vga_interface_type = VGA_VMWARE; } else { fprintf(stderr, "Error: VMWare SVGA not available\n"); exit(0); } } else if (strstart(p, "xenfb", &opts)) { vga_interface_type = VGA_XENFB; } else if (strstart(p, "qxl", &opts)) { vga_interface_type = VGA_QXL; } else if (!strstart(p, "none", &opts)) { invalid_vga: fprintf(stderr, "Unknown vga type: %s\n", p); exit(1); } if (*opts) { const char *nextopt; if (strstart(opts, ",retrace=", &nextopt)) { opts = nextopt; if (strstart(opts, "dumb", &nextopt)) vga_retrace_method = VGA_RETRACE_DUMB; else if (strstart(opts, "precise", &nextopt)) vga_retrace_method = VGA_RETRACE_PRECISE; else goto invalid_vga; } else goto invalid_vga; } }
1,597
1
void kvm_irqchip_commit_routes(KVMState *s) { int ret; s->irq_routes->flags = 0; trace_kvm_irqchip_commit_routes(); ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes); assert(ret == 0); }
1,598
1
void cpu_exit(CPUState *cpu) { cpu->exit_request = 1; /* Ensure cpu_exec will see the exit request after TCG has exited. */ smp_wmb(); cpu->tcg_exit_req = 1; }
1,599
1
void FUNCC(ff_h264_idct8_add)(uint8_t *_dst, DCTELEM *_block, int stride){ int i; INIT_CLIP pixel *dst = (pixel*)_dst; dctcoef *block = (dctcoef*)_block; stride /= sizeof(pixel); block[0] += 32; for( i = 0; i < 8; i++ ) { const int a0 = block[i+0*8] + block[i+4*8]; const int a2 = block[i+0*8] - block[i+4*8]; const int a4 = (block[i+2*8]>>1) - block[i+6*8]; const int a6 = (block[i+6*8]>>1) + block[i+2*8]; const int b0 = a0 + a6; const int b2 = a2 + a4; const int b4 = a2 - a4; const int b6 = a0 - a6; const int a1 = -block[i+3*8] + block[i+5*8] - block[i+7*8] - (block[i+7*8]>>1); const int a3 = block[i+1*8] + block[i+7*8] - block[i+3*8] - (block[i+3*8]>>1); const int a5 = -block[i+1*8] + block[i+7*8] + block[i+5*8] + (block[i+5*8]>>1); const int a7 = block[i+3*8] + block[i+5*8] + block[i+1*8] + (block[i+1*8]>>1); const int b1 = (a7>>2) + a1; const int b3 = a3 + (a5>>2); const int b5 = (a3>>2) - a5; const int b7 = a7 - (a1>>2); block[i+0*8] = b0 + b7; block[i+7*8] = b0 - b7; block[i+1*8] = b2 + b5; block[i+6*8] = b2 - b5; block[i+2*8] = b4 + b3; block[i+5*8] = b4 - b3; block[i+3*8] = b6 + b1; block[i+4*8] = b6 - b1; } for( i = 0; i < 8; i++ ) { const int a0 = block[0+i*8] + block[4+i*8]; const int a2 = block[0+i*8] - block[4+i*8]; const int a4 = (block[2+i*8]>>1) - block[6+i*8]; const int a6 = (block[6+i*8]>>1) + block[2+i*8]; const int b0 = a0 + a6; const int b2 = a2 + a4; const int b4 = a2 - a4; const int b6 = a0 - a6; const int a1 = -block[3+i*8] + block[5+i*8] - block[7+i*8] - (block[7+i*8]>>1); const int a3 = block[1+i*8] + block[7+i*8] - block[3+i*8] - (block[3+i*8]>>1); const int a5 = -block[1+i*8] + block[7+i*8] + block[5+i*8] + (block[5+i*8]>>1); const int a7 = block[3+i*8] + block[5+i*8] + block[1+i*8] + (block[1+i*8]>>1); const int b1 = (a7>>2) + a1; const int b3 = a3 + (a5>>2); const int b5 = (a3>>2) - a5; const int b7 = a7 - (a1>>2); dst[i + 0*stride] = CLIP( dst[i + 0*stride] + ((b0 + b7) >> 6) ); dst[i + 1*stride] = CLIP( dst[i + 1*stride] + ((b2 + b5) >> 6) ); dst[i + 2*stride] = CLIP( dst[i + 2*stride] + ((b4 + b3) >> 6) ); dst[i + 3*stride] = CLIP( dst[i + 3*stride] + ((b6 + b1) >> 6) ); dst[i + 4*stride] = CLIP( dst[i + 4*stride] + ((b6 - b1) >> 6) ); dst[i + 5*stride] = CLIP( dst[i + 5*stride] + ((b4 - b3) >> 6) ); dst[i + 6*stride] = CLIP( dst[i + 6*stride] + ((b2 - b5) >> 6) ); dst[i + 7*stride] = CLIP( dst[i + 7*stride] + ((b0 - b7) >> 6) ); } }
1,600
1
static int huf_decode(const uint64_t *hcode, const HufDec *hdecod, GetByteContext *gb, int nbits, int rlc, int no, uint16_t *out) { uint64_t c = 0; uint16_t *outb = out; uint16_t *oe = out + no; const uint8_t *ie = gb->buffer + (nbits + 7) / 8; // input byte size uint8_t cs, s; int i, lc = 0; while (gb->buffer < ie) { get_char(c, lc, gb); while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; get_code(pl.lit, rlc, c, lc, gb, out, oe); } else { int j; if (!pl.p) return AVERROR_INVALIDDATA; for (j = 0; j < pl.lit; j++) { int l = hcode[pl.p[j]] & 63; while (lc < l && bytestream2_get_bytes_left(gb) > 0) get_char(c, lc, gb); if (lc >= l) { if ((hcode[pl.p[j]] >> 6) == ((c >> (lc - l)) & ((1LL << l) - 1))) { lc -= l; get_code(pl.p[j], rlc, c, lc, gb, out, oe); break; } } } if (j == pl.lit) return AVERROR_INVALIDDATA; } } } i = (8 - nbits) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; get_code(pl.lit, rlc, c, lc, gb, out, oe); } else { return AVERROR_INVALIDDATA; } } if (out - outb != no) return AVERROR_INVALIDDATA; return 0; }
1,601
0
static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) { const char *rn = "invalid"; if (sel != 0) check_insn(ctx, ISA_MIPS32); switch (reg) { case 0: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Index)); rn = "Index"; break; case 1: check_insn(ctx, ASE_MT); gen_helper_mfc0_mvpcontrol(arg, cpu_env); rn = "MVPControl"; break; case 2: check_insn(ctx, ASE_MT); gen_helper_mfc0_mvpconf0(arg, cpu_env); rn = "MVPConf0"; break; case 3: check_insn(ctx, ASE_MT); gen_helper_mfc0_mvpconf1(arg, cpu_env); rn = "MVPConf1"; break; default: goto die; } break; case 1: switch (sel) { case 0: gen_helper_mfc0_random(arg, cpu_env); rn = "Random"; break; case 1: check_insn(ctx, ASE_MT); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl)); rn = "VPEControl"; break; case 2: check_insn(ctx, ASE_MT); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0)); rn = "VPEConf0"; break; case 3: check_insn(ctx, ASE_MT); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1)); rn = "VPEConf1"; break; case 4: check_insn(ctx, ASE_MT); gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_YQMask)); rn = "YQMask"; break; case 5: check_insn(ctx, ASE_MT); gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPESchedule)); rn = "VPESchedule"; break; case 6: check_insn(ctx, ASE_MT); gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPEScheFBack)); rn = "VPEScheFBack"; break; case 7: check_insn(ctx, ASE_MT); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt)); rn = "VPEOpt"; break; default: goto die; } break; case 2: switch (sel) { case 0: tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo0)); #if defined(TARGET_MIPS64) if (ctx->rxi) { TCGv tmp = tcg_temp_new(); tcg_gen_andi_tl(tmp, arg, (3ull << 62)); tcg_gen_shri_tl(tmp, tmp, 32); tcg_gen_or_tl(arg, arg, tmp); tcg_temp_free(tmp); } #endif tcg_gen_ext32s_tl(arg, arg); rn = "EntryLo0"; break; case 1: check_insn(ctx, ASE_MT); gen_helper_mfc0_tcstatus(arg, cpu_env); rn = "TCStatus"; break; case 2: check_insn(ctx, ASE_MT); gen_helper_mfc0_tcbind(arg, cpu_env); rn = "TCBind"; break; case 3: check_insn(ctx, ASE_MT); gen_helper_mfc0_tcrestart(arg, cpu_env); rn = "TCRestart"; break; case 4: check_insn(ctx, ASE_MT); gen_helper_mfc0_tchalt(arg, cpu_env); rn = "TCHalt"; break; case 5: check_insn(ctx, ASE_MT); gen_helper_mfc0_tccontext(arg, cpu_env); rn = "TCContext"; break; case 6: check_insn(ctx, ASE_MT); gen_helper_mfc0_tcschedule(arg, cpu_env); rn = "TCSchedule"; break; case 7: check_insn(ctx, ASE_MT); gen_helper_mfc0_tcschefback(arg, cpu_env); rn = "TCScheFBack"; break; default: goto die; } break; case 3: switch (sel) { case 0: tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1)); #if defined(TARGET_MIPS64) if (ctx->rxi) { TCGv tmp = tcg_temp_new(); tcg_gen_andi_tl(tmp, arg, (3ull << 62)); tcg_gen_shri_tl(tmp, tmp, 32); tcg_gen_or_tl(arg, arg, tmp); tcg_temp_free(tmp); } #endif tcg_gen_ext32s_tl(arg, arg); rn = "EntryLo1"; break; default: goto die; } break; case 4: switch (sel) { case 0: tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_Context)); tcg_gen_ext32s_tl(arg, arg); rn = "Context"; break; case 1: // gen_helper_mfc0_contextconfig(arg); /* SmartMIPS ASE */ rn = "ContextConfig"; goto die; // break; case 2: if (ctx->ulri) { tcg_gen_ld32s_tl(arg, cpu_env, offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); rn = "UserLocal"; } else { tcg_gen_movi_tl(arg, 0); } break; default: goto die; } break; case 5: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageMask)); rn = "PageMask"; break; case 
1: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageGrain)); rn = "PageGrain"; break; default: goto die; } break; case 6: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Wired)); rn = "Wired"; break; case 1: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf0)); rn = "SRSConf0"; break; case 2: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf1)); rn = "SRSConf1"; break; case 3: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf2)); rn = "SRSConf2"; break; case 4: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf3)); rn = "SRSConf3"; break; case 5: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf4)); rn = "SRSConf4"; break; default: goto die; } break; case 7: switch (sel) { case 0: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_HWREna)); rn = "HWREna"; break; default: goto die; } break; case 8: switch (sel) { case 0: tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); tcg_gen_ext32s_tl(arg, arg); rn = "BadVAddr"; break; case 1: if (ctx->bi) { gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstr)); rn = "BadInstr"; } else { gen_mfc0_unimplemented(ctx, arg); } break; case 2: if (ctx->bp) { gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrP)); rn = "BadInstrP"; } else { gen_mfc0_unimplemented(ctx, arg); } break; default: goto die; } break; case 9: switch (sel) { case 0: /* Mark as an IO operation because we read the time. */ if (use_icount) gen_io_start(); gen_helper_mfc0_count(arg, cpu_env); if (use_icount) { gen_io_end(); } /* Break the TB to be able to take timer interrupts immediately after reading count. 
*/ ctx->bstate = BS_STOP; rn = "Count"; break; /* 6,7 are implementation dependent */ default: goto die; } break; case 10: switch (sel) { case 0: tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryHi)); tcg_gen_ext32s_tl(arg, arg); rn = "EntryHi"; break; default: goto die; } break; case 11: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Compare)); rn = "Compare"; break; /* 6,7 are implementation dependent */ default: goto die; } break; case 12: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Status)); rn = "Status"; break; case 1: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_IntCtl)); rn = "IntCtl"; break; case 2: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSCtl)); rn = "SRSCtl"; break; case 3: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSMap)); rn = "SRSMap"; break; default: goto die; } break; case 13: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Cause)); rn = "Cause"; break; default: goto die; } break; case 14: switch (sel) { case 0: tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC)); tcg_gen_ext32s_tl(arg, arg); rn = "EPC"; break; default: goto die; } break; case 15: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PRid)); rn = "PRid"; break; case 1: check_insn(ctx, ISA_MIPS32R2); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_EBase)); rn = "EBase"; break; default: goto die; } break; case 16: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config0)); rn = "Config"; break; case 1: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config1)); rn = "Config1"; break; case 2: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config2)); rn = "Config2"; break; case 3: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config3)); rn = "Config3"; break; case 4: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config4)); rn = "Config4"; break; case 5: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config5)); rn = "Config5"; break; /* 6,7 are implementation dependent */ case 6: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config6)); rn = "Config6"; break; case 7: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config7)); rn = "Config7"; break; default: goto die; } break; case 17: switch (sel) { case 0: gen_helper_mfc0_lladdr(arg, cpu_env); rn = "LLAddr"; break; default: goto die; } break; case 18: switch (sel) { case 0 ... 
7: gen_helper_1e0i(mfc0_watchlo, arg, sel); rn = "WatchLo"; break; default: goto die; } break; case 19: switch (sel) { case 0 ...7: gen_helper_1e0i(mfc0_watchhi, arg, sel); rn = "WatchHi"; break; default: goto die; } break; case 20: switch (sel) { case 0: #if defined(TARGET_MIPS64) check_insn(ctx, ISA_MIPS3); tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_XContext)); tcg_gen_ext32s_tl(arg, arg); rn = "XContext"; break; #endif default: goto die; } break; case 21: /* Officially reserved, but sel 0 is used for R1x000 framemask */ switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Framemask)); rn = "Framemask"; break; default: goto die; } break; case 22: tcg_gen_movi_tl(arg, 0); /* unimplemented */ rn = "'Diagnostic"; /* implementation dependent */ break; case 23: switch (sel) { case 0: gen_helper_mfc0_debug(arg, cpu_env); /* EJTAG support */ rn = "Debug"; break; case 1: // gen_helper_mfc0_tracecontrol(arg); /* PDtrace support */ rn = "TraceControl"; // break; case 2: // gen_helper_mfc0_tracecontrol2(arg); /* PDtrace support */ rn = "TraceControl2"; // break; case 3: // gen_helper_mfc0_usertracedata(arg); /* PDtrace support */ rn = "UserTraceData"; // break; case 4: // gen_helper_mfc0_tracebpc(arg); /* PDtrace support */ rn = "TraceBPC"; // break; default: goto die; } break; case 24: switch (sel) { case 0: /* EJTAG support */ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); tcg_gen_ext32s_tl(arg, arg); rn = "DEPC"; break; default: goto die; } break; case 25: switch (sel) { case 0: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Performance0)); rn = "Performance0"; break; case 1: // gen_helper_mfc0_performance1(arg); rn = "Performance1"; // break; case 2: // gen_helper_mfc0_performance2(arg); rn = "Performance2"; // break; case 3: // gen_helper_mfc0_performance3(arg); rn = "Performance3"; // break; case 4: // gen_helper_mfc0_performance4(arg); rn = "Performance4"; // break; case 5: // gen_helper_mfc0_performance5(arg); rn = "Performance5"; // break; case 6: // gen_helper_mfc0_performance6(arg); rn = "Performance6"; // break; case 7: // gen_helper_mfc0_performance7(arg); rn = "Performance7"; // break; default: goto die; } break; case 26: tcg_gen_movi_tl(arg, 0); /* unimplemented */ rn = "ECC"; break; case 27: switch (sel) { case 0 ... 3: tcg_gen_movi_tl(arg, 0); /* unimplemented */ rn = "CacheErr"; break; default: goto die; } break; case 28: switch (sel) { case 0: case 2: case 4: case 6: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagLo)); rn = "TagLo"; break; case 1: case 3: case 5: case 7: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataLo)); rn = "DataLo"; break; default: goto die; } break; case 29: switch (sel) { case 0: case 2: case 4: case 6: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagHi)); rn = "TagHi"; break; case 1: case 3: case 5: case 7: gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataHi)); rn = "DataHi"; break; default: goto die; } break; case 30: switch (sel) { case 0: tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); tcg_gen_ext32s_tl(arg, arg); rn = "ErrorEPC"; break; default: goto die; } break; case 31: switch (sel) { case 0: /* EJTAG support */ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DESAVE)); rn = "DESAVE"; break; case 2 ... 
7: if (ctx->kscrexist & (1 << sel)) { tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_KScratch[sel-2])); tcg_gen_ext32s_tl(arg, arg); rn = "KScratch"; } else { gen_mfc0_unimplemented(ctx, arg); } break; default: goto die; } break; default: goto die; } (void)rn; /* avoid a compiler warning */ LOG_DISAS("mfc0 %s (reg %d sel %d)\n", rn, reg, sel); return; die: LOG_DISAS("mfc0 %s (reg %d sel %d)\n", rn, reg, sel); generate_exception(ctx, EXCP_RI); }
1,602
0
static int64_t cvtnum(const char *s) { char *end; return qemu_strtosz_suffix(s, &end, QEMU_STRTOSZ_DEFSUFFIX_B); }
1,603
0
static void qpci_spapr_io_writeb(QPCIBus *bus, void *addr, uint8_t value) { QPCIBusSPAPR *s = container_of(bus, QPCIBusSPAPR, bus); uint64_t port = (uintptr_t)addr; if (port < s->pio.size) { writeb(s->pio_cpu_base + port, value); } else { writeb(s->mmio_cpu_base + port, value); } }
1,604
0
static void *rcu_q_updater(void *arg) { int j, target_el; long long n_updates_local = 0; long long n_removed_local = 0; struct list_element *el, *prev_el; *(struct rcu_reader_data **)arg = &rcu_reader; atomic_inc(&nthreadsrunning); while (goflag == GOFLAG_INIT) { g_usleep(1000); } while (goflag == GOFLAG_RUN) { target_el = select_random_el(RCU_Q_LEN); j = 0; /* FOREACH_RCU could work here but let's use both macros */ QLIST_FOREACH_SAFE_RCU(prev_el, &Q_list_head, entry, el) { j++; if (target_el == j) { QLIST_REMOVE_RCU(prev_el, entry); /* may be more than one updater in the future */ call_rcu1(&prev_el->rcu, reclaim_list_el); n_removed_local++; break; } } if (goflag == GOFLAG_STOP) { break; } target_el = select_random_el(RCU_Q_LEN); j = 0; QLIST_FOREACH_RCU(el, &Q_list_head, entry) { j++; if (target_el == j) { prev_el = g_new(struct list_element, 1); atomic_add(&n_nodes, 1); prev_el->val = atomic_read(&n_nodes); QLIST_INSERT_BEFORE_RCU(el, prev_el, entry); break; } } n_updates_local += 2; synchronize_rcu(); } synchronize_rcu(); atomic_add(&n_updates, n_updates_local); atomic_add(&n_nodes_removed, n_removed_local); return NULL; }
1,605
0
int Configure(void **ctxp, int argc, char *argv[]) { ContextInfo *ci; int c; *ctxp = av_mallocz(sizeof(ContextInfo)); ci = (ContextInfo *) *ctxp; optind = 1; ci->dir = "/tmp"; ci->threshold = 100; ci->file_limit = 100; ci->min_interval = 1000000; ci->inset = 10; /* Percent */ while ((c = getopt(argc, argv, "w:i:dh:s:v:zl:t:D:")) > 0) { switch (c) { case 'h': dorange(optarg, &ci->dark.h, &ci->bright.h, 360); break; case 's': dorange(optarg, &ci->dark.s, &ci->bright.s, 255); break; case 'v': dorange(optarg, &ci->dark.v, &ci->bright.v, 255); break; case 'z': ci->zapping = 1; break; case 'l': ci->file_limit = atoi(optarg); break; case 'i': ci->min_interval = 1000000 * atof(optarg); break; case 't': ci->threshold = atof(optarg) * 1000; if (ci->threshold > 1000 || ci->threshold < 0) { av_log(NULL, AV_LOG_ERROR, "Invalid threshold value '%s' (range is 0-1)\n", optarg); return -1; } break; case 'w': ci->min_width = atoi(optarg); break; case 'd': ci->debug++; break; case 'D': ci->dir = av_strdup(optarg); break; default: av_log(NULL, AV_LOG_ERROR, "Unrecognized argument '%s'\n", argv[optind]); return -1; } } av_log(NULL, AV_LOG_INFO, "Fish detector configured:\n"); av_log(NULL, AV_LOG_INFO, " HSV range: %d,%d,%d - %d,%d,%d\n", ci->dark.h, ci->dark.s, ci->dark.v, ci->bright.h, ci->bright.s, ci->bright.v); av_log(NULL, AV_LOG_INFO, " Threshold is %d%% pixels\n", ci->threshold / 10); return 0; }
1,606
0
static int local_chown(FsContext *ctx, const char *path, uid_t uid, gid_t gid) { return chown(rpath(ctx, path), uid, gid); }
1,607
0
static void disas_arm_insn(DisasContext *s, unsigned int insn) { unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh; TCGv_i32 tmp; TCGv_i32 tmp2; TCGv_i32 tmp3; TCGv_i32 addr; TCGv_i64 tmp64; /* M variants do not implement ARM mode. */ if (arm_dc_feature(s, ARM_FEATURE_M)) { goto illegal_op; } cond = insn >> 28; if (cond == 0xf){ /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we * choose to UNDEF. In ARMv5 and above the space is used * for miscellaneous unconditional instructions. */ ARCH(5); /* Unconditional instructions. */ if (((insn >> 25) & 7) == 1) { /* NEON Data processing. */ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { goto illegal_op; } if (disas_neon_data_insn(s, insn)) { goto illegal_op; } return; } if ((insn & 0x0f100000) == 0x04000000) { /* NEON load/store. */ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { goto illegal_op; } if (disas_neon_ls_insn(s, insn)) { goto illegal_op; } return; } if ((insn & 0x0f000e10) == 0x0e000a00) { /* VFP. */ if (disas_vfp_insn(s, insn)) { goto illegal_op; } return; } if (((insn & 0x0f30f000) == 0x0510f000) || ((insn & 0x0f30f010) == 0x0710f000)) { if ((insn & (1 << 22)) == 0) { /* PLDW; v7MP */ if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) { goto illegal_op; } } /* Otherwise PLD; v5TE+ */ ARCH(5TE); return; } if (((insn & 0x0f70f000) == 0x0450f000) || ((insn & 0x0f70f010) == 0x0650f000)) { ARCH(7); return; /* PLI; V7 */ } if (((insn & 0x0f700000) == 0x04100000) || ((insn & 0x0f700010) == 0x06100000)) { if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) { goto illegal_op; } return; /* v7MP: Unallocated memory hint: must NOP */ } if ((insn & 0x0ffffdff) == 0x01010000) { ARCH(6); /* setend */ if (((insn >> 9) & 1) != s->bswap_code) { /* Dynamic endianness switching not implemented. */ qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n"); goto illegal_op; } return; } else if ((insn & 0x0fffff00) == 0x057ff000) { switch ((insn >> 4) & 0xf) { case 1: /* clrex */ ARCH(6K); gen_clrex(s); return; case 4: /* dsb */ case 5: /* dmb */ ARCH(7); /* We don't emulate caches so these are a no-op. */ return; case 6: /* isb */ /* We need to break the TB after this insn to execute * self-modifying code correctly and also to take * any pending interrupts immediately. */ gen_lookup_tb(s); return; default: goto illegal_op; } } else if ((insn & 0x0e5fffe0) == 0x084d0500) { /* srs */ if (IS_USER(s)) { goto illegal_op; } ARCH(6); gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21)); return; } else if ((insn & 0x0e50ffe0) == 0x08100a00) { /* rfe */ int32_t offset; if (IS_USER(s)) goto illegal_op; ARCH(6); rn = (insn >> 16) & 0xf; addr = load_reg(s, rn); i = (insn >> 23) & 3; switch (i) { case 0: offset = -4; break; /* DA */ case 1: offset = 0; break; /* IA */ case 2: offset = -8; break; /* DB */ case 3: offset = 4; break; /* IB */ default: abort(); } if (offset) tcg_gen_addi_i32(addr, addr, offset); /* Load PC into tmp and CPSR into tmp2. */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, get_mem_index(s)); tcg_gen_addi_i32(addr, addr, 4); tmp2 = tcg_temp_new_i32(); gen_aa32_ld32u(tmp2, addr, get_mem_index(s)); if (insn & (1 << 21)) { /* Base writeback. 
*/ switch (i) { case 0: offset = -8; break; case 1: offset = 4; break; case 2: offset = -4; break; case 3: offset = 0; break; default: abort(); } if (offset) tcg_gen_addi_i32(addr, addr, offset); store_reg(s, rn, addr); } else { tcg_temp_free_i32(addr); } gen_rfe(s, tmp, tmp2); return; } else if ((insn & 0x0e000000) == 0x0a000000) { /* branch link and change to thumb (blx <offset>) */ int32_t offset; val = (uint32_t)s->pc; tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, val); store_reg(s, 14, tmp); /* Sign-extend the 24-bit offset */ offset = (((int32_t)insn) << 8) >> 8; /* offset * 4 + bit24 * 2 + (thumb bit) */ val += (offset << 2) | ((insn >> 23) & 2) | 1; /* pipeline offset */ val += 4; /* protected by ARCH(5); above, near the start of uncond block */ gen_bx_im(s, val); return; } else if ((insn & 0x0e000f00) == 0x0c000100) { if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) { /* iWMMXt register transfer. */ if (extract32(s->c15_cpar, 1, 1)) { if (!disas_iwmmxt_insn(s, insn)) { return; } } } } else if ((insn & 0x0fe00000) == 0x0c400000) { /* Coprocessor double register transfer. */ ARCH(5TE); } else if ((insn & 0x0f000010) == 0x0e000010) { /* Additional coprocessor register transfer. */ } else if ((insn & 0x0ff10020) == 0x01000000) { uint32_t mask; uint32_t val; /* cps (privileged) */ if (IS_USER(s)) return; mask = val = 0; if (insn & (1 << 19)) { if (insn & (1 << 8)) mask |= CPSR_A; if (insn & (1 << 7)) mask |= CPSR_I; if (insn & (1 << 6)) mask |= CPSR_F; if (insn & (1 << 18)) val |= mask; } if (insn & (1 << 17)) { mask |= CPSR_M; val |= (insn & 0x1f); } if (mask) { gen_set_psr_im(s, mask, 0, val); } return; } goto illegal_op; } if (cond != 0xe) { /* if not always execute, we generate a conditional jump to next instruction */ s->condlabel = gen_new_label(); arm_gen_test_cc(cond ^ 1, s->condlabel); s->condjmp = 1; } if ((insn & 0x0f900000) == 0x03000000) { if ((insn & (1 << 21)) == 0) { ARCH(6T2); rd = (insn >> 12) & 0xf; val = ((insn >> 4) & 0xf000) | (insn & 0xfff); if ((insn & (1 << 22)) == 0) { /* MOVW */ tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, val); } else { /* MOVT */ tmp = load_reg(s, rd); tcg_gen_ext16u_i32(tmp, tmp); tcg_gen_ori_i32(tmp, tmp, val << 16); } store_reg(s, rd, tmp); } else { if (((insn >> 12) & 0xf) != 0xf) goto illegal_op; if (((insn >> 16) & 0xf) == 0) { gen_nop_hint(s, insn & 0xff); } else { /* CPSR = immediate */ val = insn & 0xff; shift = ((insn >> 8) & 0xf) * 2; if (shift) val = (val >> shift) | (val << (32 - shift)); i = ((insn & (1 << 22)) != 0); if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i), i, val)) { goto illegal_op; } } } } else if ((insn & 0x0f900000) == 0x01000000 && (insn & 0x00000090) != 0x00000090) { /* miscellaneous instructions */ op1 = (insn >> 21) & 3; sh = (insn >> 4) & 0xf; rm = insn & 0xf; switch (sh) { case 0x0: /* move program status register */ if (op1 & 1) { /* PSR = reg */ tmp = load_reg(s, rm); i = ((op1 & 2) != 0); if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp)) goto illegal_op; } else { /* reg = PSR */ rd = (insn >> 12) & 0xf; if (op1 & 2) { if (IS_USER(s)) goto illegal_op; tmp = load_cpu_field(spsr); } else { tmp = tcg_temp_new_i32(); gen_helper_cpsr_read(tmp, cpu_env); } store_reg(s, rd, tmp); } break; case 0x1: if (op1 == 1) { /* branch/exchange thumb (bx). 
*/ ARCH(4T); tmp = load_reg(s, rm); gen_bx(s, tmp); } else if (op1 == 3) { /* clz */ ARCH(5); rd = (insn >> 12) & 0xf; tmp = load_reg(s, rm); gen_helper_clz(tmp, tmp); store_reg(s, rd, tmp); } else { goto illegal_op; } break; case 0x2: if (op1 == 1) { ARCH(5J); /* bxj */ /* Trivial implementation equivalent to bx. */ tmp = load_reg(s, rm); gen_bx(s, tmp); } else { goto illegal_op; } break; case 0x3: if (op1 != 1) goto illegal_op; ARCH(5); /* branch link/exchange thumb (blx) */ tmp = load_reg(s, rm); tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, s->pc); store_reg(s, 14, tmp2); gen_bx(s, tmp); break; case 0x4: { /* crc32/crc32c */ uint32_t c = extract32(insn, 8, 4); /* Check this CPU supports ARMv8 CRC instructions. * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED. * Bits 8, 10 and 11 should be zero. */ if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 || (c & 0xd) != 0) { goto illegal_op; } rn = extract32(insn, 16, 4); rd = extract32(insn, 12, 4); tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); if (op1 == 0) { tcg_gen_andi_i32(tmp2, tmp2, 0xff); } else if (op1 == 1) { tcg_gen_andi_i32(tmp2, tmp2, 0xffff); } tmp3 = tcg_const_i32(1 << op1); if (c & 0x2) { gen_helper_crc32c(tmp, tmp, tmp2, tmp3); } else { gen_helper_crc32(tmp, tmp, tmp2, tmp3); } tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp3); store_reg(s, rd, tmp); break; } case 0x5: /* saturating add/subtract */ ARCH(5TE); rd = (insn >> 12) & 0xf; rn = (insn >> 16) & 0xf; tmp = load_reg(s, rm); tmp2 = load_reg(s, rn); if (op1 & 2) gen_helper_double_saturate(tmp2, cpu_env, tmp2); if (op1 & 1) gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2); else gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); break; case 7: { int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4); switch (op1) { case 1: /* bkpt */ ARCH(5); gen_exception_insn(s, 4, EXCP_BKPT, syn_aa32_bkpt(imm16, false), default_exception_el(s)); break; case 2: /* Hypervisor call (v7) */ ARCH(7); if (IS_USER(s)) { goto illegal_op; } gen_hvc(s, imm16); break; case 3: /* Secure monitor call (v6+) */ ARCH(6K); if (IS_USER(s)) { goto illegal_op; } gen_smc(s); break; default: goto illegal_op; } break; } case 0x8: /* signed multiply */ case 0xa: case 0xc: case 0xe: ARCH(5TE); rs = (insn >> 8) & 0xf; rn = (insn >> 12) & 0xf; rd = (insn >> 16) & 0xf; if (op1 == 1) { /* (32 * 16) >> 16 */ tmp = load_reg(s, rm); tmp2 = load_reg(s, rs); if (sh & 4) tcg_gen_sari_i32(tmp2, tmp2, 16); else gen_sxth(tmp2); tmp64 = gen_muls_i64_i32(tmp, tmp2); tcg_gen_shri_i64(tmp64, tmp64, 16); tmp = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(tmp, tmp64); tcg_temp_free_i64(tmp64); if ((sh & 2) == 0) { tmp2 = load_reg(s, rn); gen_helper_add_setq(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); } store_reg(s, rd, tmp); } else { /* 16 * 16 */ tmp = load_reg(s, rm); tmp2 = load_reg(s, rs); gen_mulxy(tmp, tmp2, sh & 2, sh & 4); tcg_temp_free_i32(tmp2); if (op1 == 2) { tmp64 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(tmp64, tmp); tcg_temp_free_i32(tmp); gen_addq(s, tmp64, rn, rd); gen_storeq_reg(s, rn, rd, tmp64); tcg_temp_free_i64(tmp64); } else { if (op1 == 0) { tmp2 = load_reg(s, rn); gen_helper_add_setq(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); } store_reg(s, rd, tmp); } } break; default: goto illegal_op; } } else if (((insn & 0x0e000000) == 0 && (insn & 0x00000090) != 0x90) || ((insn & 0x0e000000) == (1 << 25))) { int set_cc, logic_cc, shiftop; op1 = (insn >> 21) & 0xf; set_cc = (insn >> 20) & 1; logic_cc = table_logic_cc[op1] & set_cc; /* data 
processing instruction */ if (insn & (1 << 25)) { /* immediate operand */ val = insn & 0xff; shift = ((insn >> 8) & 0xf) * 2; if (shift) { val = (val >> shift) | (val << (32 - shift)); } tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, val); if (logic_cc && shift) { gen_set_CF_bit31(tmp2); } } else { /* register */ rm = (insn) & 0xf; tmp2 = load_reg(s, rm); shiftop = (insn >> 5) & 3; if (!(insn & (1 << 4))) { shift = (insn >> 7) & 0x1f; gen_arm_shift_im(tmp2, shiftop, shift, logic_cc); } else { rs = (insn >> 8) & 0xf; tmp = load_reg(s, rs); gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc); } } if (op1 != 0x0f && op1 != 0x0d) { rn = (insn >> 16) & 0xf; tmp = load_reg(s, rn); } else { TCGV_UNUSED_I32(tmp); } rd = (insn >> 12) & 0xf; switch(op1) { case 0x00: tcg_gen_and_i32(tmp, tmp, tmp2); if (logic_cc) { gen_logic_CC(tmp); } store_reg_bx(s, rd, tmp); break; case 0x01: tcg_gen_xor_i32(tmp, tmp, tmp2); if (logic_cc) { gen_logic_CC(tmp); } store_reg_bx(s, rd, tmp); break; case 0x02: if (set_cc && rd == 15) { /* SUBS r15, ... is used for exception return. */ if (IS_USER(s)) { goto illegal_op; } gen_sub_CC(tmp, tmp, tmp2); gen_exception_return(s, tmp); } else { if (set_cc) { gen_sub_CC(tmp, tmp, tmp2); } else { tcg_gen_sub_i32(tmp, tmp, tmp2); } store_reg_bx(s, rd, tmp); } break; case 0x03: if (set_cc) { gen_sub_CC(tmp, tmp2, tmp); } else { tcg_gen_sub_i32(tmp, tmp2, tmp); } store_reg_bx(s, rd, tmp); break; case 0x04: if (set_cc) { gen_add_CC(tmp, tmp, tmp2); } else { tcg_gen_add_i32(tmp, tmp, tmp2); } store_reg_bx(s, rd, tmp); break; case 0x05: if (set_cc) { gen_adc_CC(tmp, tmp, tmp2); } else { gen_add_carry(tmp, tmp, tmp2); } store_reg_bx(s, rd, tmp); break; case 0x06: if (set_cc) { gen_sbc_CC(tmp, tmp, tmp2); } else { gen_sub_carry(tmp, tmp, tmp2); } store_reg_bx(s, rd, tmp); break; case 0x07: if (set_cc) { gen_sbc_CC(tmp, tmp2, tmp); } else { gen_sub_carry(tmp, tmp2, tmp); } store_reg_bx(s, rd, tmp); break; case 0x08: if (set_cc) { tcg_gen_and_i32(tmp, tmp, tmp2); gen_logic_CC(tmp); } tcg_temp_free_i32(tmp); break; case 0x09: if (set_cc) { tcg_gen_xor_i32(tmp, tmp, tmp2); gen_logic_CC(tmp); } tcg_temp_free_i32(tmp); break; case 0x0a: if (set_cc) { gen_sub_CC(tmp, tmp, tmp2); } tcg_temp_free_i32(tmp); break; case 0x0b: if (set_cc) { gen_add_CC(tmp, tmp, tmp2); } tcg_temp_free_i32(tmp); break; case 0x0c: tcg_gen_or_i32(tmp, tmp, tmp2); if (logic_cc) { gen_logic_CC(tmp); } store_reg_bx(s, rd, tmp); break; case 0x0d: if (logic_cc && rd == 15) { /* MOVS r15, ... is used for exception return. 
*/ if (IS_USER(s)) { goto illegal_op; } gen_exception_return(s, tmp2); } else { if (logic_cc) { gen_logic_CC(tmp2); } store_reg_bx(s, rd, tmp2); } break; case 0x0e: tcg_gen_andc_i32(tmp, tmp, tmp2); if (logic_cc) { gen_logic_CC(tmp); } store_reg_bx(s, rd, tmp); break; default: case 0x0f: tcg_gen_not_i32(tmp2, tmp2); if (logic_cc) { gen_logic_CC(tmp2); } store_reg_bx(s, rd, tmp2); break; } if (op1 != 0x0f && op1 != 0x0d) { tcg_temp_free_i32(tmp2); } } else { /* other instructions */ op1 = (insn >> 24) & 0xf; switch(op1) { case 0x0: case 0x1: /* multiplies, extra load/stores */ sh = (insn >> 5) & 3; if (sh == 0) { if (op1 == 0x0) { rd = (insn >> 16) & 0xf; rn = (insn >> 12) & 0xf; rs = (insn >> 8) & 0xf; rm = (insn) & 0xf; op1 = (insn >> 20) & 0xf; switch (op1) { case 0: case 1: case 2: case 3: case 6: /* 32 bit mul */ tmp = load_reg(s, rs); tmp2 = load_reg(s, rm); tcg_gen_mul_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); if (insn & (1 << 22)) { /* Subtract (mls) */ ARCH(6T2); tmp2 = load_reg(s, rn); tcg_gen_sub_i32(tmp, tmp2, tmp); tcg_temp_free_i32(tmp2); } else if (insn & (1 << 21)) { /* Add */ tmp2 = load_reg(s, rn); tcg_gen_add_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } if (insn & (1 << 20)) gen_logic_CC(tmp); store_reg(s, rd, tmp); break; case 4: /* 64 bit mul double accumulate (UMAAL) */ ARCH(6); tmp = load_reg(s, rs); tmp2 = load_reg(s, rm); tmp64 = gen_mulu_i64_i32(tmp, tmp2); gen_addq_lo(s, tmp64, rn); gen_addq_lo(s, tmp64, rd); gen_storeq_reg(s, rn, rd, tmp64); tcg_temp_free_i64(tmp64); break; case 8: case 9: case 10: case 11: case 12: case 13: case 14: case 15: /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */ tmp = load_reg(s, rs); tmp2 = load_reg(s, rm); if (insn & (1 << 22)) { tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2); } else { tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2); } if (insn & (1 << 21)) { /* mult accumulate */ TCGv_i32 al = load_reg(s, rn); TCGv_i32 ah = load_reg(s, rd); tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah); tcg_temp_free_i32(al); tcg_temp_free_i32(ah); } if (insn & (1 << 20)) { gen_logicq_cc(tmp, tmp2); } store_reg(s, rn, tmp); store_reg(s, rd, tmp2); break; default: goto illegal_op; } } else { rn = (insn >> 16) & 0xf; rd = (insn >> 12) & 0xf; if (insn & (1 << 23)) { /* load/store exclusive */ int op2 = (insn >> 8) & 3; op1 = (insn >> 21) & 0x3; switch (op2) { case 0: /* lda/stl */ if (op1 == 1) { goto illegal_op; } ARCH(8); break; case 1: /* reserved */ goto illegal_op; case 2: /* ldaex/stlex */ ARCH(8); break; case 3: /* ldrex/strex */ if (op1) { ARCH(6K); } else { ARCH(6); } break; } addr = tcg_temp_local_new_i32(); load_reg_var(s, addr, rn); /* Since the emulation does not have barriers, the acquire/release semantics need no special handling */ if (op2 == 0) { if (insn & (1 << 20)) { tmp = tcg_temp_new_i32(); switch (op1) { case 0: /* lda */ gen_aa32_ld32u(tmp, addr, get_mem_index(s)); break; case 2: /* ldab */ gen_aa32_ld8u(tmp, addr, get_mem_index(s)); break; case 3: /* ldah */ gen_aa32_ld16u(tmp, addr, get_mem_index(s)); break; default: abort(); } store_reg(s, rd, tmp); } else { rm = insn & 0xf; tmp = load_reg(s, rm); switch (op1) { case 0: /* stl */ gen_aa32_st32(tmp, addr, get_mem_index(s)); break; case 2: /* stlb */ gen_aa32_st8(tmp, addr, get_mem_index(s)); break; case 3: /* stlh */ gen_aa32_st16(tmp, addr, get_mem_index(s)); break; default: abort(); } tcg_temp_free_i32(tmp); } } else if (insn & (1 << 20)) { switch (op1) { case 0: /* ldrex */ gen_load_exclusive(s, rd, 15, addr, 2); break; case 1: /* ldrexd */ gen_load_exclusive(s, rd, rd + 1, addr, 
3); break; case 2: /* ldrexb */ gen_load_exclusive(s, rd, 15, addr, 0); break; case 3: /* ldrexh */ gen_load_exclusive(s, rd, 15, addr, 1); break; default: abort(); } } else { rm = insn & 0xf; switch (op1) { case 0: /* strex */ gen_store_exclusive(s, rd, rm, 15, addr, 2); break; case 1: /* strexd */ gen_store_exclusive(s, rd, rm, rm + 1, addr, 3); break; case 2: /* strexb */ gen_store_exclusive(s, rd, rm, 15, addr, 0); break; case 3: /* strexh */ gen_store_exclusive(s, rd, rm, 15, addr, 1); break; default: abort(); } } tcg_temp_free_i32(addr); } else { /* SWP instruction */ rm = (insn) & 0xf; /* ??? This is not really atomic. However we know we never have multiple CPUs running in parallel, so it is good enough. */ addr = load_reg(s, rn); tmp = load_reg(s, rm); tmp2 = tcg_temp_new_i32(); if (insn & (1 << 22)) { gen_aa32_ld8u(tmp2, addr, get_mem_index(s)); gen_aa32_st8(tmp, addr, get_mem_index(s)); } else { gen_aa32_ld32u(tmp2, addr, get_mem_index(s)); gen_aa32_st32(tmp, addr, get_mem_index(s)); } tcg_temp_free_i32(tmp); tcg_temp_free_i32(addr); store_reg(s, rd, tmp2); } } } else { int address_offset; bool load = insn & (1 << 20); bool doubleword = false; /* Misc load/store */ rn = (insn >> 16) & 0xf; rd = (insn >> 12) & 0xf; if (!load && (sh & 2)) { /* doubleword */ ARCH(5TE); if (rd & 1) { /* UNPREDICTABLE; we choose to UNDEF */ goto illegal_op; } load = (sh & 1) == 0; doubleword = true; } addr = load_reg(s, rn); if (insn & (1 << 24)) gen_add_datah_offset(s, insn, 0, addr); address_offset = 0; if (doubleword) { if (!load) { /* store */ tmp = load_reg(s, rd); gen_aa32_st32(tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); tcg_gen_addi_i32(addr, addr, 4); tmp = load_reg(s, rd + 1); gen_aa32_st32(tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); } else { /* load */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, get_mem_index(s)); store_reg(s, rd, tmp); tcg_gen_addi_i32(addr, addr, 4); tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, get_mem_index(s)); rd++; } address_offset = -4; } else if (load) { /* load */ tmp = tcg_temp_new_i32(); switch (sh) { case 1: gen_aa32_ld16u(tmp, addr, get_mem_index(s)); break; case 2: gen_aa32_ld8s(tmp, addr, get_mem_index(s)); break; default: case 3: gen_aa32_ld16s(tmp, addr, get_mem_index(s)); break; } } else { /* store */ tmp = load_reg(s, rd); gen_aa32_st16(tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); } /* Perform base writeback before the loaded value to ensure correct behavior with overlapping index registers. ldrd with base writeback is undefined if the destination and index registers overlap. */ if (!(insn & (1 << 24))) { gen_add_datah_offset(s, insn, address_offset, addr); store_reg(s, rn, addr); } else if (insn & (1 << 21)) { if (address_offset) tcg_gen_addi_i32(addr, addr, address_offset); store_reg(s, rn, addr); } else { tcg_temp_free_i32(addr); } if (load) { /* Complete the load. */ store_reg(s, rd, tmp); } } break; case 0x4: case 0x5: goto do_ldst; case 0x6: case 0x7: if (insn & (1 << 4)) { ARCH(6); /* Armv6 Media instructions. */ rm = insn & 0xf; rn = (insn >> 16) & 0xf; rd = (insn >> 12) & 0xf; rs = (insn >> 8) & 0xf; switch ((insn >> 23) & 3) { case 0: /* Parallel add/subtract. */ op1 = (insn >> 20) & 7; tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); sh = (insn >> 5) & 7; if ((op1 & 3) == 0 || sh == 5 || sh == 6) goto illegal_op; gen_arm_parallel_addsub(op1, sh, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); break; case 1: if ((insn & 0x00700020) == 0) { /* Halfword pack. 
*/ tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); shift = (insn >> 7) & 0x1f; if (insn & (1 << 6)) { /* pkhtb */ if (shift == 0) shift = 31; tcg_gen_sari_i32(tmp2, tmp2, shift); tcg_gen_andi_i32(tmp, tmp, 0xffff0000); tcg_gen_ext16u_i32(tmp2, tmp2); } else { /* pkhbt */ if (shift) tcg_gen_shli_i32(tmp2, tmp2, shift); tcg_gen_ext16u_i32(tmp, tmp); tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); } tcg_gen_or_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); } else if ((insn & 0x00200020) == 0x00200000) { /* [us]sat */ tmp = load_reg(s, rm); shift = (insn >> 7) & 0x1f; if (insn & (1 << 6)) { if (shift == 0) shift = 31; tcg_gen_sari_i32(tmp, tmp, shift); } else { tcg_gen_shli_i32(tmp, tmp, shift); } sh = (insn >> 16) & 0x1f; tmp2 = tcg_const_i32(sh); if (insn & (1 << 22)) gen_helper_usat(tmp, cpu_env, tmp, tmp2); else gen_helper_ssat(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); } else if ((insn & 0x00300fe0) == 0x00200f20) { /* [us]sat16 */ tmp = load_reg(s, rm); sh = (insn >> 16) & 0x1f; tmp2 = tcg_const_i32(sh); if (insn & (1 << 22)) gen_helper_usat16(tmp, cpu_env, tmp, tmp2); else gen_helper_ssat16(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); } else if ((insn & 0x00700fe0) == 0x00000fa0) { /* Select bytes. */ tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); tmp3 = tcg_temp_new_i32(); tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE)); gen_helper_sel_flags(tmp, tmp3, tmp, tmp2); tcg_temp_free_i32(tmp3); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); } else if ((insn & 0x000003e0) == 0x00000060) { tmp = load_reg(s, rm); shift = (insn >> 10) & 3; /* ??? In many cases it's not necessary to do a rotate, a shift is sufficient. */ if (shift != 0) tcg_gen_rotri_i32(tmp, tmp, shift * 8); op1 = (insn >> 20) & 7; switch (op1) { case 0: gen_sxtb16(tmp); break; case 2: gen_sxtb(tmp); break; case 3: gen_sxth(tmp); break; case 4: gen_uxtb16(tmp); break; case 6: gen_uxtb(tmp); break; case 7: gen_uxth(tmp); break; default: goto illegal_op; } if (rn != 15) { tmp2 = load_reg(s, rn); if ((op1 & 3) == 0) { gen_add16(tmp, tmp2); } else { tcg_gen_add_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } } store_reg(s, rd, tmp); } else if ((insn & 0x003f0f60) == 0x003f0f20) { /* rev */ tmp = load_reg(s, rm); if (insn & (1 << 22)) { if (insn & (1 << 7)) { gen_revsh(tmp); } else { ARCH(6T2); gen_helper_rbit(tmp, tmp); } } else { if (insn & (1 << 7)) gen_rev16(tmp); else tcg_gen_bswap32_i32(tmp, tmp); } store_reg(s, rd, tmp); } else { goto illegal_op; } break; case 2: /* Multiplies (Type 3). */ switch ((insn >> 20) & 0x7) { case 5: if (((insn >> 6) ^ (insn >> 7)) & 1) { /* op2 not 00x or 11x : UNDEF */ goto illegal_op; } /* Signed multiply most significant [accumulate]. 
(SMMUL, SMMLA, SMMLS) */ tmp = load_reg(s, rm); tmp2 = load_reg(s, rs); tmp64 = gen_muls_i64_i32(tmp, tmp2); if (rd != 15) { tmp = load_reg(s, rd); if (insn & (1 << 6)) { tmp64 = gen_subq_msw(tmp64, tmp); } else { tmp64 = gen_addq_msw(tmp64, tmp); } } if (insn & (1 << 5)) { tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u); } tcg_gen_shri_i64(tmp64, tmp64, 32); tmp = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(tmp, tmp64); tcg_temp_free_i64(tmp64); store_reg(s, rn, tmp); break; case 0: case 4: /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */ if (insn & (1 << 7)) { goto illegal_op; } tmp = load_reg(s, rm); tmp2 = load_reg(s, rs); if (insn & (1 << 5)) gen_swap_half(tmp2); gen_smul_dual(tmp, tmp2); if (insn & (1 << 22)) { /* smlald, smlsld */ TCGv_i64 tmp64_2; tmp64 = tcg_temp_new_i64(); tmp64_2 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(tmp64, tmp); tcg_gen_ext_i32_i64(tmp64_2, tmp2); tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp2); if (insn & (1 << 6)) { tcg_gen_sub_i64(tmp64, tmp64, tmp64_2); } else { tcg_gen_add_i64(tmp64, tmp64, tmp64_2); } tcg_temp_free_i64(tmp64_2); gen_addq(s, tmp64, rd, rn); gen_storeq_reg(s, rd, rn, tmp64); tcg_temp_free_i64(tmp64); } else { /* smuad, smusd, smlad, smlsd */ if (insn & (1 << 6)) { /* This subtraction cannot overflow. */ tcg_gen_sub_i32(tmp, tmp, tmp2); } else { /* This addition cannot overflow 32 bits; * however it may overflow considered as a * signed operation, in which case we must set * the Q flag. */ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2); } tcg_temp_free_i32(tmp2); if (rd != 15) { tmp2 = load_reg(s, rd); gen_helper_add_setq(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); } store_reg(s, rn, tmp); } break; case 1: case 3: /* SDIV, UDIV */ if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) { goto illegal_op; } if (((insn >> 5) & 7) || (rd != 15)) { goto illegal_op; } tmp = load_reg(s, rm); tmp2 = load_reg(s, rs); if (insn & (1 << 21)) { gen_helper_udiv(tmp, tmp, tmp2); } else { gen_helper_sdiv(tmp, tmp, tmp2); } tcg_temp_free_i32(tmp2); store_reg(s, rn, tmp); break; default: goto illegal_op; } break; case 3: op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7); switch (op1) { case 0: /* Unsigned sum of absolute differences. */ ARCH(6); tmp = load_reg(s, rm); tmp2 = load_reg(s, rs); gen_helper_usad8(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); if (rd != 15) { tmp2 = load_reg(s, rd); tcg_gen_add_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } store_reg(s, rn, tmp); break; case 0x20: case 0x24: case 0x28: case 0x2c: /* Bitfield insert/clear. 
*/ ARCH(6T2); shift = (insn >> 7) & 0x1f; i = (insn >> 16) & 0x1f; if (i < shift) { /* UNPREDICTABLE; we choose to UNDEF */ goto illegal_op; } i = i + 1 - shift; if (rm == 15) { tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } else { tmp = load_reg(s, rm); } if (i != 32) { tmp2 = load_reg(s, rd); tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i); tcg_temp_free_i32(tmp2); } store_reg(s, rd, tmp); break; case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */ case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */ ARCH(6T2); tmp = load_reg(s, rm); shift = (insn >> 7) & 0x1f; i = ((insn >> 16) & 0x1f) + 1; if (shift + i > 32) goto illegal_op; if (i < 32) { if (op1 & 0x20) { gen_ubfx(tmp, shift, (1u << i) - 1); } else { gen_sbfx(tmp, shift, i); } } store_reg(s, rd, tmp); break; default: goto illegal_op; } break; } break; } do_ldst: /* Check for undefined extension instructions * per the ARM Bible IE: * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx */ sh = (0xf << 20) | (0xf << 4); if (op1 == 0x7 && ((insn & sh) == sh)) { goto illegal_op; } /* load/store byte/word */ rn = (insn >> 16) & 0xf; rd = (insn >> 12) & 0xf; tmp2 = load_reg(s, rn); if ((insn & 0x01200000) == 0x00200000) { /* ldrt/strt */ i = get_a32_user_mem_index(s); } else { i = get_mem_index(s); } if (insn & (1 << 24)) gen_add_data_offset(s, insn, tmp2); if (insn & (1 << 20)) { /* load */ tmp = tcg_temp_new_i32(); if (insn & (1 << 22)) { gen_aa32_ld8u(tmp, tmp2, i); } else { gen_aa32_ld32u(tmp, tmp2, i); } } else { /* store */ tmp = load_reg(s, rd); if (insn & (1 << 22)) { gen_aa32_st8(tmp, tmp2, i); } else { gen_aa32_st32(tmp, tmp2, i); } tcg_temp_free_i32(tmp); } if (!(insn & (1 << 24))) { gen_add_data_offset(s, insn, tmp2); store_reg(s, rn, tmp2); } else if (insn & (1 << 21)) { store_reg(s, rn, tmp2); } else { tcg_temp_free_i32(tmp2); } if (insn & (1 << 20)) { /* Complete the load. */ store_reg_from_load(s, rd, tmp); } break; case 0x08: case 0x09: { int j, n, loaded_base; bool exc_return = false; bool is_load = extract32(insn, 20, 1); bool user = false; TCGv_i32 loaded_var; /* load/store multiple words */ /* XXX: store correct base if write back */ if (insn & (1 << 22)) { /* LDM (user), LDM (exception return) and STM (user) */ if (IS_USER(s)) goto illegal_op; /* only usable in supervisor mode */ if (is_load && extract32(insn, 15, 1)) { exc_return = true; } else { user = true; } } rn = (insn >> 16) & 0xf; addr = load_reg(s, rn); /* compute total size */ loaded_base = 0; TCGV_UNUSED_I32(loaded_var); n = 0; for(i=0;i<16;i++) { if (insn & (1 << i)) n++; } /* XXX: test invalid n == 0 case ? 
*/ if (insn & (1 << 23)) { if (insn & (1 << 24)) { /* pre increment */ tcg_gen_addi_i32(addr, addr, 4); } else { /* post increment */ } } else { if (insn & (1 << 24)) { /* pre decrement */ tcg_gen_addi_i32(addr, addr, -(n * 4)); } else { /* post decrement */ if (n != 1) tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); } } j = 0; for(i=0;i<16;i++) { if (insn & (1 << i)) { if (is_load) { /* load */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, get_mem_index(s)); if (user) { tmp2 = tcg_const_i32(i); gen_helper_set_user_reg(cpu_env, tmp2, tmp); tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp); } else if (i == rn) { loaded_var = tmp; loaded_base = 1; } else { store_reg_from_load(s, i, tmp); } } else { /* store */ if (i == 15) { /* special case: r15 = PC + 8 */ val = (long)s->pc + 4; tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, val); } else if (user) { tmp = tcg_temp_new_i32(); tmp2 = tcg_const_i32(i); gen_helper_get_user_reg(tmp, cpu_env, tmp2); tcg_temp_free_i32(tmp2); } else { tmp = load_reg(s, i); } gen_aa32_st32(tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); } j++; /* no need to add after the last transfer */ if (j != n) tcg_gen_addi_i32(addr, addr, 4); } } if (insn & (1 << 21)) { /* write back */ if (insn & (1 << 23)) { if (insn & (1 << 24)) { /* pre increment */ } else { /* post increment */ tcg_gen_addi_i32(addr, addr, 4); } } else { if (insn & (1 << 24)) { /* pre decrement */ if (n != 1) tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); } else { /* post decrement */ tcg_gen_addi_i32(addr, addr, -(n * 4)); } } store_reg(s, rn, addr); } else { tcg_temp_free_i32(addr); } if (loaded_base) { store_reg(s, rn, loaded_var); } if (exc_return) { /* Restore CPSR from SPSR. */ tmp = load_cpu_field(spsr); gen_set_cpsr(tmp, CPSR_ERET_MASK); tcg_temp_free_i32(tmp); s->is_jmp = DISAS_JUMP; } } break; case 0xa: case 0xb: { int32_t offset; /* branch (and link) */ val = (int32_t)s->pc; if (insn & (1 << 24)) { tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, val); store_reg(s, 14, tmp); } offset = sextract32(insn << 2, 0, 26); val += offset + 4; gen_jmp(s, val); } break; case 0xc: case 0xd: case 0xe: if (((insn >> 8) & 0xe) == 10) { /* VFP. */ if (disas_vfp_insn(s, insn)) { goto illegal_op; } } else if (disas_coproc_insn(s, insn)) { /* Coprocessor. */ goto illegal_op; } break; case 0xf: /* swi */ gen_set_pc_im(s, s->pc); s->svc_imm = extract32(insn, 0, 24); s->is_jmp = DISAS_SWI; break; default: illegal_op: gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), default_exception_el(s)); break; } } }
1,610
0
int64_t bdrv_getlength(BlockDriverState *bs) { BlockDriver *drv = bs->drv; if (!drv) return -ENOMEDIUM; if (bs->growable || bs->removable) { if (drv->bdrv_getlength) { return drv->bdrv_getlength(bs); } } return bs->total_sectors * BDRV_SECTOR_SIZE; }
1,611
0
static void test_after_failed_device_add(void) { QDict *response; QDict *error; qtest_start("-drive if=none,id=drive0"); /* Make device_add fail. If this leaks the virtio-blk-pci device then a * reference to drive0 will also be held (via qdev properties). */ response = qmp("{'execute': 'device_add'," " 'arguments': {" " 'driver': 'virtio-blk-pci'," " 'drive': 'drive0'" "}}"); g_assert(response); error = qdict_get_qdict(response, "error"); g_assert_cmpstr(qdict_get_try_str(error, "class"), ==, "GenericError"); QDECREF(response); /* Delete the drive */ drive_del(); /* Try to re-add the drive. This fails with duplicate IDs if a leaked * virtio-blk-pci exists that holds a reference to the old drive0. */ drive_add(); qtest_end(); }
1,612
0
static void xen_io_del(MemoryListener *listener, MemoryRegionSection *section) { XenIOState *state = container_of(listener, XenIOState, io_listener); xen_unmap_io_section(xen_xc, xen_domid, state->ioservid, section); memory_region_unref(section->mr); }
1,613
0
static void mipsnet_receive(void *opaque, const uint8_t *buf, size_t size) { MIPSnetState *s = opaque; #ifdef DEBUG_MIPSNET_RECEIVE printf("mipsnet: receiving len=%d\n", size); #endif if (!mipsnet_can_receive(opaque)) return; s->busy = 1; /* Just accept everything. */ /* Write packet data. */ memcpy(s->rx_buffer, buf, size); s->rx_count = size; s->rx_read = 0; /* Now we can signal we have received something. */ s->intctl |= MIPSNET_INTCTL_RXDONE; mipsnet_update_irq(s); }
1,614
0
int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit) { int rc; struct kvm_device_attr attr = { .group = KVM_S390_VM_MEM_CTRL, .attr = KVM_S390_VM_MEM_LIMIT_SIZE, .addr = (uint64_t) &new_limit, }; if (!kvm_s390_supports_mem_limit(s)) { return 0; } rc = kvm_s390_query_mem_limit(s, hw_limit); if (rc) { return rc; } else if (*hw_limit < new_limit) { return -E2BIG; } return kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr); }
1,615
0
static int ra144_decode_frame(AVCodecContext * avctx, void *vdata, int *data_size, const uint8_t *buf, int buf_size) { static const uint8_t sizes[10] = {6, 5, 5, 4, 4, 3, 3, 3, 3, 2}; unsigned int refl_rms[4]; // RMS of the reflection coefficients uint16_t block_coefs[4][30]; // LPC coefficients of each sub-block unsigned int lpc_refl[10]; // LPC reflection coefficients of the frame int i, c; int16_t *data = vdata; unsigned int energy; RA144Context *ractx = avctx->priv_data; GetBitContext gb; if(buf_size < 20) { av_log(avctx, AV_LOG_ERROR, "Frame too small (%d bytes). Truncated file?\n", buf_size); *data_size = 0; return buf_size; } init_get_bits(&gb, buf, 20 * 8); for (i=0; i<10; i++) lpc_refl[i] = lpc_refl_cb[i][get_bits(&gb, sizes[i])]; eval_coefs(ractx->lpc_coef[0], lpc_refl); ractx->lpc_refl_rms[0] = rms(lpc_refl); energy = energy_tab[get_bits(&gb, 5)]; refl_rms[0] = interp(ractx, block_coefs[0], 0, 1, ractx->old_energy); refl_rms[1] = interp(ractx, block_coefs[1], 1, energy <= ractx->old_energy, t_sqrt(energy*ractx->old_energy) >> 12); refl_rms[2] = interp(ractx, block_coefs[2], 2, 0, energy); refl_rms[3] = rescale_rms(ractx->lpc_refl_rms[0], energy); int_to_int16(block_coefs[3], ractx->lpc_coef[0]); for (c=0; c<4; c++) { do_output_subblock(ractx, block_coefs[c], refl_rms[c], &gb); for (i=0; i<BLOCKSIZE; i++) *data++ = av_clip_int16(ractx->curr_sblock[i + 10] << 2); } ractx->old_energy = energy; ractx->lpc_refl_rms[1] = ractx->lpc_refl_rms[0]; FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]); *data_size = 2*160; return 20; }
1,617
0
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, int element, TCGMemOp memop) { int vect_off = vec_reg_offset(srcidx, element, memop & MO_SIZE); switch (memop) { case MO_8: tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off); break; case MO_16: tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off); break; case MO_32: tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off); break; case MO_8|MO_SIGN: tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off); break; case MO_16|MO_SIGN: tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off); break; case MO_32|MO_SIGN: tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off); break; case MO_64: case MO_64|MO_SIGN: tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off); break; default: g_assert_not_reached(); } }
1,618
0
int qemu_boot_set(const char *boot_order) { if (!boot_set_handler) { return -EINVAL; } return boot_set_handler(boot_set_opaque, boot_order); }
1,619
0
static ssize_t local_readlink(FsContext *ctx, const char *path, char *buf, size_t bufsz) { return readlink(rpath(ctx, path), buf, bufsz); }
1,620
0
void unix_start_incoming_migration(const char *path, Error **errp) { SocketAddressLegacy *saddr = unix_build_address(path); socket_start_incoming_migration(saddr, errp); }
1,623
0
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, TCGv_i32 newv, TCGArg idx, TCGMemOp memop) { memop = tcg_canonicalize_memop(memop, 0, 0); if (!parallel_cpus) { TCGv_i32 t1 = tcg_temp_new_i32(); TCGv_i32 t2 = tcg_temp_new_i32(); tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE); tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN); tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1); tcg_gen_qemu_st_i32(t2, addr, idx, memop); tcg_temp_free_i32(t2); if (memop & MO_SIGN) { tcg_gen_ext_i32(retv, t1, memop); } else { tcg_gen_mov_i32(retv, t1); } tcg_temp_free_i32(t1); } else { gen_atomic_cx_i32 gen; gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); #ifdef CONFIG_SOFTMMU { TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx)); gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi); tcg_temp_free_i32(oi); } #else gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv); #endif if (memop & MO_SIGN) { tcg_gen_ext_i32(retv, retv, memop); } } }
1,624
0
static void rtce_init(VIOsPAPRDevice *dev) { size_t size = (dev->rtce_window_size >> SPAPR_VIO_TCE_PAGE_SHIFT) * sizeof(VIOsPAPR_RTCE); if (size) { dev->rtce_table = g_malloc0(size); } }
1,625
0
BlockAIOCB *ide_issue_trim(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockCompletionFunc *cb, void *opaque) { TrimAIOCB *iocb; iocb = qemu_aio_get(&trim_aiocb_info, bs, cb, opaque); iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb); iocb->ret = 0; iocb->qiov = qiov; iocb->i = -1; iocb->j = 0; ide_issue_trim_cb(iocb, 0); return &iocb->common; }
1,627
0
uint32_t lduw_phys(target_phys_addr_t addr) { return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN); }
1,629
0
static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size) { DiracContext *s = avctx->priv_data; DiracFrame *pic = NULL; int ret, i, parse_code = buf[4]; unsigned tmp; if (size < DATA_UNIT_HEADER_SIZE) return -1; init_get_bits(&s->gb, &buf[13], 8*(size - DATA_UNIT_HEADER_SIZE)); if (parse_code == pc_seq_header) { if (s->seen_sequence_header) return 0; /* [DIRAC_STD] 10. Sequence header */ if (avpriv_dirac_parse_sequence_header(avctx, &s->gb, &s->source)) return -1; avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift); if (alloc_sequence_buffers(s)) return -1; s->seen_sequence_header = 1; } else if (parse_code == pc_eos) { /* [DIRAC_STD] End of Sequence */ free_sequence_buffers(s); s->seen_sequence_header = 0; } else if (parse_code == pc_aux_data) { if (buf[13] == 1) { /* encoder implementation/version */ int ver[3]; /* versions older than 1.0.8 don't store quant delta for subbands with only one codeblock */ if (sscanf(buf+14, "Schroedinger %d.%d.%d", ver, ver+1, ver+2) == 3) if (ver[0] == 1 && ver[1] == 0 && ver[2] <= 7) s->old_delta_quant = 1; } } else if (parse_code & 0x8) { /* picture data unit */ if (!s->seen_sequence_header) { av_log(avctx, AV_LOG_DEBUG, "Dropping frame without sequence header\n"); return -1; } /* find an unused frame */ for (i = 0; i < MAX_FRAMES; i++) if (s->all_frames[i].avframe->data[0] == NULL) pic = &s->all_frames[i]; if (!pic) { av_log(avctx, AV_LOG_ERROR, "framelist full\n"); return -1; } av_frame_unref(pic->avframe); /* [DIRAC_STD] Defined in 9.6.1 ... */ tmp = parse_code & 0x03; /* [DIRAC_STD] num_refs() */ if (tmp > 2) { av_log(avctx, AV_LOG_ERROR, "num_refs of 3\n"); return -1; } s->num_refs = tmp; s->is_arith = (parse_code & 0x48) == 0x08; /* [DIRAC_STD] using_ac() */ s->low_delay = (parse_code & 0x88) == 0x88; /* [DIRAC_STD] is_low_delay() */ pic->avframe->reference = (parse_code & 0x0C) == 0x0C; /* [DIRAC_STD] is_reference() */ pic->avframe->key_frame = s->num_refs == 0; /* [DIRAC_STD] is_intra() */ pic->avframe->pict_type = s->num_refs + 1; /* Definition of AVPictureType in avutil.h */ if ((ret = get_buffer_with_edge(avctx, pic->avframe, (parse_code & 0x0C) == 0x0C ? AV_GET_BUFFER_FLAG_REF : 0)) < 0) return ret; s->current_picture = pic; s->plane[0].stride = pic->avframe->linesize[0]; s->plane[1].stride = pic->avframe->linesize[1]; s->plane[2].stride = pic->avframe->linesize[2]; if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0) return AVERROR(ENOMEM); /* [DIRAC_STD] 11.1 Picture parse. picture_parse() */ if (dirac_decode_picture_header(s)) return -1; /* [DIRAC_STD] 13.0 Transform data syntax. transform_data() */ if (dirac_decode_frame_internal(s)) return -1; } return 0; }
1,630
0
static int h264_parse(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size) { H264Context *h = s->priv_data; ParseContext *pc = &h->s.parse_context; int next; if(s->flags & PARSER_FLAG_COMPLETE_FRAMES){ next= buf_size; }else{ next= find_frame_end(h, buf, buf_size); if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) { *poutbuf = NULL; *poutbuf_size = 0; return buf_size; } if(next<0){ find_frame_end(h, &pc->buffer[pc->last_index + next], -next); //update state } } *poutbuf = (uint8_t *)buf; *poutbuf_size = buf_size; return next; }
1,631
0
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx) { const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8; if (avctx->lowres==1) { c->idct_put = ff_jref_idct4_put; c->idct_add = ff_jref_idct4_add; c->idct = ff_j_rev_dct4; c->perm_type = FF_IDCT_PERM_NONE; } else if (avctx->lowres==2) { c->idct_put = ff_jref_idct2_put; c->idct_add = ff_jref_idct2_add; c->idct = ff_j_rev_dct2; c->perm_type = FF_IDCT_PERM_NONE; } else if (avctx->lowres==3) { c->idct_put = ff_jref_idct1_put; c->idct_add = ff_jref_idct1_add; c->idct = ff_j_rev_dct1; c->perm_type = FF_IDCT_PERM_NONE; } else { if (avctx->bits_per_raw_sample == 10) { c->idct_put = ff_simple_idct_put_10; c->idct_add = ff_simple_idct_add_10; c->idct = ff_simple_idct_10; c->perm_type = FF_IDCT_PERM_NONE; } else if (avctx->bits_per_raw_sample == 12) { c->idct_put = ff_simple_idct_put_12; c->idct_add = ff_simple_idct_add_12; c->idct = ff_simple_idct_12; c->perm_type = FF_IDCT_PERM_NONE; } else { if (avctx->idct_algo == FF_IDCT_INT) { c->idct_put = ff_jref_idct_put; c->idct_add = ff_jref_idct_add; c->idct = ff_j_rev_dct; c->perm_type = FF_IDCT_PERM_LIBMPEG2; } else if (avctx->idct_algo == FF_IDCT_FAAN) { c->idct_put = ff_faanidct_put; c->idct_add = ff_faanidct_add; c->idct = ff_faanidct; c->perm_type = FF_IDCT_PERM_NONE; } else { // accurate/default c->idct_put = ff_simple_idct_put_8; c->idct_add = ff_simple_idct_add_8; c->idct = ff_simple_idct_8; c->perm_type = FF_IDCT_PERM_NONE; } } } c->put_pixels_clamped = put_pixels_clamped_c; c->put_signed_pixels_clamped = put_signed_pixels_clamped_c; c->add_pixels_clamped = add_pixels_clamped_c; ff_put_pixels_clamped = c->put_pixels_clamped; ff_add_pixels_clamped = c->add_pixels_clamped; if (CONFIG_MPEG4_DECODER && avctx->idct_algo == FF_IDCT_XVID) ff_xvid_idct_init(c, avctx); if (ARCH_ALPHA) ff_idctdsp_init_alpha(c, avctx, high_bit_depth); if (ARCH_ARM) ff_idctdsp_init_arm(c, avctx, high_bit_depth); if (ARCH_PPC) ff_idctdsp_init_ppc(c, avctx, high_bit_depth); if (ARCH_X86) ff_idctdsp_init_x86(c, avctx, high_bit_depth); ff_init_scantable_permutation(c->idct_permutation, c->perm_type); }
1,632
1
do_command(GIOChannel *source, GIOCondition condition, gpointer data) { char *string; VCardEmulError error; static unsigned int default_reader_id; unsigned int reader_id; VReader *reader = NULL; GError *err = NULL; g_assert(condition & G_IO_IN); reader_id = default_reader_id; g_io_channel_read_line(source, &string, NULL, NULL, &err); if (err != NULL) { g_error("Error while reading command: %s", err->message); } if (string != NULL) { if (strncmp(string, "exit", 4) == 0) { /* remove all the readers */ VReaderList *list = vreader_get_reader_list(); VReaderListEntry *reader_entry; printf("Active Readers:\n"); for (reader_entry = vreader_list_get_first(list); reader_entry; reader_entry = vreader_list_get_next(reader_entry)) { VReader *reader = vreader_list_get_reader(reader_entry); vreader_id_t reader_id; reader_id = vreader_get_id(reader); if (reader_id == -1) { continue; } /* be nice and signal card removal first (qemu probably should * do this itself) */ if (vreader_card_is_present(reader) == VREADER_OK) { send_msg(VSC_CardRemove, reader_id, NULL, 0); } send_msg(VSC_ReaderRemove, reader_id, NULL, 0); } exit(0); } else if (strncmp(string, "insert", 6) == 0) { if (string[6] == ' ') { reader_id = get_id_from_string(&string[7], reader_id); } reader = vreader_get_reader_by_id(reader_id); if (reader != NULL) { error = vcard_emul_force_card_insert(reader); printf("insert %s, returned %d\n", vreader_get_name(reader), error); } else { printf("no reader by id %u found\n", reader_id); } } else if (strncmp(string, "remove", 6) == 0) { if (string[6] == ' ') { reader_id = get_id_from_string(&string[7], reader_id); } reader = vreader_get_reader_by_id(reader_id); if (reader != NULL) { error = vcard_emul_force_card_remove(reader); printf("remove %s, returned %d\n", vreader_get_name(reader), error); } else { printf("no reader by id %u found\n", reader_id); } } else if (strncmp(string, "select", 6) == 0) { if (string[6] == ' ') { reader_id = get_id_from_string(&string[7], VSCARD_UNDEFINED_READER_ID); } if (reader_id != VSCARD_UNDEFINED_READER_ID) { reader = vreader_get_reader_by_id(reader_id); } if (reader) { printf("Selecting reader %u, %s\n", reader_id, vreader_get_name(reader)); default_reader_id = reader_id; } else { printf("Reader with id %u not found\n", reader_id); } } else if (strncmp(string, "debug", 5) == 0) { if (string[5] == ' ') { verbose = get_id_from_string(&string[6], 0); } printf("debug level = %d\n", verbose); } else if (strncmp(string, "list", 4) == 0) { VReaderList *list = vreader_get_reader_list(); VReaderListEntry *reader_entry; printf("Active Readers:\n"); for (reader_entry = vreader_list_get_first(list); reader_entry; reader_entry = vreader_list_get_next(reader_entry)) { VReader *reader = vreader_list_get_reader(reader_entry); vreader_id_t reader_id; reader_id = vreader_get_id(reader); if (reader_id == -1) { continue; } printf("%3u %s %s\n", reader_id, vreader_card_is_present(reader) == VREADER_OK ? "CARD_PRESENT" : " ", vreader_get_name(reader)); } printf("Inactive Readers:\n"); for (reader_entry = vreader_list_get_first(list); reader_entry; reader_entry = vreader_list_get_next(reader_entry)) { VReader *reader = vreader_list_get_reader(reader_entry); vreader_id_t reader_id; reader_id = vreader_get_id(reader); if (reader_id != -1) { continue; } printf("INA %s %s\n", vreader_card_is_present(reader) == VREADER_OK ? 
"CARD_PRESENT" : " ", vreader_get_name(reader)); } } else if (*string != 0) { printf("valid commands:\n"); printf("insert [reader_id]\n"); printf("remove [reader_id]\n"); printf("select reader_id\n"); printf("list\n"); printf("debug [level]\n"); printf("exit\n"); } } vreader_free(reader); printf("> "); fflush(stdout); return TRUE; }
1,633
1
envlist_unsetenv(envlist_t *envlist, const char *env) { struct envlist_entry *entry; size_t envname_len; if ((envlist == NULL) || (env == NULL)) return (EINVAL); /* env is not allowed to contain '=' */ if (strchr(env, '=') != NULL) return (EINVAL); /* * Find out the requested entry and remove * it from the list. */ envname_len = strlen(env); for (entry = envlist->el_entries.lh_first; entry != NULL; entry = entry->ev_link.le_next) { if (strncmp(entry->ev_var, env, envname_len) == 0) break; } if (entry != NULL) { QLIST_REMOVE(entry, ev_link); free((char *)entry->ev_var); free(entry); envlist->el_count--; } return (0); }
1,634
1
static av_cold int mpc8_decode_init(AVCodecContext * avctx) { int i; MPCContext *c = avctx->priv_data; GetBitContext gb; static int vlc_initialized = 0; int channels; static VLC_TYPE band_table[542][2]; static VLC_TYPE q1_table[520][2]; static VLC_TYPE q9up_table[524][2]; static VLC_TYPE scfi0_table[1 << MPC8_SCFI0_BITS][2]; static VLC_TYPE scfi1_table[1 << MPC8_SCFI1_BITS][2]; static VLC_TYPE dscf0_table[560][2]; static VLC_TYPE dscf1_table[598][2]; static VLC_TYPE q3_0_table[512][2]; static VLC_TYPE q3_1_table[516][2]; static VLC_TYPE codes_table[5708][2]; if(avctx->extradata_size < 2){ av_log(avctx, AV_LOG_ERROR, "Too small extradata size (%i)!\n", avctx->extradata_size); return -1; } memset(c->oldDSCF, 0, sizeof(c->oldDSCF)); av_lfg_init(&c->rnd, 0xDEADBEEF); ff_dsputil_init(&c->dsp, avctx); ff_mpadsp_init(&c->mpadsp); ff_mpc_init(); init_get_bits(&gb, avctx->extradata, 16); skip_bits(&gb, 3);//sample rate c->maxbands = get_bits(&gb, 5) + 1; if (c->maxbands >= BANDS) { av_log(avctx,AV_LOG_ERROR, "maxbands %d too high\n", c->maxbands); return AVERROR_INVALIDDATA; } channels = get_bits(&gb, 4) + 1; if (channels > 2) { av_log_missing_feature(avctx, "Multichannel MPC SV8", 1); return -1; } c->MSS = get_bits1(&gb); c->frames = 1 << (get_bits(&gb, 3) * 2); avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; if(vlc_initialized) return 0; av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n"); band_vlc.table = band_table; band_vlc.table_allocated = 542; init_vlc(&band_vlc, MPC8_BANDS_BITS, MPC8_BANDS_SIZE, mpc8_bands_bits, 1, 1, mpc8_bands_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); q1_vlc.table = q1_table; q1_vlc.table_allocated = 520; init_vlc(&q1_vlc, MPC8_Q1_BITS, MPC8_Q1_SIZE, mpc8_q1_bits, 1, 1, mpc8_q1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); q9up_vlc.table = q9up_table; q9up_vlc.table_allocated = 524; init_vlc(&q9up_vlc, MPC8_Q9UP_BITS, MPC8_Q9UP_SIZE, mpc8_q9up_bits, 1, 1, mpc8_q9up_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); scfi_vlc[0].table = scfi0_table; scfi_vlc[0].table_allocated = 1 << MPC8_SCFI0_BITS; init_vlc(&scfi_vlc[0], MPC8_SCFI0_BITS, MPC8_SCFI0_SIZE, mpc8_scfi0_bits, 1, 1, mpc8_scfi0_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); scfi_vlc[1].table = scfi1_table; scfi_vlc[1].table_allocated = 1 << MPC8_SCFI1_BITS; init_vlc(&scfi_vlc[1], MPC8_SCFI1_BITS, MPC8_SCFI1_SIZE, mpc8_scfi1_bits, 1, 1, mpc8_scfi1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); dscf_vlc[0].table = dscf0_table; dscf_vlc[0].table_allocated = 560; init_vlc(&dscf_vlc[0], MPC8_DSCF0_BITS, MPC8_DSCF0_SIZE, mpc8_dscf0_bits, 1, 1, mpc8_dscf0_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); dscf_vlc[1].table = dscf1_table; dscf_vlc[1].table_allocated = 598; init_vlc(&dscf_vlc[1], MPC8_DSCF1_BITS, MPC8_DSCF1_SIZE, mpc8_dscf1_bits, 1, 1, mpc8_dscf1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); q3_vlc[0].table = q3_0_table; q3_vlc[0].table_allocated = 512; ff_init_vlc_sparse(&q3_vlc[0], MPC8_Q3_BITS, MPC8_Q3_SIZE, mpc8_q3_bits, 1, 1, mpc8_q3_codes, 1, 1, mpc8_q3_syms, 1, 1, INIT_VLC_USE_NEW_STATIC); q3_vlc[1].table = q3_1_table; q3_vlc[1].table_allocated = 516; ff_init_vlc_sparse(&q3_vlc[1], MPC8_Q4_BITS, MPC8_Q4_SIZE, mpc8_q4_bits, 1, 1, mpc8_q4_codes, 1, 1, mpc8_q4_syms, 1, 1, INIT_VLC_USE_NEW_STATIC); for(i = 0; i < 2; i++){ res_vlc[i].table = &codes_table[vlc_offsets[0+i]]; res_vlc[i].table_allocated = vlc_offsets[1+i] - vlc_offsets[0+i]; init_vlc(&res_vlc[i], MPC8_RES_BITS, MPC8_RES_SIZE, &mpc8_res_bits[i], 1, 1, &mpc8_res_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); q2_vlc[i].table = 
&codes_table[vlc_offsets[2+i]]; q2_vlc[i].table_allocated = vlc_offsets[3+i] - vlc_offsets[2+i]; init_vlc(&q2_vlc[i], MPC8_Q2_BITS, MPC8_Q2_SIZE, &mpc8_q2_bits[i], 1, 1, &mpc8_q2_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); quant_vlc[0][i].table = &codes_table[vlc_offsets[4+i]]; quant_vlc[0][i].table_allocated = vlc_offsets[5+i] - vlc_offsets[4+i]; init_vlc(&quant_vlc[0][i], MPC8_Q5_BITS, MPC8_Q5_SIZE, &mpc8_q5_bits[i], 1, 1, &mpc8_q5_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); quant_vlc[1][i].table = &codes_table[vlc_offsets[6+i]]; quant_vlc[1][i].table_allocated = vlc_offsets[7+i] - vlc_offsets[6+i]; init_vlc(&quant_vlc[1][i], MPC8_Q6_BITS, MPC8_Q6_SIZE, &mpc8_q6_bits[i], 1, 1, &mpc8_q6_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); quant_vlc[2][i].table = &codes_table[vlc_offsets[8+i]]; quant_vlc[2][i].table_allocated = vlc_offsets[9+i] - vlc_offsets[8+i]; init_vlc(&quant_vlc[2][i], MPC8_Q7_BITS, MPC8_Q7_SIZE, &mpc8_q7_bits[i], 1, 1, &mpc8_q7_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); quant_vlc[3][i].table = &codes_table[vlc_offsets[10+i]]; quant_vlc[3][i].table_allocated = vlc_offsets[11+i] - vlc_offsets[10+i]; init_vlc(&quant_vlc[3][i], MPC8_Q8_BITS, MPC8_Q8_SIZE, &mpc8_q8_bits[i], 1, 1, &mpc8_q8_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); } vlc_initialized = 1; avcodec_get_frame_defaults(&c->frame); avctx->coded_frame = &c->frame; return 0; }
1,635
1
static int decode_pivot(MSS1Context *ctx, ArithCoder *acoder, int base) { int val, inv; inv = arith_get_model_sym(acoder, &ctx->edge_mode); val = arith_get_model_sym(acoder, &ctx->pivot) + 1; if (val > 2) { if ((base + 1) / 2 - 2 <= 0) { ctx->corrupted = 1; return 0; } val = arith_get_number(acoder, (base + 1) / 2 - 2) + 3; } if (val == base) { ctx->corrupted = 1; return 0; } return inv ? base - val : val; }
1,636
1
static void ccw_init(MachineState *machine) { int ret; VirtualCssBus *css_bus; s390_sclp_init(); s390_memory_init(machine->ram_size); /* init CPUs (incl. CPU model) early so s390_has_feature() works */ s390_init_cpus(machine); s390_flic_init(); /* get a BUS */ css_bus = virtual_css_bus_init(); s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline, machine->initrd_filename, "s390-ccw.img", "s390-netboot.img", true); if (s390_has_feat(S390_FEAT_ZPCI)) { DeviceState *dev = qdev_create(NULL, TYPE_S390_PCI_HOST_BRIDGE); object_property_add_child(qdev_get_machine(), TYPE_S390_PCI_HOST_BRIDGE, OBJECT(dev), NULL); qdev_init_nofail(dev); } /* register hypercalls */ virtio_ccw_register_hcalls(); s390_enable_css_support(s390_cpu_addr2state(0)); /* * Non mcss-e enabled guests only see the devices from the default * css, which is determined by the value of the squash_mcss property. * Note: we must not squash non virtual devices to css 0xFE. */ if (css_bus->squash_mcss) { ret = css_create_css_image(0, true); } else { ret = css_create_css_image(VIRTUAL_CSSID, true); } assert(ret == 0); /* Create VirtIO network adapters */ s390_create_virtio_net(BUS(css_bus), "virtio-net-ccw"); /* Register savevm handler for guest TOD clock */ register_savevm_live(NULL, "todclock", 0, 1, &savevm_gtod, NULL); }
1,637
1
int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size) { int ret = -ENOSYS; KVMState *s = kvm_state; if (s->coalesced_mmio) { struct kvm_coalesced_mmio_zone zone; zone.addr = start; zone.size = size; ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone); } return ret; }
1,638
1
static int wma_decode_init(AVCodecContext * avctx) { WMADecodeContext *s = avctx->priv_data; int i, flags1, flags2; float *window; uint8_t *extradata; float bps1, high_freq; volatile float bps; int sample_rate1; int coef_vlc_table; s->sample_rate = avctx->sample_rate; s->nb_channels = avctx->channels; s->bit_rate = avctx->bit_rate; s->block_align = avctx->block_align; if (avctx->codec->id == CODEC_ID_WMAV1) { s->version = 1; } else { s->version = 2; } /* extract flag infos */ flags1 = 0; flags2 = 0; extradata = avctx->extradata; if (s->version == 1 && avctx->extradata_size >= 4) { flags1 = extradata[0] | (extradata[1] << 8); flags2 = extradata[2] | (extradata[3] << 8); } else if (s->version == 2 && avctx->extradata_size >= 6) { flags1 = extradata[0] | (extradata[1] << 8) | (extradata[2] << 16) | (extradata[3] << 24); flags2 = extradata[4] | (extradata[5] << 8); } s->use_exp_vlc = flags2 & 0x0001; s->use_bit_reservoir = flags2 & 0x0002; s->use_variable_block_len = flags2 & 0x0004; /* compute MDCT block size */ if (s->sample_rate <= 16000) { s->frame_len_bits = 9; } else if (s->sample_rate <= 22050 || (s->sample_rate <= 32000 && s->version == 1)) { s->frame_len_bits = 10; } else { s->frame_len_bits = 11; } s->frame_len = 1 << s->frame_len_bits; if (s->use_variable_block_len) { int nb_max, nb; nb = ((flags2 >> 3) & 3) + 1; if ((s->bit_rate / s->nb_channels) >= 32000) nb += 2; nb_max = s->frame_len_bits - BLOCK_MIN_BITS; if (nb > nb_max) nb = nb_max; s->nb_block_sizes = nb + 1; } else { s->nb_block_sizes = 1; } /* init rate dependant parameters */ s->use_noise_coding = 1; high_freq = s->sample_rate * 0.5; /* if version 2, then the rates are normalized */ sample_rate1 = s->sample_rate; if (s->version == 2) { if (sample_rate1 >= 44100) sample_rate1 = 44100; else if (sample_rate1 >= 22050) sample_rate1 = 22050; else if (sample_rate1 >= 16000) sample_rate1 = 16000; else if (sample_rate1 >= 11025) sample_rate1 = 11025; else if (sample_rate1 >= 8000) sample_rate1 = 8000; } bps = (float)s->bit_rate / (float)(s->nb_channels * s->sample_rate); s->byte_offset_bits = av_log2((int)(bps * s->frame_len / 8.0)) + 2; /* compute high frequency value and choose if noise coding should be activated */ bps1 = bps; if (s->nb_channels == 2) bps1 = bps * 1.6; if (sample_rate1 == 44100) { if (bps1 >= 0.61) s->use_noise_coding = 0; else high_freq = high_freq * 0.4; } else if (sample_rate1 == 22050) { if (bps1 >= 1.16) s->use_noise_coding = 0; else if (bps1 >= 0.72) high_freq = high_freq * 0.7; else high_freq = high_freq * 0.6; } else if (sample_rate1 == 16000) { if (bps > 0.5) high_freq = high_freq * 0.5; else high_freq = high_freq * 0.3; } else if (sample_rate1 == 11025) { high_freq = high_freq * 0.7; } else if (sample_rate1 == 8000) { if (bps <= 0.625) { high_freq = high_freq * 0.5; } else if (bps > 0.75) { s->use_noise_coding = 0; } else { high_freq = high_freq * 0.65; } } else { if (bps >= 0.8) { high_freq = high_freq * 0.75; } else if (bps >= 0.6) { high_freq = high_freq * 0.6; } else { high_freq = high_freq * 0.5; } } dprintf("flags1=0x%x flags2=0x%x\n", flags1, flags2); dprintf("version=%d channels=%d sample_rate=%d bitrate=%d block_align=%d\n", s->version, s->nb_channels, s->sample_rate, s->bit_rate, s->block_align); dprintf("bps=%f bps1=%f high_freq=%f bitoffset=%d\n", bps, bps1, high_freq, s->byte_offset_bits); dprintf("use_noise_coding=%d use_exp_vlc=%d nb_block_sizes=%d\n", s->use_noise_coding, s->use_exp_vlc, s->nb_block_sizes); /* compute the scale factor band sizes for each MDCT block size */ { int a, b, 
pos, lpos, k, block_len, i, j, n; const uint8_t *table; if (s->version == 1) { s->coefs_start = 3; } else { s->coefs_start = 0; } for(k = 0; k < s->nb_block_sizes; k++) { block_len = s->frame_len >> k; if (s->version == 1) { lpos = 0; for(i=0;i<25;i++) { a = wma_critical_freqs[i]; b = s->sample_rate; pos = ((block_len * 2 * a) + (b >> 1)) / b; if (pos > block_len) pos = block_len; s->exponent_bands[0][i] = pos - lpos; if (pos >= block_len) { i++; break; } lpos = pos; } s->exponent_sizes[0] = i; } else { /* hardcoded tables */ table = NULL; a = s->frame_len_bits - BLOCK_MIN_BITS - k; if (a < 3) { if (s->sample_rate >= 44100) table = exponent_band_44100[a]; else if (s->sample_rate >= 32000) table = exponent_band_32000[a]; else if (s->sample_rate >= 22050) table = exponent_band_22050[a]; } if (table) { n = *table++; for(i=0;i<n;i++) s->exponent_bands[k][i] = table[i]; s->exponent_sizes[k] = n; } else { j = 0; lpos = 0; for(i=0;i<25;i++) { a = wma_critical_freqs[i]; b = s->sample_rate; pos = ((block_len * 2 * a) + (b << 1)) / (4 * b); pos <<= 2; if (pos > block_len) pos = block_len; if (pos > lpos) s->exponent_bands[k][j++] = pos - lpos; if (pos >= block_len) break; lpos = pos; } s->exponent_sizes[k] = j; } } /* max number of coefs */ s->coefs_end[k] = (s->frame_len - ((s->frame_len * 9) / 100)) >> k; /* high freq computation */ s->high_band_start[k] = (int)((block_len * 2 * high_freq) / s->sample_rate + 0.5); n = s->exponent_sizes[k]; j = 0; pos = 0; for(i=0;i<n;i++) { int start, end; start = pos; pos += s->exponent_bands[k][i]; end = pos; if (start < s->high_band_start[k]) start = s->high_band_start[k]; if (end > s->coefs_end[k]) end = s->coefs_end[k]; if (end > start) s->exponent_high_bands[k][j++] = end - start; } s->exponent_high_sizes[k] = j; #if 0 tprintf("%5d: coefs_end=%d high_band_start=%d nb_high_bands=%d: ", s->frame_len >> k, s->coefs_end[k], s->high_band_start[k], s->exponent_high_sizes[k]); for(j=0;j<s->exponent_high_sizes[k];j++) tprintf(" %d", s->exponent_high_bands[k][j]); tprintf("\n"); #endif } } #ifdef TRACE { int i, j; for(i = 0; i < s->nb_block_sizes; i++) { tprintf("%5d: n=%2d:", s->frame_len >> i, s->exponent_sizes[i]); for(j=0;j<s->exponent_sizes[i];j++) tprintf(" %d", s->exponent_bands[i][j]); tprintf("\n"); } } #endif /* init MDCT */ for(i = 0; i < s->nb_block_sizes; i++) ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 1); /* init MDCT windows : simple sinus window */ for(i = 0; i < s->nb_block_sizes; i++) { int n, j; float alpha; n = 1 << (s->frame_len_bits - i); window = av_malloc(sizeof(float) * n); alpha = M_PI / (2.0 * n); for(j=0;j<n;j++) { window[n - j - 1] = sin((j + 0.5) * alpha); } s->windows[i] = window; } s->reset_block_lengths = 1; if (s->use_noise_coding) { /* init the noise generator */ if (s->use_exp_vlc) s->noise_mult = 0.02; else s->noise_mult = 0.04; #ifdef TRACE for(i=0;i<NOISE_TAB_SIZE;i++) s->noise_table[i] = 1.0 * s->noise_mult; #else { unsigned int seed; float norm; seed = 1; norm = (1.0 / (float)(1LL << 31)) * sqrt(3) * s->noise_mult; for(i=0;i<NOISE_TAB_SIZE;i++) { seed = seed * 314159 + 1; s->noise_table[i] = (float)((int)seed) * norm; } } #endif init_vlc(&s->hgain_vlc, 9, sizeof(hgain_huffbits), hgain_huffbits, 1, 1, hgain_huffcodes, 2, 2); } if (s->use_exp_vlc) { init_vlc(&s->exp_vlc, 9, sizeof(scale_huffbits), scale_huffbits, 1, 1, scale_huffcodes, 4, 4); } else { wma_lsp_to_curve_init(s, s->frame_len); } /* choose the VLC tables for the coefficients */ coef_vlc_table = 2; if (s->sample_rate >= 32000) { if (bps1 < 0.72) 
coef_vlc_table = 0; else if (bps1 < 1.16) coef_vlc_table = 1; } init_coef_vlc(&s->coef_vlc[0], &s->run_table[0], &s->level_table[0], &coef_vlcs[coef_vlc_table * 2]); init_coef_vlc(&s->coef_vlc[1], &s->run_table[1], &s->level_table[1], &coef_vlcs[coef_vlc_table * 2 + 1]); return 0; }
1,639
1
void virtio_net_exit(VirtIODevice *vdev) { VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev); qemu_del_vm_change_state_handler(n->vmstate); if (n->vhost_started) { vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev); } qemu_purge_queued_packets(&n->nic->nc); unregister_savevm(n->qdev, "virtio-net", n); qemu_free(n->mac_table.macs); qemu_free(n->vlans); qemu_del_timer(n->tx_timer); qemu_free_timer(n->tx_timer); virtio_cleanup(&n->vdev); qemu_del_vlan_client(&n->nic->nc); }
1,640
1
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr) { /* Do the "take the exception" parts of exception entry, * but not the pushing of state to the stack. This is * similar to the pseudocode ExceptionTaken() function. */ CPUARMState *env = &cpu->env; uint32_t addr; armv7m_nvic_acknowledge_irq(env->nvic); write_v7m_control_spsel(env, 0); arm_clear_exclusive(env); /* Clear IT bits */ env->condexec_bits = 0; env->regs[14] = lr; addr = arm_v7m_load_vector(cpu); env->regs[15] = addr & 0xfffffffe; env->thumb = addr & 1; }
1,641
1
static int omap_intc_init(SysBusDevice *sbd) { DeviceState *dev = DEVICE(sbd); struct omap_intr_handler_s *s = OMAP_INTC(dev); if (!s->iclk) { hw_error("omap-intc: clk not connected\n"); } s->nbanks = 1; sysbus_init_irq(sbd, &s->parent_intr[0]); sysbus_init_irq(sbd, &s->parent_intr[1]); qdev_init_gpio_in(dev, omap_set_intr, s->nbanks * 32); memory_region_init_io(&s->mmio, OBJECT(s), &omap_inth_mem_ops, s, "omap-intc", s->size); sysbus_init_mmio(sbd, &s->mmio); return 0; }
1,643
1
static void mov_read_chapters(AVFormatContext *s) { MOVContext *mov = s->priv_data; AVStream *st = NULL; MOVStreamContext *sc; int64_t cur_pos; int i; for (i = 0; i < s->nb_streams; i++) if (s->streams[i]->id == mov->chapter_track) { st = s->streams[i]; break; } if (!st) { av_log(s, AV_LOG_ERROR, "Referenced QT chapter track not found\n"); return; } st->discard = AVDISCARD_ALL; sc = st->priv_data; cur_pos = avio_tell(sc->pb); for (i = 0; i < st->nb_index_entries; i++) { AVIndexEntry *sample = &st->index_entries[i]; int64_t end = i+1 < st->nb_index_entries ? st->index_entries[i+1].timestamp : st->duration; uint8_t *title; uint16_t ch; int len, title_len; if (avio_seek(sc->pb, sample->pos, SEEK_SET) != sample->pos) { av_log(s, AV_LOG_ERROR, "Chapter %d not found in file\n", i); goto finish; } // the first two bytes are the length of the title len = avio_rb16(sc->pb); if (len > sample->size-2) continue; title_len = 2*len + 1; if (!(title = av_mallocz(title_len))) goto finish; // The samples could theoretically be in any encoding if there's an encd // atom following, but in practice are only utf-8 or utf-16, distinguished // instead by the presence of a BOM ch = avio_rb16(sc->pb); if (ch == 0xfeff) avio_get_str16be(sc->pb, len, title, title_len); else if (ch == 0xfffe) avio_get_str16le(sc->pb, len, title, title_len); else { AV_WB16(title, ch); avio_get_str(sc->pb, len - 2, title + 2, title_len - 2); } ff_new_chapter(s, i, st->time_base, sample->timestamp, end, title); av_freep(&title); } finish: avio_seek(sc->pb, cur_pos, SEEK_SET); }
1,644
1
int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal) { int count, i; if (avctx->bits_per_coded_sample > 8) { av_log(avctx, AV_LOG_ERROR, "bit_per_coded_sample > 8 not supported\n"); return AVERROR_INVALIDDATA; } count = 1 << avctx->bits_per_coded_sample; if (avctx->extradata_size < count * 3) { av_log(avctx, AV_LOG_ERROR, "palette data underflow\n"); return AVERROR_INVALIDDATA; } for (i=0; i < count; i++) { pal[i] = 0xFF000000 | AV_RB24( avctx->extradata + i*3 ); } return 0; }
1,645
1
static void isabus_bridge_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->fw_name = "isa"; }
1,646
1
static void disable_device(PIIX4PMState *s, int slot) { s->ar.gpe.sts[0] |= PIIX4_PCI_HOTPLUG_STATUS; s->pci0_status.down |= (1 << slot); }
1,647
1
void page_set_flags(target_ulong start, target_ulong end, int flags) { target_ulong addr, len; /* This function should never be called with addresses outside the guest address space. If this assert fires, it probably indicates a missing call to h2g_valid. */ #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); #endif assert(start < end); assert_memory_lock(); start = start & TARGET_PAGE_MASK; end = TARGET_PAGE_ALIGN(end); if (flags & PAGE_WRITE) { flags |= PAGE_WRITE_ORG; } for (addr = start, len = end - start; len != 0; len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); /* If the write protection bit is set, then we invalidate the code inside. */ if (!(p->flags & PAGE_WRITE) && (flags & PAGE_WRITE) && p->first_tb) { tb_invalidate_phys_page(addr, 0); } p->flags = flags; } }
1,648
1
static int print_block_option_help(const char *filename, const char *fmt) { BlockDriver *drv, *proto_drv; QEMUOptionParameter *create_options = NULL; /* Find driver and parse its options */ drv = bdrv_find_format(fmt); if (!drv) { error_report("Unknown file format '%s'", fmt); return 1; } create_options = append_option_parameters(create_options, drv->create_options); if (filename) { proto_drv = bdrv_find_protocol(filename, true); if (!proto_drv) { error_report("Unknown protocol '%s'", filename); return 1; } create_options = append_option_parameters(create_options, proto_drv->create_options); } print_option_help(create_options); return 0; }
1,649
1
av_cold int ffv1_init_slice_contexts(FFV1Context *f) { int i; f->slice_count = f->num_h_slices * f->num_v_slices; av_assert0(f->slice_count > 0); for (i = 0; i < f->slice_count; i++) { FFV1Context *fs = av_mallocz(sizeof(*fs)); int sx = i % f->num_h_slices; int sy = i / f->num_h_slices; int sxs = f->avctx->width * sx / f->num_h_slices; int sxe = f->avctx->width * (sx + 1) / f->num_h_slices; int sys = f->avctx->height * sy / f->num_v_slices; int sye = f->avctx->height * (sy + 1) / f->num_v_slices; f->slice_context[i] = fs; memcpy(fs, f, sizeof(*fs)); memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2)); fs->slice_width = sxe - sxs; fs->slice_height = sye - sys; fs->slice_x = sxs; fs->slice_y = sys; fs->sample_buffer = av_malloc(3 * MAX_PLANES * (fs->width + 6) * sizeof(*fs->sample_buffer)); if (!fs->sample_buffer) } return 0; }
1,650
1
void ff_rtp_send_aac(AVFormatContext *s1, const uint8_t *buff, int size) { RTPMuxContext *s = s1->priv_data; int len, max_packet_size; uint8_t *p; const int max_frames_per_packet = s->max_frames_per_packet ? s->max_frames_per_packet : 5; const int max_au_headers_size = 2 + 2 * max_frames_per_packet; /* skip ADTS header, if present */ if ((s1->streams[0]->codec->extradata_size) == 0) { size -= 7; buff += 7; } max_packet_size = s->max_payload_size - max_au_headers_size; /* test if the packet must be sent */ len = (s->buf_ptr - s->buf); if ((s->num_frames == max_frames_per_packet) || (len && (len + size) > s->max_payload_size)) { int au_size = s->num_frames * 2; p = s->buf + max_au_headers_size - au_size - 2; if (p != s->buf) { memmove(p + 2, s->buf + 2, au_size); } /* Write the AU header size */ p[0] = ((au_size * 8) & 0xFF) >> 8; p[1] = (au_size * 8) & 0xFF; ff_rtp_send_data(s1, p, s->buf_ptr - p, 1); s->num_frames = 0; } if (s->num_frames == 0) { s->buf_ptr = s->buf + max_au_headers_size; s->timestamp = s->cur_timestamp; } if (size <= max_packet_size) { p = s->buf + s->num_frames++ * 2 + 2; *p++ = size >> 5; *p = (size & 0x1F) << 3; memcpy(s->buf_ptr, buff, size); s->buf_ptr += size; } else { int au_size = size; max_packet_size = s->max_payload_size - 4; p = s->buf; p[0] = 0; p[1] = 16; while (size > 0) { len = FFMIN(size, max_packet_size); p[2] = au_size >> 5; p[3] = (au_size & 0x1F) << 3; memcpy(p + 4, buff, len); ff_rtp_send_data(s1, p, len + 4, len == size); size -= len; buff += len; } } }
1,651
1
static int vqf_read_packet(AVFormatContext *s, AVPacket *pkt) { VqfContext *c = s->priv_data; int ret; int size = (c->frame_bit_len - c->remaining_bits + 7)>>3; if (av_new_packet(pkt, size+2) < 0) return AVERROR(EIO); pkt->pos = avio_tell(s->pb); pkt->stream_index = 0; pkt->duration = 1; pkt->data[0] = 8 - c->remaining_bits; // Number of bits to skip pkt->data[1] = c->last_frame_bits; ret = avio_read(s->pb, pkt->data+2, size); if (ret<=0) { av_free_packet(pkt); return AVERROR(EIO); } c->last_frame_bits = pkt->data[size+1]; c->remaining_bits = (size << 3) - c->frame_bit_len + c->remaining_bits; return size+2; }
1,652
1
static int vhost_user_cleanup(struct vhost_dev *dev) { struct vhost_user *u; assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); u = dev->opaque; if (u->slave_fd >= 0) { close(u->slave_fd); u->slave_fd = -1; } g_free(u); dev->opaque = 0; return 0; }
1,653
1
av_cold void ff_fmt_convert_init_arm(FmtConvertContext *c, AVCodecContext *avctx) { int cpu_flags = av_get_cpu_flags(); if (have_vfp(cpu_flags)) { if (!have_vfpv3(cpu_flags)) { c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_vfp; c->int32_to_float_fmul_array8 = ff_int32_to_float_fmul_array8_vfp; } } if (have_neon(cpu_flags)) { c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_neon; } }
1,654
0
static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth, const uint8_t *src, int srcW, int xInc, const int16_t *hLumFilter, const int16_t *hLumFilterPos, int hLumFilterSize, uint8_t *formatConvBuffer, uint32_t *pal, int isAlpha) { void (*toYV12)(uint8_t *, const uint8_t *, long, uint32_t *) = isAlpha ? c->alpToYV12 : c->lumToYV12; void (*convertRange)(int16_t *, int) = isAlpha ? NULL : c->lumConvertRange; src += isAlpha ? c->alpSrcOffset : c->lumSrcOffset; if (toYV12) { toYV12(formatConvBuffer, src, srcW, pal); src= formatConvBuffer; } if (c->hScale16) { c->hScale16(dst, dstWidth, (uint16_t*)src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize, av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1); } else if (!c->hyscale_fast) { c->hScale(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize); } else { // fast bilinear upscale / crap downscale c->hyscale_fast(c, dst, dstWidth, src, srcW, xInc); } if (convertRange) convertRange(dst, dstWidth); }
1,655
0
static void dv_decode_ac(GetBitContext *gb, BlockInfo *mb, DCTELEM *block) { int last_index = get_bits_size(gb); const uint8_t *scan_table = mb->scan_table; const uint8_t *shift_table = mb->shift_table; int pos = mb->pos; int partial_bit_count = mb->partial_bit_count; int level, pos1, run, vlc_len, index; OPEN_READER(re, gb); UPDATE_CACHE(re, gb); /* if we must parse a partial vlc, we do it here */ if (partial_bit_count > 0) { re_cache = ((unsigned)re_cache >> partial_bit_count) | (mb->partial_bit_buffer << (sizeof(re_cache)*8 - partial_bit_count)); re_index -= partial_bit_count; mb->partial_bit_count = 0; } /* get the AC coefficients until last_index is reached */ for(;;) { #ifdef VLC_DEBUG printf("%2d: bits=%04x index=%d\n", pos, SHOW_UBITS(re, gb, 16), re_index); #endif /* our own optimized GET_RL_VLC */ index = NEG_USR32(re_cache, TEX_VLC_BITS); vlc_len = dv_rl_vlc[index].len; if (vlc_len < 0) { index = NEG_USR32((unsigned)re_cache << TEX_VLC_BITS, -vlc_len) + dv_rl_vlc[index].level; vlc_len = TEX_VLC_BITS - vlc_len; } level = dv_rl_vlc[index].level; run = dv_rl_vlc[index].run; /* gotta check if we're still within gb boundaries */ if (re_index + vlc_len > last_index) { /* should be < 16 bits otherwise a codeword could have been parsed */ mb->partial_bit_count = last_index - re_index; mb->partial_bit_buffer = NEG_USR32(re_cache, mb->partial_bit_count); re_index = last_index; break; } re_index += vlc_len; #ifdef VLC_DEBUG printf("run=%d level=%d\n", run, level); #endif pos += run; if (pos >= 64) break; if (level) { pos1 = scan_table[pos]; block[pos1] = level << shift_table[pos1]; } UPDATE_CACHE(re, gb); } CLOSE_READER(re, gb); mb->pos = pos; }
1,656
0
int opt_default(const char *opt, const char *arg){ int type; const AVOption *o= NULL; int opt_types[]={AV_OPT_FLAG_VIDEO_PARAM, AV_OPT_FLAG_AUDIO_PARAM, 0, AV_OPT_FLAG_SUBTITLE_PARAM, 0}; for(type=0; type<CODEC_TYPE_NB; type++){ const AVOption *o2 = av_find_opt(avctx_opts[0], opt, NULL, opt_types[type], opt_types[type]); if(o2) o = av_set_string2(avctx_opts[type], opt, arg, 1); } if(!o) o = av_set_string2(avformat_opts, opt, arg, 1); if(!o) o = av_set_string2(sws_opts, opt, arg, 1); if(!o){ if(opt[0] == 'a') o = av_set_string2(avctx_opts[CODEC_TYPE_AUDIO], opt+1, arg, 1); else if(opt[0] == 'v') o = av_set_string2(avctx_opts[CODEC_TYPE_VIDEO], opt+1, arg, 1); else if(opt[0] == 's') o = av_set_string2(avctx_opts[CODEC_TYPE_SUBTITLE], opt+1, arg, 1); } if(!o) return -1; // av_log(NULL, AV_LOG_ERROR, "%s:%s: %f 0x%0X\n", opt, arg, av_get_double(avctx_opts, opt, NULL), (int)av_get_int(avctx_opts, opt, NULL)); //FIXME we should always use avctx_opts, ... for storing options so there will not be any need to keep track of what i set over this opt_names= av_realloc(opt_names, sizeof(void*)*(opt_name_count+1)); opt_names[opt_name_count++]= o->name; if(avctx_opts[0]->debug || avformat_opts->debug) av_log_set_level(AV_LOG_DEBUG); return 0; }
1,658
0
static float quantize_band_cost(struct AACEncContext *s, const float *in, const float *scaled, int size, int scale_idx, int cb, const float lambda, const float uplim, int *bits) { const float IQ = ff_aac_pow2sf_tab[200 + scale_idx - SCALE_ONE_POS + SCALE_DIV_512]; const float Q = ff_aac_pow2sf_tab[200 - scale_idx + SCALE_ONE_POS - SCALE_DIV_512]; const float CLIPPED_ESCAPE = 165140.0f*IQ; int i, j, k; float cost = 0; const int dim = cb < FIRST_PAIR_BT ? 4 : 2; int resbits = 0; #ifndef USE_REALLY_FULL_SEARCH const float Q34 = sqrtf(Q * sqrtf(Q)); const int range = aac_cb_range[cb]; const int maxval = aac_cb_maxval[cb]; int offs[4]; #endif /* USE_REALLY_FULL_SEARCH */ if (!cb) { for (i = 0; i < size; i++) cost += in[i]*in[i]*lambda; if (bits) *bits = 0; return cost; } #ifndef USE_REALLY_FULL_SEARCH offs[0] = 1; for (i = 1; i < dim; i++) offs[i] = offs[i-1]*range; quantize_bands(s->qcoefs, in, scaled, size, Q34, !IS_CODEBOOK_UNSIGNED(cb), maxval); #endif /* USE_REALLY_FULL_SEARCH */ for (i = 0; i < size; i += dim) { float mincost; int minidx = 0; int minbits = 0; const float *vec; #ifndef USE_REALLY_FULL_SEARCH int (*quants)[2] = &s->qcoefs[i]; mincost = 0.0f; for (j = 0; j < dim; j++) mincost += in[i+j]*in[i+j]*lambda; minidx = IS_CODEBOOK_UNSIGNED(cb) ? 0 : 40; minbits = ff_aac_spectral_bits[cb-1][minidx]; mincost += minbits; for (j = 0; j < (1<<dim); j++) { float rd = 0.0f; int curbits; int curidx = IS_CODEBOOK_UNSIGNED(cb) ? 0 : 40; int same = 0; for (k = 0; k < dim; k++) { if ((j & (1 << k)) && quants[k][0] == quants[k][1]) { same = 1; break; } } if (same) continue; for (k = 0; k < dim; k++) curidx += quants[k][!!(j & (1 << k))] * offs[dim - 1 - k]; curbits = ff_aac_spectral_bits[cb-1][curidx]; vec = &ff_aac_codebook_vectors[cb-1][curidx*dim]; #else mincost = INFINITY; vec = ff_aac_codebook_vectors[cb-1]; for (j = 0; j < ff_aac_spectral_sizes[cb-1]; j++, vec += dim) { float rd = 0.0f; int curbits = ff_aac_spectral_bits[cb-1][j]; #endif /* USE_REALLY_FULL_SEARCH */ if (IS_CODEBOOK_UNSIGNED(cb)) { for (k = 0; k < dim; k++) { float t = fabsf(in[i+k]); float di; //do not code with escape sequence small values if (vec[k] == 64.0f && t < 39.0f*IQ) { rd = INFINITY; break; } if (vec[k] == 64.0f) { //FIXME: slow if (t >= CLIPPED_ESCAPE) { di = t - CLIPPED_ESCAPE; curbits += 21; } else { int c = av_clip(quant(t, Q), 0, 8191); di = t - c*cbrt(c)*IQ; curbits += av_log2(c)*2 - 4 + 1; } } else { di = t - vec[k]*IQ; } if (vec[k] != 0.0f) curbits++; rd += di*di*lambda; } } else { for (k = 0; k < dim; k++) { float di = in[i+k] - vec[k]*IQ; rd += di*di*lambda; } } rd += curbits; if (rd < mincost) { mincost = rd; minidx = j; minbits = curbits; } } cost += mincost; resbits += minbits; if (cost >= uplim) return uplim; } if (bits) *bits = resbits; return cost; }
1,659
0
static int encode_subband_c0run(SnowContext *s, SubBand *b, DWTELEM *src, DWTELEM *parent, int stride, int orientation){ const int w= b->width; const int h= b->height; int x, y; if(1){ int run=0; int runs[w*h]; int run_index=0; for(y=0; y<h; y++){ for(x=0; x<w; x++){ int v, p=0; int /*ll=0, */l=0, lt=0, t=0, rt=0; v= src[x + y*stride]; if(y){ t= src[x + (y-1)*stride]; if(x){ lt= src[x - 1 + (y-1)*stride]; } if(x + 1 < w){ rt= src[x + 1 + (y-1)*stride]; } } if(x){ l= src[x - 1 + y*stride]; /*if(x > 1){ if(orientation==1) ll= src[y + (x-2)*stride]; else ll= src[x - 2 + y*stride]; }*/ } if(parent){ int px= x>>1; int py= y>>1; if(px<b->parent->width && py<b->parent->height) p= parent[px + py*2*stride]; } if(!(/*ll|*/l|lt|t|rt|p)){ if(v){ runs[run_index++]= run; run=0; }else{ run++; } } } } runs[run_index++]= run; run_index=0; run= runs[run_index++]; put_symbol2(&s->c, b->state[1], run, 3); for(y=0; y<h; y++){ if(s->c.bytestream_end - s->c.bytestream < w*40){ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); return -1; } for(x=0; x<w; x++){ int v, p=0; int /*ll=0, */l=0, lt=0, t=0, rt=0; v= src[x + y*stride]; if(y){ t= src[x + (y-1)*stride]; if(x){ lt= src[x - 1 + (y-1)*stride]; } if(x + 1 < w){ rt= src[x + 1 + (y-1)*stride]; } } if(x){ l= src[x - 1 + y*stride]; /*if(x > 1){ if(orientation==1) ll= src[y + (x-2)*stride]; else ll= src[x - 2 + y*stride]; }*/ } if(parent){ int px= x>>1; int py= y>>1; if(px<b->parent->width && py<b->parent->height) p= parent[px + py*2*stride]; } if(/*ll|*/l|lt|t|rt|p){ int context= av_log2(/*ABS(ll) + */3*ABS(l) + ABS(lt) + 2*ABS(t) + ABS(rt) + ABS(p)); put_rac(&s->c, &b->state[0][context], !!v); }else{ if(!run){ run= runs[run_index++]; put_symbol2(&s->c, b->state[1], run, 3); assert(v); }else{ run--; assert(!v); } } if(v){ int context= av_log2(/*ABS(ll) + */3*ABS(l) + ABS(lt) + 2*ABS(t) + ABS(rt) + ABS(p)); int l2= 2*ABS(l) + (l<0); int t2= 2*ABS(t) + (t<0); put_symbol2(&s->c, b->state[context + 2], ABS(v)-1, context-4); put_rac(&s->c, &b->state[0][16 + 1 + 3 + quant3bA[l2&0xFF] + 3*quant3bA[t2&0xFF]], v<0); } } } } return 0; }
1,660
0
static int g722_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { G722Context *c = avctx->priv_data; int16_t *out_buf = data; int j, out_len = 0; const int skip = 8 - avctx->bits_per_coded_sample; const int16_t *quantizer_table = low_inv_quants[skip]; GetBitContext gb; init_get_bits(&gb, avpkt->data, avpkt->size * 8); for (j = 0; j < avpkt->size; j++) { int ilow, ihigh, rlow, rhigh, dhigh; int xout1, xout2; ihigh = get_bits(&gb, 2); ilow = get_bits(&gb, 6 - skip); skip_bits(&gb, skip); rlow = av_clip((c->band[0].scale_factor * quantizer_table[ilow] >> 10) + c->band[0].s_predictor, -16384, 16383); ff_g722_update_low_predictor(&c->band[0], ilow >> (2 - skip)); dhigh = c->band[1].scale_factor * ff_g722_high_inv_quant[ihigh] >> 10; rhigh = av_clip(dhigh + c->band[1].s_predictor, -16384, 16383); ff_g722_update_high_predictor(&c->band[1], dhigh, ihigh); c->prev_samples[c->prev_samples_pos++] = rlow + rhigh; c->prev_samples[c->prev_samples_pos++] = rlow - rhigh; ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24, &xout1, &xout2); out_buf[out_len++] = av_clip_int16(xout1 >> 12); out_buf[out_len++] = av_clip_int16(xout2 >> 12); if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) { memmove(c->prev_samples, c->prev_samples + c->prev_samples_pos - 22, 22 * sizeof(c->prev_samples[0])); c->prev_samples_pos = 22; } } *data_size = out_len << 1; return avpkt->size; }
1,661
1
int qemu_create_pidfile(const char *filename) { char buffer[128]; int len; int fd; fd = qemu_open(filename, O_RDWR | O_CREAT, 0600); if (fd == -1) { return -1; } if (lockf(fd, F_TLOCK, 0) == -1) { close(fd); return -1; } len = snprintf(buffer, sizeof(buffer), FMT_pid "\n", getpid()); if (write(fd, buffer, len) != len) { close(fd); return -1; } close(fd); return 0; }
1,662
1
static av_cold int v410_decode_init(AVCodecContext *avctx) { avctx->pix_fmt = PIX_FMT_YUV444P10; avctx->bits_per_raw_sample = 10; if (avctx->width & 1) { av_log(avctx, AV_LOG_ERROR, "v410 requires width to be even.\n"); return AVERROR_INVALIDDATA; } avctx->coded_frame = avcodec_alloc_frame(); if (!avctx->coded_frame) { av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n"); return AVERROR(ENOMEM); } return 0; }
1,663
1
static int alloc_picture(H264Context *h, Picture *pic) { int i, ret = 0; av_assert0(!pic->f.data[0]); if (h->avctx->hwaccel) { const AVHWAccel *hwaccel = h->avctx->hwaccel; av_assert0(!pic->hwaccel_picture_private); if (hwaccel->priv_data_size) { pic->hwaccel_priv_buf = av_buffer_allocz(hwaccel->priv_data_size); if (!pic->hwaccel_priv_buf) return AVERROR(ENOMEM); pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data; } } pic->tf.f = &pic->f; ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ? AV_GET_BUFFER_FLAG_REF : 0); if (ret < 0) goto fail; h->linesize = pic->f.linesize[0]; h->uvlinesize = pic->f.linesize[1]; if (!h->qscale_table_pool) { ret = init_table_pools(h); if (ret < 0) goto fail; } pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool); pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool); if (!pic->qscale_table_buf || !pic->mb_type_buf) goto fail; pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1; pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1; for (i = 0; i < 2; i++) { pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool); pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool); if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) goto fail; pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4; pic->ref_index[i] = pic->ref_index_buf[i]->data; } return 0; fail: unref_picture(h, pic); return (ret < 0) ? ret : AVERROR(ENOMEM); }
1,664
1
static void rc4030_dma_tt_update(rc4030State *s, uint32_t new_tl_base, uint32_t new_tl_limit) { int entries, i; dma_pagetable_entry *dma_tl_contents; if (s->dma_tl_limit) { /* write old dma tl table to physical memory */ memory_region_del_subregion(get_system_memory(), &s->dma_tt_alias); cpu_physical_memory_write(s->dma_tl_limit & 0x7fffffff, memory_region_get_ram_ptr(&s->dma_tt), memory_region_size(&s->dma_tt_alias)); } object_unparent(OBJECT(&s->dma_tt_alias)); s->dma_tl_base = new_tl_base; s->dma_tl_limit = new_tl_limit; new_tl_base &= 0x7fffffff; if (s->dma_tl_limit) { uint64_t dma_tt_size; if (s->dma_tl_limit <= memory_region_size(&s->dma_tt)) { dma_tt_size = s->dma_tl_limit; } else { dma_tt_size = memory_region_size(&s->dma_tt); } memory_region_init_alias(&s->dma_tt_alias, OBJECT(s), "dma-table-alias", &s->dma_tt, 0, dma_tt_size); dma_tl_contents = memory_region_get_ram_ptr(&s->dma_tt); cpu_physical_memory_read(new_tl_base, dma_tl_contents, dma_tt_size); memory_region_transaction_begin(); entries = dma_tt_size / sizeof(dma_pagetable_entry); for (i = 0; i < entries; i++) { rc4030_dma_as_update_one(s, i, dma_tl_contents[i].frame); } memory_region_add_subregion(get_system_memory(), new_tl_base, &s->dma_tt_alias); memory_region_transaction_commit(); } else { memory_region_init(&s->dma_tt_alias, OBJECT(s), "dma-table-alias", 0); } }
1,665
1
static int mov_read_dvc1(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; uint8_t profile_level; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; if (atom.size >= (1<<28) || atom.size < 7) return AVERROR_INVALIDDATA; profile_level = avio_r8(pb); if ((profile_level & 0xf0) != 0xc0) return 0; av_free(st->codec->extradata); st->codec->extradata = av_mallocz(atom.size - 7 + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); st->codec->extradata_size = atom.size - 7; avio_seek(pb, 6, SEEK_CUR); avio_read(pb, st->codec->extradata, st->codec->extradata_size); return 0; }
1,666
0
static void zero_remaining(unsigned int b, unsigned int b_max, const unsigned int *div_blocks, int32_t *buf) { unsigned int count = 0; while (b < b_max) count += div_blocks[b]; if (count) memset(buf, 0, sizeof(*buf) * count); }
1,667
1
static int push_samples(AVFilterContext *ctx, int nb_samples) { AVFilterLink *outlink = ctx->outputs[0]; LoopContext *s = ctx->priv; AVFrame *out; int ret, i = 0; while (s->loop != 0 && i < nb_samples) { out = ff_get_audio_buffer(outlink, FFMIN(nb_samples, s->nb_samples - s->current_sample)); if (!out) return AVERROR(ENOMEM); ret = av_audio_fifo_peek_at(s->fifo, (void **)out->extended_data, out->nb_samples, s->current_sample); if (ret < 0) return ret; out->pts = s->pts; out->nb_samples = ret; s->pts += out->nb_samples; i += out->nb_samples; s->current_sample += out->nb_samples; ret = ff_filter_frame(outlink, out); if (ret < 0) return ret; if (s->current_sample >= s->nb_samples) { s->current_sample = 0; if (s->loop > 0) s->loop--; } } return ret; }
1,668
1
void *qemu_realloc(void *ptr, size_t size) { if (!size && !allow_zero_malloc()) { abort(); } return oom_check(realloc(ptr, size ? size : 1)); }
1,669
1
static int coreaudio_init_out (HWVoiceOut *hw, struct audsettings *as) { OSStatus status; coreaudioVoiceOut *core = (coreaudioVoiceOut *) hw; UInt32 propertySize; int err; const char *typ = "playback"; AudioValueRange frameRange; /* create mutex */ err = pthread_mutex_init(&core->mutex, NULL); if (err) { dolog("Could not create mutex\nReason: %s\n", strerror (err)); return -1; } audio_pcm_init_info (&hw->info, as); /* open default output device */ propertySize = sizeof(core->outputDeviceID); status = AudioHardwareGetProperty( kAudioHardwarePropertyDefaultOutputDevice, &propertySize, &core->outputDeviceID); if (status != kAudioHardwareNoError) { coreaudio_logerr2 (status, typ, "Could not get default output Device\n"); return -1; } if (core->outputDeviceID == kAudioDeviceUnknown) { dolog ("Could not initialize %s - Unknown Audiodevice\n", typ); return -1; } /* get minimum and maximum buffer frame sizes */ propertySize = sizeof(frameRange); status = AudioDeviceGetProperty( core->outputDeviceID, 0, 0, kAudioDevicePropertyBufferFrameSizeRange, &propertySize, &frameRange); if (status != kAudioHardwareNoError) { coreaudio_logerr2 (status, typ, "Could not get device buffer frame range\n"); return -1; } if (frameRange.mMinimum > conf.buffer_frames) { core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMinimum; dolog ("warning: Upsizing Buffer Frames to %f\n", frameRange.mMinimum); } else if (frameRange.mMaximum < conf.buffer_frames) { core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMaximum; dolog ("warning: Downsizing Buffer Frames to %f\n", frameRange.mMaximum); } else { core->audioDevicePropertyBufferFrameSize = conf.buffer_frames; } /* set Buffer Frame Size */ propertySize = sizeof(core->audioDevicePropertyBufferFrameSize); status = AudioDeviceSetProperty( core->outputDeviceID, NULL, 0, false, kAudioDevicePropertyBufferFrameSize, propertySize, &core->audioDevicePropertyBufferFrameSize); if (status != kAudioHardwareNoError) { coreaudio_logerr2 (status, typ, "Could not set device buffer frame size %" PRIu32 "\n", (uint32_t)core->audioDevicePropertyBufferFrameSize); return -1; } /* get Buffer Frame Size */ propertySize = sizeof(core->audioDevicePropertyBufferFrameSize); status = AudioDeviceGetProperty( core->outputDeviceID, 0, false, kAudioDevicePropertyBufferFrameSize, &propertySize, &core->audioDevicePropertyBufferFrameSize); if (status != kAudioHardwareNoError) { coreaudio_logerr2 (status, typ, "Could not get device buffer frame size\n"); return -1; } hw->samples = conf.nbuffers * core->audioDevicePropertyBufferFrameSize; /* get StreamFormat */ propertySize = sizeof(core->outputStreamBasicDescription); status = AudioDeviceGetProperty( core->outputDeviceID, 0, false, kAudioDevicePropertyStreamFormat, &propertySize, &core->outputStreamBasicDescription); if (status != kAudioHardwareNoError) { coreaudio_logerr2 (status, typ, "Could not get Device Stream properties\n"); core->outputDeviceID = kAudioDeviceUnknown; return -1; } /* set Samplerate */ core->outputStreamBasicDescription.mSampleRate = (Float64) as->freq; propertySize = sizeof(core->outputStreamBasicDescription); status = AudioDeviceSetProperty( core->outputDeviceID, 0, 0, 0, kAudioDevicePropertyStreamFormat, propertySize, &core->outputStreamBasicDescription); if (status != kAudioHardwareNoError) { coreaudio_logerr2 (status, typ, "Could not set samplerate %d\n", as->freq); core->outputDeviceID = kAudioDeviceUnknown; return -1; } /* set Callback */ status = AudioDeviceAddIOProc(core->outputDeviceID, 
audioDeviceIOProc, hw); if (status != kAudioHardwareNoError) { coreaudio_logerr2 (status, typ, "Could not set IOProc\n"); core->outputDeviceID = kAudioDeviceUnknown; return -1; } /* start Playback */ if (!isPlaying(core->outputDeviceID)) { status = AudioDeviceStart(core->outputDeviceID, audioDeviceIOProc); if (status != kAudioHardwareNoError) { coreaudio_logerr2 (status, typ, "Could not start playback\n"); AudioDeviceRemoveIOProc(core->outputDeviceID, audioDeviceIOProc); core->outputDeviceID = kAudioDeviceUnknown; return -1; } } return 0; }
1,670
1
static void xilinx_spips_flush_txfifo(XilinxSPIPS *s) { int debug_level = 0; XilinxQSPIPS *q = (XilinxQSPIPS *) object_dynamic_cast(OBJECT(s), TYPE_XILINX_QSPIPS); for (;;) { int i; uint8_t tx = 0; uint8_t tx_rx[num_effective_busses(s)]; uint8_t dummy_cycles = 0; uint8_t addr_length; if (fifo8_is_empty(&s->tx_fifo)) { if (!(s->regs[R_LQSPI_CFG] & LQSPI_CFG_LQ_MODE)) { s->regs[R_INTR_STATUS] |= IXR_TX_FIFO_UNDERFLOW; } xilinx_spips_update_ixr(s); return; } else if (s->snoop_state == SNOOP_STRIPING) { for (i = 0; i < num_effective_busses(s); ++i) { tx_rx[i] = fifo8_pop(&s->tx_fifo); } stripe8(tx_rx, num_effective_busses(s), false); } else if (s->snoop_state >= SNOOP_ADDR) { tx = fifo8_pop(&s->tx_fifo); for (i = 0; i < num_effective_busses(s); ++i) { tx_rx[i] = tx; } } else { /* Extract a dummy byte and generate dummy cycles according to the * link state */ tx = fifo8_pop(&s->tx_fifo); dummy_cycles = 8 / s->link_state; } for (i = 0; i < num_effective_busses(s); ++i) { int bus = num_effective_busses(s) - 1 - i; if (dummy_cycles) { int d; for (d = 0; d < dummy_cycles; ++d) { tx_rx[0] = ssi_transfer(s->spi[bus], (uint32_t)tx_rx[0]); } } else { DB_PRINT_L(debug_level, "tx = %02x\n", tx_rx[i]); tx_rx[i] = ssi_transfer(s->spi[bus], (uint32_t)tx_rx[i]); DB_PRINT_L(debug_level, "rx = %02x\n", tx_rx[i]); } } if (s->regs[R_CMND] & R_CMND_RXFIFO_DRAIN) { DB_PRINT_L(debug_level, "dircarding drained rx byte\n"); /* Do nothing */ } else if (s->rx_discard) { DB_PRINT_L(debug_level, "dircarding discarded rx byte\n"); s->rx_discard -= 8 / s->link_state; } else if (fifo8_is_full(&s->rx_fifo)) { s->regs[R_INTR_STATUS] |= IXR_RX_FIFO_OVERFLOW; DB_PRINT_L(0, "rx FIFO overflow"); } else if (s->snoop_state == SNOOP_STRIPING) { stripe8(tx_rx, num_effective_busses(s), true); for (i = 0; i < num_effective_busses(s); ++i) { fifo8_push(&s->rx_fifo, (uint8_t)tx_rx[i]); DB_PRINT_L(debug_level, "pushing striped rx byte\n"); } } else { DB_PRINT_L(debug_level, "pushing unstriped rx byte\n"); fifo8_push(&s->rx_fifo, (uint8_t)tx_rx[0]); } if (s->link_state_next_when) { s->link_state_next_when--; if (!s->link_state_next_when) { s->link_state = s->link_state_next; } } DB_PRINT_L(debug_level, "initial snoop state: %x\n", (unsigned)s->snoop_state); switch (s->snoop_state) { case (SNOOP_CHECKING): /* Store the count of dummy bytes in the txfifo */ s->cmd_dummies = xilinx_spips_num_dummies(q, tx); addr_length = get_addr_length(s, tx); if (s->cmd_dummies < 0) { s->snoop_state = SNOOP_NONE; } else { s->snoop_state = SNOOP_ADDR + addr_length - 1; } switch (tx) { case DPP: case DOR: case DOR_4: s->link_state_next = 2; s->link_state_next_when = addr_length + s->cmd_dummies; break; case QPP: case QPP_4: case QOR: case QOR_4: s->link_state_next = 4; s->link_state_next_when = addr_length + s->cmd_dummies; break; case DIOR: case DIOR_4: s->link_state = 2; break; case QIOR: case QIOR_4: s->link_state = 4; break; } break; case (SNOOP_ADDR): /* Address has been transmitted, transmit dummy cycles now if * needed */ if (s->cmd_dummies < 0) { s->snoop_state = SNOOP_NONE; } else { s->snoop_state = s->cmd_dummies; } break; case (SNOOP_STRIPING): case (SNOOP_NONE): /* Once we hit the boring stuff - squelch debug noise */ if (!debug_level) { DB_PRINT_L(0, "squelching debug info ....\n"); debug_level = 1; } break; default: s->snoop_state--; } DB_PRINT_L(debug_level, "final snoop state: %x\n", (unsigned)s->snoop_state); } }
1,671
0
static int img_set_parameters(AVFormatContext *s, AVFormatParameters *ap) { VideoData *img = s->priv_data; AVStream *st; AVImageFormat *img_fmt; int i; /* find output image format */ if (ap && ap->image_format) { img_fmt = ap->image_format; } else { img_fmt = guess_image_format(s->filename); } if (!img_fmt) return -1; if (s->nb_streams != 1) return -1; st = s->streams[0]; /* we select the first matching format */ for(i=0;i<PIX_FMT_NB;i++) { if (img_fmt->supported_pixel_formats & (1 << i)) break; } if (i >= PIX_FMT_NB) return -1; img->img_fmt = img_fmt; img->pix_fmt = i; st->codec->pix_fmt = img->pix_fmt; return 0; }
1,674
1
void qemu_coroutine_enter(Coroutine *co) { Coroutine *self = qemu_coroutine_self(); CoroutineAction ret; trace_qemu_coroutine_enter(self, co, co->entry_arg); if (co->caller) { fprintf(stderr, "Co-routine re-entered recursively\n"); abort(); } co->caller = self; co->ctx = qemu_get_current_aio_context(); /* Store co->ctx before anything that stores co. Matches * barrier in aio_co_wake. */ smp_wmb(); ret = qemu_coroutine_switch(self, co, COROUTINE_ENTER); qemu_co_queue_run_restart(co); switch (ret) { case COROUTINE_YIELD: return; case COROUTINE_TERMINATE: assert(!co->locks_held); trace_qemu_coroutine_terminate(co); coroutine_delete(co); return; default: abort(); } }
1,676
1
static void aarch64_cpu_set_pc(CPUState *cs, vaddr value) { ARMCPU *cpu = ARM_CPU(cs); /* * TODO: this will need updating for system emulation, * when the core may be in AArch32 mode. */ cpu->env.pc = value; }
1,678
1
static struct iovec *cap_sg(struct iovec *sg, int cap, int *cnt) { int i; int total = 0; for (i = 0; i < *cnt; i++) { if ((total + sg[i].iov_len) > cap) { sg[i].iov_len -= ((total + sg[i].iov_len) - cap); i++; break; } total += sg[i].iov_len; } *cnt = i; return sg; }
1,679
1
void mcf_fec_init(NICInfo *nd, target_phys_addr_t base, qemu_irq *irq) { mcf_fec_state *s; qemu_check_nic_model(nd, "mcf_fec"); s = (mcf_fec_state *)qemu_mallocz(sizeof(mcf_fec_state)); s->irq = irq; s->mmio_index = cpu_register_io_memory(mcf_fec_readfn, mcf_fec_writefn, s); cpu_register_physical_memory(base, 0x400, s->mmio_index); s->vc = qemu_new_vlan_client(nd->vlan, nd->model, nd->name, mcf_fec_can_receive, mcf_fec_receive, NULL, mcf_fec_cleanup, s); memcpy(s->macaddr, nd->macaddr, 6); qemu_format_nic_info_str(s->vc, s->macaddr); }
1,680
1
static int opt_sameq(void *optctx, const char *opt, const char *arg) { av_log(NULL, AV_LOG_WARNING, "Ignoring option '%s'\n", opt); return 0; }
1,681
1
static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat) { //FIXME Optimize (just quickly writen not opti..) int i; for(i=0; i<dstW; i++) { int val=1<<18; int j; for(j=0; j<lumFilterSize; j++) val += lumSrc[j][i] * lumFilter[j]; dest[i]= av_clip_uint8(val>>19); } if(uDest == NULL) return; if(dstFormat == PIX_FMT_NV12) for(i=0; i<chrDstW; i++) { int u=1<<18; int v=1<<18; int j; for(j=0; j<chrFilterSize; j++) { u += chrSrc[j][i] * chrFilter[j]; v += chrSrc[j][i + 2048] * chrFilter[j]; } uDest[2*i]= av_clip_uint8(u>>19); uDest[2*i+1]= av_clip_uint8(v>>19); } else for(i=0; i<chrDstW; i++) { int u=1<<18; int v=1<<18; int j; for(j=0; j<chrFilterSize; j++) { u += chrSrc[j][i] * chrFilter[j]; v += chrSrc[j][i + 2048] * chrFilter[j]; } uDest[2*i]= av_clip_uint8(v>>19); uDest[2*i+1]= av_clip_uint8(u>>19); } }
1,682
0
static void lowpass_line_complex_c(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp, ptrdiff_t mref, ptrdiff_t pref) { const uint8_t *srcp_above = srcp + mref; const uint8_t *srcp_below = srcp + pref; const uint8_t *srcp_above2 = srcp + mref * 2; const uint8_t *srcp_below2 = srcp + pref * 2; int i; for (i = 0; i < width; i++) { // this calculation is an integer representation of // '0.75 * current + 0.25 * above + 0.25 * below - 0.125 * above2 - 0.125 * below2' // '4 +' is for rounding. dstp[i] = av_clip_uint8((4 + (srcp[i] << 2) + ((srcp[i] + srcp_above[i] + srcp_below[i]) << 1) - srcp_above2[i] - srcp_below2[i]) >> 3); } }
1,684
0
static void pic_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { HeathrowPICS *s = opaque; HeathrowPIC *pic; unsigned int n; n = ((addr & 0xfff) - 0x10) >> 4; PIC_DPRINTF("writel: " TARGET_FMT_plx " %u: %08x\n", addr, n, value); if (n >= 2) return; pic = &s->pics[n]; switch(addr & 0xf) { case 0x04: pic->mask = value; heathrow_pic_update(s); break; case 0x08: /* do not reset level triggered IRQs */ value &= ~pic->level_triggered; pic->events &= ~value; heathrow_pic_update(s); break; default: break; } }
1,685
0
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small) { int32_t val, val1; TCGLabel *l = &s->labels[label_index]; if (l->has_value) { val = tcg_pcrel_diff(s, l->u.value_ptr); val1 = val - 2; if ((int8_t)val1 == val1) { if (opc == -1) { tcg_out8(s, OPC_JMP_short); } else { tcg_out8(s, OPC_JCC_short + opc); } tcg_out8(s, val1); } else { if (small) { tcg_abort(); } if (opc == -1) { tcg_out8(s, OPC_JMP_long); tcg_out32(s, val - 5); } else { tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0); tcg_out32(s, val - 6); } } } else if (small) { if (opc == -1) { tcg_out8(s, OPC_JMP_short); } else { tcg_out8(s, OPC_JCC_short + opc); } tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1); s->code_ptr += 1; } else { if (opc == -1) { tcg_out8(s, OPC_JMP_long); } else { tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0); } tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4); s->code_ptr += 4; } }
1,686
0
hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong addr) { struct mmu_ctx_hash32 ctx; if (unlikely(ppc_hash32_get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) { return -1; } return ctx.raddr & TARGET_PAGE_MASK; }
1,687
0
static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s, void *data, uint32_t length, uint64_t offset) { int ret = 0; void *buffer = NULL; void *merged_sector = NULL; void *data_tmp, *sector_write; unsigned int i; int sector_offset; uint32_t desc_sectors, sectors, total_length; uint32_t sectors_written = 0; uint32_t aligned_length; uint32_t leading_length = 0; uint32_t trailing_length = 0; uint32_t partial_sectors = 0; uint32_t bytes_written = 0; uint64_t file_offset; VHDXHeader *header; VHDXLogEntryHeader new_hdr; VHDXLogDescriptor *new_desc = NULL; VHDXLogDataSector *data_sector = NULL; MSGUID new_guid = { 0 }; header = s->headers[s->curr_header]; /* need to have offset read data, and be on 4096 byte boundary */ if (length > header->log_length) { /* no log present. we could create a log here instead of failing */ ret = -EINVAL; goto exit; } if (guid_eq(header->log_guid, zero_guid)) { vhdx_guid_generate(&new_guid); vhdx_update_headers(bs, s, false, &new_guid); } else { /* currently, we require that the log be flushed after * every write. */ ret = -ENOTSUP; goto exit; } /* 0 is an invalid sequence number, but may also represent the first * log write (or a wrapped seq) */ if (s->log.sequence == 0) { s->log.sequence = 1; } sector_offset = offset % VHDX_LOG_SECTOR_SIZE; file_offset = (offset / VHDX_LOG_SECTOR_SIZE) * VHDX_LOG_SECTOR_SIZE; aligned_length = length; /* add in the unaligned head and tail bytes */ if (sector_offset) { leading_length = (VHDX_LOG_SECTOR_SIZE - sector_offset); leading_length = leading_length > length ? length : leading_length; aligned_length -= leading_length; partial_sectors++; } sectors = aligned_length / VHDX_LOG_SECTOR_SIZE; trailing_length = aligned_length - (sectors * VHDX_LOG_SECTOR_SIZE); if (trailing_length) { partial_sectors++; } sectors += partial_sectors; /* sectors is now how many sectors the data itself takes, not * including the header and descriptor metadata */ new_hdr = (VHDXLogEntryHeader) { .signature = VHDX_LOG_SIGNATURE, .tail = s->log.tail, .sequence_number = s->log.sequence, .descriptor_count = sectors, .reserved = 0, .flushed_file_offset = bdrv_getlength(bs->file), .last_file_offset = bdrv_getlength(bs->file), }; new_hdr.log_guid = header->log_guid; desc_sectors = vhdx_compute_desc_sectors(new_hdr.descriptor_count); total_length = (desc_sectors + sectors) * VHDX_LOG_SECTOR_SIZE; new_hdr.entry_length = total_length; vhdx_log_entry_hdr_le_export(&new_hdr); buffer = qemu_blockalign(bs, total_length); memcpy(buffer, &new_hdr, sizeof(new_hdr)); new_desc = (VHDXLogDescriptor *) (buffer + sizeof(new_hdr)); data_sector = buffer + (desc_sectors * VHDX_LOG_SECTOR_SIZE); data_tmp = data; /* All log sectors are 4KB, so for any partial sectors we must * merge the data with preexisting data from the final file * destination */ merged_sector = qemu_blockalign(bs, VHDX_LOG_SECTOR_SIZE); for (i = 0; i < sectors; i++) { new_desc->signature = VHDX_LOG_DESC_SIGNATURE; new_desc->sequence_number = s->log.sequence; new_desc->file_offset = file_offset; if (i == 0 && leading_length) { /* partial sector at the front of the buffer */ ret = bdrv_pread(bs->file, file_offset, merged_sector, VHDX_LOG_SECTOR_SIZE); if (ret < 0) { goto exit; } memcpy(merged_sector + sector_offset, data_tmp, leading_length); bytes_written = leading_length; sector_write = merged_sector; } else if (i == sectors - 1 && trailing_length) { /* partial sector at the end of the buffer */ ret = bdrv_pread(bs->file, file_offset, merged_sector + trailing_length, VHDX_LOG_SECTOR_SIZE - trailing_length); if (ret < 0) { goto exit; } memcpy(merged_sector, data_tmp, trailing_length); bytes_written = trailing_length; sector_write = merged_sector; } else { bytes_written = VHDX_LOG_SECTOR_SIZE; sector_write = data_tmp; } /* populate the raw sector data into the proper structures, * as well as update the descriptor, and convert to proper * endianness */ vhdx_log_raw_to_le_sector(new_desc, data_sector, sector_write, s->log.sequence); data_tmp += bytes_written; data_sector++; new_desc++; file_offset += VHDX_LOG_SECTOR_SIZE; } /* checksum covers entire entry, from the log header through the * last data sector */ vhdx_update_checksum(buffer, total_length, offsetof(VHDXLogEntryHeader, checksum)); cpu_to_le32s((uint32_t *)(buffer + 4)); /* now write to the log */ vhdx_log_write_sectors(bs, &s->log, &sectors_written, buffer, desc_sectors + sectors); if (ret < 0) { goto exit; } if (sectors_written != desc_sectors + sectors) { /* instead of failing, we could flush the log here */ ret = -EINVAL; goto exit; } s->log.sequence++; /* write new tail */ s->log.tail = s->log.write; exit: qemu_vfree(buffer); qemu_vfree(merged_sector); return ret; }
1,689
0
Aml *aml_index(Aml *arg1, Aml *idx) { Aml *var = aml_opcode(0x88 /* IndexOp */); aml_append(var, arg1); aml_append(var, idx); build_append_byte(var->buf, 0x00 /* NullNameOp */); return var; }
1,690
0
static void timer_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { LM32TimerState *s = opaque; trace_lm32_timer_memory_write(addr, value); addr >>= 2; switch (addr) { case R_SR: s->regs[R_SR] &= ~SR_TO; break; case R_CR: s->regs[R_CR] = value; if (s->regs[R_CR] & CR_START) { ptimer_run(s->ptimer, 1); } if (s->regs[R_CR] & CR_STOP) { ptimer_stop(s->ptimer); } break; case R_PERIOD: s->regs[R_PERIOD] = value; ptimer_set_count(s->ptimer, value); break; case R_SNAPSHOT: error_report("lm32_timer: write access to read only register 0x" TARGET_FMT_plx, addr << 2); break; default: error_report("lm32_timer: write access to unknown register 0x" TARGET_FMT_plx, addr << 2); break; } timer_update_irq(s); }
1,691
0
static inline void menelaus_rtc_start(MenelausState *s) { s->rtc.next += qemu_get_clock(rt_clock); qemu_mod_timer(s->rtc.hz_tm, s->rtc.next); }
1,692
0
static void test_visitor_in_alternate_number(TestInputVisitorData *data, const void *unused) { Visitor *v; Error *err = NULL; AltStrBool *asb; AltStrNum *asn; AltNumStr *ans; AltStrInt *asi; AltIntNum *ain; AltNumInt *ani; /* Parsing an int */ v = visitor_input_test_init(data, "42"); visit_type_AltStrBool(v, &asb, NULL, &err); g_assert(err); error_free(err); err = NULL; qapi_free_AltStrBool(asb); visitor_input_teardown(data, NULL); /* FIXME: Order of alternate should not affect semantics; asn should * parse the same as ans */ v = visitor_input_test_init(data, "42"); visit_type_AltStrNum(v, &asn, NULL, &err); /* FIXME g_assert_cmpint(asn->kind, == ALT_STR_NUM_KIND_N); */ /* FIXME g_assert_cmpfloat(asn->n, ==, 42); */ g_assert(err); error_free(err); err = NULL; qapi_free_AltStrNum(asn); visitor_input_teardown(data, NULL); v = visitor_input_test_init(data, "42"); visit_type_AltNumStr(v, &ans, NULL, &error_abort); g_assert_cmpint(ans->kind, ==, ALT_NUM_STR_KIND_N); g_assert_cmpfloat(ans->n, ==, 42); qapi_free_AltNumStr(ans); visitor_input_teardown(data, NULL); v = visitor_input_test_init(data, "42"); visit_type_AltStrInt(v, &asi, NULL, &error_abort); g_assert_cmpint(asi->kind, ==, ALT_STR_INT_KIND_I); g_assert_cmpint(asi->i, ==, 42); qapi_free_AltStrInt(asi); visitor_input_teardown(data, NULL); v = visitor_input_test_init(data, "42"); visit_type_AltIntNum(v, &ain, NULL, &error_abort); g_assert_cmpint(ain->kind, ==, ALT_INT_NUM_KIND_I); g_assert_cmpint(ain->i, ==, 42); qapi_free_AltIntNum(ain); visitor_input_teardown(data, NULL); v = visitor_input_test_init(data, "42"); visit_type_AltNumInt(v, &ani, NULL, &error_abort); g_assert_cmpint(ani->kind, ==, ALT_NUM_INT_KIND_I); g_assert_cmpint(ani->i, ==, 42); qapi_free_AltNumInt(ani); visitor_input_teardown(data, NULL); /* Parsing a double */ v = visitor_input_test_init(data, "42.5"); visit_type_AltStrBool(v, &asb, NULL, &err); g_assert(err); error_free(err); err = NULL; qapi_free_AltStrBool(asb); visitor_input_teardown(data, NULL); v = visitor_input_test_init(data, "42.5"); visit_type_AltStrNum(v, &asn, NULL, &error_abort); g_assert_cmpint(asn->kind, ==, ALT_STR_NUM_KIND_N); g_assert_cmpfloat(asn->n, ==, 42.5); qapi_free_AltStrNum(asn); visitor_input_teardown(data, NULL); v = visitor_input_test_init(data, "42.5"); visit_type_AltNumStr(v, &ans, NULL, &error_abort); g_assert_cmpint(ans->kind, ==, ALT_NUM_STR_KIND_N); g_assert_cmpfloat(ans->n, ==, 42.5); qapi_free_AltNumStr(ans); visitor_input_teardown(data, NULL); v = visitor_input_test_init(data, "42.5"); visit_type_AltStrInt(v, &asi, NULL, &err); g_assert(err); error_free(err); err = NULL; qapi_free_AltStrInt(asi); visitor_input_teardown(data, NULL); v = visitor_input_test_init(data, "42.5"); visit_type_AltIntNum(v, &ain, NULL, &error_abort); g_assert_cmpint(ain->kind, ==, ALT_INT_NUM_KIND_N); g_assert_cmpfloat(ain->n, ==, 42.5); qapi_free_AltIntNum(ain); visitor_input_teardown(data, NULL); v = visitor_input_test_init(data, "42.5"); visit_type_AltNumInt(v, &ani, NULL, &error_abort); g_assert_cmpint(ani->kind, ==, ALT_NUM_INT_KIND_N); g_assert_cmpfloat(ani->n, ==, 42.5); qapi_free_AltNumInt(ani); visitor_input_teardown(data, NULL); }
1,693
0
static int zipl_run(struct scsi_blockptr *pte) { struct component_header *header; struct component_entry *entry; uint8_t tmp_sec[SECTOR_SIZE]; virtio_read(pte->blockno, tmp_sec); header = (struct component_header *)tmp_sec; if (!zipl_magic(tmp_sec)) { goto fail; } if (header->type != ZIPL_COMP_HEADER_IPL) { goto fail; } dputs("start loading images\n"); /* Load image(s) into RAM */ entry = (struct component_entry *)(&header[1]); while (entry->component_type == ZIPL_COMP_ENTRY_LOAD) { if (zipl_load_segment(entry) < 0) { goto fail; } entry++; if ((uint8_t*)(&entry[1]) > (tmp_sec + SECTOR_SIZE)) { goto fail; } } if (entry->component_type != ZIPL_COMP_ENTRY_EXEC) { goto fail; } /* should not return */ jump_to_IPL_code(entry->load_address); return 0; fail: sclp_print("failed running zipl\n"); return -1; }
1,694
0
static int udp_write(URLContext *h, const uint8_t *buf, int size) { UDPContext *s = h->priv_data; int ret; for(;;) { if (!s->is_connected) { ret = sendto (s->udp_fd, buf, size, 0, (struct sockaddr *) &s->dest_addr, s->dest_addr_len); } else ret = send(s->udp_fd, buf, size, 0); if (ret < 0) { if (ff_neterrno() != AVERROR(EINTR) && ff_neterrno() != AVERROR(EAGAIN)) return ff_neterrno(); } else { break; } } return size; }
1,695
0
static int megasas_cache_flush(MegasasState *s, MegasasCmd *cmd) { bdrv_drain_all(); return MFI_STAT_OK; }
1,696
0
static void test_visitor_in_fail_list_nested(TestInputVisitorData *data, const void *unused) { int64_t i64 = -1; Visitor *v; /* Unvisited nested list tail */ v = visitor_input_test_init(data, "[ 0, [ 1, 2, 3 ] ]"); visit_start_list(v, NULL, NULL, 0, &error_abort); visit_type_int(v, NULL, &i64, &error_abort); g_assert_cmpint(i64, ==, 0); visit_start_list(v, NULL, NULL, 0, &error_abort); visit_type_int(v, NULL, &i64, &error_abort); g_assert_cmpint(i64, ==, 1); visit_end_list(v, NULL); /* BUG: unvisited tail not reported; actually not reportable by design */ visit_end_list(v, NULL); }
1,697
0
static void mptsas_scsi_uninit(PCIDevice *dev) { MPTSASState *s = MPT_SAS(dev); qemu_bh_delete(s->request_bh); if (s->msi_in_use) { msi_uninit(dev); } }
1,698
0
static unsigned int dec_swap_r(DisasContext *dc) { TCGv t0; #if DISAS_CRIS char modename[4]; #endif DIS(fprintf (logfile, "swap%s $r%u\n", swapmode_name(dc->op2, modename), dc->op1)); cris_cc_mask(dc, CC_MASK_NZ); t0 = tcg_temp_new(TCG_TYPE_TL); t_gen_mov_TN_reg(t0, dc->op1); if (dc->op2 & 8) tcg_gen_not_tl(t0, t0); if (dc->op2 & 4) t_gen_swapw(t0, t0); if (dc->op2 & 2) t_gen_swapb(t0, t0); if (dc->op2 & 1) t_gen_swapr(t0, t0); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op1], cpu_R[dc->op1], t0, 4); tcg_temp_free(t0); return 2; }
1,699
0
static void test_visitor_in_number(TestInputVisitorData *data, const void *unused) { double res = 0, value = 3.14; Visitor *v; v = visitor_input_test_init(data, "%f", value); visit_type_number(v, NULL, &res, &error_abort); g_assert_cmpfloat(res, ==, value); }
1,700
0
static int protocol_client_auth(VncState *vs, uint8_t *data, size_t len) { /* We only advertise 1 auth scheme at a time, so client * must pick the one we sent. Verify this */ if (data[0] != vs->vd->auth) { /* Reject auth */ VNC_DEBUG("Reject auth %d\n", (int)data[0]); vnc_write_u32(vs, 1); if (vs->minor >= 8) { static const char err[] = "Authentication failed"; vnc_write_u32(vs, sizeof(err)); vnc_write(vs, err, sizeof(err)); } vnc_client_error(vs); } else { /* Accept requested auth */ VNC_DEBUG("Client requested auth %d\n", (int)data[0]); switch (vs->vd->auth) { case VNC_AUTH_NONE: VNC_DEBUG("Accept auth none\n"); if (vs->minor >= 8) { vnc_write_u32(vs, 0); /* Accept auth completion */ vnc_flush(vs); } start_client_init(vs); break; case VNC_AUTH_VNC: VNC_DEBUG("Start VNC auth\n"); start_auth_vnc(vs); break; #ifdef CONFIG_VNC_TLS case VNC_AUTH_VENCRYPT: VNC_DEBUG("Accept VeNCrypt auth\n");; start_auth_vencrypt(vs); break; #endif /* CONFIG_VNC_TLS */ #ifdef CONFIG_VNC_SASL case VNC_AUTH_SASL: VNC_DEBUG("Accept SASL auth\n"); start_auth_sasl(vs); break; #endif /* CONFIG_VNC_SASL */ default: /* Should not be possible, but just in case */ VNC_DEBUG("Reject auth %d\n", vs->vd->auth); vnc_write_u8(vs, 1); if (vs->minor >= 8) { static const char err[] = "Authentication failed"; vnc_write_u32(vs, sizeof(err)); vnc_write(vs, err, sizeof(err)); } vnc_client_error(vs); } } return 0; }
1,702
0
BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockCompletionFunc *cb, void *opaque) { trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, cb, opaque, false); }
1,703
0
static int do_qmp_capabilities(Monitor *mon, const QDict *params, QObject **ret_data) { /* Will setup QMP capabilities in the future */ if (monitor_ctrl_mode(mon)) { mon->qmp.command_mode = 1; } return 0; }
1,704
0
int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s, void *data, uint32_t length, uint64_t offset) { int ret = 0; VHDXLogSequence logs = { .valid = true, .count = 1, .hdr = { 0 } }; /* Make sure data written (new and/or changed blocks) is stable * on disk, before creating log entry */ bdrv_flush(bs); ret = vhdx_log_write(bs, s, data, length, offset); if (ret < 0) { goto exit; } logs.log = s->log; /* Make sure log is stable on disk */ bdrv_flush(bs); ret = vhdx_log_flush(bs, s, &logs); if (ret < 0) { goto exit; } s->log = logs.log; exit: return ret; }
1,705
0
static bool release_pending(sPAPRDRConnector *drc) { return drc->awaiting_release; }
1,707
0
void dp83932_init(NICInfo *nd, target_phys_addr_t base, int it_shift, MemoryRegion *address_space, qemu_irq irq, void* mem_opaque, void (*memory_rw)(void *opaque, target_phys_addr_t addr, uint8_t *buf, int len, int is_write)) { dp8393xState *s; qemu_check_nic_model(nd, "dp83932"); s = g_malloc0(sizeof(dp8393xState)); s->address_space = address_space; s->mem_opaque = mem_opaque; s->memory_rw = memory_rw; s->it_shift = it_shift; s->irq = irq; s->watchdog = qemu_new_timer_ns(vm_clock, dp8393x_watchdog, s); s->regs[SONIC_SR] = 0x0004; /* only revision recognized by Linux */ s->conf.macaddr = nd->macaddr; s->conf.peer = nd->netdev; s->nic = qemu_new_nic(&net_dp83932_info, &s->conf, nd->model, nd->name, s); qemu_format_nic_info_str(&s->nic->nc, s->conf.macaddr.a); qemu_register_reset(nic_reset, s); nic_reset(s); memory_region_init_io(&s->mmio, &dp8393x_ops, s, "dp8393x", 0x40 << it_shift); memory_region_add_subregion(address_space, base, &s->mmio); }
1,708
0
int net_init_l2tpv3(const NetClientOptions *opts, const char *name, NetClientState *peer, Error **errp) { /* FIXME error_setg(errp, ...) on failure */ const NetdevL2TPv3Options *l2tpv3; NetL2TPV3State *s; NetClientState *nc; int fd = -1, gairet; struct addrinfo hints; struct addrinfo *result = NULL; char *srcport, *dstport; nc = qemu_new_net_client(&net_l2tpv3_info, peer, "l2tpv3", name); s = DO_UPCAST(NetL2TPV3State, nc, nc); s->queue_head = 0; s->queue_tail = 0; s->header_mismatch = false; assert(opts->type == NET_CLIENT_OPTIONS_KIND_L2TPV3); l2tpv3 = opts->u.l2tpv3; if (l2tpv3->has_ipv6 && l2tpv3->ipv6) { s->ipv6 = l2tpv3->ipv6; } else { s->ipv6 = false; } if ((l2tpv3->has_offset) && (l2tpv3->offset > 256)) { error_report("l2tpv3_open : offset must be less than 256 bytes"); goto outerr; } if (l2tpv3->has_rxcookie || l2tpv3->has_txcookie) { if (l2tpv3->has_rxcookie && l2tpv3->has_txcookie) { s->cookie = true; } else { goto outerr; } } else { s->cookie = false; } if (l2tpv3->has_cookie64 || l2tpv3->cookie64) { s->cookie_is_64 = true; } else { s->cookie_is_64 = false; } if (l2tpv3->has_udp && l2tpv3->udp) { s->udp = true; if (!(l2tpv3->has_srcport && l2tpv3->has_dstport)) { error_report("l2tpv3_open : need both src and dst port for udp"); goto outerr; } else { srcport = l2tpv3->srcport; dstport = l2tpv3->dstport; } } else { s->udp = false; srcport = NULL; dstport = NULL; } s->offset = 4; s->session_offset = 0; s->cookie_offset = 4; s->counter_offset = 4; s->tx_session = l2tpv3->txsession; if (l2tpv3->has_rxsession) { s->rx_session = l2tpv3->rxsession; } else { s->rx_session = s->tx_session; } if (s->cookie) { s->rx_cookie = l2tpv3->rxcookie; s->tx_cookie = l2tpv3->txcookie; if (s->cookie_is_64 == true) { /* 64 bit cookie */ s->offset += 8; s->counter_offset += 8; } else { /* 32 bit cookie */ s->offset += 4; s->counter_offset += 4; } } memset(&hints, 0, sizeof(hints)); if (s->ipv6) { hints.ai_family = AF_INET6; } else { hints.ai_family = AF_INET; } if (s->udp) { hints.ai_socktype = SOCK_DGRAM; hints.ai_protocol = 0; s->offset += 4; s->counter_offset += 4; s->session_offset += 4; s->cookie_offset += 4; } else { hints.ai_socktype = SOCK_RAW; hints.ai_protocol = IPPROTO_L2TP; } gairet = getaddrinfo(l2tpv3->src, srcport, &hints, &result); if ((gairet != 0) || (result == NULL)) { error_report( "l2tpv3_open : could not resolve src, errno = %s", gai_strerror(gairet) ); goto outerr; } fd = socket(result->ai_family, result->ai_socktype, result->ai_protocol); if (fd == -1) { fd = -errno; error_report("l2tpv3_open : socket creation failed, errno = %d", -fd); goto outerr; } if (bind(fd, (struct sockaddr *) result->ai_addr, result->ai_addrlen)) { error_report("l2tpv3_open : could not bind socket err=%i", errno); goto outerr; } if (result) { freeaddrinfo(result); } memset(&hints, 0, sizeof(hints)); if (s->ipv6) { hints.ai_family = AF_INET6; } else { hints.ai_family = AF_INET; } if (s->udp) { hints.ai_socktype = SOCK_DGRAM; hints.ai_protocol = 0; } else { hints.ai_socktype = SOCK_RAW; hints.ai_protocol = IPPROTO_L2TP; } result = NULL; gairet = getaddrinfo(l2tpv3->dst, dstport, &hints, &result); if ((gairet != 0) || (result == NULL)) { error_report( "l2tpv3_open : could not resolve dst, error = %s", gai_strerror(gairet) ); goto outerr; } s->dgram_dst = g_new0(struct sockaddr_storage, 1); memcpy(s->dgram_dst, result->ai_addr, result->ai_addrlen); s->dst_size = result->ai_addrlen; if (result) { freeaddrinfo(result); } if (l2tpv3->has_counter && l2tpv3->counter) { s->has_counter = true; s->offset += 4; } else { s->has_counter = false; } if (l2tpv3->has_pincounter && l2tpv3->pincounter) { s->has_counter = true; /* pin counter implies that there is counter */ s->pin_counter = true; } else { s->pin_counter = false; } if (l2tpv3->has_offset) { /* extra offset */ s->offset += l2tpv3->offset; } if ((s->ipv6) || (s->udp)) { s->header_size = s->offset; } else { s->header_size = s->offset + sizeof(struct iphdr); } s->msgvec = build_l2tpv3_vector(s, MAX_L2TPV3_MSGCNT); s->vec = g_new(struct iovec, MAX_L2TPV3_IOVCNT); s->header_buf = g_malloc(s->header_size); qemu_set_nonblock(fd); s->fd = fd; s->counter = 0; l2tpv3_read_poll(s, true); snprintf(s->nc.info_str, sizeof(s->nc.info_str), "l2tpv3: connected"); return 0; outerr: qemu_del_net_client(nc); if (fd >= 0) { close(fd); } if (result) { freeaddrinfo(result); } return -1; }
1,709
0
CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler) { CPUDebugExcpHandler *old_handler = debug_excp_handler; debug_excp_handler = handler; return old_handler; }
1,711
0
av_cold int ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s, int flip, int has_alpha) { int i; s->avctx = avctx; avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P; if (avctx->skip_alpha) avctx->pix_fmt = AV_PIX_FMT_YUV420P; ff_h264chroma_init(&s->h264chroma, 8); ff_hpeldsp_init(&s->hdsp, avctx->flags); ff_videodsp_init(&s->vdsp, 8); ff_vp3dsp_init(&s->vp3dsp, avctx->flags); ff_vp56dsp_init(&s->vp56dsp, avctx->codec->id); for (i = 0; i < 64; i++) { #define TRANSPOSE(x) (x >> 3) | ((x & 7) << 3) s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]); #undef TRANSPOSE } for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) { s->frames[i] = av_frame_alloc(); if (!s->frames[i]) { ff_vp56_free(avctx); return AVERROR(ENOMEM); } } s->edge_emu_buffer_alloc = NULL; s->above_blocks = NULL; s->macroblocks = NULL; s->quantizer = -1; s->deblock_filtering = 1; s->golden_frame = 0; s->filter = NULL; s->has_alpha = has_alpha; s->modelp = &s->model; if (flip) { s->flip = -1; s->frbi = 2; s->srbi = 0; } else { s->flip = 1; s->frbi = 0; s->srbi = 2; } return 0; }
1,714
0
static av_always_inline void dist_scale(HEVCContext *s, Mv *mv, int min_pu_width, int x, int y, int elist, int ref_idx_curr, int ref_idx) { RefPicList *refPicList = s->ref->refPicList; MvField *tab_mvf = s->ref->tab_mvf; int ref_pic_elist = refPicList[elist].list[TAB_MVF(x, y).ref_idx[elist]]; int ref_pic_curr = refPicList[ref_idx_curr].list[ref_idx]; if (ref_pic_elist != ref_pic_curr) mv_scale(mv, mv, s->poc - ref_pic_elist, s->poc - ref_pic_curr); }
1,715
0
static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size) { RMContext *rm = s->priv_data; ByteIOContext *pb = &s->pb; StreamInfo *stream = rm->video_stream; int key_frame = stream->enc->coded_frame->key_frame; /* XXX: this is incorrect: should be a parameter */ /* Well, I spent some time finding the meaning of these bits. I am not sure I understood everything, but it works !! */ #if 1 write_packet_header(s, stream, size + 7, key_frame); /* bit 7: '1' if final packet of a frame converted in several packets */ put_byte(pb, 0x81); /* bit 7: '1' if I frame. bits 6..0 : sequence number in current frame starting from 1 */ if (key_frame) { put_byte(pb, 0x81); } else { put_byte(pb, 0x01); } put_be16(pb, 0x4000 | (size)); /* total frame size */ put_be16(pb, 0x4000 | (size)); /* offset from the start or the end */ #else /* full frame */ write_packet_header(s, size + 6); put_byte(pb, 0xc0); put_be16(pb, 0x4000 | size); /* total frame size */ put_be16(pb, 0x4000 + packet_number * 126); /* position in stream */ #endif put_byte(pb, stream->nb_frames & 0xff); put_buffer(pb, buf, size); put_flush_packet(pb); stream->nb_frames++; return 0; }
1,716