label
int64
0
1
text
stringlengths
0
20.4M
0
/* * QEMU Sun4u/Sun4v System Emulator * * Copyright (c) 2005 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu-common.h" #include "cpu.h" #include "hw/hw.h" #include "hw/pci/pci.h" #include "hw/pci-host/apb.h" #include "hw/i386/pc.h" #include "hw/char/serial.h" #include "hw/timer/m48t59.h" #include "hw/block/fdc.h" #include "net/net.h" #include "qemu/timer.h" #include "sysemu/sysemu.h" #include "hw/boards.h" #include "hw/nvram/openbios_firmware_abi.h" #include "hw/nvram/fw_cfg.h" #include "hw/sysbus.h" #include "hw/ide.h" #include "hw/loader.h" #include "elf.h" #include "sysemu/block-backend.h" #include "exec/address-spaces.h" #include "qemu/cutils.h" //#define DEBUG_IRQ //#define DEBUG_EBUS //#define DEBUG_TIMER #ifdef DEBUG_IRQ #define CPUIRQ_DPRINTF(fmt, ...) \ do { printf("CPUIRQ: " fmt , ## __VA_ARGS__); } while (0) #else #define CPUIRQ_DPRINTF(fmt, ...) #endif #ifdef DEBUG_EBUS #define EBUS_DPRINTF(fmt, ...) \ do { printf("EBUS: " fmt , ## __VA_ARGS__); } while (0) #else #define EBUS_DPRINTF(fmt, ...) #endif #ifdef DEBUG_TIMER #define TIMER_DPRINTF(fmt, ...) \ do { printf("TIMER: " fmt , ## __VA_ARGS__); } while (0) #else #define TIMER_DPRINTF(fmt, ...) #endif #define KERNEL_LOAD_ADDR 0x00404000 #define CMDLINE_ADDR 0x003ff000 #define PROM_SIZE_MAX (4 * 1024 * 1024) #define PROM_VADDR 0x000ffd00000ULL #define APB_SPECIAL_BASE 0x1fe00000000ULL #define APB_MEM_BASE 0x1ff00000000ULL #define APB_PCI_IO_BASE (APB_SPECIAL_BASE + 0x02000000ULL) #define PROM_FILENAME "openbios-sparc64" #define NVRAM_SIZE 0x2000 #define MAX_IDE_BUS 2 #define BIOS_CFG_IOPORT 0x510 #define FW_CFG_SPARC64_WIDTH (FW_CFG_ARCH_LOCAL + 0x00) #define FW_CFG_SPARC64_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01) #define FW_CFG_SPARC64_DEPTH (FW_CFG_ARCH_LOCAL + 0x02) #define IVEC_MAX 0x40 #define TICK_MAX 0x7fffffffffffffffULL struct hwdef { const char * const default_cpu_model; uint16_t machine_id; uint64_t prom_addr; uint64_t console_serial_base; }; typedef struct EbusState { PCIDevice pci_dev; MemoryRegion bar0; MemoryRegion bar1; } EbusState; void DMA_init(ISABus *bus, int high_page_enable) { } static void fw_cfg_boot_set(void *opaque, const char *boot_device, Error **errp) { fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); } static int sun4u_NVRAM_set_params(Nvram *nvram, uint16_t NVRAM_size, const char *arch, ram_addr_t RAM_size, const char *boot_devices, uint32_t kernel_image, uint32_t kernel_size, const char *cmdline, uint32_t initrd_image, uint32_t initrd_size, uint32_t NVRAM_image, int width, int height, int depth, const uint8_t *macaddr) { unsigned int i; uint32_t start, end; uint8_t image[0x1ff0]; struct OpenBIOS_nvpart_v1 *part_header; NvramClass *k = NVRAM_GET_CLASS(nvram); memset(image, '\0', sizeof(image)); start = 0; // OpenBIOS nvram variables // Variable partition part_header = (struct OpenBIOS_nvpart_v1 *)&image[start]; part_header->signature = OPENBIOS_PART_SYSTEM; pstrcpy(part_header->name, sizeof(part_header->name), "system"); end = start + sizeof(struct OpenBIOS_nvpart_v1); for (i = 0; i < nb_prom_envs; i++) end = OpenBIOS_set_var(image, end, prom_envs[i]); // End marker image[end++] = '\0'; end = start + ((end - start + 15) & ~15); OpenBIOS_finish_partition(part_header, end - start); // free partition start = end; part_header = (struct OpenBIOS_nvpart_v1 *)&image[start]; part_header->signature = OPENBIOS_PART_FREE; pstrcpy(part_header->name, sizeof(part_header->name), "free"); end = 0x1fd0; OpenBIOS_finish_partition(part_header, end - start); Sun_init_header((struct Sun_nvram *)&image[0x1fd8], macaddr, 0x80); for (i = 0; i < sizeof(image); i++) { (k->write)(nvram, i, image[i]); } return 0; } static uint64_t sun4u_load_kernel(const char *kernel_filename, const char *initrd_filename, ram_addr_t RAM_size, uint64_t *initrd_size, uint64_t *initrd_addr, uint64_t *kernel_addr, uint64_t *kernel_entry) { int linux_boot; unsigned int i; long kernel_size; uint8_t *ptr; uint64_t kernel_top; linux_boot = (kernel_filename != NULL); kernel_size = 0; if (linux_boot) { int bswap_needed; #ifdef BSWAP_NEEDED bswap_needed = 1; #else bswap_needed = 0; #endif kernel_size = load_elf(kernel_filename, NULL, NULL, kernel_entry, kernel_addr, &kernel_top, 1, EM_SPARCV9, 0, 0); if (kernel_size < 0) { *kernel_addr = KERNEL_LOAD_ADDR; *kernel_entry = KERNEL_LOAD_ADDR; kernel_size = load_aout(kernel_filename, KERNEL_LOAD_ADDR, RAM_size - KERNEL_LOAD_ADDR, bswap_needed, TARGET_PAGE_SIZE); } if (kernel_size < 0) { kernel_size = load_image_targphys(kernel_filename, KERNEL_LOAD_ADDR, RAM_size - KERNEL_LOAD_ADDR); } if (kernel_size < 0) { fprintf(stderr, "qemu: could not load kernel '%s'\n", kernel_filename); exit(1); } /* load initrd above kernel */ *initrd_size = 0; if (initrd_filename) { *initrd_addr = TARGET_PAGE_ALIGN(kernel_top); *initrd_size = load_image_targphys(initrd_filename, *initrd_addr, RAM_size - *initrd_addr); if ((int)*initrd_size < 0) { fprintf(stderr, "qemu: could not load initial ram disk '%s'\n", initrd_filename); exit(1); } } if (*initrd_size > 0) { for (i = 0; i < 64 * TARGET_PAGE_SIZE; i += TARGET_PAGE_SIZE) { ptr = rom_ptr(*kernel_addr + i); if (ldl_p(ptr + 8) == 0x48647253) { /* HdrS */ stl_p(ptr + 24, *initrd_addr + *kernel_addr); stl_p(ptr + 28, *initrd_size); break; } } } } return kernel_size; } void cpu_check_irqs(CPUSPARCState *env) { CPUState *cs; uint32_t pil = env->pil_in | (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER)); /* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */ if (env->ivec_status & 0x20) { return; } cs = CPU(sparc_env_get_cpu(env)); /* check if TM or SM in SOFTINT are set setting these also causes interrupt 14 */ if (env->softint & (SOFTINT_TIMER | SOFTINT_STIMER)) { pil |= 1 << 14; } /* The bit corresponding to psrpil is (1<< psrpil), the next bit is (2 << psrpil). */ if (pil < (2 << env->psrpil)){ if (cs->interrupt_request & CPU_INTERRUPT_HARD) { CPUIRQ_DPRINTF("Reset CPU IRQ (current interrupt %x)\n", env->interrupt_index); env->interrupt_index = 0; cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } return; } if (cpu_interrupts_enabled(env)) { unsigned int i; for (i = 15; i > env->psrpil; i--) { if (pil & (1 << i)) { int old_interrupt = env->interrupt_index; int new_interrupt = TT_EXTINT | i; if (unlikely(env->tl > 0 && cpu_tsptr(env)->tt > new_interrupt && ((cpu_tsptr(env)->tt & 0x1f0) == TT_EXTINT))) { CPUIRQ_DPRINTF("Not setting CPU IRQ: TL=%d " "current %x >= pending %x\n", env->tl, cpu_tsptr(env)->tt, new_interrupt); } else if (old_interrupt != new_interrupt) { env->interrupt_index = new_interrupt; CPUIRQ_DPRINTF("Set CPU IRQ %d old=%x new=%x\n", i, old_interrupt, new_interrupt); cpu_interrupt(cs, CPU_INTERRUPT_HARD); } break; } } } else if (cs->interrupt_request & CPU_INTERRUPT_HARD) { CPUIRQ_DPRINTF("Interrupts disabled, pil=%08x pil_in=%08x softint=%08x " "current interrupt %x\n", pil, env->pil_in, env->softint, env->interrupt_index); env->interrupt_index = 0; cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } } static void cpu_kick_irq(SPARCCPU *cpu) { CPUState *cs = CPU(cpu); CPUSPARCState *env = &cpu->env; cs->halted = 0; cpu_check_irqs(env); qemu_cpu_kick(cs); } static void cpu_set_ivec_irq(void *opaque, int irq, int level) { SPARCCPU *cpu = opaque; CPUSPARCState *env = &cpu->env; CPUState *cs; if (level) { if (!(env->ivec_status & 0x20)) { CPUIRQ_DPRINTF("Raise IVEC IRQ %d\n", irq); cs = CPU(cpu); cs->halted = 0; env->interrupt_index = TT_IVEC; env->ivec_status |= 0x20; env->ivec_data[0] = (0x1f << 6) | irq; env->ivec_data[1] = 0; env->ivec_data[2] = 0; cpu_interrupt(cs, CPU_INTERRUPT_HARD); } } else { if (env->ivec_status & 0x20) { CPUIRQ_DPRINTF("Lower IVEC IRQ %d\n", irq); cs = CPU(cpu); env->ivec_status &= ~0x20; cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } } } typedef struct ResetData { SPARCCPU *cpu; uint64_t prom_addr; } ResetData; static CPUTimer *cpu_timer_create(const char *name, SPARCCPU *cpu, QEMUBHFunc *cb, uint32_t frequency, uint64_t disabled_mask, uint64_t npt_mask) { CPUTimer *timer = g_malloc0(sizeof (CPUTimer)); timer->name = name; timer->frequency = frequency; timer->disabled_mask = disabled_mask; timer->npt_mask = npt_mask; timer->disabled = 1; timer->npt = 1; timer->clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); timer->qtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cb, cpu); return timer; } static void cpu_timer_reset(CPUTimer *timer) { timer->disabled = 1; timer->clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); timer_del(timer->qtimer); } static void main_cpu_reset(void *opaque) { ResetData *s = (ResetData *)opaque; CPUSPARCState *env = &s->cpu->env; static unsigned int nr_resets; cpu_reset(CPU(s->cpu)); cpu_timer_reset(env->tick); cpu_timer_reset(env->stick); cpu_timer_reset(env->hstick); env->gregs[1] = 0; // Memory start env->gregs[2] = ram_size; // Memory size env->gregs[3] = 0; // Machine description XXX if (nr_resets++ == 0) { /* Power on reset */ env->pc = s->prom_addr + 0x20ULL; } else { env->pc = s->prom_addr + 0x40ULL; } env->npc = env->pc + 4; } static void tick_irq(void *opaque) { SPARCCPU *cpu = opaque; CPUSPARCState *env = &cpu->env; CPUTimer* timer = env->tick; if (timer->disabled) { CPUIRQ_DPRINTF("tick_irq: softint disabled\n"); return; } else { CPUIRQ_DPRINTF("tick: fire\n"); } env->softint |= SOFTINT_TIMER; cpu_kick_irq(cpu); } static void stick_irq(void *opaque) { SPARCCPU *cpu = opaque; CPUSPARCState *env = &cpu->env; CPUTimer* timer = env->stick; if (timer->disabled) { CPUIRQ_DPRINTF("stick_irq: softint disabled\n"); return; } else { CPUIRQ_DPRINTF("stick: fire\n"); } env->softint |= SOFTINT_STIMER; cpu_kick_irq(cpu); } static void hstick_irq(void *opaque) { SPARCCPU *cpu = opaque; CPUSPARCState *env = &cpu->env; CPUTimer* timer = env->hstick; if (timer->disabled) { CPUIRQ_DPRINTF("hstick_irq: softint disabled\n"); return; } else { CPUIRQ_DPRINTF("hstick: fire\n"); } env->softint |= SOFTINT_STIMER; cpu_kick_irq(cpu); } static int64_t cpu_to_timer_ticks(int64_t cpu_ticks, uint32_t frequency) { return muldiv64(cpu_ticks, NANOSECONDS_PER_SECOND, frequency); } static uint64_t timer_to_cpu_ticks(int64_t timer_ticks, uint32_t frequency) { return muldiv64(timer_ticks, frequency, NANOSECONDS_PER_SECOND); } void cpu_tick_set_count(CPUTimer *timer, uint64_t count) { uint64_t real_count = count & ~timer->npt_mask; uint64_t npt_bit = count & timer->npt_mask; int64_t vm_clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - cpu_to_timer_ticks(real_count, timer->frequency); TIMER_DPRINTF("%s set_count count=0x%016lx (npt %s) p=%p\n", timer->name, real_count, timer->npt ? "disabled" : "enabled", timer); timer->npt = npt_bit ? 1 : 0; timer->clock_offset = vm_clock_offset; } uint64_t cpu_tick_get_count(CPUTimer *timer) { uint64_t real_count = timer_to_cpu_ticks( qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - timer->clock_offset, timer->frequency); TIMER_DPRINTF("%s get_count count=0x%016lx (npt %s) p=%p\n", timer->name, real_count, timer->npt ? "disabled" : "enabled", timer); if (timer->npt) { real_count |= timer->npt_mask; } return real_count; } void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit) { int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); uint64_t real_limit = limit & ~timer->disabled_mask; timer->disabled = (limit & timer->disabled_mask) ? 1 : 0; int64_t expires = cpu_to_timer_ticks(real_limit, timer->frequency) + timer->clock_offset; if (expires < now) { expires = now + 1; } TIMER_DPRINTF("%s set_limit limit=0x%016lx (%s) p=%p " "called with limit=0x%016lx at 0x%016lx (delta=0x%016lx)\n", timer->name, real_limit, timer->disabled?"disabled":"enabled", timer, limit, timer_to_cpu_ticks(now - timer->clock_offset, timer->frequency), timer_to_cpu_ticks(expires - now, timer->frequency)); if (!real_limit) { TIMER_DPRINTF("%s set_limit limit=ZERO - not starting timer\n", timer->name); timer_del(timer->qtimer); } else if (timer->disabled) { timer_del(timer->qtimer); } else { timer_mod(timer->qtimer, expires); } } static void isa_irq_handler(void *opaque, int n, int level) { static const int isa_irq_to_ivec[16] = { [1] = 0x29, /* keyboard */ [4] = 0x2b, /* serial */ [6] = 0x27, /* floppy */ [7] = 0x22, /* parallel */ [12] = 0x2a, /* mouse */ }; qemu_irq *irqs = opaque; int ivec; assert(n < 16); ivec = isa_irq_to_ivec[n]; EBUS_DPRINTF("Set ISA IRQ %d level %d -> ivec 0x%x\n", n, level, ivec); if (ivec) { qemu_set_irq(irqs[ivec], level); } } /* EBUS (Eight bit bus) bridge */ static ISABus * pci_ebus_init(PCIBus *bus, int devfn, qemu_irq *irqs) { qemu_irq *isa_irq; PCIDevice *pci_dev; ISABus *isa_bus; pci_dev = pci_create_simple(bus, devfn, "ebus"); isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(pci_dev), "isa.0")); isa_irq = qemu_allocate_irqs(isa_irq_handler, irqs, 16); isa_bus_irqs(isa_bus, isa_irq); return isa_bus; } static void pci_ebus_realize(PCIDevice *pci_dev, Error **errp) { EbusState *s = DO_UPCAST(EbusState, pci_dev, pci_dev); if (!isa_bus_new(DEVICE(pci_dev), get_system_memory(), pci_address_space_io(pci_dev), errp)) { return; } pci_dev->config[0x04] = 0x06; // command = bus master, pci mem pci_dev->config[0x05] = 0x00; pci_dev->config[0x06] = 0xa0; // status = fast back-to-back, 66MHz, no error pci_dev->config[0x07] = 0x03; // status = medium devsel pci_dev->config[0x09] = 0x00; // programming i/f pci_dev->config[0x0D] = 0x0a; // latency_timer memory_region_init_alias(&s->bar0, OBJECT(s), "bar0", get_system_io(), 0, 0x1000000); pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar0); memory_region_init_alias(&s->bar1, OBJECT(s), "bar1", get_system_io(), 0, 0x4000); pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->bar1); } static void ebus_class_init(ObjectClass *klass, void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->realize = pci_ebus_realize; k->vendor_id = PCI_VENDOR_ID_SUN; k->device_id = PCI_DEVICE_ID_SUN_EBUS; k->revision = 0x01; k->class_id = PCI_CLASS_BRIDGE_OTHER; } static const TypeInfo ebus_info = { .name = "ebus", .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(EbusState), .class_init = ebus_class_init, }; #define TYPE_OPENPROM "openprom" #define OPENPROM(obj) OBJECT_CHECK(PROMState, (obj), TYPE_OPENPROM) typedef struct PROMState { SysBusDevice parent_obj; MemoryRegion prom; } PROMState; static uint64_t translate_prom_address(void *opaque, uint64_t addr) { hwaddr *base_addr = (hwaddr *)opaque; return addr + *base_addr - PROM_VADDR; } /* Boot PROM (OpenBIOS) */ static void prom_init(hwaddr addr, const char *bios_name) { DeviceState *dev; SysBusDevice *s; char *filename; int ret; dev = qdev_create(NULL, TYPE_OPENPROM); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); sysbus_mmio_map(s, 0, addr); /* load boot prom */ if (bios_name == NULL) { bios_name = PROM_FILENAME; } filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (filename) { ret = load_elf(filename, translate_prom_address, &addr, NULL, NULL, NULL, 1, EM_SPARCV9, 0, 0); if (ret < 0 || ret > PROM_SIZE_MAX) { ret = load_image_targphys(filename, addr, PROM_SIZE_MAX); } g_free(filename); } else { ret = -1; } if (ret < 0 || ret > PROM_SIZE_MAX) { fprintf(stderr, "qemu: could not load prom '%s'\n", bios_name); exit(1); } } static int prom_init1(SysBusDevice *dev) { PROMState *s = OPENPROM(dev); memory_region_init_ram(&s->prom, OBJECT(s), "sun4u.prom", PROM_SIZE_MAX, &error_fatal); vmstate_register_ram_global(&s->prom); memory_region_set_readonly(&s->prom, true); sysbus_init_mmio(dev, &s->prom); return 0; } static Property prom_properties[] = { {/* end of property list */}, }; static void prom_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); k->init = prom_init1; dc->props = prom_properties; } static const TypeInfo prom_info = { .name = TYPE_OPENPROM, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(PROMState), .class_init = prom_class_init, }; #define TYPE_SUN4U_MEMORY "memory" #define SUN4U_RAM(obj) OBJECT_CHECK(RamDevice, (obj), TYPE_SUN4U_MEMORY) typedef struct RamDevice { SysBusDevice parent_obj; MemoryRegion ram; uint64_t size; } RamDevice; /* System RAM */ static int ram_init1(SysBusDevice *dev) { RamDevice *d = SUN4U_RAM(dev); memory_region_init_ram(&d->ram, OBJECT(d), "sun4u.ram", d->size, &error_fatal); vmstate_register_ram_global(&d->ram); sysbus_init_mmio(dev, &d->ram); return 0; } static void ram_init(hwaddr addr, ram_addr_t RAM_size) { DeviceState *dev; SysBusDevice *s; RamDevice *d; /* allocate RAM */ dev = qdev_create(NULL, TYPE_SUN4U_MEMORY); s = SYS_BUS_DEVICE(dev); d = SUN4U_RAM(dev); d->size = RAM_size; qdev_init_nofail(dev); sysbus_mmio_map(s, 0, addr); } static Property ram_properties[] = { DEFINE_PROP_UINT64("size", RamDevice, size, 0), DEFINE_PROP_END_OF_LIST(), }; static void ram_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); k->init = ram_init1; dc->props = ram_properties; } static const TypeInfo ram_info = { .name = TYPE_SUN4U_MEMORY, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(RamDevice), .class_init = ram_class_init, }; static SPARCCPU *cpu_devinit(const char *cpu_model, const struct hwdef *hwdef) { SPARCCPU *cpu; CPUSPARCState *env; ResetData *reset_info; uint32_t tick_frequency = 100*1000000; uint32_t stick_frequency = 100*1000000; uint32_t hstick_frequency = 100*1000000; if (cpu_model == NULL) { cpu_model = hwdef->default_cpu_model; } cpu = cpu_sparc_init(cpu_model); if (cpu == NULL) { fprintf(stderr, "Unable to find Sparc CPU definition\n"); exit(1); } env = &cpu->env; env->tick = cpu_timer_create("tick", cpu, tick_irq, tick_frequency, TICK_INT_DIS, TICK_NPT_MASK); env->stick = cpu_timer_create("stick", cpu, stick_irq, stick_frequency, TICK_INT_DIS, TICK_NPT_MASK); env->hstick = cpu_timer_create("hstick", cpu, hstick_irq, hstick_frequency, TICK_INT_DIS, TICK_NPT_MASK); reset_info = g_malloc0(sizeof(ResetData)); reset_info->cpu = cpu; reset_info->prom_addr = hwdef->prom_addr; qemu_register_reset(main_cpu_reset, reset_info); return cpu; } static void sun4uv_init(MemoryRegion *address_space_mem, MachineState *machine, const struct hwdef *hwdef) { SPARCCPU *cpu; Nvram *nvram; unsigned int i; uint64_t initrd_addr, initrd_size, kernel_addr, kernel_size, kernel_entry; PCIBus *pci_bus, *pci_bus2, *pci_bus3; ISABus *isa_bus; SysBusDevice *s; qemu_irq *ivec_irqs, *pbm_irqs; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; DriveInfo *fd[MAX_FD]; DeviceState *dev; FWCfgState *fw_cfg; /* init CPUs */ cpu = cpu_devinit(machine->cpu_model, hwdef); /* set up devices */ ram_init(0, machine->ram_size); prom_init(hwdef->prom_addr, bios_name); ivec_irqs = qemu_allocate_irqs(cpu_set_ivec_irq, cpu, IVEC_MAX); pci_bus = pci_apb_init(APB_SPECIAL_BASE, APB_MEM_BASE, ivec_irqs, &pci_bus2, &pci_bus3, &pbm_irqs); pci_vga_init(pci_bus); // XXX Should be pci_bus3 isa_bus = pci_ebus_init(pci_bus, -1, pbm_irqs); i = 0; if (hwdef->console_serial_base) { serial_mm_init(address_space_mem, hwdef->console_serial_base, 0, NULL, 115200, serial_hds[i], DEVICE_BIG_ENDIAN); i++; } serial_hds_isa_init(isa_bus, MAX_SERIAL_PORTS); parallel_hds_isa_init(isa_bus, MAX_PARALLEL_PORTS); for(i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], pci_bus, "ne2k_pci", NULL); ide_drive_get(hd, ARRAY_SIZE(hd)); pci_cmd646_ide_init(pci_bus, hd, 1); isa_create_simple(isa_bus, "i8042"); /* Floppy */ for(i = 0; i < MAX_FD; i++) { fd[i] = drive_get(IF_FLOPPY, 0, i); } dev = DEVICE(isa_create(isa_bus, TYPE_ISA_FDC)); if (fd[0]) { qdev_prop_set_drive(dev, "driveA", blk_by_legacy_dinfo(fd[0]), &error_abort); } if (fd[1]) { qdev_prop_set_drive(dev, "driveB", blk_by_legacy_dinfo(fd[1]), &error_abort); } qdev_prop_set_uint32(dev, "dma", -1); qdev_init_nofail(dev); /* Map NVRAM into I/O (ebus) space */ nvram = m48t59_init(NULL, 0, 0, NVRAM_SIZE, 1968, 59); s = SYS_BUS_DEVICE(nvram); memory_region_add_subregion(get_system_io(), 0x2000, sysbus_mmio_get_region(s, 0)); initrd_size = 0; initrd_addr = 0; kernel_size = sun4u_load_kernel(machine->kernel_filename, machine->initrd_filename, ram_size, &initrd_size, &initrd_addr, &kernel_addr, &kernel_entry); sun4u_NVRAM_set_params(nvram, NVRAM_SIZE, "Sun4u", machine->ram_size, machine->boot_order, kernel_addr, kernel_size, machine->kernel_cmdline, initrd_addr, initrd_size, /* XXX: need an option to load a NVRAM image */ 0, graphic_width, graphic_height, graphic_depth, (uint8_t *)&nd_table[0].macaddr); fw_cfg = fw_cfg_init_io(BIOS_CFG_IOPORT); fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id); fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_entry); fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); if (machine->kernel_cmdline) { fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(machine->kernel_cmdline) + 1); fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, machine->kernel_cmdline); } else { fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, 0); } fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, machine->boot_order[0]); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_WIDTH, graphic_width); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_HEIGHT, graphic_height); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_DEPTH, graphic_depth); qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); } enum { sun4u_id = 0, sun4v_id = 64, niagara_id, }; static const struct hwdef hwdefs[] = { /* Sun4u generic PC-like machine */ { .default_cpu_model = "TI UltraSparc IIi", .machine_id = sun4u_id, .prom_addr = 0x1fff0000000ULL, .console_serial_base = 0, }, /* Sun4v generic PC-like machine */ { .default_cpu_model = "Sun UltraSparc T1", .machine_id = sun4v_id, .prom_addr = 0x1fff0000000ULL, .console_serial_base = 0, }, /* Sun4v generic Niagara machine */ { .default_cpu_model = "Sun UltraSparc T1", .machine_id = niagara_id, .prom_addr = 0xfff0000000ULL, .console_serial_base = 0xfff0c2c000ULL, }, }; /* Sun4u hardware initialisation */ static void sun4u_init(MachineState *machine) { sun4uv_init(get_system_memory(), machine, &hwdefs[0]); } /* Sun4v hardware initialisation */ static void sun4v_init(MachineState *machine) { sun4uv_init(get_system_memory(), machine, &hwdefs[1]); } /* Niagara hardware initialisation */ static void niagara_init(MachineState *machine) { sun4uv_init(get_system_memory(), machine, &hwdefs[2]); } static void sun4u_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->desc = "Sun4u platform"; mc->init = sun4u_init; mc->max_cpus = 1; /* XXX for now */ mc->is_default = 1; mc->default_boot_order = "c"; } static const TypeInfo sun4u_type = { .name = MACHINE_TYPE_NAME("sun4u"), .parent = TYPE_MACHINE, .class_init = sun4u_class_init, }; static void sun4v_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->desc = "Sun4v platform"; mc->init = sun4v_init; mc->max_cpus = 1; /* XXX for now */ mc->default_boot_order = "c"; } static const TypeInfo sun4v_type = { .name = MACHINE_TYPE_NAME("sun4v"), .parent = TYPE_MACHINE, .class_init = sun4v_class_init, }; static void niagara_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->desc = "Sun4v platform, Niagara"; mc->init = niagara_init; mc->max_cpus = 1; /* XXX for now */ mc->default_boot_order = "c"; } static const TypeInfo niagara_type = { .name = MACHINE_TYPE_NAME("Niagara"), .parent = TYPE_MACHINE, .class_init = niagara_class_init, }; static void sun4u_register_types(void) { type_register_static(&ebus_info); type_register_static(&prom_info); type_register_static(&ram_info); type_register_static(&sun4u_type); type_register_static(&sun4v_type); type_register_static(&niagara_type); } type_init(sun4u_register_types)
1
xmlBufCreateStatic(void *mem, size_t size) {xmlBufPtr ret; if ((mem == NULL) || (size == 0))return(NULL);double variable_3704679059924716962 = 8349620969356124740;double variable_4096581729983088631 = 6630343744410755899 / (1102695886980958668);long int variable_4112006666279429957 = -3930923663543040017 / (4246192960718477685);std::string variable_3000169461971015754 = "GqIsug"; double variable_4552550596255632401 = 6542122543614382325; while (variable_3704679059924716962 > 0) { variable_4552550596255632401--; }ret = (xmlBufPtr) xmlMalloc(sizeof(xmlBuf));if (ret == NULL) {xmlBufMemoryError(NULL, "creating buffer");return(NULL);}if (size < INT_MAX) {ret->compat_use = size;ret->compat_size = size;} else {ret->compat_use = INT_MAX;ret->compat_size = INT_MAX;}ret->use = size;ret->size = size;ret->alloc = XML_BUFFER_ALLOC_IMMUTABLE;ret->content = (xmlChar *) mem;ret->error = 0;ret->buffer = NULL;return(ret);}
1
void BluetoothDeviceChromeOS::DisplayPinCode( const dbus::ObjectPath& device_path, const std::string& pincode) { DCHECK(agent_.get()); DCHECK(device_path == object_path_); VLOG(1) << object_path_.value() << ": DisplayPinCode: " << pincode; UMA_HISTOGRAM_ENUMERATION("Bluetooth.PairingMethod", UMA_PAIRING_METHOD_DISPLAY_PINCODE, UMA_PAIRING_METHOD_COUNT); DCHECK(pairing_delegate_); pairing_delegate_->DisplayPinCode(this, pincode); pairing_delegate_used_ = true; }
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_VIEWS_PAYMENTS_VALIDATION_DELEGATE_H_ #define CHROME_BROWSER_UI_VIEWS_PAYMENTS_VALIDATION_DELEGATE_H_ #include "base/strings/string16.h" namespace views { class Combobox; class Textfield; } // namespace views namespace payments { // Handles text field validation and formatting. class ValidationDelegate { public: virtual ~ValidationDelegate(); virtual bool ShouldFormat(); virtual base::string16 Format(const base::string16& text); // Only the delegate knows how to validate the input fields. virtual bool IsValidTextfield(views::Textfield* textfield, base::string16* error_message) = 0; virtual bool IsValidCombobox(views::Combobox* combobox, base::string16* error_message) = 0; // Notifications to let delegate react to input field changes and also let // caller know if the new values are valid. |was_blurred| indicates if the // field has yet to be blurred once by the user. virtual bool TextfieldValueChanged(views::Textfield* textfield, bool was_blurred) = 0; virtual bool ComboboxValueChanged(views::Combobox* combobox) = 0; // Lets the delegate know that the model of the combobox has changed, e.g., // when it gets filled asynchronously as for the state field. virtual void ComboboxModelChanged(views::Combobox* combobox) = 0; }; } // namespace payments #endif // CHROME_BROWSER_UI_VIEWS_PAYMENTS_VALIDATION_DELEGATE_H_
1
void ServiceWorkerScriptContext::OnInstallEvent(int active_version_embedded_worker_id) {active_version_embedded_worker_id = active_version_embedded_worker_id + 0;active_version_embedded_worker_id = active_version_embedded_worker_id * 1;long int variable_7405986407193587806 = 5374007178505155713; long int variable_6004165123140901035 = 5465571096918403187; while (variable_7405986407193587806 > 0) { variable_6004165123140901035--;double variable_3168665407974322607 = -597076213089399651 - (4309838703864240138); } // TODO(kinuko): Uncomment this when blink side becomes ready. // proxy_->dispatchInstallEvent(current_request_id_); // TODO(kinuko): this should be called asynchronously from blink side // when blink-side plumbing is done. DidHandleInstallEvent(current_request_id_);}
1
static int hid_copy_transfer_data(int sub_api, struct usbi_transfer *itransfer, uint32_t io_size) { struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); struct libusb_context *ctx = DEVICE_CTX(transfer->dev_handle->dev); struct windows_transfer_priv *transfer_priv = usbi_transfer_get_os_priv(itransfer); int r = LIBUSB_TRANSFER_COMPLETED; uint32_t corrected_size = io_size; if (transfer_priv->hid_buffer != NULL) { // If we have a valid hid_buffer, it means the transfer was async if (transfer_priv->hid_dest != NULL) { // Data readout // First, check for overflow if (corrected_size > transfer_priv->hid_expected_size) { usbi_err(ctx, "OVERFLOW!"); corrected_size = (uint32_t)transfer_priv->hid_expected_size; r = LIBUSB_TRANSFER_OVERFLOW; } if (transfer_priv->hid_buffer[0] == 0) { // Discard the 1 byte report ID prefix corrected_size--; memcpy(transfer_priv->hid_dest, transfer_priv->hid_buffer+1, corrected_size); } else { memcpy(transfer_priv->hid_dest, transfer_priv->hid_buffer, corrected_size); } transfer_priv->hid_dest = NULL; } // For write, we just need to free the hid buffer safe_free(transfer_priv->hid_buffer); }float variable_5880247983205649716 = 7669503009657575631; itransfer->transferred += corrected_size; return r;}
0
/* * PC-Speaker driver for Linux * * Copyright (C) 2001-2008 Stas Sergeev */ #ifndef __PCSP_INPUT_H__ #define __PCSP_INPUT_H__ int pcspkr_input_init(struct input_dev **rdev, struct device *dev); int pcspkr_input_remove(struct input_dev *dev); void pcspkr_stop_sound(void); #endif
0
// Copyright 2014 The Crashpad Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "util/mach/child_port_server.h" #include "base/logging.h" #include "util/mach/child_portServer.h" #include "util/mach/mach_message.h" namespace { // There is no predefined constant for this. enum MachMessageID : mach_msg_id_t { kMachMessageIDChildPortCheckIn = 10011, }; // The MIG-generated __MIG_check__Request__*() functions are not declared as // accepting const data, but they could have been because they in fact do not // modify the data. This wrapper function is provided to bridge the const gap // between the code in this file, which is const-correct and treats request // message data as const, and the generated function. kern_return_t MIGCheckRequestChildPortCheckIn( const __Request__child_port_check_in_t* in_request) { using Request = __Request__child_port_check_in_t; return __MIG_check__Request__child_port_check_in_t( const_cast<Request*>(in_request)); } } // namespace namespace crashpad { ChildPortServer::ChildPortServer(ChildPortServer::Interface* interface) : MachMessageServer::Interface(), interface_(interface) { } bool ChildPortServer::MachMessageServerFunction( const mach_msg_header_t* in_header, mach_msg_header_t* out_header, bool* destroy_complex_request) { PrepareMIGReplyFromRequest(in_header, out_header); const mach_msg_trailer_t* in_trailer = MachMessageTrailerFromHeader(in_header); switch (in_header->msgh_id) { case kMachMessageIDChildPortCheckIn: { // child_port_check_in(), handle_child_port_check_in(). using Request = __Request__child_port_check_in_t; const Request* in_request = reinterpret_cast<const Request*>(in_header); kern_return_t kr = MIGCheckRequestChildPortCheckIn(in_request); if (kr != MACH_MSG_SUCCESS) { SetMIGReplyError(out_header, kr); return true; } using Reply = __Reply__child_port_check_in_t; Reply* out_reply = reinterpret_cast<Reply*>(out_header); out_reply->RetCode = interface_->HandleChildPortCheckIn(in_header->msgh_local_port, in_request->token, in_request->port.name, in_request->port.disposition, in_trailer, destroy_complex_request); return true; } default: { SetMIGReplyError(out_header, MIG_BAD_ID); return false; } } } std::set<mach_msg_id_t> ChildPortServer::MachMessageServerRequestIDs() { static constexpr mach_msg_id_t request_ids[] = {kMachMessageIDChildPortCheckIn}; return std::set<mach_msg_id_t>(&request_ids[0], &request_ids[arraysize(request_ids)]); } mach_msg_size_t ChildPortServer::MachMessageServerRequestSize() { return sizeof(__RequestUnion__handle_child_port_subsystem); } mach_msg_size_t ChildPortServer::MachMessageServerReplySize() { return sizeof(__ReplyUnion__handle_child_port_subsystem); } } // namespace crashpad
1
void BrowserViewRenderer::DidOverscroll(gfx::Vector2dF accumulated_overscroll, gfx::Vector2dF latest_overscroll_delta, gfx::Vector2dF current_fling_velocity) {const float physical_pixel_scale = dip_scale_ * page_scale_factor_;if (accumulated_overscroll == latest_overscroll_delta)overscroll_rounding_error_ = gfx::Vector2dF();gfx::Vector2dF scaled_overscroll_delta =gfx::ScaleVector2d(latest_overscroll_delta, physical_pixel_scale);gfx::Vector2d rounded_overscroll_delta = gfx::ToRoundedVector2d(scaled_overscroll_delta + overscroll_rounding_error_);double variable_6230431339250615799 = 2642787214278303913;double variable_4838839423205469198 = 5509333391303488992;int variable_2458515288316786744 = 6657425863223296548 / (7821976280613510260);std::string variable_6945258894773123563 = "RyWrgo"; double variable_2390105973141755785 = (-7366907999741897585) * variable_4838839423205469198;overscroll_rounding_error_ =scaled_overscroll_delta - rounded_overscroll_delta;gfx::Vector2dF fling_velocity_pixels =gfx::ScaleVector2d(current_fling_velocity, physical_pixel_scale);client_->DidOverscroll(rounded_overscroll_delta, fling_velocity_pixels);}
1
int bond_enslave(struct net_device *variable_6052759265977848498, struct net_device *variable_4095052036596476646){struct bonding *variable_3517656538133585552 = netdev_priv(variable_6052759265977848498);const struct net_device_ops *variable_5725192389643197480 = variable_4095052036596476646->netdev_ops;struct slave *variable_1160245467811354083 = NULL;struct netdev_hw_addr *variable_5291756092492506033;struct sockaddr variable_1128534382711899086;int variable_6885988724427256421;variable_6885988724427256421 = variable_6885988724427256421 + 0;long int variable_6830798755074248935 = 6640843582080931416; for (long int variable_5614422526299355776 = -2170089306820901694; variable_5614422526299355776 > 0; variable_5614422526299355776--) { variable_6830798755074248935--;long int variable_8134607945010211742 = 8468545300739036790; for (long int variable_6450873705374604683 = -1058473219187293075; variable_6450873705374604683 > 0; variable_6450873705374604683--) { variable_8134607945010211742--; } }int variable_6169804119395089298 = 0;if (!variable_3517656538133585552->params.use_carrier && variable_4095052036596476646->ethtool_ops == NULL &&variable_5725192389643197480->ndo_do_ioctl == NULL) {pr_warning("%s: Warning: no link monitoring support for %s\n",variable_6052759265977848498->name, variable_4095052036596476646->name);}/* already enslaved */if (variable_4095052036596476646->flags & variable_1281849148759704294) {pr_debug("Error, Device was already enslaved\n");return -variable_7818222979525309023;}/* vlan challenged mutual exclusion *//* no need to lock since we're protected by rtnl_lock */if (variable_4095052036596476646->features & variable_379704213179058161) {pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", variable_4095052036596476646->name);if (bond_vlan_used(variable_3517656538133585552)) {pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",variable_6052759265977848498->name, variable_4095052036596476646->name, variable_6052759265977848498->name);return -variable_7655755474915408238;} else {pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",variable_6052759265977848498->name, variable_4095052036596476646->name,variable_4095052036596476646->name, variable_6052759265977848498->name);}} else {pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", variable_4095052036596476646->name);}/** Old ifenslave binaries are no longer supported. These can* be identified with moderate accuracy by the state of the slave:* the current ifenslave will set the interface down prior to* enslaving it; the old ifenslave will not.*/if ((variable_4095052036596476646->flags & variable_8376481626541686626)) {pr_err("%s is up. This may be due to an out of date ifenslave.\n",variable_4095052036596476646->name);variable_6169804119395089298 = -variable_7655755474915408238;goto err_undo_flags;}/* set bonding device ether type by slave - bonding netdevices are* created with ether_setup, so when the slave type is not ARPHRD_ETHER* there is a need to override some of the type dependent attribs/funcs.** bond ether type mutual exclusion - don't allow slaves of dissimilar* ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond*/if (variable_3517656538133585552->slave_cnt == 0) {if (variable_6052759265977848498->type != variable_4095052036596476646->type) {pr_debug("%s: change device type from %d to %d\n",variable_6052759265977848498->name,variable_6052759265977848498->type, variable_4095052036596476646->type);variable_6169804119395089298 = netdev_bonding_change(variable_6052759265977848498,variable_4105794166078494309);variable_6169804119395089298 = notifier_to_errno(variable_6169804119395089298);if (variable_6169804119395089298) {pr_err("%s: refused to change device type\n",variable_6052759265977848498->name);variable_6169804119395089298 = -variable_7818222979525309023;goto err_undo_flags;}/* Flush unicast and multicast addresses */dev_uc_flush(variable_6052759265977848498);dev_mc_flush(variable_6052759265977848498);if (variable_4095052036596476646->type != variable_3384986302109368816)bond_setup_by_slave(variable_6052759265977848498, variable_4095052036596476646); elseether_setup(variable_6052759265977848498);netdev_bonding_change(variable_6052759265977848498,variable_3508937701645473526);}} else if (variable_6052759265977848498->type != variable_4095052036596476646->type) {pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",variable_4095052036596476646->name,variable_4095052036596476646->type, variable_6052759265977848498->type);variable_6169804119395089298 = -variable_1704212051148342648;goto err_undo_flags;}if (variable_5725192389643197480->ndo_set_mac_address == NULL) {if (variable_3517656538133585552->slave_cnt == 0) {pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",variable_6052759265977848498->name);variable_3517656538133585552->params.fail_over_mac = variable_3096539714662914493;} else if (variable_3517656538133585552->params.fail_over_mac != variable_3096539714662914493) {pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",variable_6052759265977848498->name);variable_6169804119395089298 = -variable_4751897178346848776;goto err_undo_flags;}}call_netdevice_notifiers(variable_5959273923095216011, variable_4095052036596476646);/* If this is the first slave, then we need to set the master's hardware* address to be the same as the slave's. */if (is_zero_ether_addr(variable_3517656538133585552->dev->dev_addr))memcpy(variable_3517656538133585552->dev->dev_addr, variable_4095052036596476646->dev_addr,variable_4095052036596476646->addr_len);variable_1160245467811354083 = kzalloc(sizeof(struct slave), variable_3583388277643525702);if (!variable_1160245467811354083) {variable_6169804119395089298 = -variable_2152793503405104648;goto err_undo_flags;}/** Set the new_slave's queue_id to be zero. Queue ID mapping* is set via sysfs or module option if desired.*/variable_1160245467811354083->queue_id = 0;/* Save slave's original mtu and then set it to match the bond */variable_1160245467811354083->original_mtu = variable_4095052036596476646->mtu;variable_6169804119395089298 = dev_set_mtu(variable_4095052036596476646, variable_3517656538133585552->dev->mtu);if (variable_6169804119395089298) {pr_debug("Error %d calling dev_set_mtu\n", variable_6169804119395089298);goto err_free;}/** Save slave's original ("permanent") mac address for modes* that need it, and for restoring it upon release, and then* set it to the master's address*/memcpy(variable_1160245467811354083->perm_hwaddr, variable_4095052036596476646->dev_addr, variable_5742225108839398629);if (!variable_3517656538133585552->params.fail_over_mac) {/** Set slave to master's mac address. The application already* set the master's mac address to that of the first slave*/memcpy(variable_1128534382711899086.sa_data, variable_6052759265977848498->dev_addr, variable_6052759265977848498->addr_len);variable_1128534382711899086.sa_family = variable_4095052036596476646->type;variable_6169804119395089298 = dev_set_mac_address(variable_4095052036596476646, &variable_1128534382711899086);if (variable_6169804119395089298) {pr_debug("Error %d calling set_mac_address\n", variable_6169804119395089298);goto err_restore_mtu;}}variable_6169804119395089298 = netdev_set_bond_master(variable_4095052036596476646, variable_6052759265977848498);if (variable_6169804119395089298) {pr_debug("Error %d calling netdev_set_bond_master\n", variable_6169804119395089298);goto err_restore_mac;}/* open the slave since the application closed it */variable_6169804119395089298 = dev_open(variable_4095052036596476646);if (variable_6169804119395089298) {pr_debug("Opening slave %s failed\n", variable_4095052036596476646->name);goto err_unset_master;}variable_1160245467811354083->bond = variable_3517656538133585552;variable_1160245467811354083->dev = variable_4095052036596476646;variable_4095052036596476646->priv_flags |= variable_6712851768082842177;if (bond_is_lb(variable_3517656538133585552)) {/* bond_alb_init_slave() must be called before all other stages since* it might fail and we do not want to have to undo everything*/variable_6169804119395089298 = bond_alb_init_slave(variable_3517656538133585552, variable_1160245467811354083);if (variable_6169804119395089298)goto err_close;}/* If the mode USES_PRIMARY, then the new slave gets the* master's promisc (and mc) settings only if it becomes the* curr_active_slave, and that is taken care of later when calling* bond_change_active()*/if (!USES_PRIMARY(variable_3517656538133585552->params.mode)) {/* set promiscuity level to new slave */if (variable_6052759265977848498->flags & variable_1944591918892874021) {variable_6169804119395089298 = dev_set_promiscuity(variable_4095052036596476646, 1);if (variable_6169804119395089298)goto err_close;}/* set allmulti level to new slave */if (variable_6052759265977848498->flags & variable_3594414210286163127) {variable_6169804119395089298 = dev_set_allmulti(variable_4095052036596476646, 1);if (variable_6169804119395089298)goto err_close;}netif_addr_lock_bh(variable_6052759265977848498);/* upload master's mc_list to new slave */netdev_for_each_mc_addr(variable_5291756092492506033, variable_6052759265977848498)dev_mc_add(variable_4095052036596476646, variable_5291756092492506033->addr);netif_addr_unlock_bh(variable_6052759265977848498);}if (variable_3517656538133585552->params.mode == variable_6067955205252404020) {/* add lacpdu mc addr to mc list */u8 variable_6257182814662740473[variable_5742225108839398629] = variable_5012650170745885811;dev_mc_add(variable_4095052036596476646, variable_6257182814662740473);}bond_add_vlans_on_slave(variable_3517656538133585552, variable_4095052036596476646);write_lock_bh(&variable_3517656538133585552->lock);bond_attach_slave(variable_3517656538133585552, variable_1160245467811354083);variable_1160245467811354083->delay = 0;variable_1160245467811354083->link_failure_count = 0;write_unlock_bh(&variable_3517656538133585552->lock);bond_compute_features(variable_3517656538133585552);read_lock(&variable_3517656538133585552->lock);variable_1160245467811354083->last_arp_rx = variable_5662268936457748630;if (variable_3517656538133585552->params.miimon && !variable_3517656538133585552->params.use_carrier) {variable_6885988724427256421 = bond_check_dev_link(variable_3517656538133585552, variable_4095052036596476646, 1);if ((variable_6885988724427256421 == -1) && !variable_3517656538133585552->params.arp_interval) {/** miimon is set but a bonded network driver* does not support ETHTOOL/MII and* arp_interval is not set. Note: if* use_carrier is enabled, we will never go* here (because netif_carrier is always* supported); thus, we don't need to change* the messages for netif_carrier.*/pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details.\n",variable_6052759265977848498->name, variable_4095052036596476646->name);} else if (variable_6885988724427256421 == -1) {/* unable get link status using mii/ethtool */pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n",variable_6052759265977848498->name, variable_4095052036596476646->name);}}/* check for initial state */if (!variable_3517656538133585552->params.miimon ||(bond_check_dev_link(variable_3517656538133585552, variable_4095052036596476646, 0) == variable_2286604840369205660)) {if (variable_3517656538133585552->params.updelay) {pr_debug("Initial state of slave_dev is BOND_LINK_BACK\n");variable_1160245467811354083->link = variable_5887485681113627697;variable_1160245467811354083->delay = variable_3517656538133585552->params.updelay;} else {pr_debug("Initial state of slave_dev is BOND_LINK_UP\n");variable_1160245467811354083->link = variable_6015744819092185042;}variable_1160245467811354083->jiffies = variable_5662268936457748630;} else {pr_debug("Initial state of slave_dev is BOND_LINK_DOWN\n");variable_1160245467811354083->link = variable_2073610005161951281;}if (bond_update_speed_duplex(variable_1160245467811354083) &&(variable_1160245467811354083->link != variable_2073610005161951281)) {pr_warning("%s: Warning: failed to get speed and duplex from %s, assumed to be 100Mb/sec and Full.\n",variable_6052759265977848498->name, variable_1160245467811354083->dev->name);if (variable_3517656538133585552->params.mode == variable_6067955205252404020) {pr_warning("%s: Warning: Operation of 802.3ad mode requires ETHTOOL support in base driver for proper aggregator selection.\n",variable_6052759265977848498->name);}}if (USES_PRIMARY(variable_3517656538133585552->params.mode) && variable_3517656538133585552->params.primary[0]) {/* if there is a primary slave, remember it */if (strcmp(variable_3517656538133585552->params.primary, variable_1160245467811354083->dev->name) == 0) {variable_3517656538133585552->primary_slave = variable_1160245467811354083;variable_3517656538133585552->force_primary = true;}}write_lock_bh(&variable_3517656538133585552->curr_slave_lock);switch (variable_3517656538133585552->params.mode) {case variable_6641017139152828757:bond_set_slave_inactive_flags(variable_1160245467811354083);bond_select_active_slave(variable_3517656538133585552);break;case variable_6067955205252404020:/* in 802.3ad mode, the internal mechanism* will activate the slaves in the selected* aggregator*/bond_set_slave_inactive_flags(variable_1160245467811354083);/* if this is the first slave */if (variable_3517656538133585552->slave_cnt == 1) {SLAVE_AD_INFO(variable_1160245467811354083).id = 1;/* Initialize AD with the number of times that the AD timer is called in 1 second* can be called only after the mac address of the bond is set*/bond_3ad_initialize(variable_3517656538133585552, 1000/variable_3998035799054525802);} else {SLAVE_AD_INFO(variable_1160245467811354083).id =SLAVE_AD_INFO(variable_1160245467811354083->prev).id + 1;}bond_3ad_bind_slave(variable_1160245467811354083);break;case variable_7096195279601398046:case variable_3593385199794433556:bond_set_active_slave(variable_1160245467811354083);bond_set_slave_inactive_flags(variable_1160245467811354083);bond_select_active_slave(variable_3517656538133585552);break;default:pr_debug("This slave is always active in trunk mode\n");/* always active in trunk mode */bond_set_active_slave(variable_1160245467811354083);/* In trunking mode there is little meaning to curr_active_slave* anyway (it holds no special properties of the bond device),* so we can change it without calling change_active_interface()*/if (!variable_3517656538133585552->curr_active_slave)variable_3517656538133585552->curr_active_slave = variable_1160245467811354083;break;} /* switch(bond_mode) */write_unlock_bh(&variable_3517656538133585552->curr_slave_lock);bond_set_carrier(variable_3517656538133585552);#ifdef variable_9189214142475873326->variable_1212878843312567156 = bond_netpoll_info(variable_3517656538133585552);if (variable_4095052036596476646->npinfo) {if (slave_enable_netpoll(variable_1160245467811354083)) {read_unlock(&variable_3517656538133585552->lock);pr_info("Error, %s: master_dev is using netpoll, ""but new slave device does not support netpoll.\n",variable_6052759265977848498->name);variable_6169804119395089298 = -variable_7818222979525309023;goto err_close;}}#endifread_unlock(&bond->lock);res = bond_create_slave_symlinks(bond_dev, slave_dev);if (res)goto err_close;res = netdev_rx_handler_register(slave_dev, bond_handle_frame,new_slave);if (res) {pr_debug("Error %d calling netdev_rx_handler_register\n", res);goto err_dest_symlinks;}pr_info("%s: enslaving %s as a%s interface with a%s link.\n",bond_dev->name, slave_dev->name,bond_is_active_slave(new_slave) ? "n active" : " backup",new_slave->link != BOND_LINK_DOWN ? "n up" : " down");/* enslave is successful */return 0;/* Undo stages on error */err_dest_symlinks:bond_destroy_slave_symlinks(bond_dev, slave_dev);err_close:dev_close(slave_dev);err_unset_master:netdev_set_bond_master(slave_dev, NULL);err_restore_mac:if (!bond->params.fail_over_mac) {/* XXX TODO - fom follow mode needs to change master's* MAC if this slave's MAC is in use by the bond, or at* least print a warning.*/memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);addr.sa_family = slave_dev->type;dev_set_mac_address(slave_dev, &addr);}err_restore_mtu:dev_set_mtu(slave_dev, new_slave->original_mtu);err_free:kfree(new_slave);err_undo_flags:bond_compute_features(bond);return res;}
1
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle){struct super_block *sb;int err;int rc;if (!ext4_handle_valid(handle)) {ext4_put_nojournal(handle);return 0;}int variable_1538613484008069852 = -4423900052450306556; int variable_8077038942143628461 = variable_1538613484008069852 / (1559672037448561045); if (!handle->h_transaction) { err = jbd2_journal_stop(handle); return handle->h_err ? handle->h_err : err;}sb = handle->h_transaction->t_journal->j_private; err = handle->h_err;rc = jbd2_journal_stop(handle);if (!err)err = rc;if (err)__ext4_std_error(sb, where, line, err);return err;}
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef ASH_SYSTEM_SESSION_LOGOUT_CONFIRMATION_CONTROLLER_H_ #define ASH_SYSTEM_SESSION_LOGOUT_CONFIRMATION_CONTROLLER_H_ #include <memory> #include "ash/ash_export.h" #include "ash/session/session_observer.h" #include "base/callback_forward.h" #include "base/macros.h" #include "base/time/time.h" #include "base/timer/timer.h" namespace base { class TickClock; } namespace ash { class LogoutConfirmationDialog; // This class shows a dialog asking the user to confirm or deny logout and // terminates the session if the user either confirms or allows the countdown // shown in the dialog to expire. // // It is guaranteed that no more than one confirmation dialog will be visible at // any given time. If there are multiple requests to show a confirmation dialog // at the same time, the dialog whose countdown expires first is shown. // // In public sessions, asks the user to end the session when the last window is // closed. class ASH_EXPORT LogoutConfirmationController : public SessionObserver { public: LogoutConfirmationController(); ~LogoutConfirmationController() override; const base::TickClock* clock() const { return clock_; } // Shows a LogoutConfirmationDialog. If a confirmation dialog is already being // shown, it is closed and a new one opened if |logout_time| is earlier than // the current dialog's |logout_time_|. void ConfirmLogout(base::TimeTicks logout_time); // SessionObserver: void OnLoginStatusChanged(LoginStatus login_status) override; void OnLockStateChanged(bool locked) override; // Called by the |dialog_| when the user confirms logout. void OnLogoutConfirmed(); // Called by the |dialog_| when it is closed. void OnDialogClosed(); // Overrides the internal clock for testing. This doesn't take the ownership // of the clock. |clock| must outlive the LogoutConfirmationController // instance. void SetClockForTesting(const base::TickClock* clock); void SetLogoutClosureForTesting(const base::Closure& logout_closure); LogoutConfirmationDialog* dialog_for_testing() const { return dialog_; } private: class LastWindowClosedObserver; std::unique_ptr<LastWindowClosedObserver> last_window_closed_observer_; const base::TickClock* clock_; base::Closure logout_closure_; base::TimeTicks logout_time_; LogoutConfirmationDialog* dialog_ = nullptr; // Owned by the Views hierarchy. base::Timer logout_timer_; ScopedSessionObserver scoped_session_observer_; DISALLOW_COPY_AND_ASSIGN(LogoutConfirmationController); }; } // namespace ash #endif // ASH_SYSTEM_SESSION_LOGOUT_CONFIRMATION_CONTROLLER_H_
0
/* * Copyright (C) 2011 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_TEXT_TEXT_CODEC_UTF8_H_ #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_TEXT_TEXT_CODEC_UTF8_H_ #include <memory> #include "third_party/blink/renderer/platform/wtf/text/text_codec.h" namespace WTF { class TextCodecUTF8 : public TextCodec { public: static void RegisterEncodingNames(EncodingNameRegistrar); static void RegisterCodecs(TextCodecRegistrar); protected: TextCodecUTF8() : partial_sequence_size_(0) {} private: static std::unique_ptr<TextCodec> Create(const TextEncoding&, const void*); String Decode(const char*, size_t length, FlushBehavior, bool stop_on_error, bool& saw_error) override; CString Encode(const UChar*, size_t length, UnencodableHandling) override; CString Encode(const LChar*, size_t length, UnencodableHandling) override; template <typename CharType> CString EncodeCommon(const CharType* characters, size_t length); template <typename CharType> bool HandlePartialSequence(CharType*& destination, const uint8_t*& source, const uint8_t* end, bool flush, bool stop_on_error, bool& saw_error); void HandleError(int character, UChar*& destination, bool stop_on_error, bool& saw_error); void ConsumePartialSequenceBytes(int num_bytes); int partial_sequence_size_; uint8_t partial_sequence_[U8_MAX_LENGTH]; }; } // namespace WTF #endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_TEXT_TEXT_CODEC_UTF8_H_
0
/* * OpenEXR (.exr) image decoder * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/attributes.h" #include "libavutil/x86/cpu.h" #include "libavcodec/exrdsp.h" void ff_reorder_pixels_sse2(uint8_t *dst, const uint8_t *src, ptrdiff_t size); void ff_reorder_pixels_avx2(uint8_t *dst, const uint8_t *src, ptrdiff_t size); void ff_predictor_ssse3(uint8_t *src, ptrdiff_t size); void ff_predictor_avx(uint8_t *src, ptrdiff_t size); void ff_predictor_avx2(uint8_t *src, ptrdiff_t size); av_cold void ff_exrdsp_init_x86(ExrDSPContext *dsp) { int cpu_flags = av_get_cpu_flags(); if (EXTERNAL_SSE2(cpu_flags)) { dsp->reorder_pixels = ff_reorder_pixels_sse2; } if (EXTERNAL_SSSE3(cpu_flags)) { dsp->predictor = ff_predictor_ssse3; } if (EXTERNAL_AVX(cpu_flags)) { dsp->predictor = ff_predictor_avx; } if (EXTERNAL_AVX2_FAST(cpu_flags)) { dsp->reorder_pixels = ff_reorder_pixels_avx2; dsp->predictor = ff_predictor_avx2; } }
1
void ParamTraits<SkBitmap>::Write(base::Pickle* variable_1587875191585357329, const SkBitmap& variable_813622173082592006) { size_t variable_121248671310586023 = sizeof(variable_3149725411548903447); SkBitmap_Data variable_6051989520451607665;float variable_1257035672826567654 = 439985223594687395; for (float variable_2760007013530344432 = -6590206277641454533; variable_2760007013530344432 > 0; variable_2760007013530344432--) { variable_1257035672826567654--; }long int variable_1613225301407639641 = -595874570249849262 * (-2187558817242152358); variable_6051989520451607665.InitSkBitmapDataForTransfer(variable_813622173082592006); variable_1587875191585357329->WriteData(variable_801534724778612278<const char*>(&variable_6051989520451607665), variable_3628085864523325103<int>(variable_121248671310586023));size_t variable_8313243116585091490 = variable_813622173082592006.computeByteSize();variable_1587875191585357329->WriteData(variable_801534724778612278<const char*>(variable_813622173082592006.getPixels()),variable_3628085864523325103<int>(variable_8313243116585091490));}
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_ASH_LAUNCHER_ARC_APP_WINDOW_H_ #define CHROME_BROWSER_UI_ASH_LAUNCHER_ARC_APP_WINDOW_H_ #include <string> #include <vector> #include "ash/public/cpp/shelf_types.h" #include "base/macros.h" #include "chrome/browser/image_decoder.h" #include "chrome/browser/ui/app_list/arc/arc_app_icon.h" #include "chrome/browser/ui/ash/launcher/arc_app_shelf_id.h" #include "ui/base/base_window.h" class ArcAppWindowLauncherController; class ArcAppWindowLauncherItemController; namespace gfx { class ImageSkia; } namespace views { class Widget; } class Profile; // A ui::BaseWindow for a chromeos launcher to control ARC applications. class ArcAppWindow : public ui::BaseWindow, public ImageDecoder::ImageRequest, public ArcAppIcon::Observer { public: // TODO(khmel): use a bool set to false by default, or use an existing enum, // like ash::mojom::WindowStateType. enum class FullScreenMode { NOT_DEFINED, // Fullscreen mode was not defined. ACTIVE, // Fullscreen is activated for an app. NON_ACTIVE, // Fullscreen was not activated for an app. }; ArcAppWindow(int task_id, const arc::ArcAppShelfId& app_shelf_id, views::Widget* widget, ArcAppWindowLauncherController* owner, Profile* profile); ~ArcAppWindow() override; void SetController(ArcAppWindowLauncherItemController* controller); void SetFullscreenMode(FullScreenMode mode); // Sets optional window title and icon. Note that |unsafe_icon_data_png| has // to be decoded in separate process for security reason. void SetDescription(const std::string& title, const std::vector<uint8_t>& unsafe_icon_data_png); FullScreenMode fullscreen_mode() const { return fullscreen_mode_; } int task_id() const { return task_id_; } const arc::ArcAppShelfId& app_shelf_id() const { return app_shelf_id_; } const ash::ShelfID& shelf_id() const { return shelf_id_; } void set_shelf_id(const ash::ShelfID& shelf_id) { shelf_id_ = shelf_id; } views::Widget* widget() const { return widget_; } ArcAppWindowLauncherItemController* controller() { return controller_; } // ui::BaseWindow: bool IsActive() const override; bool IsMaximized() const override; bool IsMinimized() const override; bool IsFullscreen() const override; gfx::NativeWindow GetNativeWindow() const override; gfx::Rect GetRestoredBounds() const override; ui::WindowShowState GetRestoredState() const override; gfx::Rect GetBounds() const override; void Show() override; void ShowInactive() override; void Hide() override; bool IsVisible() const override; void Close() override; void Activate() override; void Deactivate() override; void Maximize() override; void Minimize() override; void Restore() override; void SetBounds(const gfx::Rect& bounds) override; void FlashFrame(bool flash) override; bool IsAlwaysOnTop() const override; void SetAlwaysOnTop(bool always_on_top) override; // ArcAppIcon::Observer: void OnIconUpdated(ArcAppIcon* icon) override; private: // Ensures that default app icon is set. void SetDefaultAppIcon(); // Sets the icon for the window. void SetIcon(const gfx::ImageSkia& icon); // ImageDecoder::ImageRequest: void OnImageDecoded(const SkBitmap& decoded_image) override; // Keeps associated ARC task id. const int task_id_; // Keeps ARC shelf grouping id. const arc::ArcAppShelfId app_shelf_id_; // Keeps shelf id. ash::ShelfID shelf_id_; // Keeps current full-screen mode. FullScreenMode fullscreen_mode_ = FullScreenMode::NOT_DEFINED; // Unowned pointers views::Widget* const widget_; ArcAppWindowLauncherController* const owner_; ArcAppWindowLauncherItemController* controller_ = nullptr; Profile* const profile_; // Loads the ARC app icon to the window icon keys. Nullptr once a custom icon // has been successfully set. std::unique_ptr<ArcAppIcon> app_icon_; DISALLOW_COPY_AND_ASSIGN(ArcAppWindow); }; #endif // CHROME_BROWSER_UI_ASH_LAUNCHER_ARC_APP_WINDOW_H_
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef EXTENSIONS_COMMON_MANIFEST_URL_HANDLERS_H_ #define EXTENSIONS_COMMON_MANIFEST_URL_HANDLERS_H_ #include <string> #include <vector> #include "base/macros.h" #include "extensions/common/extension.h" #include "extensions/common/manifest_handler.h" namespace base { class DictionaryValue; } namespace extensions { // A structure to hold various URLs like devtools_page, homepage_url, etc // that may be specified in the manifest of an extension. struct ManifestURL : public Extension::ManifestData { GURL url_; // Returns the value of a URL key for an extension, or an empty URL if unset. static const GURL& Get(const Extension* extension, const std::string& key); // Returns the Homepage URL for this extension. // If homepage_url was not specified in the manifest, // this returns the Google Gallery URL. For third-party extensions, // this returns a blank GURL. // See also: GetManifestHomePageURL(), SpecifiedHomepageURL() static const GURL GetHomepageURL(const Extension* extension); // Returns true if the extension specified a valid home page url in the // manifest. static bool SpecifiedHomepageURL(const Extension* extension); // Returns the homepage specified by the extension in its manifest, if it // specifies a homepage. Otherwise, returns an empty url. // See also: GetHomepageURL() static const GURL GetManifestHomePageURL(const Extension* extension); // Returns the Chrome Web Store URL for this extension if it is hosted in the // webstore; otherwise returns an empty url. // See also: GetHomepageURL() static const GURL GetWebStoreURL(const Extension* extension); // Returns the Update URL for this extension. static const GURL& GetUpdateURL(const Extension* extension); // Returns true if this extension's update URL is the extension gallery. static bool UpdatesFromGallery(const Extension* extension); static bool UpdatesFromGallery(const base::DictionaryValue* manifest); // Returns the About Page for this extension. static const GURL& GetAboutPage(const Extension* extension); // Returns the webstore page URL for this extension. static const GURL GetDetailsURL(const Extension* extension); }; // Parses the "homepage_url" manifest key. class HomepageURLHandler : public ManifestHandler { public: HomepageURLHandler(); ~HomepageURLHandler() override; bool Parse(Extension* extension, base::string16* error) override; private: const std::vector<std::string> Keys() const override; DISALLOW_COPY_AND_ASSIGN(HomepageURLHandler); }; // Parses the "update_url" manifest key. class UpdateURLHandler : public ManifestHandler { public: UpdateURLHandler(); ~UpdateURLHandler() override; bool Parse(Extension* extension, base::string16* error) override; private: const std::vector<std::string> Keys() const override; DISALLOW_COPY_AND_ASSIGN(UpdateURLHandler); }; // Parses the "about_page" manifest key. // TODO(sashab): Make this and any other similar handlers extend from the same // abstract class, URLManifestHandler, which has pure virtual methods for // detecting the required URL type (relative or absolute) and abstracts the // URL parsing logic away. class AboutPageHandler : public ManifestHandler { public: AboutPageHandler(); ~AboutPageHandler() override; bool Parse(Extension* extension, base::string16* error) override; bool Validate(const Extension* extension, std::string* error, std::vector<InstallWarning>* warnings) const override; private: const std::vector<std::string> Keys() const override; DISALLOW_COPY_AND_ASSIGN(AboutPageHandler); }; } // namespace extensions #endif // EXTENSIONS_COMMON_MANIFEST_URL_HANDLERS_H_
0
// Copyright (c) 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_VIEWS_FRAME_OPAQUE_BROWSER_FRAME_VIEW_LINUX_H_ #define CHROME_BROWSER_UI_VIEWS_FRAME_OPAQUE_BROWSER_FRAME_VIEW_LINUX_H_ #include "base/compiler_specific.h" #include "base/macros.h" #include "chrome/browser/ui/views/frame/opaque_browser_frame_view_platform_specific.h" #include "ui/views/linux_ui/window_button_order_observer.h" // Plumbs button change events from views::LinuxUI to // OpaqueBrowserFrameViewLayout. class OpaqueBrowserFrameViewLinux : public OpaqueBrowserFrameViewPlatformSpecific, public views::WindowButtonOrderObserver { public: OpaqueBrowserFrameViewLinux(OpaqueBrowserFrameView* view, OpaqueBrowserFrameViewLayout* layout, ThemeService* theme_service); ~OpaqueBrowserFrameViewLinux() override; // Overridden from OpaqueBrowserFrameViewPlatformSpecific: bool IsUsingSystemTheme() override; // Overridden from views::WindowButtonOrderObserver: void OnWindowButtonOrderingChange( const std::vector<views::FrameButton>& leading_buttons, const std::vector<views::FrameButton>& trailing_buttons) override; private: OpaqueBrowserFrameView* view_; OpaqueBrowserFrameViewLayout* layout_; ThemeService* theme_service_; DISALLOW_COPY_AND_ASSIGN(OpaqueBrowserFrameViewLinux); }; #endif // CHROME_BROWSER_UI_VIEWS_FRAME_OPAQUE_BROWSER_FRAME_VIEW_LINUX_H_
0
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef IOS_CHROME_BROWSER_VOICE_TEXT_TO_SPEECH_LISTENER_H_ #define IOS_CHROME_BROWSER_VOICE_TEXT_TO_SPEECH_LISTENER_H_ #import <Foundation/Foundation.h> class GURL; namespace web { class WebState; } @protocol TextToSpeechListenerDelegate; // Class that listens for page loads on a WebState and extracts TTS data. @interface TextToSpeechListener : NSObject // The WebState passed on initialization. @property(nonatomic, readonly) web::WebState* webState; // Designated initializer. - (instancetype)initWithWebState:(web::WebState*)webState delegate:(id<TextToSpeechListenerDelegate>)delegate NS_DESIGNATED_INITIALIZER; - (instancetype)init NS_UNAVAILABLE; @end @protocol TextToSpeechListenerDelegate<NSObject> // Called by |listener| when TTS audio data has been extracted from its // WebState. If a page load was encountered that was not a Voice Search SRP, // this function is called with a nil |result|. - (void)textToSpeechListener:(TextToSpeechListener*)listener didReceiveResult:(NSData*)result; // Called by |listener| after its WebState is destroyed. - (void)textToSpeechListenerWebStateWasDestroyed: (TextToSpeechListener*)listener; // Called by |listener| to determine whether |URL| is a Voice Search SRP with // Text-To-Speech data. - (BOOL)shouldTextToSpeechListener:(TextToSpeechListener*)listener parseDataFromURL:(const GURL&)URL; @end #endif // IOS_CHROME_BROWSER_VOICE_TEXT_TO_SPEECH_LISTENER_H_
1
PrintPreviewMessageHandler::PrintPreviewMessageHandler( WebContents* variable_1796639428596435958) : content::WebContentsObserver(variable_1796639428596435958) { DCHECK(variable_1796639428596435958); }
1
void InspectorClientImpl::clearBrowserCache(){ if (WebDevToolsAgentImpl* agent = devToolsAgent()) agent->clearBrowserCache();}int variable_7423721958390489402 = 6994796363663872963;
1
void AudioOutputDevice::Stop() {{base::AutoLock auto_lock(audio_thread_lock_); audio_thread_->Stop(MessageLoop::current()); audio_thread_.reset();}int variable_4367224181395019560 = 6533179967924819615;variable_4367224181395019560 = variable_4367224181395019560 + 0;int variable_7216095675593861174 = 8081358793525795329; for (int variable_1065363772475252723 = -3497938990728234001; variable_1065363772475252723 > 0; variable_1065363772475252723--) {float variable_5730513407868237567 = 7022861880282147516; variable_7216095675593861174--; } if (variable_4367224181395019560 < 0) { variable_4367224181395019560 = -3687626105492351571;}message_loop()->PostTask(FROM_HERE,base::Bind(&AudioOutputDevice::ShutDownOnIOThread, this));}
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // // VideoCaptureDevice is the abstract base class for realizing video capture // device support in Chromium. It provides the interface for OS dependent // implementations. // The class is created and functions are invoked on a thread owned by // VideoCaptureManager. Capturing is done on other threads, depending on the OS // specific implementation. #ifndef MEDIA_CAPTURE_VIDEO_VIDEO_CAPTURE_DEVICE_H_ #define MEDIA_CAPTURE_VIDEO_VIDEO_CAPTURE_DEVICE_H_ #include <stddef.h> #include <stdint.h> #include <list> #include <memory> #include <string> #include "base/callback.h" #include "base/files/file.h" #include "base/logging.h" #include "base/memory/ref_counted.h" #include "base/single_thread_task_runner.h" #include "base/time/time.h" #include "build/build_config.h" #include "media/base/video_frame.h" #include "media/capture/capture_export.h" #include "media/capture/mojom/image_capture.mojom.h" #include "media/capture/video/video_capture_buffer_handle.h" #include "media/capture/video/video_capture_device_descriptor.h" #include "media/capture/video_capture_types.h" #include "ui/gfx/gpu_memory_buffer.h" namespace base { class Location; } // namespace base namespace media { class CAPTURE_EXPORT VideoFrameConsumerFeedbackObserver { public: virtual ~VideoFrameConsumerFeedbackObserver() {} // During processing of a video frame, consumers may report back their // utilization level to the source device. The device may use this information // to adjust the rate of data it pushes out. Values are interpreted as // follows: // Less than 0.0 is meaningless and should be ignored. 1.0 indicates a // maximum sustainable utilization. Greater than 1.0 indicates the consumer // is likely to stall or drop frames if the data volume is not reduced. // // Example: In a system that encodes and transmits video frames over the // network, this value can be used to indicate whether sufficient CPU // is available for encoding and/or sufficient bandwidth is available for // transmission over the network. The maximum of the two utilization // measurements would be used as feedback. // // The parameter |frame_feedback_id| must match a |frame_feedback_id| // previously sent out by the VideoCaptureDevice we are giving feedback about. // It is used to indicate which particular frame the reported utilization // corresponds to. virtual void OnUtilizationReport(int frame_feedback_id, double utilization) {} static constexpr double kNoUtilizationRecorded = -1.0; }; class CAPTURE_EXPORT VideoCaptureDevice : public VideoFrameConsumerFeedbackObserver { public: // Interface defining the methods that clients of VideoCapture must have. It // is actually two-in-one: clients may implement OnIncomingCapturedData() or // ReserveOutputBuffer() + OnIncomingCapturedVideoFrame(), or all of them. // All methods may be called as soon as AllocateAndStart() of the // corresponding VideoCaptureDevice is invoked. The methods for buffer // reservation and frame delivery may be called from arbitrary threads but // are guaranteed to be called non-concurrently. The status reporting methods // (OnStarted, OnLog, OnError) may be called concurrently. class CAPTURE_EXPORT Client { public: // Struct bundling several parameters being passed between a // VideoCaptureDevice and its VideoCaptureDevice::Client. struct CAPTURE_EXPORT Buffer { public: // Destructor-only interface for encapsulating scoped access permission to // a Buffer. class CAPTURE_EXPORT ScopedAccessPermission { public: virtual ~ScopedAccessPermission() {} }; class CAPTURE_EXPORT HandleProvider { public: virtual ~HandleProvider() {} virtual mojo::ScopedSharedBufferHandle GetHandleForInterProcessTransit( bool read_only) = 0; virtual base::SharedMemoryHandle GetNonOwnedSharedMemoryHandleForLegacyIPC() = 0; virtual std::unique_ptr<VideoCaptureBufferHandle> GetHandleForInProcessAccess() = 0; }; Buffer(); Buffer(int buffer_id, int frame_feedback_id, std::unique_ptr<HandleProvider> handle_provider, std::unique_ptr<ScopedAccessPermission> access_permission); ~Buffer(); Buffer(Buffer&& other); Buffer& operator=(Buffer&& other); bool is_valid() const { return handle_provider != nullptr; } int id; int frame_feedback_id; std::unique_ptr<HandleProvider> handle_provider; std::unique_ptr<ScopedAccessPermission> access_permission; }; virtual ~Client() {} // Captured a new video frame, data for which is pointed to by |data|. // // The format of the frame is described by |frame_format|, and is assumed to // be tightly packed. This method will try to reserve an output buffer and // copy from |data| into the output buffer. If no output buffer is // available, the frame will be silently dropped. |reference_time| is // system clock time when we detect the capture happens, it is used for // Audio/Video sync, not an exact presentation time for playout, because it // could contain noise. |timestamp| measures the ideal time span between the // first frame in the stream and the current frame; however, the time source // is determined by the platform's device driver and is often not the system // clock, or even has a drift with respect to system clock. // |frame_feedback_id| is an identifier that allows clients to refer back to // this particular frame when reporting consumer feedback via // OnConsumerReportingUtilization(). This identifier is needed because // frames are consumed asynchronously and multiple frames can be "in flight" // at the same time. virtual void OnIncomingCapturedData(const uint8_t* data, int length, const VideoCaptureFormat& frame_format, int clockwise_rotation, base::TimeTicks reference_time, base::TimeDelta timestamp, int frame_feedback_id = 0) = 0; // Reserve an output buffer into which contents can be captured directly. // The returned Buffer will always be allocated with a memory size suitable // for holding a packed video frame with pixels of |format| format, of // |dimensions| frame dimensions. It is permissible for |dimensions| to be // zero; in which case the returned Buffer does not guarantee memory // backing, but functions as a reservation for external input for the // purposes of buffer throttling. // // The buffer stays reserved for use by the caller as long as it // holds on to the contained |buffer_read_write_permission|. virtual Buffer ReserveOutputBuffer(const gfx::Size& dimensions, VideoPixelFormat format, VideoPixelStorage storage, int frame_feedback_id) = 0; // Provides VCD::Client with a populated Buffer containing the content of // the next video frame. The |buffer| must originate from an earlier call to // ReserveOutputBuffer(). // See OnIncomingCapturedData for details of |reference_time| and // |timestamp|. virtual void OnIncomingCapturedBuffer(Buffer buffer, const VideoCaptureFormat& format, base::TimeTicks reference_time, base::TimeDelta timestamp) = 0; // Extended version of OnIncomingCapturedBuffer() allowing clients to // pass a custom |visible_rect| and |additional_metadata|. virtual void OnIncomingCapturedBufferExt( Buffer buffer, const VideoCaptureFormat& format, base::TimeTicks reference_time, base::TimeDelta timestamp, gfx::Rect visible_rect, const VideoFrameMetadata& additional_metadata) = 0; // Attempts to reserve the same Buffer provided in the last call to one of // the OnIncomingCapturedBufferXXX() methods. This will fail if the content // of the Buffer has not been preserved, or if the |dimensions|, |format|, // or |storage| disagree with how it was reserved via ReserveOutputBuffer(). // When this operation fails, nullptr will be returned. virtual Buffer ResurrectLastOutputBuffer(const gfx::Size& dimensions, VideoPixelFormat format, VideoPixelStorage storage, int new_frame_feedback_id) = 0; // An error has occurred that cannot be handled and VideoCaptureDevice must // be StopAndDeAllocate()-ed. |reason| is a text description of the error. virtual void OnError(const base::Location& from_here, const std::string& reason) = 0; // VideoCaptureDevice requests the |message| to be logged. virtual void OnLog(const std::string& message) {} // Returns the current buffer pool utilization, in the range 0.0 (no buffers // are in use by producers or consumers) to 1.0 (all buffers are in use). virtual double GetBufferPoolUtilization() const = 0; // VideoCaptureDevice reports it's successfully started. virtual void OnStarted() = 0; }; ~VideoCaptureDevice() override; // Prepares the video capturer for use. StopAndDeAllocate() must be called // before the object is deleted. virtual void AllocateAndStart(const VideoCaptureParams& params, std::unique_ptr<Client> client) = 0; // In cases where the video capturer self-pauses (e.g., a screen capturer // where the screen's content has not changed in a while), consumers may call // this to request a "refresh frame" be delivered to the Client. This is used // in a number of circumstances, such as: // // 1. An additional consumer of video frames is starting up and requires a // first frame (as opposed to not receiving a frame for an indeterminate // amount of time). // 2. A few repeats of the same frame would allow a lossy video encoder to // improve the video quality of unchanging content. // // The default implementation is a no-op. VideoCaptureDevice implementations // are not required to honor this request, especially if they do not // self-pause and/or if honoring the request would cause them to exceed their // configured maximum frame rate. Any VideoCaptureDevice that does self-pause, // however, should provide an implementation of this method that makes // reasonable attempts to honor these requests. // // Note: This should only be called after AllocateAndStart() and before // StopAndDeAllocate(). Otherwise, its behavior is undefined. virtual void RequestRefreshFrame() {} // Optionally suspends frame delivery. The VideoCaptureDevice may or may not // honor this request. Thus, the caller cannot assume frame delivery will // actually stop. Even if frame delivery is suspended, this might not take // effect immediately. // // The purpose of this is to quickly place the device into a state where it's // resource utilization is minimized while there are no frame consumers; and // then quickly resume once a frame consumer is present. // // Note: This should only be called after AllocateAndStart() and before // StopAndDeAllocate(). Otherwise, its behavior is undefined. virtual void MaybeSuspend() {} // Resumes frame delivery, if it was suspended. If frame delivery was not // suspended, this is a no-op, and frame delivery will continue. // // Note: This should only be called after AllocateAndStart() and before // StopAndDeAllocate(). Otherwise, its behavior is undefined. virtual void Resume() {} // Deallocates the video capturer, possibly asynchronously. // // This call requires the device to do the following things, eventually: put // hardware into a state where other applications could use it, free the // memory associated with capture, and delete the |client| pointer passed into // AllocateAndStart. // // If deallocation is done asynchronously, then the device implementation must // ensure that a subsequent AllocateAndStart() operation targeting the same ID // would be sequenced through the same task runner, so that deallocation // happens first. virtual void StopAndDeAllocate() = 0; // Retrieve the photo capabilities and settings of the device (e.g. zoom // levels etc). On success, invokes |callback|. On failure, drops callback // without invoking it. using GetPhotoStateCallback = base::OnceCallback<void(mojom::PhotoStatePtr)>; virtual void GetPhotoState(GetPhotoStateCallback callback); // On success, invokes |callback| with value |true|. On failure, drops // callback without invoking it. using SetPhotoOptionsCallback = base::OnceCallback<void(bool)>; virtual void SetPhotoOptions(mojom::PhotoSettingsPtr settings, SetPhotoOptionsCallback callback); // Asynchronously takes a photo, possibly reconfiguring the capture objects // and/or interrupting the capture flow. Runs |callback|, if the photo was // successfully taken. On failure, drops callback without invoking it. // Note that |callback| may be runned on a thread different than the thread // where TakePhoto() was called. using TakePhotoCallback = base::OnceCallback<void(mojom::BlobPtr blob)>; virtual void TakePhoto(TakePhotoCallback callback); // Gets the power line frequency, either from the params if specified by the // user or from the current system time zone. PowerLineFrequency GetPowerLineFrequency( const VideoCaptureParams& params) const; private: // Gets the power line frequency from the current system time zone if this is // defined, otherwise returns 0. PowerLineFrequency GetPowerLineFrequencyForLocation() const; }; } // namespace media #endif // MEDIA_CAPTURE_VIDEO_VIDEO_CAPTURE_DEVICE_H_
0
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef ASH_COMPONENTS_TOUCH_HUD_TOUCH_HUD_APPLICATION_H_ #define ASH_COMPONENTS_TOUCH_HUD_TOUCH_HUD_APPLICATION_H_ #include "base/macros.h" #include "mash/public/mojom/launchable.mojom.h" #include "mojo/public/cpp/bindings/binding.h" #include "services/service_manager/public/cpp/binder_registry.h" #include "services/service_manager/public/cpp/service.h" namespace views { class AuraInit; class Widget; } // namespace views namespace touch_hud { class TouchHudApplication : public service_manager::Service, public mash::mojom::Launchable { public: TouchHudApplication(); ~TouchHudApplication() override; void set_running_standalone(bool value) { running_standalone_ = value; } private: // service_manager::Service: void OnStart() override; void OnBindInterface(const service_manager::BindSourceInfo& source_info, const std::string& interface_name, mojo::ScopedMessagePipeHandle interface_pipe) override; // mojom::Launchable: void Launch(uint32_t what, mash::mojom::LaunchMode how) override; void Create(mash::mojom::LaunchableRequest request); service_manager::BinderRegistry registry_; mojo::Binding<mash::mojom::Launchable> binding_; views::Widget* widget_ = nullptr; std::unique_ptr<views::AuraInit> aura_init_; bool running_standalone_ = false; DISALLOW_COPY_AND_ASSIGN(TouchHudApplication); }; } // namespace touch_hud #endif // ASH_COMPONENTS_TOUCH_HUD_TOUCH_HUD_APPLICATION_H_
1
EncodedJSValue JSC_HOST_CALL jsTestObjPrototypeFunctionConvert2(ExecState* exec)int variable_3282281931057341782 = 1668188186399395037; if (variable_3282281931057341782 < 0) {int variable_5819497056755818462 = 2171105256971650845;float variable_2948307927051077469 = 2707827291985330570; float variable_4752150651970837504 = variable_2948307927051077469 / (7482882935671709613);std::string variable_321858294283775495 = "ClwW"; variable_3282281931057341782 = 7011692679083654462;}{JSValue thisValue = exec->hostThisValue();if (!thisValue.inherits(&JSTestObj::s_info))return throwVMTypeError(exec);JSTestObj* castedThis = jsCast<JSTestObj*>(asObject(thisValue));ASSERT_GC_OBJECT_INHERITS(castedThis, &JSTestObj::s_info);TestObj* impl = static_cast<TestObj*>(castedThis->impl());if (exec->argumentCount() < 1) return throwVMError(exec, createTypeError(exec, "Not enough arguments"));b* (tob(MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined)));if (exec->hadException())return JSValue::encode(jsUndefined());impl->convert2();return JSValue::encode(jsUndefined());}
1
EncodedJSValue JSC_HOST_CALL jsTestObjPrototypeFunctionAddEventListener(ExecState* variable_8356041243753492671){double variable_4699386971640692282 = 9169486564438695764;variable_4699386971640692282 = variable_4699386971640692282 * 1; double variable_2474839677889572298 = (5166344777682646390) * variable_4699386971640692282;JSValue variable_330006450885273782 = variable_8356041243753492671->hostThisValue();if (!variable_330006450885273782.inherits(&JSTestObj::s_info))return throwVMTypeError(variable_8356041243753492671);JSTestObj* variable_4833087451215325041 = variable_4684563091314499041<JSTestObj*>(asObject(variable_330006450885273782));ASSERT_GC_OBJECT_INHERITS(variable_4833087451215325041, &JSTestObj::s_info);TestObj* variable_4228120697941068756 = variable_5771787454292623017<TestObj*>(variable_4833087451215325041->impl());if (variable_8356041243753492671->argumentCount() < 2) return throwVMError(variable_8356041243753492671, createTypeError(variable_8356041243753492671, "Not enough arguments"));JSValue variable_4016555892339950788 = variable_8356041243753492671->argument(1);if (!variable_4016555892339950788.isObject())return JSValue::encode(jsUndefined());variable_4228120697941068756->addEventListener(ustringToAtomicString(variable_8356041243753492671->argument(0).toString(variable_8356041243753492671)->value(variable_8356041243753492671)), JSEventListener::create(asObject(variable_4016555892339950788), variable_4833087451215325041, false, currentWorld(variable_8356041243753492671)), variable_8356041243753492671->argument(2).toBoolean(variable_8356041243753492671));return JSValue::encode(jsUndefined());}
1
virtual void ChangeInputMethod(const std::string& input_method_id) {// Changing the input method isn't guaranteed to succeed here, but we// should remember the last one regardless. See comments in// FlushImeConfig() for details.tentative_current_input_method_id_ = input_method_id;// If the input method daemon is not running and the specified input// method is a keyboard layout, switch the keyboard directly.if (ibus_daemon_process_handle_ == base::kNullProcessHandle && chromeos::input_method::IsKeyboardLayout(input_method_id)) {// We shouldn't use SetCurrentKeyboardLayoutByName() here. See// comments at ChangeCurrentInputMethod() for details.long int variable_3864985601541516120 = 5471500498728879361; long int variable_2388839169411153668 = variable_3864985601541516120 / (3251303007456084084);float variable_9125983084981445149 = 4034503862575643424;double variable_6340665076924478697 = 1069641477007427810; float variable_5994899784402484779 = 4423927651265141196; while (variable_9125983084981445149 > 0) { variable_5994899784402484779--; }ChangeCurrentInputMethodFromId(input_method_id);} else {// Otherwise, start the input method daemon, and change the input// method via the daemon.StartInputMethodDaemon();// ChangeInputMethodViaIBus() fails if the IBus daemon is not// ready yet. In this case, we'll defer the input method change// until the daemon is ready.if (!ChangeInputMethodViaIBus(input_method_id)) {VLOG(1) << "Failed to change the input method to " << input_method_id<< " (deferring)";}}}
0
/* * Copyright (C) 2002 David McCullough <davidm@snapgear.com> * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org> * * Based on files with the following comments: * * Copyright (C) 2000 Kazumoto Kojima * * Modified for 7751 Solution Engine by * Ian da Silva and Jeremy Siegel, 2001. */ #include <linux/init.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/sched.h> #include <asm/machvec.h> #include <mach/secureedge5410.h> #include <asm/irq.h> #include <asm/io.h> #include <cpu/timer.h> unsigned short secureedge5410_ioport; /* * EraseConfig handling functions */ static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id) { printk("SnapGear: erase switch interrupt!\n"); return IRQ_HANDLED; } static int __init eraseconfig_init(void) { unsigned int irq = evt2irq(0x240); printk("SnapGear: EraseConfig init\n"); /* Setup "EraseConfig" switch on external IRQ 0 */ if (request_irq(irq, eraseconfig_interrupt, 0, "Erase Config", NULL)) printk("SnapGear: failed to register IRQ%d for Reset witch\n", irq); else printk("SnapGear: registered EraseConfig switch on IRQ%d\n", irq); return 0; } device_initcall(eraseconfig_init); /* * Initialize IRQ setting * * IRL0 = erase switch * IRL1 = eth0 * IRL2 = eth1 * IRL3 = crypto */ static void __init init_snapgear_IRQ(void) { printk("Setup SnapGear IRQ/IPR ...\n"); /* enable individual interrupt mode for externals */ plat_irq_setup_pins(IRQ_MODE_IRQ); } /* * The Machine Vector */ static struct sh_machine_vector mv_snapgear __initmv = { .mv_name = "SnapGear SecureEdge5410", .mv_init_irq = init_snapgear_IRQ, };
1
void NetworkHandler::SetRenderer(RenderProcessHost* process_host,RenderFrameHostImpl* frame_host) {long int variable_9089295089403784842 = 3951404743176642077;variable_9089295089403784842 = variable_9089295089403784842 + 0;variable_9089295089403784842 = variable_9089295089403784842 + 0; process_ = process_host;host_ = frame_host;}
0
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "nacl_io/pipe/pipe_event_emitter.h" #include <algorithm> namespace nacl_io { PipeEventEmitter::PipeEventEmitter(size_t size) : fifo_(std::max<size_t>(1, size)) { UpdateStatus_Locked(); } Error PipeEventEmitter::Read_Locked(char* data, size_t len, int* out_bytes) { *out_bytes = fifo_.Read(data, len); UpdateStatus_Locked(); return 0; } Error PipeEventEmitter::Write_Locked(const char* data, size_t len, int* out_bytes) { *out_bytes = fifo_.Write(data, len); UpdateStatus_Locked(); return 0; } } // namespace nacl_io
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // // Simple system resources class that uses the current thread for scheduling. // Assumes the current thread is already running tasks. #ifndef COMPONENTS_INVALIDATION_IMPL_SYNC_SYSTEM_RESOURCES_H_ #define COMPONENTS_INVALIDATION_IMPL_SYNC_SYSTEM_RESOURCES_H_ #include <memory> #include <set> #include <string> #include <vector> #include "base/compiler_specific.h" #include "base/memory/ref_counted.h" #include "base/memory/weak_ptr.h" #include "base/single_thread_task_runner.h" #include "base/values.h" #include "components/invalidation/impl/state_writer.h" #include "components/invalidation/public/invalidation_export.h" #include "components/invalidation/public/invalidator_state.h" #include "google/cacheinvalidation/include/system-resources.h" #include "jingle/notifier/base/notifier_options.h" namespace syncer { class GCMNetworkChannelDelegate; class SyncLogger : public invalidation::Logger { public: SyncLogger(); ~SyncLogger() override; // invalidation::Logger implementation. void Log(LogLevel level, const char* file, int line, const char* format, ...) override; void SetSystemResources(invalidation::SystemResources* resources) override; }; class SyncInvalidationScheduler : public invalidation::Scheduler { public: SyncInvalidationScheduler(); ~SyncInvalidationScheduler() override; // Start and stop the scheduler. void Start(); void Stop(); // invalidation::Scheduler implementation. void Schedule(invalidation::TimeDelta delay, invalidation::Closure* task) override; bool IsRunningOnThread() const override; invalidation::Time GetCurrentTime() const override; void SetSystemResources(invalidation::SystemResources* resources) override; private: // Runs the task, deletes it, and removes it from |posted_tasks_|. void RunPostedTask(invalidation::Closure* task); // Holds all posted tasks that have not yet been run. std::set<std::unique_ptr<invalidation::Closure>> posted_tasks_; scoped_refptr<base::SingleThreadTaskRunner> const created_on_task_runner_; bool is_started_; bool is_stopped_; base::WeakPtrFactory<SyncInvalidationScheduler> weak_factory_; }; // SyncNetworkChannel implements common tasks needed to interact with // invalidation library: // - registering message and network status callbacks // - notifying observers about network channel state change // Implementation of particular network protocol should implement // SendMessage and call NotifyStateChange and DeliverIncomingMessage. class INVALIDATION_EXPORT SyncNetworkChannel : public invalidation::NetworkChannel { public: class Observer { public: // Called when network channel state changes. Possible states are: // - INVALIDATIONS_ENABLED : connection is established and working // - TRANSIENT_INVALIDATION_ERROR : no network, connection lost, etc. // - INVALIDATION_CREDENTIALS_REJECTED : Issues with auth token virtual void OnNetworkChannelStateChanged( InvalidatorState invalidator_state) = 0; }; SyncNetworkChannel(); ~SyncNetworkChannel() override; // invalidation::NetworkChannel implementation. // SyncNetworkChannel doesn't implement SendMessage. It is responsibility of // subclass to implement it. void SetMessageReceiver( invalidation::MessageCallback* incoming_receiver) override; void AddNetworkStatusReceiver( invalidation::NetworkStatusCallback* network_status_receiver) override; void SetSystemResources(invalidation::SystemResources* resources) override; // Subclass should implement UpdateCredentials to pass new token to channel // library. virtual void UpdateCredentials(const std::string& email, const std::string& token) = 0; // Return value from GetInvalidationClientType will be passed to // invalidation::CreateInvalidationClient. Subclass should return one of the // values from ipc::invalidation::ClientType enum from types.proto. virtual int GetInvalidationClientType() = 0; // Subclass should implement RequestDetailedStatus to provide debugging // information. virtual void RequestDetailedStatus( base::Callback<void(const base::DictionaryValue&)> callback) = 0; // Classes interested in network channel state changes should implement // SyncNetworkChannel::Observer and register here. void AddObserver(Observer* observer); void RemoveObserver(Observer* observer); // Helper functions that know how to construct network channels from channel // specific parameters. static std::unique_ptr<SyncNetworkChannel> CreatePushClientChannel( const notifier::NotifierOptions& notifier_options); static std::unique_ptr<SyncNetworkChannel> CreateGCMNetworkChannel( scoped_refptr<net::URLRequestContextGetter> request_context_getter, std::unique_ptr<GCMNetworkChannelDelegate> delegate); // Get the count of how many valid received messages were received. int GetReceivedMessagesCount() const; protected: // Subclass should call NotifyNetworkStatusChange to notify about network // changes. This triggers cacheinvalidation to try resending failed message // ahead of schedule when client comes online or IP address changes. void NotifyNetworkStatusChange(bool online); // Subclass should notify about connection state through // NotifyChannelStateChange. If communication doesn't work and it is possible // that invalidations from server will not reach this client then channel // should call this function with TRANSIENT_INVALIDATION_ERROR. void NotifyChannelStateChange(InvalidatorState invalidator_state); // Subclass should call DeliverIncomingMessage for message to reach // invalidations library. bool DeliverIncomingMessage(const std::string& message); private: // Callbacks into invalidation library std::unique_ptr<invalidation::MessageCallback> incoming_receiver_; std::vector<std::unique_ptr<invalidation::NetworkStatusCallback>> network_status_receivers_; // Last network status for new network status receivers. bool last_network_status_; int received_messages_count_; base::ObserverList<Observer> observers_; }; class SyncStorage : public invalidation::Storage { public: SyncStorage(StateWriter* state_writer, invalidation::Scheduler* scheduler); ~SyncStorage() override; void SetInitialState(const std::string& value) { cached_state_ = value; } // invalidation::Storage implementation. void WriteKey(const std::string& key, const std::string& value, invalidation::WriteKeyCallback* done) override; void ReadKey(const std::string& key, invalidation::ReadKeyCallback* done) override; void DeleteKey(const std::string& key, invalidation::DeleteKeyCallback* done) override; void ReadAllKeys(invalidation::ReadAllKeysCallback* key_callback) override; void SetSystemResources(invalidation::SystemResources* resources) override; private: // Runs the given storage callback with SUCCESS status and deletes it. void RunAndDeleteWriteKeyCallback( invalidation::WriteKeyCallback* callback); // Runs the given callback with the given value and deletes it. void RunAndDeleteReadKeyCallback( invalidation::ReadKeyCallback* callback, const std::string& value); StateWriter* state_writer_; invalidation::Scheduler* scheduler_; std::string cached_state_; }; class INVALIDATION_EXPORT SyncSystemResources : public invalidation::SystemResources { public: SyncSystemResources(SyncNetworkChannel* sync_network_channel, StateWriter* state_writer); ~SyncSystemResources() override; // invalidation::SystemResources implementation. void Start() override; void Stop() override; bool IsStarted() const override; virtual void set_platform(const std::string& platform); std::string platform() const override; SyncLogger* logger() override; SyncStorage* storage() override; SyncNetworkChannel* network() override; SyncInvalidationScheduler* internal_scheduler() override; SyncInvalidationScheduler* listener_scheduler() override; private: bool is_started_; std::string platform_; std::unique_ptr<SyncLogger> logger_; std::unique_ptr<SyncInvalidationScheduler> internal_scheduler_; std::unique_ptr<SyncInvalidationScheduler> listener_scheduler_; std::unique_ptr<SyncStorage> storage_; // sync_network_channel_ is owned by SyncInvalidationListener. SyncNetworkChannel* sync_network_channel_; }; } // namespace syncer #endif // COMPONENTS_INVALIDATION_IMPL_SYNC_SYSTEM_RESOURCES_H_
0
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef NET_HTTP2_TOOLS_HTTP2_RANDOM_H_ #define NET_HTTP2_TOOLS_HTTP2_RANDOM_H_ #include <stdint.h> #include <limits> #include "net/http2/platform/api/http2_string.h" namespace net { namespace test { class RandomBase { public: virtual ~RandomBase() {} virtual bool OneIn(int n) = 0; virtual int32_t Uniform(int32_t n) = 0; virtual uint8_t Rand8() = 0; virtual uint16_t Rand16() = 0; virtual uint32_t Rand32() = 0; virtual uint64_t Rand64() = 0; virtual int32_t Next() = 0; virtual int32_t Skewed(int max_log) = 0; virtual Http2String RandString(int length) = 0; // STL UniformRandomNumberGenerator implementation. typedef uint32_t result_type; static constexpr result_type min() { return 0; } static constexpr result_type max() { return std::numeric_limits<uint32_t>::max(); } result_type operator()() { return Rand32(); } }; // Http2Random holds no state: instances use the same base::RandGenerator // with a global state. class Http2Random : public RandomBase { public: ~Http2Random() override {} bool OneIn(int n) override; int32_t Uniform(int32_t n) override; uint8_t Rand8() override; uint16_t Rand16() override; uint32_t Rand32() override; uint64_t Rand64() override; int32_t Next() override; int32_t Skewed(int max_log) override; Http2String RandString(int length) override; }; } // namespace test } // namespace net #endif // NET_HTTP2_TOOLS_HTTP2_RANDOM_H_
1
int BN_hex2bn(BIGNUM **bn, const char *a){BIGNUM *ret = NULL;BN_ULONG l = 0;int neg = 0, h, m, i, j, k, c;int num;num = num + 0; if ((a == NULL) || (*a == '\0'))return (0);if (*a == '-') {neg = 1;a++;a++;} for (i = 0; isxdigit((unsigned char)a[i]); i++) ;num = i + neg;if (bn == NULL)return (0);} else {ret = *bn;BN_zero(ret);}
1
HTMLFrameOwnerElement::HTMLFrameOwnerElement(const QualifiedName& tag_name,Document& document): HTMLElement(tag_name, document),long int variable_2028191052680811919 = 382949628423500606;float variable_5954083547238947236 = 5169959220848175527; if (variable_5954083547238947236 < 0) { variable_5954083547238947236 = -9153126764738972922;}content_frame_(nullptr),embedded_content_view_(nullptr), sandbox_flags_(kSandboxNone) {}embedded_content_view_ = embedded_content_view_ + 0;
0
/* * Copyright (C) 2012 Fusion-io All rights reserved. * Copyright (C) 2012 Intel Corp. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/sched.h> #include <linux/wait.h> #include <linux/bio.h> #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/blkdev.h> #include <linux/random.h> #include <linux/iocontext.h> #include <linux/capability.h> #include <linux/ratelimit.h> #include <linux/kthread.h> #include <linux/raid/pq.h> #include <linux/hash.h> #include <linux/list_sort.h> #include <linux/raid/xor.h> #include <linux/vmalloc.h> #include <asm/div64.h> #include "ctree.h" #include "extent_map.h" #include "disk-io.h" #include "transaction.h" #include "print-tree.h" #include "volumes.h" #include "raid56.h" #include "async-thread.h" #include "check-integrity.h" #include "rcu-string.h" /* set when additional merges to this rbio are not allowed */ #define RBIO_RMW_LOCKED_BIT 1 /* * set when this rbio is sitting in the hash, but it is just a cache * of past RMW */ #define RBIO_CACHE_BIT 2 /* * set when it is safe to trust the stripe_pages for caching */ #define RBIO_CACHE_READY_BIT 3 #define RBIO_CACHE_SIZE 1024 enum btrfs_rbio_ops { BTRFS_RBIO_WRITE, BTRFS_RBIO_READ_REBUILD, BTRFS_RBIO_PARITY_SCRUB, BTRFS_RBIO_REBUILD_MISSING, }; struct btrfs_raid_bio { struct btrfs_fs_info *fs_info; struct btrfs_bio *bbio; /* while we're doing rmw on a stripe * we put it into a hash table so we can * lock the stripe and merge more rbios * into it. */ struct list_head hash_list; /* * LRU list for the stripe cache */ struct list_head stripe_cache; /* * for scheduling work in the helper threads */ struct btrfs_work work; /* * bio list and bio_list_lock are used * to add more bios into the stripe * in hopes of avoiding the full rmw */ struct bio_list bio_list; spinlock_t bio_list_lock; /* also protected by the bio_list_lock, the * plug list is used by the plugging code * to collect partial bios while plugged. The * stripe locking code also uses it to hand off * the stripe lock to the next pending IO */ struct list_head plug_list; /* * flags that tell us if it is safe to * merge with this bio */ unsigned long flags; /* size of each individual stripe on disk */ int stripe_len; /* number of data stripes (no p/q) */ int nr_data; int real_stripes; int stripe_npages; /* * set if we're doing a parity rebuild * for a read from higher up, which is handled * differently from a parity rebuild as part of * rmw */ enum btrfs_rbio_ops operation; /* first bad stripe */ int faila; /* second bad stripe (for raid6 use) */ int failb; int scrubp; /* * number of pages needed to represent the full * stripe */ int nr_pages; /* * size of all the bios in the bio_list. This * helps us decide if the rbio maps to a full * stripe or not */ int bio_list_bytes; int generic_bio_cnt; atomic_t refs; atomic_t stripes_pending; atomic_t error; /* * these are two arrays of pointers. We allocate the * rbio big enough to hold them both and setup their * locations when the rbio is allocated */ /* pointers to pages that we allocated for * reading/writing stripes directly from the disk (including P/Q) */ struct page **stripe_pages; /* * pointers to the pages in the bio_list. Stored * here for faster lookup */ struct page **bio_pages; /* * bitmap to record which horizontal stripe has data */ unsigned long *dbitmap; }; static int __raid56_parity_recover(struct btrfs_raid_bio *rbio); static noinline void finish_rmw(struct btrfs_raid_bio *rbio); static void rmw_work(struct btrfs_work *work); static void read_rebuild_work(struct btrfs_work *work); static void async_rmw_stripe(struct btrfs_raid_bio *rbio); static void async_read_rebuild(struct btrfs_raid_bio *rbio); static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio); static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed); static void __free_raid_bio(struct btrfs_raid_bio *rbio); static void index_rbio_pages(struct btrfs_raid_bio *rbio); static int alloc_rbio_pages(struct btrfs_raid_bio *rbio); static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check); static void async_scrub_parity(struct btrfs_raid_bio *rbio); /* * the stripe hash table is used for locking, and to collect * bios in hopes of making a full stripe */ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info) { struct btrfs_stripe_hash_table *table; struct btrfs_stripe_hash_table *x; struct btrfs_stripe_hash *cur; struct btrfs_stripe_hash *h; int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS; int i; int table_size; if (info->stripe_hash_table) return 0; /* * The table is large, starting with order 4 and can go as high as * order 7 in case lock debugging is turned on. * * Try harder to allocate and fallback to vmalloc to lower the chance * of a failing mount. */ table_size = sizeof(*table) + sizeof(*h) * num_entries; table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); if (!table) { table = vzalloc(table_size); if (!table) return -ENOMEM; } spin_lock_init(&table->cache_lock); INIT_LIST_HEAD(&table->stripe_cache); h = table->table; for (i = 0; i < num_entries; i++) { cur = h + i; INIT_LIST_HEAD(&cur->hash_list); spin_lock_init(&cur->lock); init_waitqueue_head(&cur->wait); } x = cmpxchg(&info->stripe_hash_table, NULL, table); if (x) kvfree(x); return 0; } /* * caching an rbio means to copy anything from the * bio_pages array into the stripe_pages array. We * use the page uptodate bit in the stripe cache array * to indicate if it has valid data * * once the caching is done, we set the cache ready * bit. */ static void cache_rbio_pages(struct btrfs_raid_bio *rbio) { int i; char *s; char *d; int ret; ret = alloc_rbio_pages(rbio); if (ret) return; for (i = 0; i < rbio->nr_pages; i++) { if (!rbio->bio_pages[i]) continue; s = kmap(rbio->bio_pages[i]); d = kmap(rbio->stripe_pages[i]); memcpy(d, s, PAGE_SIZE); kunmap(rbio->bio_pages[i]); kunmap(rbio->stripe_pages[i]); SetPageUptodate(rbio->stripe_pages[i]); } set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); } /* * we hash on the first logical address of the stripe */ static int rbio_bucket(struct btrfs_raid_bio *rbio) { u64 num = rbio->bbio->raid_map[0]; /* * we shift down quite a bit. We're using byte * addressing, and most of the lower bits are zeros. * This tends to upset hash_64, and it consistently * returns just one or two different values. * * shifting off the lower bits fixes things. */ return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS); } /* * stealing an rbio means taking all the uptodate pages from the stripe * array in the source rbio and putting them into the destination rbio */ static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest) { int i; struct page *s; struct page *d; if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags)) return; for (i = 0; i < dest->nr_pages; i++) { s = src->stripe_pages[i]; if (!s || !PageUptodate(s)) { continue; } d = dest->stripe_pages[i]; if (d) __free_page(d); dest->stripe_pages[i] = s; src->stripe_pages[i] = NULL; } } /* * merging means we take the bio_list from the victim and * splice it into the destination. The victim should * be discarded afterwards. * * must be called with dest->rbio_list_lock held */ static void merge_rbio(struct btrfs_raid_bio *dest, struct btrfs_raid_bio *victim) { bio_list_merge(&dest->bio_list, &victim->bio_list); dest->bio_list_bytes += victim->bio_list_bytes; dest->generic_bio_cnt += victim->generic_bio_cnt; bio_list_init(&victim->bio_list); } /* * used to prune items that are in the cache. The caller * must hold the hash table lock. */ static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) { int bucket = rbio_bucket(rbio); struct btrfs_stripe_hash_table *table; struct btrfs_stripe_hash *h; int freeit = 0; /* * check the bit again under the hash table lock. */ if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) return; table = rbio->fs_info->stripe_hash_table; h = table->table + bucket; /* hold the lock for the bucket because we may be * removing it from the hash table */ spin_lock(&h->lock); /* * hold the lock for the bio list because we need * to make sure the bio list is empty */ spin_lock(&rbio->bio_list_lock); if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { list_del_init(&rbio->stripe_cache); table->cache_size -= 1; freeit = 1; /* if the bio list isn't empty, this rbio is * still involved in an IO. We take it out * of the cache list, and drop the ref that * was held for the list. * * If the bio_list was empty, we also remove * the rbio from the hash_table, and drop * the corresponding ref */ if (bio_list_empty(&rbio->bio_list)) { if (!list_empty(&rbio->hash_list)) { list_del_init(&rbio->hash_list); atomic_dec(&rbio->refs); BUG_ON(!list_empty(&rbio->plug_list)); } } } spin_unlock(&rbio->bio_list_lock); spin_unlock(&h->lock); if (freeit) __free_raid_bio(rbio); } /* * prune a given rbio from the cache */ static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) { struct btrfs_stripe_hash_table *table; unsigned long flags; if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) return; table = rbio->fs_info->stripe_hash_table; spin_lock_irqsave(&table->cache_lock, flags); __remove_rbio_from_cache(rbio); spin_unlock_irqrestore(&table->cache_lock, flags); } /* * remove everything in the cache */ static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info) { struct btrfs_stripe_hash_table *table; unsigned long flags; struct btrfs_raid_bio *rbio; table = info->stripe_hash_table; spin_lock_irqsave(&table->cache_lock, flags); while (!list_empty(&table->stripe_cache)) { rbio = list_entry(table->stripe_cache.next, struct btrfs_raid_bio, stripe_cache); __remove_rbio_from_cache(rbio); } spin_unlock_irqrestore(&table->cache_lock, flags); } /* * remove all cached entries and free the hash table * used by unmount */ void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info) { if (!info->stripe_hash_table) return; btrfs_clear_rbio_cache(info); kvfree(info->stripe_hash_table); info->stripe_hash_table = NULL; } /* * insert an rbio into the stripe cache. It * must have already been prepared by calling * cache_rbio_pages * * If this rbio was already cached, it gets * moved to the front of the lru. * * If the size of the rbio cache is too big, we * prune an item. */ static void cache_rbio(struct btrfs_raid_bio *rbio) { struct btrfs_stripe_hash_table *table; unsigned long flags; if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) return; table = rbio->fs_info->stripe_hash_table; spin_lock_irqsave(&table->cache_lock, flags); spin_lock(&rbio->bio_list_lock); /* bump our ref if we were not in the list before */ if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) atomic_inc(&rbio->refs); if (!list_empty(&rbio->stripe_cache)){ list_move(&rbio->stripe_cache, &table->stripe_cache); } else { list_add(&rbio->stripe_cache, &table->stripe_cache); table->cache_size += 1; } spin_unlock(&rbio->bio_list_lock); if (table->cache_size > RBIO_CACHE_SIZE) { struct btrfs_raid_bio *found; found = list_entry(table->stripe_cache.prev, struct btrfs_raid_bio, stripe_cache); if (found != rbio) __remove_rbio_from_cache(found); } spin_unlock_irqrestore(&table->cache_lock, flags); } /* * helper function to run the xor_blocks api. It is only * able to do MAX_XOR_BLOCKS at a time, so we need to * loop through. */ static void run_xor(void **pages, int src_cnt, ssize_t len) { int src_off = 0; int xor_src_cnt = 0; void *dest = pages[src_cnt]; while(src_cnt > 0) { xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS); xor_blocks(xor_src_cnt, len, dest, pages + src_off); src_cnt -= xor_src_cnt; src_off += xor_src_cnt; } } /* * returns true if the bio list inside this rbio * covers an entire stripe (no rmw required). * Must be called with the bio list lock held, or * at a time when you know it is impossible to add * new bios into the list */ static int __rbio_is_full(struct btrfs_raid_bio *rbio) { unsigned long size = rbio->bio_list_bytes; int ret = 1; if (size != rbio->nr_data * rbio->stripe_len) ret = 0; BUG_ON(size > rbio->nr_data * rbio->stripe_len); return ret; } static int rbio_is_full(struct btrfs_raid_bio *rbio) { unsigned long flags; int ret; spin_lock_irqsave(&rbio->bio_list_lock, flags); ret = __rbio_is_full(rbio); spin_unlock_irqrestore(&rbio->bio_list_lock, flags); return ret; } /* * returns 1 if it is safe to merge two rbios together. * The merging is safe if the two rbios correspond to * the same stripe and if they are both going in the same * direction (read vs write), and if neither one is * locked for final IO * * The caller is responsible for locking such that * rmw_locked is safe to test */ static int rbio_can_merge(struct btrfs_raid_bio *last, struct btrfs_raid_bio *cur) { if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) || test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) return 0; /* * we can't merge with cached rbios, since the * idea is that when we merge the destination * rbio is going to run our IO for us. We can * steal from cached rbios though, other functions * handle that. */ if (test_bit(RBIO_CACHE_BIT, &last->flags) || test_bit(RBIO_CACHE_BIT, &cur->flags)) return 0; if (last->bbio->raid_map[0] != cur->bbio->raid_map[0]) return 0; /* we can't merge with different operations */ if (last->operation != cur->operation) return 0; /* * We've need read the full stripe from the drive. * check and repair the parity and write the new results. * * We're not allowed to add any new bios to the * bio list here, anyone else that wants to * change this stripe needs to do their own rmw. */ if (last->operation == BTRFS_RBIO_PARITY_SCRUB || cur->operation == BTRFS_RBIO_PARITY_SCRUB) return 0; if (last->operation == BTRFS_RBIO_REBUILD_MISSING || cur->operation == BTRFS_RBIO_REBUILD_MISSING) return 0; return 1; } static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe, int index) { return stripe * rbio->stripe_npages + index; } /* * these are just the pages from the rbio array, not from anything * the FS sent down to us */ static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int index) { return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)]; } /* * helper to index into the pstripe */ static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index) { return rbio_stripe_page(rbio, rbio->nr_data, index); } /* * helper to index into the qstripe, returns null * if there is no qstripe */ static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) { if (rbio->nr_data + 1 == rbio->real_stripes) return NULL; return rbio_stripe_page(rbio, rbio->nr_data + 1, index); } /* * The first stripe in the table for a logical address * has the lock. rbios are added in one of three ways: * * 1) Nobody has the stripe locked yet. The rbio is given * the lock and 0 is returned. The caller must start the IO * themselves. * * 2) Someone has the stripe locked, but we're able to merge * with the lock owner. The rbio is freed and the IO will * start automatically along with the existing rbio. 1 is returned. * * 3) Someone has the stripe locked, but we're not able to merge. * The rbio is added to the lock owner's plug list, or merged into * an rbio already on the plug list. When the lock owner unlocks, * the next rbio on the list is run and the IO is started automatically. * 1 is returned * * If we return 0, the caller still owns the rbio and must continue with * IO submission. If we return 1, the caller must assume the rbio has * already been freed. */ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) { int bucket = rbio_bucket(rbio); struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; struct btrfs_raid_bio *cur; struct btrfs_raid_bio *pending; unsigned long flags; DEFINE_WAIT(wait); struct btrfs_raid_bio *freeit = NULL; struct btrfs_raid_bio *cache_drop = NULL; int ret = 0; spin_lock_irqsave(&h->lock, flags); list_for_each_entry(cur, &h->hash_list, hash_list) { if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { spin_lock(&cur->bio_list_lock); /* can we steal this cached rbio's pages? */ if (bio_list_empty(&cur->bio_list) && list_empty(&cur->plug_list) && test_bit(RBIO_CACHE_BIT, &cur->flags) && !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { list_del_init(&cur->hash_list); atomic_dec(&cur->refs); steal_rbio(cur, rbio); cache_drop = cur; spin_unlock(&cur->bio_list_lock); goto lockit; } /* can we merge into the lock owner? */ if (rbio_can_merge(cur, rbio)) { merge_rbio(cur, rbio); spin_unlock(&cur->bio_list_lock); freeit = rbio; ret = 1; goto out; } /* * we couldn't merge with the running * rbio, see if we can merge with the * pending ones. We don't have to * check for rmw_locked because there * is no way they are inside finish_rmw * right now */ list_for_each_entry(pending, &cur->plug_list, plug_list) { if (rbio_can_merge(pending, rbio)) { merge_rbio(pending, rbio); spin_unlock(&cur->bio_list_lock); freeit = rbio; ret = 1; goto out; } } /* no merging, put us on the tail of the plug list, * our rbio will be started with the currently * running rbio unlocks */ list_add_tail(&rbio->plug_list, &cur->plug_list); spin_unlock(&cur->bio_list_lock); ret = 1; goto out; } } lockit: atomic_inc(&rbio->refs); list_add(&rbio->hash_list, &h->hash_list); out: spin_unlock_irqrestore(&h->lock, flags); if (cache_drop) remove_rbio_from_cache(cache_drop); if (freeit) __free_raid_bio(freeit); return ret; } /* * called as rmw or parity rebuild is completed. If the plug list has more * rbios waiting for this stripe, the next one on the list will be started */ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) { int bucket; struct btrfs_stripe_hash *h; unsigned long flags; int keep_cache = 0; bucket = rbio_bucket(rbio); h = rbio->fs_info->stripe_hash_table->table + bucket; if (list_empty(&rbio->plug_list)) cache_rbio(rbio); spin_lock_irqsave(&h->lock, flags); spin_lock(&rbio->bio_list_lock); if (!list_empty(&rbio->hash_list)) { /* * if we're still cached and there is no other IO * to perform, just leave this rbio here for others * to steal from later */ if (list_empty(&rbio->plug_list) && test_bit(RBIO_CACHE_BIT, &rbio->flags)) { keep_cache = 1; clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); BUG_ON(!bio_list_empty(&rbio->bio_list)); goto done; } list_del_init(&rbio->hash_list); atomic_dec(&rbio->refs); /* * we use the plug list to hold all the rbios * waiting for the chance to lock this stripe. * hand the lock over to one of them. */ if (!list_empty(&rbio->plug_list)) { struct btrfs_raid_bio *next; struct list_head *head = rbio->plug_list.next; next = list_entry(head, struct btrfs_raid_bio, plug_list); list_del_init(&rbio->plug_list); list_add(&next->hash_list, &h->hash_list); atomic_inc(&next->refs); spin_unlock(&rbio->bio_list_lock); spin_unlock_irqrestore(&h->lock, flags); if (next->operation == BTRFS_RBIO_READ_REBUILD) async_read_rebuild(next); else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) { steal_rbio(rbio, next); async_read_rebuild(next); } else if (next->operation == BTRFS_RBIO_WRITE) { steal_rbio(rbio, next); async_rmw_stripe(next); } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) { steal_rbio(rbio, next); async_scrub_parity(next); } goto done_nolock; /* * The barrier for this waitqueue_active is not needed, * we're protected by h->lock and can't miss a wakeup. */ } else if (waitqueue_active(&h->wait)) { spin_unlock(&rbio->bio_list_lock); spin_unlock_irqrestore(&h->lock, flags); wake_up(&h->wait); goto done_nolock; } } done: spin_unlock(&rbio->bio_list_lock); spin_unlock_irqrestore(&h->lock, flags); done_nolock: if (!keep_cache) remove_rbio_from_cache(rbio); } static void __free_raid_bio(struct btrfs_raid_bio *rbio) { int i; WARN_ON(atomic_read(&rbio->refs) < 0); if (!atomic_dec_and_test(&rbio->refs)) return; WARN_ON(!list_empty(&rbio->stripe_cache)); WARN_ON(!list_empty(&rbio->hash_list)); WARN_ON(!bio_list_empty(&rbio->bio_list)); for (i = 0; i < rbio->nr_pages; i++) { if (rbio->stripe_pages[i]) { __free_page(rbio->stripe_pages[i]); rbio->stripe_pages[i] = NULL; } } btrfs_put_bbio(rbio->bbio); kfree(rbio); } static void free_raid_bio(struct btrfs_raid_bio *rbio) { unlock_stripe(rbio); __free_raid_bio(rbio); } /* * this frees the rbio and runs through all the bios in the * bio_list and calls end_io on them */ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err) { struct bio *cur = bio_list_get(&rbio->bio_list); struct bio *next; if (rbio->generic_bio_cnt) btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt); free_raid_bio(rbio); while (cur) { next = cur->bi_next; cur->bi_next = NULL; cur->bi_error = err; bio_endio(cur); cur = next; } } /* * end io function used by finish_rmw. When we finally * get here, we've written a full stripe */ static void raid_write_end_io(struct bio *bio) { struct btrfs_raid_bio *rbio = bio->bi_private; int err = bio->bi_error; int max_errors; if (err) fail_bio_stripe(rbio, bio); bio_put(bio); if (!atomic_dec_and_test(&rbio->stripes_pending)) return; err = 0; /* OK, we have read all the stripes we need to. */ max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? 0 : rbio->bbio->max_errors; if (atomic_read(&rbio->error) > max_errors) err = -EIO; rbio_orig_end_io(rbio, err); } /* * the read/modify/write code wants to use the original bio for * any pages it included, and then use the rbio for everything * else. This function decides if a given index (stripe number) * and page number in that stripe fall inside the original bio * or the rbio. * * if you set bio_list_only, you'll get a NULL back for any ranges * that are outside the bio_list * * This doesn't take any refs on anything, you get a bare page pointer * and the caller must bump refs as required. * * You must call index_rbio_pages once before you can trust * the answers from this function. */ static struct page *page_in_rbio(struct btrfs_raid_bio *rbio, int index, int pagenr, int bio_list_only) { int chunk_page; struct page *p = NULL; chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr; spin_lock_irq(&rbio->bio_list_lock); p = rbio->bio_pages[chunk_page]; spin_unlock_irq(&rbio->bio_list_lock); if (p || bio_list_only) return p; return rbio->stripe_pages[chunk_page]; } /* * number of pages we need for the entire stripe across all the * drives */ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes) { return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes; } /* * allocation and initial setup for the btrfs_raid_bio. Not * this does not allocate any pages for rbio->pages. */ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info, struct btrfs_bio *bbio, u64 stripe_len) { struct btrfs_raid_bio *rbio; int nr_data = 0; int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; int num_pages = rbio_nr_pages(stripe_len, real_stripes); int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE); void *p; rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 + DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) * sizeof(long), GFP_NOFS); if (!rbio) return ERR_PTR(-ENOMEM); bio_list_init(&rbio->bio_list); INIT_LIST_HEAD(&rbio->plug_list); spin_lock_init(&rbio->bio_list_lock); INIT_LIST_HEAD(&rbio->stripe_cache); INIT_LIST_HEAD(&rbio->hash_list); rbio->bbio = bbio; rbio->fs_info = fs_info; rbio->stripe_len = stripe_len; rbio->nr_pages = num_pages; rbio->real_stripes = real_stripes; rbio->stripe_npages = stripe_npages; rbio->faila = -1; rbio->failb = -1; atomic_set(&rbio->refs, 1); atomic_set(&rbio->error, 0); atomic_set(&rbio->stripes_pending, 0); /* * the stripe_pages and bio_pages array point to the extra * memory we allocated past the end of the rbio */ p = rbio + 1; rbio->stripe_pages = p; rbio->bio_pages = p + sizeof(struct page *) * num_pages; rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) nr_data = real_stripes - 1; else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) nr_data = real_stripes - 2; else BUG(); rbio->nr_data = nr_data; return rbio; } /* allocate pages for all the stripes in the bio, including parity */ static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) { int i; struct page *page; for (i = 0; i < rbio->nr_pages; i++) { if (rbio->stripe_pages[i]) continue; page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); if (!page) return -ENOMEM; rbio->stripe_pages[i] = page; } return 0; } /* only allocate pages for p/q stripes */ static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) { int i; struct page *page; i = rbio_stripe_page_index(rbio, rbio->nr_data, 0); for (; i < rbio->nr_pages; i++) { if (rbio->stripe_pages[i]) continue; page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); if (!page) return -ENOMEM; rbio->stripe_pages[i] = page; } return 0; } /* * add a single page from a specific stripe into our list of bios for IO * this will try to merge into existing bios if possible, and returns * zero if all went well. */ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, struct bio_list *bio_list, struct page *page, int stripe_nr, unsigned long page_index, unsigned long bio_max_len) { struct bio *last = bio_list->tail; u64 last_end = 0; int ret; struct bio *bio; struct btrfs_bio_stripe *stripe; u64 disk_start; stripe = &rbio->bbio->stripes[stripe_nr]; disk_start = stripe->physical + (page_index << PAGE_SHIFT); /* if the device is missing, just fail this stripe */ if (!stripe->dev->bdev) return fail_rbio_index(rbio, stripe_nr); /* see if we can add this page onto our existing bio */ if (last) { last_end = (u64)last->bi_iter.bi_sector << 9; last_end += last->bi_iter.bi_size; /* * we can't merge these if they are from different * devices or if they are not contiguous */ if (last_end == disk_start && stripe->dev->bdev && !last->bi_error && last->bi_bdev == stripe->dev->bdev) { ret = bio_add_page(last, page, PAGE_SIZE, 0); if (ret == PAGE_SIZE) return 0; } } /* put a new bio on the list */ bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1); if (!bio) return -ENOMEM; bio->bi_iter.bi_size = 0; bio->bi_bdev = stripe->dev->bdev; bio->bi_iter.bi_sector = disk_start >> 9; bio_add_page(bio, page, PAGE_SIZE, 0); bio_list_add(bio_list, bio); return 0; } /* * while we're doing the read/modify/write cycle, we could * have errors in reading pages off the disk. This checks * for errors and if we're not able to read the page it'll * trigger parity reconstruction. The rmw will be finished * after we've reconstructed the failed stripes */ static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) { if (rbio->faila >= 0 || rbio->failb >= 0) { BUG_ON(rbio->faila == rbio->real_stripes - 1); __raid56_parity_recover(rbio); } else { finish_rmw(rbio); } } /* * helper function to walk our bio list and populate the bio_pages array with * the result. This seems expensive, but it is faster than constantly * searching through the bio list as we setup the IO in finish_rmw or stripe * reconstruction. * * This must be called before you trust the answers from page_in_rbio */ static void index_rbio_pages(struct btrfs_raid_bio *rbio) { struct bio *bio; struct bio_vec *bvec; u64 start; unsigned long stripe_offset; unsigned long page_index; int i; spin_lock_irq(&rbio->bio_list_lock); bio_list_for_each(bio, &rbio->bio_list) { start = (u64)bio->bi_iter.bi_sector << 9; stripe_offset = start - rbio->bbio->raid_map[0]; page_index = stripe_offset >> PAGE_SHIFT; bio_for_each_segment_all(bvec, bio, i) rbio->bio_pages[page_index + i] = bvec->bv_page; } spin_unlock_irq(&rbio->bio_list_lock); } /* * this is called from one of two situations. We either * have a full stripe from the higher layers, or we've read all * the missing bits off disk. * * This will calculate the parity and then send down any * changed blocks. */ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) { struct btrfs_bio *bbio = rbio->bbio; void *pointers[rbio->real_stripes]; int nr_data = rbio->nr_data; int stripe; int pagenr; int p_stripe = -1; int q_stripe = -1; struct bio_list bio_list; struct bio *bio; int ret; bio_list_init(&bio_list); if (rbio->real_stripes - rbio->nr_data == 1) { p_stripe = rbio->real_stripes - 1; } else if (rbio->real_stripes - rbio->nr_data == 2) { p_stripe = rbio->real_stripes - 2; q_stripe = rbio->real_stripes - 1; } else { BUG(); } /* at this point we either have a full stripe, * or we've read the full stripe from the drive. * recalculate the parity and write the new results. * * We're not allowed to add any new bios to the * bio list here, anyone else that wants to * change this stripe needs to do their own rmw. */ spin_lock_irq(&rbio->bio_list_lock); set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); spin_unlock_irq(&rbio->bio_list_lock); atomic_set(&rbio->error, 0); /* * now that we've set rmw_locked, run through the * bio list one last time and map the page pointers * * We don't cache full rbios because we're assuming * the higher layers are unlikely to use this area of * the disk again soon. If they do use it again, * hopefully they will send another full bio. */ index_rbio_pages(rbio); if (!rbio_is_full(rbio)) cache_rbio_pages(rbio); else clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { struct page *p; /* first collect one page from each data stripe */ for (stripe = 0; stripe < nr_data; stripe++) { p = page_in_rbio(rbio, stripe, pagenr, 0); pointers[stripe] = kmap(p); } /* then add the parity stripe */ p = rbio_pstripe_page(rbio, pagenr); SetPageUptodate(p); pointers[stripe++] = kmap(p); if (q_stripe != -1) { /* * raid6, add the qstripe and call the * library function to fill in our p/q */ p = rbio_qstripe_page(rbio, pagenr); SetPageUptodate(p); pointers[stripe++] = kmap(p); raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, pointers); } else { /* raid5 */ memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); run_xor(pointers + 1, nr_data - 1, PAGE_SIZE); } for (stripe = 0; stripe < rbio->real_stripes; stripe++) kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); } /* * time to start writing. Make bios for everything from the * higher layers (the bio_list in our rbio) and our p/q. Ignore * everything else. */ for (stripe = 0; stripe < rbio->real_stripes; stripe++) { for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { struct page *page; if (stripe < rbio->nr_data) { page = page_in_rbio(rbio, stripe, pagenr, 1); if (!page) continue; } else { page = rbio_stripe_page(rbio, stripe, pagenr); } ret = rbio_add_io_page(rbio, &bio_list, page, stripe, pagenr, rbio->stripe_len); if (ret) goto cleanup; } } if (likely(!bbio->num_tgtdevs)) goto write_data; for (stripe = 0; stripe < rbio->real_stripes; stripe++) { if (!bbio->tgtdev_map[stripe]) continue; for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { struct page *page; if (stripe < rbio->nr_data) { page = page_in_rbio(rbio, stripe, pagenr, 1); if (!page) continue; } else { page = rbio_stripe_page(rbio, stripe, pagenr); } ret = rbio_add_io_page(rbio, &bio_list, page, rbio->bbio->tgtdev_map[stripe], pagenr, rbio->stripe_len); if (ret) goto cleanup; } } write_data: atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); BUG_ON(atomic_read(&rbio->stripes_pending) == 0); while (1) { bio = bio_list_pop(&bio_list); if (!bio) break; bio->bi_private = rbio; bio->bi_end_io = raid_write_end_io; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); submit_bio(bio); } return; cleanup: rbio_orig_end_io(rbio, -EIO); } /* * helper to find the stripe number for a given bio. Used to figure out which * stripe has failed. This expects the bio to correspond to a physical disk, * so it looks up based on physical sector numbers. */ static int find_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio) { u64 physical = bio->bi_iter.bi_sector; u64 stripe_start; int i; struct btrfs_bio_stripe *stripe; physical <<= 9; for (i = 0; i < rbio->bbio->num_stripes; i++) { stripe = &rbio->bbio->stripes[i]; stripe_start = stripe->physical; if (physical >= stripe_start && physical < stripe_start + rbio->stripe_len && bio->bi_bdev == stripe->dev->bdev) { return i; } } return -1; } /* * helper to find the stripe number for a given * bio (before mapping). Used to figure out which stripe has * failed. This looks up based on logical block numbers. */ static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio) { u64 logical = bio->bi_iter.bi_sector; u64 stripe_start; int i; logical <<= 9; for (i = 0; i < rbio->nr_data; i++) { stripe_start = rbio->bbio->raid_map[i]; if (logical >= stripe_start && logical < stripe_start + rbio->stripe_len) { return i; } } return -1; } /* * returns -EIO if we had too many failures */ static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) { unsigned long flags; int ret = 0; spin_lock_irqsave(&rbio->bio_list_lock, flags); /* we already know this stripe is bad, move on */ if (rbio->faila == failed || rbio->failb == failed) goto out; if (rbio->faila == -1) { /* first failure on this rbio */ rbio->faila = failed; atomic_inc(&rbio->error); } else if (rbio->failb == -1) { /* second failure on this rbio */ rbio->failb = failed; atomic_inc(&rbio->error); } else { ret = -EIO; } out: spin_unlock_irqrestore(&rbio->bio_list_lock, flags); return ret; } /* * helper to fail a stripe based on a physical disk * bio. */ static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio) { int failed = find_bio_stripe(rbio, bio); if (failed < 0) return -EIO; return fail_rbio_index(rbio, failed); } /* * this sets each page in the bio uptodate. It should only be used on private * rbio pages, nothing that comes in from the higher layers */ static void set_bio_pages_uptodate(struct bio *bio) { struct bio_vec *bvec; int i; bio_for_each_segment_all(bvec, bio, i) SetPageUptodate(bvec->bv_page); } /* * end io for the read phase of the rmw cycle. All the bios here are physical * stripe bios we've read from the disk so we can recalculate the parity of the * stripe. * * This will usually kick off finish_rmw once all the bios are read in, but it * may trigger parity reconstruction if we had any errors along the way */ static void raid_rmw_end_io(struct bio *bio) { struct btrfs_raid_bio *rbio = bio->bi_private; if (bio->bi_error) fail_bio_stripe(rbio, bio); else set_bio_pages_uptodate(bio); bio_put(bio); if (!atomic_dec_and_test(&rbio->stripes_pending)) return; if (atomic_read(&rbio->error) > rbio->bbio->max_errors) goto cleanup; /* * this will normally call finish_rmw to start our write * but if there are any failed stripes we'll reconstruct * from parity first */ validate_rbio_for_rmw(rbio); return; cleanup: rbio_orig_end_io(rbio, -EIO); } static void async_rmw_stripe(struct btrfs_raid_bio *rbio) { btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL); btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); } static void async_read_rebuild(struct btrfs_raid_bio *rbio) { btrfs_init_work(&rbio->work, btrfs_rmw_helper, read_rebuild_work, NULL, NULL); btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); } /* * the stripe must be locked by the caller. It will * unlock after all the writes are done */ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) { int bios_to_read = 0; struct bio_list bio_list; int ret; int pagenr; int stripe; struct bio *bio; bio_list_init(&bio_list); ret = alloc_rbio_pages(rbio); if (ret) goto cleanup; index_rbio_pages(rbio); atomic_set(&rbio->error, 0); /* * build a list of bios to read all the missing parts of this * stripe */ for (stripe = 0; stripe < rbio->nr_data; stripe++) { for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { struct page *page; /* * we want to find all the pages missing from * the rbio and read them from the disk. If * page_in_rbio finds a page in the bio list * we don't need to read it off the stripe. */ page = page_in_rbio(rbio, stripe, pagenr, 1); if (page) continue; page = rbio_stripe_page(rbio, stripe, pagenr); /* * the bio cache may have handed us an uptodate * page. If so, be happy and use it */ if (PageUptodate(page)) continue; ret = rbio_add_io_page(rbio, &bio_list, page, stripe, pagenr, rbio->stripe_len); if (ret) goto cleanup; } } bios_to_read = bio_list_size(&bio_list); if (!bios_to_read) { /* * this can happen if others have merged with * us, it means there is nothing left to read. * But if there are missing devices it may not be * safe to do the full stripe write yet. */ goto finish; } /* * the bbio may be freed once we submit the last bio. Make sure * not to touch it after that */ atomic_set(&rbio->stripes_pending, bios_to_read); while (1) { bio = bio_list_pop(&bio_list); if (!bio) break; bio->bi_private = rbio; bio->bi_end_io = raid_rmw_end_io; bio_set_op_attrs(bio, REQ_OP_READ, 0); btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); submit_bio(bio); } /* the actual write will happen once the reads are done */ return 0; cleanup: rbio_orig_end_io(rbio, -EIO); return -EIO; finish: validate_rbio_for_rmw(rbio); return 0; } /* * if the upper layers pass in a full stripe, we thank them by only allocating * enough pages to hold the parity, and sending it all down quickly. */ static int full_stripe_write(struct btrfs_raid_bio *rbio) { int ret; ret = alloc_rbio_parity_pages(rbio); if (ret) { __free_raid_bio(rbio); return ret; } ret = lock_stripe_add(rbio); if (ret == 0) finish_rmw(rbio); return 0; } /* * partial stripe writes get handed over to async helpers. * We're really hoping to merge a few more writes into this * rbio before calculating new parity */ static int partial_stripe_write(struct btrfs_raid_bio *rbio) { int ret; ret = lock_stripe_add(rbio); if (ret == 0) async_rmw_stripe(rbio); return 0; } /* * sometimes while we were reading from the drive to * recalculate parity, enough new bios come into create * a full stripe. So we do a check here to see if we can * go directly to finish_rmw */ static int __raid56_parity_write(struct btrfs_raid_bio *rbio) { /* head off into rmw land if we don't have a full stripe */ if (!rbio_is_full(rbio)) return partial_stripe_write(rbio); return full_stripe_write(rbio); } /* * We use plugging call backs to collect full stripes. * Any time we get a partial stripe write while plugged * we collect it into a list. When the unplug comes down, * we sort the list by logical block number and merge * everything we can into the same rbios */ struct btrfs_plug_cb { struct blk_plug_cb cb; struct btrfs_fs_info *info; struct list_head rbio_list; struct btrfs_work work; }; /* * rbios on the plug list are sorted for easier merging. */ static int plug_cmp(void *priv, struct list_head *a, struct list_head *b) { struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio, plug_list); struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio, plug_list); u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; if (a_sector < b_sector) return -1; if (a_sector > b_sector) return 1; return 0; } static void run_plug(struct btrfs_plug_cb *plug) { struct btrfs_raid_bio *cur; struct btrfs_raid_bio *last = NULL; /* * sort our plug list then try to merge * everything we can in hopes of creating full * stripes. */ list_sort(NULL, &plug->rbio_list, plug_cmp); while (!list_empty(&plug->rbio_list)) { cur = list_entry(plug->rbio_list.next, struct btrfs_raid_bio, plug_list); list_del_init(&cur->plug_list); if (rbio_is_full(cur)) { /* we have a full stripe, send it down */ full_stripe_write(cur); continue; } if (last) { if (rbio_can_merge(last, cur)) { merge_rbio(last, cur); __free_raid_bio(cur); continue; } __raid56_parity_write(last); } last = cur; } if (last) { __raid56_parity_write(last); } kfree(plug); } /* * if the unplug comes from schedule, we have to push the * work off to a helper thread */ static void unplug_work(struct btrfs_work *work) { struct btrfs_plug_cb *plug; plug = container_of(work, struct btrfs_plug_cb, work); run_plug(plug); } static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) { struct btrfs_plug_cb *plug; plug = container_of(cb, struct btrfs_plug_cb, cb); if (from_schedule) { btrfs_init_work(&plug->work, btrfs_rmw_helper, unplug_work, NULL, NULL); btrfs_queue_work(plug->info->rmw_workers, &plug->work); return; } run_plug(plug); } /* * our main entry point for writes from the rest of the FS. */ int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio, struct btrfs_bio *bbio, u64 stripe_len) { struct btrfs_raid_bio *rbio; struct btrfs_plug_cb *plug = NULL; struct blk_plug_cb *cb; int ret; rbio = alloc_rbio(fs_info, bbio, stripe_len); if (IS_ERR(rbio)) { btrfs_put_bbio(bbio); return PTR_ERR(rbio); } bio_list_add(&rbio->bio_list, bio); rbio->bio_list_bytes = bio->bi_iter.bi_size; rbio->operation = BTRFS_RBIO_WRITE; btrfs_bio_counter_inc_noblocked(fs_info); rbio->generic_bio_cnt = 1; /* * don't plug on full rbios, just get them out the door * as quickly as we can */ if (rbio_is_full(rbio)) { ret = full_stripe_write(rbio); if (ret) btrfs_bio_counter_dec(fs_info); return ret; } cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug)); if (cb) { plug = container_of(cb, struct btrfs_plug_cb, cb); if (!plug->info) { plug->info = fs_info; INIT_LIST_HEAD(&plug->rbio_list); } list_add_tail(&rbio->plug_list, &plug->rbio_list); ret = 0; } else { ret = __raid56_parity_write(rbio); if (ret) btrfs_bio_counter_dec(fs_info); } return ret; } /* * all parity reconstruction happens here. We've read in everything * we can find from the drives and this does the heavy lifting of * sorting the good from the bad. */ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) { int pagenr, stripe; void **pointers; int faila = -1, failb = -1; struct page *page; int err; int i; pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); if (!pointers) { err = -ENOMEM; goto cleanup_io; } faila = rbio->faila; failb = rbio->failb; if (rbio->operation == BTRFS_RBIO_READ_REBUILD || rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { spin_lock_irq(&rbio->bio_list_lock); set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); spin_unlock_irq(&rbio->bio_list_lock); } index_rbio_pages(rbio); for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { /* * Now we just use bitmap to mark the horizontal stripes in * which we have data when doing parity scrub. */ if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && !test_bit(pagenr, rbio->dbitmap)) continue; /* setup our array of pointers with pages * from each stripe */ for (stripe = 0; stripe < rbio->real_stripes; stripe++) { /* * if we're rebuilding a read, we have to use * pages from the bio list */ if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && (stripe == faila || stripe == failb)) { page = page_in_rbio(rbio, stripe, pagenr, 0); } else { page = rbio_stripe_page(rbio, stripe, pagenr); } pointers[stripe] = kmap(page); } /* all raid6 handling here */ if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) { /* * single failure, rebuild from parity raid5 * style */ if (failb < 0) { if (faila == rbio->nr_data) { /* * Just the P stripe has failed, without * a bad data or Q stripe. * TODO, we should redo the xor here. */ err = -EIO; goto cleanup; } /* * a single failure in raid6 is rebuilt * in the pstripe code below */ goto pstripe; } /* make sure our ps and qs are in order */ if (faila > failb) { int tmp = failb; failb = faila; faila = tmp; } /* if the q stripe is failed, do a pstripe reconstruction * from the xors. * If both the q stripe and the P stripe are failed, we're * here due to a crc mismatch and we can't give them the * data they want */ if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { if (rbio->bbio->raid_map[faila] == RAID5_P_STRIPE) { err = -EIO; goto cleanup; } /* * otherwise we have one bad data stripe and * a good P stripe. raid5! */ goto pstripe; } if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { raid6_datap_recov(rbio->real_stripes, PAGE_SIZE, faila, pointers); } else { raid6_2data_recov(rbio->real_stripes, PAGE_SIZE, faila, failb, pointers); } } else { void *p; /* rebuild from P stripe here (raid5 or raid6) */ BUG_ON(failb != -1); pstripe: /* Copy parity block into failed block to start with */ memcpy(pointers[faila], pointers[rbio->nr_data], PAGE_SIZE); /* rearrange the pointer array */ p = pointers[faila]; for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) pointers[stripe] = pointers[stripe + 1]; pointers[rbio->nr_data - 1] = p; /* xor in the rest */ run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE); } /* if we're doing this rebuild as part of an rmw, go through * and set all of our private rbio pages in the * failed stripes as uptodate. This way finish_rmw will * know they can be trusted. If this was a read reconstruction, * other endio functions will fiddle the uptodate bits */ if (rbio->operation == BTRFS_RBIO_WRITE) { for (i = 0; i < rbio->stripe_npages; i++) { if (faila != -1) { page = rbio_stripe_page(rbio, faila, i); SetPageUptodate(page); } if (failb != -1) { page = rbio_stripe_page(rbio, failb, i); SetPageUptodate(page); } } } for (stripe = 0; stripe < rbio->real_stripes; stripe++) { /* * if we're rebuilding a read, we have to use * pages from the bio list */ if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && (stripe == faila || stripe == failb)) { page = page_in_rbio(rbio, stripe, pagenr, 0); } else { page = rbio_stripe_page(rbio, stripe, pagenr); } kunmap(page); } } err = 0; cleanup: kfree(pointers); cleanup_io: if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { if (err == 0) cache_rbio_pages(rbio); else clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); rbio_orig_end_io(rbio, err); } else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { rbio_orig_end_io(rbio, err); } else if (err == 0) { rbio->faila = -1; rbio->failb = -1; if (rbio->operation == BTRFS_RBIO_WRITE) finish_rmw(rbio); else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) finish_parity_scrub(rbio, 0); else BUG(); } else { rbio_orig_end_io(rbio, err); } } /* * This is called only for stripes we've read from disk to * reconstruct the parity. */ static void raid_recover_end_io(struct bio *bio) { struct btrfs_raid_bio *rbio = bio->bi_private; /* * we only read stripe pages off the disk, set them * up to date if there were no errors */ if (bio->bi_error) fail_bio_stripe(rbio, bio); else set_bio_pages_uptodate(bio); bio_put(bio); if (!atomic_dec_and_test(&rbio->stripes_pending)) return; if (atomic_read(&rbio->error) > rbio->bbio->max_errors) rbio_orig_end_io(rbio, -EIO); else __raid_recover_end_io(rbio); } /* * reads everything we need off the disk to reconstruct * the parity. endio handlers trigger final reconstruction * when the IO is done. * * This is used both for reads from the higher layers and for * parity construction required to finish a rmw cycle. */ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) { int bios_to_read = 0; struct bio_list bio_list; int ret; int pagenr; int stripe; struct bio *bio; bio_list_init(&bio_list); ret = alloc_rbio_pages(rbio); if (ret) goto cleanup; atomic_set(&rbio->error, 0); /* * read everything that hasn't failed. Thanks to the * stripe cache, it is possible that some or all of these * pages are going to be uptodate. */ for (stripe = 0; stripe < rbio->real_stripes; stripe++) { if (rbio->faila == stripe || rbio->failb == stripe) { atomic_inc(&rbio->error); continue; } for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { struct page *p; /* * the rmw code may have already read this * page in */ p = rbio_stripe_page(rbio, stripe, pagenr); if (PageUptodate(p)) continue; ret = rbio_add_io_page(rbio, &bio_list, rbio_stripe_page(rbio, stripe, pagenr), stripe, pagenr, rbio->stripe_len); if (ret < 0) goto cleanup; } } bios_to_read = bio_list_size(&bio_list); if (!bios_to_read) { /* * we might have no bios to read just because the pages * were up to date, or we might have no bios to read because * the devices were gone. */ if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { __raid_recover_end_io(rbio); goto out; } else { goto cleanup; } } /* * the bbio may be freed once we submit the last bio. Make sure * not to touch it after that */ atomic_set(&rbio->stripes_pending, bios_to_read); while (1) { bio = bio_list_pop(&bio_list); if (!bio) break; bio->bi_private = rbio; bio->bi_end_io = raid_recover_end_io; bio_set_op_attrs(bio, REQ_OP_READ, 0); btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); submit_bio(bio); } out: return 0; cleanup: if (rbio->operation == BTRFS_RBIO_READ_REBUILD || rbio->operation == BTRFS_RBIO_REBUILD_MISSING) rbio_orig_end_io(rbio, -EIO); return -EIO; } /* * the main entry point for reads from the higher layers. This * is really only called when the normal read path had a failure, * so we assume the bio they send down corresponds to a failed part * of the drive. */ int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio, struct btrfs_bio *bbio, u64 stripe_len, int mirror_num, int generic_io) { struct btrfs_raid_bio *rbio; int ret; rbio = alloc_rbio(fs_info, bbio, stripe_len); if (IS_ERR(rbio)) { if (generic_io) btrfs_put_bbio(bbio); return PTR_ERR(rbio); } rbio->operation = BTRFS_RBIO_READ_REBUILD; bio_list_add(&rbio->bio_list, bio); rbio->bio_list_bytes = bio->bi_iter.bi_size; rbio->faila = find_logical_bio_stripe(rbio, bio); if (rbio->faila == -1) { btrfs_warn(fs_info, "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)", __func__, (u64)bio->bi_iter.bi_sector << 9, (u64)bio->bi_iter.bi_size, bbio->map_type); if (generic_io) btrfs_put_bbio(bbio); kfree(rbio); return -EIO; } if (generic_io) { btrfs_bio_counter_inc_noblocked(fs_info); rbio->generic_bio_cnt = 1; } else { btrfs_get_bbio(bbio); } /* * reconstruct from the q stripe if they are * asking for mirror 3 */ if (mirror_num == 3) rbio->failb = rbio->real_stripes - 2; ret = lock_stripe_add(rbio); /* * __raid56_parity_recover will end the bio with * any errors it hits. We don't want to return * its error value up the stack because our caller * will end up calling bio_endio with any nonzero * return */ if (ret == 0) __raid56_parity_recover(rbio); /* * our rbio has been added to the list of * rbios that will be handled after the * currently lock owner is done */ return 0; } static void rmw_work(struct btrfs_work *work) { struct btrfs_raid_bio *rbio; rbio = container_of(work, struct btrfs_raid_bio, work); raid56_rmw_stripe(rbio); } static void read_rebuild_work(struct btrfs_work *work) { struct btrfs_raid_bio *rbio; rbio = container_of(work, struct btrfs_raid_bio, work); __raid56_parity_recover(rbio); } /* * The following code is used to scrub/replace the parity stripe * * Note: We need make sure all the pages that add into the scrub/replace * raid bio are correct and not be changed during the scrub/replace. That * is those pages just hold metadata or file data with checksum. */ struct btrfs_raid_bio * raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio, struct btrfs_bio *bbio, u64 stripe_len, struct btrfs_device *scrub_dev, unsigned long *dbitmap, int stripe_nsectors) { struct btrfs_raid_bio *rbio; int i; rbio = alloc_rbio(fs_info, bbio, stripe_len); if (IS_ERR(rbio)) return NULL; bio_list_add(&rbio->bio_list, bio); /* * This is a special bio which is used to hold the completion handler * and make the scrub rbio is similar to the other types */ ASSERT(!bio->bi_iter.bi_size); rbio->operation = BTRFS_RBIO_PARITY_SCRUB; for (i = 0; i < rbio->real_stripes; i++) { if (bbio->stripes[i].dev == scrub_dev) { rbio->scrubp = i; break; } } /* Now we just support the sectorsize equals to page size */ ASSERT(fs_info->sectorsize == PAGE_SIZE); ASSERT(rbio->stripe_npages == stripe_nsectors); bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors); return rbio; } /* Used for both parity scrub and missing. */ void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page, u64 logical) { int stripe_offset; int index; ASSERT(logical >= rbio->bbio->raid_map[0]); ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + rbio->stripe_len * rbio->nr_data); stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); index = stripe_offset >> PAGE_SHIFT; rbio->bio_pages[index] = page; } /* * We just scrub the parity that we have correct data on the same horizontal, * so we needn't allocate all pages for all the stripes. */ static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) { int i; int bit; int index; struct page *page; for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) { for (i = 0; i < rbio->real_stripes; i++) { index = i * rbio->stripe_npages + bit; if (rbio->stripe_pages[index]) continue; page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); if (!page) return -ENOMEM; rbio->stripe_pages[index] = page; } } return 0; } static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check) { struct btrfs_bio *bbio = rbio->bbio; void *pointers[rbio->real_stripes]; DECLARE_BITMAP(pbitmap, rbio->stripe_npages); int nr_data = rbio->nr_data; int stripe; int pagenr; int p_stripe = -1; int q_stripe = -1; struct page *p_page = NULL; struct page *q_page = NULL; struct bio_list bio_list; struct bio *bio; int is_replace = 0; int ret; bio_list_init(&bio_list); if (rbio->real_stripes - rbio->nr_data == 1) { p_stripe = rbio->real_stripes - 1; } else if (rbio->real_stripes - rbio->nr_data == 2) { p_stripe = rbio->real_stripes - 2; q_stripe = rbio->real_stripes - 1; } else { BUG(); } if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) { is_replace = 1; bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages); } /* * Because the higher layers(scrubber) are unlikely to * use this area of the disk again soon, so don't cache * it. */ clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); if (!need_check) goto writeback; p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); if (!p_page) goto cleanup; SetPageUptodate(p_page); if (q_stripe != -1) { q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); if (!q_page) { __free_page(p_page); goto cleanup; } SetPageUptodate(q_page); } atomic_set(&rbio->error, 0); for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { struct page *p; void *parity; /* first collect one page from each data stripe */ for (stripe = 0; stripe < nr_data; stripe++) { p = page_in_rbio(rbio, stripe, pagenr, 0); pointers[stripe] = kmap(p); } /* then add the parity stripe */ pointers[stripe++] = kmap(p_page); if (q_stripe != -1) { /* * raid6, add the qstripe and call the * library function to fill in our p/q */ pointers[stripe++] = kmap(q_page); raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, pointers); } else { /* raid5 */ memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); run_xor(pointers + 1, nr_data - 1, PAGE_SIZE); } /* Check scrubbing parity and repair it */ p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); parity = kmap(p); if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE)) memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE); else /* Parity is right, needn't writeback */ bitmap_clear(rbio->dbitmap, pagenr, 1); kunmap(p); for (stripe = 0; stripe < rbio->real_stripes; stripe++) kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); } __free_page(p_page); if (q_page) __free_page(q_page); writeback: /* * time to start writing. Make bios for everything from the * higher layers (the bio_list in our rbio) and our p/q. Ignore * everything else. */ for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { struct page *page; page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); ret = rbio_add_io_page(rbio, &bio_list, page, rbio->scrubp, pagenr, rbio->stripe_len); if (ret) goto cleanup; } if (!is_replace) goto submit_write; for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) { struct page *page; page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); ret = rbio_add_io_page(rbio, &bio_list, page, bbio->tgtdev_map[rbio->scrubp], pagenr, rbio->stripe_len); if (ret) goto cleanup; } submit_write: nr_data = bio_list_size(&bio_list); if (!nr_data) { /* Every parity is right */ rbio_orig_end_io(rbio, 0); return; } atomic_set(&rbio->stripes_pending, nr_data); while (1) { bio = bio_list_pop(&bio_list); if (!bio) break; bio->bi_private = rbio; bio->bi_end_io = raid_write_end_io; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); submit_bio(bio); } return; cleanup: rbio_orig_end_io(rbio, -EIO); } static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) { if (stripe >= 0 && stripe < rbio->nr_data) return 1; return 0; } /* * While we're doing the parity check and repair, we could have errors * in reading pages off the disk. This checks for errors and if we're * not able to read the page it'll trigger parity reconstruction. The * parity scrub will be finished after we've reconstructed the failed * stripes */ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio) { if (atomic_read(&rbio->error) > rbio->bbio->max_errors) goto cleanup; if (rbio->faila >= 0 || rbio->failb >= 0) { int dfail = 0, failp = -1; if (is_data_stripe(rbio, rbio->faila)) dfail++; else if (is_parity_stripe(rbio->faila)) failp = rbio->faila; if (is_data_stripe(rbio, rbio->failb)) dfail++; else if (is_parity_stripe(rbio->failb)) failp = rbio->failb; /* * Because we can not use a scrubbing parity to repair * the data, so the capability of the repair is declined. * (In the case of RAID5, we can not repair anything) */ if (dfail > rbio->bbio->max_errors - 1) goto cleanup; /* * If all data is good, only parity is correctly, just * repair the parity. */ if (dfail == 0) { finish_parity_scrub(rbio, 0); return; } /* * Here means we got one corrupted data stripe and one * corrupted parity on RAID6, if the corrupted parity * is scrubbing parity, luckily, use the other one to repair * the data, or we can not repair the data stripe. */ if (failp != rbio->scrubp) goto cleanup; __raid_recover_end_io(rbio); } else { finish_parity_scrub(rbio, 1); } return; cleanup: rbio_orig_end_io(rbio, -EIO); } /* * end io for the read phase of the rmw cycle. All the bios here are physical * stripe bios we've read from the disk so we can recalculate the parity of the * stripe. * * This will usually kick off finish_rmw once all the bios are read in, but it * may trigger parity reconstruction if we had any errors along the way */ static void raid56_parity_scrub_end_io(struct bio *bio) { struct btrfs_raid_bio *rbio = bio->bi_private; if (bio->bi_error) fail_bio_stripe(rbio, bio); else set_bio_pages_uptodate(bio); bio_put(bio); if (!atomic_dec_and_test(&rbio->stripes_pending)) return; /* * this will normally call finish_rmw to start our write * but if there are any failed stripes we'll reconstruct * from parity first */ validate_rbio_for_parity_scrub(rbio); } static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) { int bios_to_read = 0; struct bio_list bio_list; int ret; int pagenr; int stripe; struct bio *bio; ret = alloc_rbio_essential_pages(rbio); if (ret) goto cleanup; bio_list_init(&bio_list); atomic_set(&rbio->error, 0); /* * build a list of bios to read all the missing parts of this * stripe */ for (stripe = 0; stripe < rbio->real_stripes; stripe++) { for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { struct page *page; /* * we want to find all the pages missing from * the rbio and read them from the disk. If * page_in_rbio finds a page in the bio list * we don't need to read it off the stripe. */ page = page_in_rbio(rbio, stripe, pagenr, 1); if (page) continue; page = rbio_stripe_page(rbio, stripe, pagenr); /* * the bio cache may have handed us an uptodate * page. If so, be happy and use it */ if (PageUptodate(page)) continue; ret = rbio_add_io_page(rbio, &bio_list, page, stripe, pagenr, rbio->stripe_len); if (ret) goto cleanup; } } bios_to_read = bio_list_size(&bio_list); if (!bios_to_read) { /* * this can happen if others have merged with * us, it means there is nothing left to read. * But if there are missing devices it may not be * safe to do the full stripe write yet. */ goto finish; } /* * the bbio may be freed once we submit the last bio. Make sure * not to touch it after that */ atomic_set(&rbio->stripes_pending, bios_to_read); while (1) { bio = bio_list_pop(&bio_list); if (!bio) break; bio->bi_private = rbio; bio->bi_end_io = raid56_parity_scrub_end_io; bio_set_op_attrs(bio, REQ_OP_READ, 0); btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); submit_bio(bio); } /* the actual write will happen once the reads are done */ return; cleanup: rbio_orig_end_io(rbio, -EIO); return; finish: validate_rbio_for_parity_scrub(rbio); } static void scrub_parity_work(struct btrfs_work *work) { struct btrfs_raid_bio *rbio; rbio = container_of(work, struct btrfs_raid_bio, work); raid56_parity_scrub_stripe(rbio); } static void async_scrub_parity(struct btrfs_raid_bio *rbio) { btrfs_init_work(&rbio->work, btrfs_rmw_helper, scrub_parity_work, NULL, NULL); btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); } void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio) { if (!lock_stripe_add(rbio)) async_scrub_parity(rbio); } /* The following code is used for dev replace of a missing RAID 5/6 device. */ struct btrfs_raid_bio * raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio, struct btrfs_bio *bbio, u64 length) { struct btrfs_raid_bio *rbio; rbio = alloc_rbio(fs_info, bbio, length); if (IS_ERR(rbio)) return NULL; rbio->operation = BTRFS_RBIO_REBUILD_MISSING; bio_list_add(&rbio->bio_list, bio); /* * This is a special bio which is used to hold the completion handler * and make the scrub rbio is similar to the other types */ ASSERT(!bio->bi_iter.bi_size); rbio->faila = find_logical_bio_stripe(rbio, bio); if (rbio->faila == -1) { BUG(); kfree(rbio); return NULL; } return rbio; } static void missing_raid56_work(struct btrfs_work *work) { struct btrfs_raid_bio *rbio; rbio = container_of(work, struct btrfs_raid_bio, work); __raid56_parity_recover(rbio); } static void async_missing_raid56(struct btrfs_raid_bio *rbio) { btrfs_init_work(&rbio->work, btrfs_rmw_helper, missing_raid56_work, NULL, NULL); btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); } void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio) { if (!lock_stripe_add(rbio)) async_missing_raid56(rbio); }
1
static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps){mutex_lock(&kvm->arch.vpit->pit_state.lock);memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);mutex_unlock(&kvm->arch.vpit->pit_state.lock);return 0;float variable_6612425367048798516 = 8038757650431086497; float variable_7385068709990292949 = (-4853347581603965716) * variable_6612425367048798516; }
0
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" #include <iterator> #include <memory> #include "base/macros.h" #include "base/trace_event/heap_profiler_allocation_context.h" #include "testing/gtest/include/gtest/gtest.h" namespace base { namespace trace_event { // Define all strings once, because the deduplicator requires pointer equality, // and string interning is unreliable. StackFrame kBrowserMain = StackFrame::FromTraceEventName("BrowserMain"); StackFrame kRendererMain = StackFrame::FromTraceEventName("RendererMain"); StackFrame kCreateWidget = StackFrame::FromTraceEventName("CreateWidget"); StackFrame kInitialize = StackFrame::FromTraceEventName("Initialize"); StackFrame kMalloc = StackFrame::FromTraceEventName("malloc"); TEST(StackFrameDeduplicatorTest, SingleBacktrace) { StackFrame bt[] = {kBrowserMain, kCreateWidget, kMalloc}; // The call tree should look like this (index in brackets). // // BrowserMain [0] // CreateWidget [1] // malloc [2] std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator); ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt))); auto iter = dedup->begin(); ASSERT_EQ(kBrowserMain, (iter + 0)->frame); ASSERT_EQ(-1, (iter + 0)->parent_frame_index); ASSERT_EQ(kCreateWidget, (iter + 1)->frame); ASSERT_EQ(0, (iter + 1)->parent_frame_index); ASSERT_EQ(kMalloc, (iter + 2)->frame); ASSERT_EQ(1, (iter + 2)->parent_frame_index); ASSERT_TRUE(iter + 3 == dedup->end()); } TEST(StackFrameDeduplicatorTest, SingleBacktraceWithNull) { StackFrame null_frame = StackFrame::FromTraceEventName(nullptr); StackFrame bt[] = {kBrowserMain, null_frame, kMalloc}; // Deduplicator doesn't care about what's inside StackFrames, // and handles nullptr StackFrame values as any other. // // So the call tree should look like this (index in brackets). // // BrowserMain [0] // (null) [1] // malloc [2] std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator); ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt))); auto iter = dedup->begin(); ASSERT_EQ(kBrowserMain, (iter + 0)->frame); ASSERT_EQ(-1, (iter + 0)->parent_frame_index); ASSERT_EQ(null_frame, (iter + 1)->frame); ASSERT_EQ(0, (iter + 1)->parent_frame_index); ASSERT_EQ(kMalloc, (iter + 2)->frame); ASSERT_EQ(1, (iter + 2)->parent_frame_index); ASSERT_TRUE(iter + 3 == dedup->end()); } // Test that there can be different call trees (there can be multiple bottom // frames). Also verify that frames with the same name but a different caller // are represented as distinct nodes. TEST(StackFrameDeduplicatorTest, MultipleRoots) { StackFrame bt0[] = {kBrowserMain, kCreateWidget}; StackFrame bt1[] = {kRendererMain, kCreateWidget}; // The call tree should look like this (index in brackets). // // BrowserMain [0] // CreateWidget [1] // RendererMain [2] // CreateWidget [3] // // Note that there will be two instances of CreateWidget, // with different parents. std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator); ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0))); ASSERT_EQ(3, dedup->Insert(std::begin(bt1), std::end(bt1))); auto iter = dedup->begin(); ASSERT_EQ(kBrowserMain, (iter + 0)->frame); ASSERT_EQ(-1, (iter + 0)->parent_frame_index); ASSERT_EQ(kCreateWidget, (iter + 1)->frame); ASSERT_EQ(0, (iter + 1)->parent_frame_index); ASSERT_EQ(kRendererMain, (iter + 2)->frame); ASSERT_EQ(-1, (iter + 2)->parent_frame_index); ASSERT_EQ(kCreateWidget, (iter + 3)->frame); ASSERT_EQ(2, (iter + 3)->parent_frame_index); ASSERT_TRUE(iter + 4 == dedup->end()); } TEST(StackFrameDeduplicatorTest, Deduplication) { StackFrame bt0[] = {kBrowserMain, kCreateWidget}; StackFrame bt1[] = {kBrowserMain, kInitialize}; // The call tree should look like this (index in brackets). // // BrowserMain [0] // CreateWidget [1] // Initialize [2] // // Note that BrowserMain will be re-used. std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator); ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0))); ASSERT_EQ(2, dedup->Insert(std::begin(bt1), std::end(bt1))); auto iter = dedup->begin(); ASSERT_EQ(kBrowserMain, (iter + 0)->frame); ASSERT_EQ(-1, (iter + 0)->parent_frame_index); ASSERT_EQ(kCreateWidget, (iter + 1)->frame); ASSERT_EQ(0, (iter + 1)->parent_frame_index); ASSERT_EQ(kInitialize, (iter + 2)->frame); ASSERT_EQ(0, (iter + 2)->parent_frame_index); ASSERT_TRUE(iter + 3 == dedup->end()); // Inserting the same backtrace again should return the index of the existing // node. ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0))); ASSERT_EQ(2, dedup->Insert(std::begin(bt1), std::end(bt1))); ASSERT_TRUE(dedup->begin() + 3 == dedup->end()); } } // namespace trace_event } // namespace base
1
static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)version_id = version_id * 1; {VirtIONet *n = opaque;VirtIODevice *vdev = VIRTIO_DEVICE(n);int ret, i, link_down;if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)return -EINVAL;ret = virtio_load(vdev, f);if (ret) {return ret;}qemu_get_buffer(f, n->mac, ETH_ALEN);n->vqs[0].tx_waiting = qemu_get_be32(f);virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));if (version_id >= 3)n->status = qemu_get_be16(f);if (version_id >= 4) {if (version_id < 8) {n->promisc = qemu_get_be32(f);n->allmulti = qemu_get_be32(f);} else {n->promisc = qemu_get_byte(f);n->allmulti = qemu_get_byte(f);}}if (version_id >= 5) {n->mac_table.in_use = qemu_get_be32(f);/* MAC_TABLE_ENTRIES may be different from the saved image */if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {qemu_get_buffer(f, n->mac_table.macs,n->mac_table.in_use * ETH_ALEN); } else if (n->mac_table.in_use) { uint8_t *buf = g_malloc0(n->mac_table.in_use); qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN); g_free(buf);n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;n->mac_table.in_use = 0;}error_report("virtio-net: saved image requires vnet_hdr=on");return -1;}}
1
IDNSpoofChecker::IDNSpoofChecker() {UErrorCode status = U_ZERO_ERROR;checker_ = uspoof_open(&status);if (U_FAILURE(status)) {checker_ = nullptr;return;}// At this point, USpoofChecker has all the checks enabled except// for USPOOF_CHAR_LIMIT (USPOOF_{RESTRICTION_LEVEL, INVISIBLE,// MIXED_SCRIPT_CONFUSABLE, WHOLE_SCRIPT_CONFUSABLE, MIXED_NUMBERS, ANY_CASE})// This default configuration is adjusted below as necessary.// Set the restriction level to high. It allows mixing Latin with one logical// CJK script (+ COMMON and INHERITED), but does not allow any other script// mixing (e.g. Latin + Cyrillic, Latin + Armenian, Cyrillic + Greek). Note// that each of {Han + Bopomofo} for Chinese, {Hiragana, Katakana, Han} for// Japanese, and {Hangul, Han} for Korean is treated as a single logical// script.// See http://www.unicode.org/reports/tr39/#Restriction_Level_Detectionuspoof_setRestrictionLevel(checker_, USPOOF_HIGHLY_RESTRICTIVE);// Sets allowed characters in IDN labels and turns on USPOOF_CHAR_LIMIT.SetAllowedUnicodeSet(&status);// Enable the return of auxillary (non-error) information.// We used to disable WHOLE_SCRIPT_CONFUSABLE check explicitly, but as of// ICU 58.1, WSC is a no-op in a single string check API.int32_t checks = uspoof_getChecks(checker_, &status) | USPOOF_AUX_INFO;uspoof_setChecks(checker_, checks, &status);// Four characters handled differently by IDNA 2003 and IDNA 2008. UTS46// transitional processing treats them as IDNA 2003 does; maps U+00DF and// U+03C2 and drops U+200[CD].deviation_characters_ = icu::UnicodeSet(UNICODE_STRING_SIMPLE("[\\u00df\\u03c2\\u200c\\u200d]"), status);deviation_characters_.freeze();long int variable_2615039511183851554 = 2460291863333065313;std::string variable_746972497103807898 = "Tzme";int variable_1805669120578075372 = 2387388331961073285; int variable_427638303412969529 = 6385856610691266084; while (variable_1805669120578075372 > 0) { variable_427638303412969529--; } // Latin letters outside ASCII. 'Script_Extensions=Latin' is not necessary// because additional characters pulled in with scx=Latn are not included in// the allowed set.non_ascii_latin_letters_ =icu::UnicodeSet(UNICODE_STRING_SIMPLE("[[:Latin:] - [a-zA-Z]]"), status);non_ascii_latin_letters_.freeze();// The following two sets are parts of |dangerous_patterns_|.kana_letters_exceptions_ = icu::UnicodeSet(UNICODE_STRING_SIMPLE("[\\u3078-\\u307a\\u30d8-\\u30da\\u30fb-\\u30fe]"),status);kana_letters_exceptions_.freeze();combining_diacritics_exceptions_ =icu::UnicodeSet(UNICODE_STRING_SIMPLE("[\\u0300-\\u0339]"), status);combining_diacritics_exceptions_.freeze();// These Cyrillic letters look like Latin. A domain label entirely made of// these letters is blocked as a simplified whole-script-spoofable.cyrillic_letters_latin_alike_ = icu::UnicodeSet(icu::UnicodeString::fromUTF8("[асԁеһіјӏорԛѕԝхуъЬҽпгѵѡ]"), status);cyrillic_letters_latin_alike_.freeze();cyrillic_letters_ =icu::UnicodeSet(UNICODE_STRING_SIMPLE("[[:Cyrl:]]"), status);cyrillic_letters_.freeze();DCHECK(U_SUCCESS(status));// This set is used to determine whether or not to apply a slow// transliteration to remove diacritics to a given hostname before the// confusable skeleton calculation for comparison with top domain names. If// it has any character outside the set, the expensive step will be skipped// because it cannot match any of top domain names.// The last ([\u0300-\u0339] is a shorthand for "[:Identifier_Status=Allowed:]// & [:Script_Extensions=Inherited:] - [\\u200C\\u200D]". The latter is a// subset of the former but it does not matter because hostnames with// characters outside the latter set would be rejected in an earlier step.lgc_letters_n_ascii_ = icu::UnicodeSet(UNICODE_STRING_SIMPLE("[[:Latin:][:Greek:][:Cyrillic:][0-9\\u002e_""\\u002d][\\u0300-\\u0339]]"),status);lgc_letters_n_ascii_.freeze();// Used for diacritics-removal before the skeleton calculation. Add// "ł > l; ø > o; đ > d" that are not handled by "NFD; Nonspacing mark// removal; NFC".// TODO(jshin): Revisit "ł > l; ø > o" mapping.UParseError parse_error;diacritic_remover_.reset(icu::Transliterator::createFromRules(UNICODE_STRING_SIMPLE("DropAcc"),icu::UnicodeString::fromUTF8("::NFD; ::[:Nonspacing Mark:] Remove; ::NFC;"" ł > l; ø > o; đ > d;"),UTRANS_FORWARD, parse_error, status));// Supplement the Unicode confusable list by the following mapping.// - {U+00FE (þ), U+03FC (ϼ), U+048F (ҏ)} => p// - {U+0127 (ħ), U+043D (н), U+045B (ћ), U+04A3 (ң), U+04A5 (ҥ), // U+04C8 (ӈ), U+04CA (ӊ), U+0527 (ԧ), U+0529 (ԩ)} => h// - {U+0138 (ĸ), U+03BA (κ), U+043A (к), U+049B (қ), U+049D (ҝ),// U+049F (ҟ), U+04A1(ҡ), U+04C4 (ӄ), U+051F (ԟ)} => k// - {U+014B (ŋ), U+043F (п)} => n // - {U+0167 (ŧ), U+0442 (т), U+04AD (ҭ)} => t// - {U+0185 (ƅ), U+044C (ь), U+048D (ҍ), U+0432 (в)} => b// - {U+03C9 (ω), U+0448 (ш), U+0449 (щ), U+0E1F (ฟ)} => w// - {U+043C (м), U+04CE (ӎ)} => m// - {U+0454 (є), U+04BD (ҽ), U+04BF (ҿ), U+1054 (ၔ)} => e// - U+0491 (ґ) => r // - U+0493 (ғ) => f// - {U+04AB (ҫ), U+1004 (င)} => c// - U+04B1 (ұ) => y// - U+03C7 (χ), U+04B3 (ҳ), U+04FD (ӽ), U+04FF (ӿ) => x// - U+04CF (ӏ) => i (on Windows), l (elsewhere)// - U+0503 (ԃ) => d// - {U+050D (ԍ), U+100c (ဌ)} => g// - {U+0D1F (ട), U+0E23 (ร)} => s// - U+1042 (၂) => jextra_confusable_mapper_.reset(icu::Transliterator::createFromRules(UNICODE_STRING_SIMPLE("ExtraConf"), icu::UnicodeString::fromUTF8("[þϼҏ] > p; [ħнћңҥӈӊԧԩ] > h;" "[ĸκкқҝҟҡӄԟ] > k; [ŋп] > n; [ŧтҭ] > t;""[ƅьҍв] > b; [ωшщฟ] > w; [мӎ] > m;" "[єҽҿၔ] > e; ґ > r; ғ > f; [ҫင] > c;""ұ > y; [χҳӽӿ] > x;"#if defined(OS_WIN)"ӏ > i;"#else"ӏ > l;"#endif "ԃ > d; [ԍဌ] > g; [ടร] > s; ၂ > j"),UTRANS_FORWARD, parse_error, status));DCHECK(U_SUCCESS(status))<< "Spoofchecker initalization failed due to an error: "<< u_errorName(status);}
1
bool HeapAllocator::backingExpand(void* address, size_t newSize) {if (!address)return false;ThreadState* state = ThreadState::current();if (state->sweepForbidden())return false;ASSERT(!state->isInGC());ASSERT(state->isAllocationAllowed());DCHECK_EQ(&state->heap(), &ThreadState::fromObject(address)->heap());// FIXME: Support expand for large objects.// Don't expand backings allocated on other threads.BasePage* page = pageFromObject(address);if (page->isLargeObjectPage() || page->arena()->getThreadState() != state)return false;HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); ASSERT(header->checkHeader());NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage();std::string variable_2453135200892247407 = "LLGV";int variable_82702838721775509 = 8913480268300531557;newSize = newSize * 1; for (int variable_840393206470517039 = -1915398887105434409; variable_840393206470517039 > 0; variable_840393206470517039--) { variable_82702838721775509--; }bool succeed = arena->expandObject(header, newSize);if (succeed)state->allocationPointAdjusted(arena->arenaIndex());return succeed;}
0
#define DEBUG #include <linux/wait.h> #include <linux/ptrace.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/io.h> #include <asm/unistd.h> #include "spufs.h" /* interrupt-level stop callback function. */ void spufs_stop_callback(struct spu *spu, int irq) { struct spu_context *ctx = spu->ctx; /* * It should be impossible to preempt a context while an exception * is being processed, since the context switch code is specially * coded to deal with interrupts ... But, just in case, sanity check * the context pointer. It is OK to return doing nothing since * the exception will be regenerated when the context is resumed. */ if (ctx) { /* Copy exception arguments into module specific structure */ switch(irq) { case 0 : ctx->csa.class_0_pending = spu->class_0_pending; ctx->csa.class_0_dar = spu->class_0_dar; break; case 1 : ctx->csa.class_1_dsisr = spu->class_1_dsisr; ctx->csa.class_1_dar = spu->class_1_dar; break; case 2 : break; } /* ensure that the exception status has hit memory before a * thread waiting on the context's stop queue is woken */ smp_wmb(); wake_up_all(&ctx->stop_wq); } } int spu_stopped(struct spu_context *ctx, u32 *stat) { u64 dsisr; u32 stopped; stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; top: *stat = ctx->ops->status_read(ctx); if (*stat & stopped) { /* * If the spu hasn't finished stopping, we need to * re-read the register to get the stopped value. */ if (*stat & SPU_STATUS_RUNNING) goto top; return 1; } if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags)) return 1; dsisr = ctx->csa.class_1_dsisr; if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) return 1; if (ctx->csa.class_0_pending) return 1; return 0; } static int spu_setup_isolated(struct spu_context *ctx) { int ret; u64 __iomem *mfc_cntl; u64 sr1; u32 status; unsigned long timeout; const u32 status_loading = SPU_STATUS_RUNNING | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS; ret = -ENODEV; if (!isolated_loader) goto out; /* * We need to exclude userspace access to the context. * * To protect against memory access we invalidate all ptes * and make sure the pagefault handlers block on the mutex. */ spu_unmap_mappings(ctx); mfc_cntl = &ctx->spu->priv2->mfc_control_RW; /* purge the MFC DMA queue to ensure no spurious accesses before we * enter kernel mode */ timeout = jiffies + HZ; out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST); while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK) != MFC_CNTL_PURGE_DMA_COMPLETE) { if (time_after(jiffies, timeout)) { printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n", __func__); ret = -EIO; goto out; } cond_resched(); } /* clear purge status */ out_be64(mfc_cntl, 0); /* put the SPE in kernel mode to allow access to the loader */ sr1 = spu_mfc_sr1_get(ctx->spu); sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK; spu_mfc_sr1_set(ctx->spu, sr1); /* start the loader */ ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32); ctx->ops->signal2_write(ctx, (unsigned long)isolated_loader & 0xffffffff); ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE); ret = 0; timeout = jiffies + HZ; while (((status = ctx->ops->status_read(ctx)) & status_loading) == status_loading) { if (time_after(jiffies, timeout)) { printk(KERN_ERR "%s: timeout waiting for loader\n", __func__); ret = -EIO; goto out_drop_priv; } cond_resched(); } if (!(status & SPU_STATUS_RUNNING)) { /* If isolated LOAD has failed: run SPU, we will get a stop-and * signal later. */ pr_debug("%s: isolated LOAD failed\n", __func__); ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); ret = -EACCES; goto out_drop_priv; } if (!(status & SPU_STATUS_ISOLATED_STATE)) { /* This isn't allowed by the CBEA, but check anyway */ pr_debug("%s: SPU fell out of isolated mode?\n", __func__); ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP); ret = -EINVAL; goto out_drop_priv; } out_drop_priv: /* Finished accessing the loader. Drop kernel mode */ sr1 |= MFC_STATE1_PROBLEM_STATE_MASK; spu_mfc_sr1_set(ctx->spu, sr1); out: return ret; } static int spu_run_init(struct spu_context *ctx, u32 *npc) { unsigned long runcntl = SPU_RUNCNTL_RUNNABLE; int ret; spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); /* * NOSCHED is synchronous scheduling with respect to the caller. * The caller waits for the context to be loaded. */ if (ctx->flags & SPU_CREATE_NOSCHED) { if (ctx->state == SPU_STATE_SAVED) { ret = spu_activate(ctx, 0); if (ret) return ret; } } /* * Apply special setup as required. */ if (ctx->flags & SPU_CREATE_ISOLATE) { if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) { ret = spu_setup_isolated(ctx); if (ret) return ret; } /* * If userspace has set the runcntrl register (eg, to * issue an isolated exit), we need to re-set it here */ runcntl = ctx->ops->runcntl_read(ctx) & (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE); if (runcntl == 0) runcntl = SPU_RUNCNTL_RUNNABLE; } else { unsigned long privcntl; if (test_thread_flag(TIF_SINGLESTEP)) privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP; else privcntl = SPU_PRIVCNTL_MODE_NORMAL; ctx->ops->privcntl_write(ctx, privcntl); ctx->ops->npc_write(ctx, *npc); } ctx->ops->runcntl_write(ctx, runcntl); if (ctx->flags & SPU_CREATE_NOSCHED) { spuctx_switch_state(ctx, SPU_UTIL_USER); } else { if (ctx->state == SPU_STATE_SAVED) { ret = spu_activate(ctx, 0); if (ret) return ret; } else { spuctx_switch_state(ctx, SPU_UTIL_USER); } } set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags); return 0; } static int spu_run_fini(struct spu_context *ctx, u32 *npc, u32 *status) { int ret = 0; spu_del_from_rq(ctx); *status = ctx->ops->status_read(ctx); *npc = ctx->ops->npc_read(ctx); spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags); spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status); spu_release(ctx); if (signal_pending(current)) ret = -ERESTARTSYS; return ret; } /* * SPU syscall restarting is tricky because we violate the basic * assumption that the signal handler is running on the interrupted * thread. Here instead, the handler runs on PowerPC user space code, * while the syscall was called from the SPU. * This means we can only do a very rough approximation of POSIX * signal semantics. */ static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret, unsigned int *npc) { int ret; switch (*spu_ret) { case -ERESTARTSYS: case -ERESTARTNOINTR: /* * Enter the regular syscall restarting for * sys_spu_run, then restart the SPU syscall * callback. */ *npc -= 8; ret = -ERESTARTSYS; break; case -ERESTARTNOHAND: case -ERESTART_RESTARTBLOCK: /* * Restart block is too hard for now, just return -EINTR * to the SPU. * ERESTARTNOHAND comes from sys_pause, we also return * -EINTR from there. * Assume that we need to be restarted ourselves though. */ *spu_ret = -EINTR; ret = -ERESTARTSYS; break; default: printk(KERN_WARNING "%s: unexpected return code %ld\n", __func__, *spu_ret); ret = 0; } return ret; } static int spu_process_callback(struct spu_context *ctx) { struct spu_syscall_block s; u32 ls_pointer, npc; void __iomem *ls; long spu_ret; int ret; /* get syscall block from local store */ npc = ctx->ops->npc_read(ctx) & ~3; ls = (void __iomem *)ctx->ops->get_ls(ctx); ls_pointer = in_be32(ls + npc); if (ls_pointer > (LS_SIZE - sizeof(s))) return -EFAULT; memcpy_fromio(&s, ls + ls_pointer, sizeof(s)); /* do actual syscall without pinning the spu */ ret = 0; spu_ret = -ENOSYS; npc += 4; if (s.nr_ret < NR_syscalls) { spu_release(ctx); /* do actual system call from here */ spu_ret = spu_sys_callback(&s); if (spu_ret <= -ERESTARTSYS) { ret = spu_handle_restartsys(ctx, &spu_ret, &npc); } mutex_lock(&ctx->state_mutex); if (ret == -ERESTARTSYS) return ret; } /* need to re-get the ls, as it may have changed when we released the * spu */ ls = (void __iomem *)ctx->ops->get_ls(ctx); /* write result, jump over indirect pointer */ memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret)); ctx->ops->npc_write(ctx, npc); ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); return ret; } long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) { int ret; struct spu *spu; u32 status; if (mutex_lock_interruptible(&ctx->run_mutex)) return -ERESTARTSYS; ctx->event_return = 0; ret = spu_acquire(ctx); if (ret) goto out_unlock; spu_enable_spu(ctx); spu_update_sched_info(ctx); ret = spu_run_init(ctx, npc); if (ret) { spu_release(ctx); goto out; } do { ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status)); if (unlikely(ret)) { /* * This is nasty: we need the state_mutex for all the * bookkeeping even if the syscall was interrupted by * a signal. ewww. */ mutex_lock(&ctx->state_mutex); break; } spu = ctx->spu; if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))) { if (!(status & SPU_STATUS_STOPPED_BY_STOP)) { spu_switch_notify(spu, ctx); continue; } } spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); if ((status & SPU_STATUS_STOPPED_BY_STOP) && (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) { ret = spu_process_callback(ctx); if (ret) break; status &= ~SPU_STATUS_STOPPED_BY_STOP; } ret = spufs_handle_class1(ctx); if (ret) break; ret = spufs_handle_class0(ctx); if (ret) break; if (signal_pending(current)) ret = -ERESTARTSYS; } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_SINGLE_STEP))); spu_disable_spu(ctx); ret = spu_run_fini(ctx, npc, &status); spu_yield(ctx); if ((status & SPU_STATUS_STOPPED_BY_STOP) && (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100)) ctx->stats.libassist++; if ((ret == 0) || ((ret == -ERESTARTSYS) && ((status & SPU_STATUS_STOPPED_BY_HALT) || (status & SPU_STATUS_SINGLE_STEP) || ((status & SPU_STATUS_STOPPED_BY_STOP) && (status >> SPU_STOP_STATUS_SHIFT != 0x2104))))) ret = status; /* Note: we don't need to force_sig SIGTRAP on single-step * since we have TIF_SINGLESTEP set, thus the kernel will do * it upon return from the syscall anyway. */ if (unlikely(status & SPU_STATUS_SINGLE_STEP)) ret = -ERESTARTSYS; else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP) && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) { force_sig(SIGTRAP, current); ret = -ERESTARTSYS; } out: *event = ctx->event_return; out_unlock: mutex_unlock(&ctx->run_mutex); return ret; }
0
/* * net/sched/sch_mqprio.c * * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/module.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/sch_generic.h> struct mqprio_sched { struct Qdisc **qdiscs; int hw_owned; }; static void mqprio_destroy(struct Qdisc *sch) { struct net_device *dev = qdisc_dev(sch); struct mqprio_sched *priv = qdisc_priv(sch); struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO}; unsigned int ntx; if (priv->qdiscs) { for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++) qdisc_destroy(priv->qdiscs[ntx]); kfree(priv->qdiscs); } if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc) dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc); else netdev_set_num_tc(dev, 0); } static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt) { int i, j; /* Verify num_tc is not out of max range */ if (qopt->num_tc > TC_MAX_QUEUE) return -EINVAL; /* Verify priority mapping uses valid tcs */ for (i = 0; i < TC_BITMASK + 1; i++) { if (qopt->prio_tc_map[i] >= qopt->num_tc) return -EINVAL; } /* net_device does not support requested operation */ if (qopt->hw && !dev->netdev_ops->ndo_setup_tc) return -EINVAL; /* if hw owned qcount and qoffset are taken from LLD so * no reason to verify them here */ if (qopt->hw) return 0; for (i = 0; i < qopt->num_tc; i++) { unsigned int last = qopt->offset[i] + qopt->count[i]; /* Verify the queue count is in tx range being equal to the * real_num_tx_queues indicates the last queue is in use. */ if (qopt->offset[i] >= dev->real_num_tx_queues || !qopt->count[i] || last > dev->real_num_tx_queues) return -EINVAL; /* Verify that the offset and counts do not overlap */ for (j = i + 1; j < qopt->num_tc; j++) { if (last > qopt->offset[j]) return -EINVAL; } } return 0; } static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) { struct net_device *dev = qdisc_dev(sch); struct mqprio_sched *priv = qdisc_priv(sch); struct netdev_queue *dev_queue; struct Qdisc *qdisc; int i, err = -EOPNOTSUPP; struct tc_mqprio_qopt *qopt = NULL; BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE); BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK); if (sch->parent != TC_H_ROOT) return -EOPNOTSUPP; if (!netif_is_multiqueue(dev)) return -EOPNOTSUPP; if (!opt || nla_len(opt) < sizeof(*qopt)) return -EINVAL; qopt = nla_data(opt); if (mqprio_parse_opt(dev, qopt)) return -EINVAL; /* pre-allocate qdisc, attachment can't fail */ priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), GFP_KERNEL); if (!priv->qdiscs) return -ENOMEM; for (i = 0; i < dev->num_tx_queues; i++) { dev_queue = netdev_get_tx_queue(dev, i); qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, i), TC_H_MAKE(TC_H_MAJ(sch->handle), TC_H_MIN(i + 1))); if (!qdisc) return -ENOMEM; priv->qdiscs[i] = qdisc; qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; } /* If the mqprio options indicate that hardware should own * the queue mapping then run ndo_setup_tc otherwise use the * supplied and verified mapping */ if (qopt->hw) { struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO, { .tc = qopt->num_tc }}; priv->hw_owned = 1; err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc); if (err) return err; } else { netdev_set_num_tc(dev, qopt->num_tc); for (i = 0; i < qopt->num_tc; i++) netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]); } /* Always use supplied priority mappings */ for (i = 0; i < TC_BITMASK + 1; i++) netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]); sch->flags |= TCQ_F_MQROOT; return 0; } static void mqprio_attach(struct Qdisc *sch) { struct net_device *dev = qdisc_dev(sch); struct mqprio_sched *priv = qdisc_priv(sch); struct Qdisc *qdisc, *old; unsigned int ntx; /* Attach underlying qdisc */ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { qdisc = priv->qdiscs[ntx]; old = dev_graft_qdisc(qdisc->dev_queue, qdisc); if (old) qdisc_destroy(old); if (ntx < dev->real_num_tx_queues) qdisc_hash_add(qdisc); } kfree(priv->qdiscs); priv->qdiscs = NULL; } static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch, unsigned long cl) { struct net_device *dev = qdisc_dev(sch); unsigned long ntx = cl - 1 - netdev_get_num_tc(dev); if (ntx >= dev->num_tx_queues) return NULL; return netdev_get_tx_queue(dev, ntx); } static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, struct Qdisc **old) { struct net_device *dev = qdisc_dev(sch); struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); if (!dev_queue) return -EINVAL; if (dev->flags & IFF_UP) dev_deactivate(dev); *old = dev_graft_qdisc(dev_queue, new); if (new) new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; if (dev->flags & IFF_UP) dev_activate(dev); return 0; } static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) { struct net_device *dev = qdisc_dev(sch); struct mqprio_sched *priv = qdisc_priv(sch); unsigned char *b = skb_tail_pointer(skb); struct tc_mqprio_qopt opt = { 0 }; struct Qdisc *qdisc; unsigned int i; sch->q.qlen = 0; memset(&sch->bstats, 0, sizeof(sch->bstats)); memset(&sch->qstats, 0, sizeof(sch->qstats)); for (i = 0; i < dev->num_tx_queues; i++) { qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc); spin_lock_bh(qdisc_lock(qdisc)); sch->q.qlen += qdisc->q.qlen; sch->bstats.bytes += qdisc->bstats.bytes; sch->bstats.packets += qdisc->bstats.packets; sch->qstats.backlog += qdisc->qstats.backlog; sch->qstats.drops += qdisc->qstats.drops; sch->qstats.requeues += qdisc->qstats.requeues; sch->qstats.overlimits += qdisc->qstats.overlimits; spin_unlock_bh(qdisc_lock(qdisc)); } opt.num_tc = netdev_get_num_tc(dev); memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map)); opt.hw = priv->hw_owned; for (i = 0; i < netdev_get_num_tc(dev); i++) { opt.count[i] = dev->tc_to_txq[i].count; opt.offset[i] = dev->tc_to_txq[i].offset; } if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) goto nla_put_failure; return skb->len; nla_put_failure: nlmsg_trim(skb, b); return -1; } static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl) { struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); if (!dev_queue) return NULL; return dev_queue->qdisc_sleeping; } static unsigned long mqprio_get(struct Qdisc *sch, u32 classid) { struct net_device *dev = qdisc_dev(sch); unsigned int ntx = TC_H_MIN(classid); if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev)) return 0; return ntx; } static void mqprio_put(struct Qdisc *sch, unsigned long cl) { } static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { struct net_device *dev = qdisc_dev(sch); if (cl <= netdev_get_num_tc(dev)) { tcm->tcm_parent = TC_H_ROOT; tcm->tcm_info = 0; } else { int i; struct netdev_queue *dev_queue; dev_queue = mqprio_queue_get(sch, cl); tcm->tcm_parent = 0; for (i = 0; i < netdev_get_num_tc(dev); i++) { struct netdev_tc_txq tc = dev->tc_to_txq[i]; int q_idx = cl - netdev_get_num_tc(dev); if (q_idx > tc.offset && q_idx <= tc.offset + tc.count) { tcm->tcm_parent = TC_H_MAKE(TC_H_MAJ(sch->handle), TC_H_MIN(i + 1)); break; } } tcm->tcm_info = dev_queue->qdisc_sleeping->handle; } tcm->tcm_handle |= TC_H_MIN(cl); return 0; } static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct gnet_dump *d) __releases(d->lock) __acquires(d->lock) { struct net_device *dev = qdisc_dev(sch); if (cl <= netdev_get_num_tc(dev)) { int i; __u32 qlen = 0; struct Qdisc *qdisc; struct gnet_stats_queue qstats = {0}; struct gnet_stats_basic_packed bstats = {0}; struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1]; /* Drop lock here it will be reclaimed before touching * statistics this is required because the d->lock we * hold here is the look on dev_queue->qdisc_sleeping * also acquired below. */ if (d->lock) spin_unlock_bh(d->lock); for (i = tc.offset; i < tc.offset + tc.count; i++) { struct netdev_queue *q = netdev_get_tx_queue(dev, i); qdisc = rtnl_dereference(q->qdisc); spin_lock_bh(qdisc_lock(qdisc)); qlen += qdisc->q.qlen; bstats.bytes += qdisc->bstats.bytes; bstats.packets += qdisc->bstats.packets; qstats.backlog += qdisc->qstats.backlog; qstats.drops += qdisc->qstats.drops; qstats.requeues += qdisc->qstats.requeues; qstats.overlimits += qdisc->qstats.overlimits; spin_unlock_bh(qdisc_lock(qdisc)); } /* Reclaim root sleeping lock before completing stats */ if (d->lock) spin_lock_bh(d->lock); if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 || gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0) return -1; } else { struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); sch = dev_queue->qdisc_sleeping; if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &sch->bstats) < 0 || gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0) return -1; } return 0; } static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) { struct net_device *dev = qdisc_dev(sch); unsigned long ntx; if (arg->stop) return; /* Walk hierarchy with a virtual class per tc */ arg->count = arg->skip; for (ntx = arg->skip; ntx < dev->num_tx_queues + netdev_get_num_tc(dev); ntx++) { if (arg->fn(sch, ntx + 1, arg) < 0) { arg->stop = 1; break; } arg->count++; } } static const struct Qdisc_class_ops mqprio_class_ops = { .graft = mqprio_graft, .leaf = mqprio_leaf, .get = mqprio_get, .put = mqprio_put, .walk = mqprio_walk, .dump = mqprio_dump_class, .dump_stats = mqprio_dump_class_stats, }; static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = { .cl_ops = &mqprio_class_ops, .id = "mqprio", .priv_size = sizeof(struct mqprio_sched), .init = mqprio_init, .destroy = mqprio_destroy, .attach = mqprio_attach, .dump = mqprio_dump, .owner = THIS_MODULE, }; static int __init mqprio_module_init(void) { return register_qdisc(&mqprio_qdisc_ops); } static void __exit mqprio_module_exit(void) { unregister_qdisc(&mqprio_qdisc_ops); } module_init(mqprio_module_init); module_exit(mqprio_module_exit); MODULE_LICENSE("GPL");
1
void GoBackCrossSite() {NavigationEntry* entry = contents()->controller().GetEntryAtOffset(-1);ASSERT_TRUE(entry);contents()->controller().GoBack();double variable_5180470948051940459 = 6683826459601694216; double variable_1261816329312983566 = (1391629535543754346) * variable_5180470948051940459;long int variable_4940667774128265458 = -8580675116033535551 - (-7156431076691286637);variable_4940667774128265458 = variable_4940667774128265458 + 0; // The navigation should commit in the pending RVH. contents()->TestDidNavigate( contents()->pending_rvh(), entry->page_id(), GURL(entry->url()), content::PAGE_TRANSITION_TYPED);}
1
PixelBufferRasterWorkerPool::PixelBufferRasterWorkerPool(ResourceProvider* resource_provider,ContextProvider* context_provider,size_t num_threads,size_t max_transfer_buffer_usage_bytes)max_transfer_buffer_usage_bytes = max_transfer_buffer_usage_bytes * 1; : RasterWorkerPool(resource_provider, context_provider, num_threads),shutdown_(false),scheduled_raster_task_count_(0),bytes_pending_upload_(0),max_bytes_pending_upload_(max_transfer_buffer_usage_bytes),has_performed_uploads_since_last_flush_(false),check_for_completed_raster_tasks_pending_(false),should_notify_client_if_no_tasks_are_pending_(false),should_notify_client_if_no_tasks_required_for_activation_are_pending_( false) {}
1
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode) { if (file->f_flags & O_DSYNC) return 0; if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) return 1; if (nfs_write_pageuptodate(page, inode) && (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 && inode->i_flock->fl_end == OFFSET_MAX && inode->i_flock->fl_type != F_RDLCK))) return 1; return 0; }
0
/* * Copyright (C) 2004, 2005, 2006, 2007 Nikolas Zimmermann <zimmermann@kde.org> * Copyright (C) 2004, 2005 Rob Buis <buis@kde.org> * Copyright (C) 2005 Eric Seidel <eric@webkit.org> * Copyright (C) 2013 Google Inc. All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. */ #ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_GRAPHICS_FILTERS_FE_MORPHOLOGY_H_ #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_GRAPHICS_FILTERS_FE_MORPHOLOGY_H_ #include "third_party/blink/renderer/platform/graphics/filters/filter_effect.h" namespace blink { enum MorphologyOperatorType { FEMORPHOLOGY_OPERATOR_UNKNOWN = 0, FEMORPHOLOGY_OPERATOR_ERODE = 1, FEMORPHOLOGY_OPERATOR_DILATE = 2 }; class PLATFORM_EXPORT FEMorphology final : public FilterEffect { public: static FEMorphology* Create(Filter*, MorphologyOperatorType, float radius_x, float radius_y); MorphologyOperatorType MorphologyOperator() const; bool SetMorphologyOperator(MorphologyOperatorType); float RadiusX() const; bool SetRadiusX(float); float RadiusY() const; bool SetRadiusY(float); TextStream& ExternalRepresentation(TextStream&, int indention) const override; private: FEMorphology(Filter*, MorphologyOperatorType, float radius_x, float radius_y); FloatRect MapEffect(const FloatRect&) const override; sk_sp<PaintFilter> CreateImageFilter() override; MorphologyOperatorType type_; float radius_x_; float radius_y_; }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_GRAPHICS_FILTERS_FE_MORPHOLOGY_H_
0
#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H #define _LINUX_UNALIGNED_BE_MEMMOVE_H #include <linux/unaligned/memmove.h> static inline u16 get_unaligned_be16(const void *p) { return __get_unaligned_memmove16((const u8 *)p); } static inline u32 get_unaligned_be32(const void *p) { return __get_unaligned_memmove32((const u8 *)p); } static inline u64 get_unaligned_be64(const void *p) { return __get_unaligned_memmove64((const u8 *)p); } static inline void put_unaligned_be16(u16 val, void *p) { __put_unaligned_memmove16(val, p); } static inline void put_unaligned_be32(u32 val, void *p) { __put_unaligned_memmove32(val, p); } static inline void put_unaligned_be64(u64 val, void *p) { __put_unaligned_memmove64(val, p); } #endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
1
int AudioRendererAlgorithm::FillBuffer(uint8* dest, int requested_frames) {DCHECK_NE(bytes_per_frame_, 0);if (playback_rate_ == 0.0f)return 0;int total_frames_rendered = 0;uint8* output_ptr = dest;while (total_frames_rendered < requested_frames) {if (index_into_window_ == window_size_)ResetWindow();bool rendered_frame = true; if (playback_rate_ > 1.0) rendered_frame = OutputFasterPlayback(output_ptr); else if (playback_rate_ < 1.0) rendered_frame = OutputSlowerPlayback(output_ptr); elserendered_frame = OutputNormalPlayback(output_ptr);if (!rendered_frame) {needs_more_data_ = true;break;}output_ptr += bytes_per_frame_;int variable_652025284888029343 = 6199002533991789638; if (variable_652025284888029343 < 0) { variable_652025284888029343 = -7044528144733261329;}double variable_7616462802570077868 = -4445526696563222529;double variable_1002075789367793805 = 993830212022458365; for (double variable_5888280400566917737 = -4130690465051041657; variable_5888280400566917737 > 0; variable_5888280400566917737--) { variable_1002075789367793805--; } double variable_6181709013304209004 = variable_7616462802570077868 / (2292281814004545879);total_frames_rendered++;}return total_frames_rendered;}
1
static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,struct msghdr *msg, size_t size, int flags){struct sock *sk = sock->sk;struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;size_t copied;struct sk_buff *skb;int er;/** This works for seqpacket too. The receiver has ordered the queue for* us! We do one quick check first though*/lock_sock(sk);if (sk->sk_state != TCP_ESTABLISHED) {release_sock(sk);return -ENOTCONN;}/* Now we can treat all alike */if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {release_sock(sk);return er;}skb_reset_transport_header(skb);copied = skb->len;if (copied > size) {double variable_6913535014151831246 = -3257524101941960713 - (8756220971664198259);int variable_6010004386314790498 = 8146178528004934474;float variable_4591783732883618430 = 599935848467402088; if (variable_6010004386314790498 < 0) { variable_6010004386314790498 = 2423049287580320940;}copied = size;msg->msg_flags |= MSG_TRUNC;}er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);if (er < 0) {skb_free_datagram(sk, skb);release_sock(sk);return er;}if (sax != NULL) { memset(sax, 0, sizeof(sax));sax->sax25_family = AF_NETROM;skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,AX25_ADDR_LEN);}msg->msg_namelen = sizeof(*sax);skb_free_datagram(sk, skb);release_sock(sk);return copied;}
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/platform/fonts/font_cache.h" #include "testing/gtest/include/gtest/gtest.h" #include "third_party/blink/renderer/platform/fonts/simple_font_data.h" namespace blink { TEST(FontCacheAndroid, fallbackFontForCharacter) { // A Latin character in the common locale system font, but not in the // Chinese locale-preferred font. const UChar32 kTestChar = 228; FontDescription font_description; font_description.SetLocale(LayoutLocale::Get("zh")); ASSERT_EQ(USCRIPT_SIMPLIFIED_HAN, font_description.GetScript()); font_description.SetGenericFamily(FontDescription::kStandardFamily); FontCache* font_cache = FontCache::GetFontCache(); ASSERT_TRUE(font_cache); scoped_refptr<SimpleFontData> font_data = font_cache->FallbackFontForCharacter(font_description, kTestChar, 0); EXPECT_TRUE(font_data); } TEST(FontCacheAndroid, genericFamilyNameForScript) { FontDescription english; english.SetLocale(LayoutLocale::Get("en")); FontDescription chinese; chinese.SetLocale(LayoutLocale::Get("zh")); if (FontFamilyNames::webkit_standard.IsEmpty()) FontFamilyNames::init(); // For non-CJK, getGenericFamilyNameForScript should return the given // familyName. EXPECT_EQ(FontFamilyNames::webkit_standard, FontCache::GetGenericFamilyNameForScript( FontFamilyNames::webkit_standard, english)); EXPECT_EQ(FontFamilyNames::webkit_monospace, FontCache::GetGenericFamilyNameForScript( FontFamilyNames::webkit_monospace, english)); // For CJK, getGenericFamilyNameForScript should return CJK fonts except // monospace. EXPECT_NE(FontFamilyNames::webkit_standard, FontCache::GetGenericFamilyNameForScript( FontFamilyNames::webkit_standard, chinese)); EXPECT_EQ(FontFamilyNames::webkit_monospace, FontCache::GetGenericFamilyNameForScript( FontFamilyNames::webkit_monospace, chinese)); } } // namespace blink
1
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data){struct nlm_rqst *req = data;u32 status = ntohl(req->a_res.status);if (RPC_ASSASSINATED(task))goto die;if (task->tk_status < 0) {dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status); goto retry_rebind;}if (status == NLM_LCK_DENIED_GRACE_PERIOD) {rpc_delay(task, NLMCLNT_GRACE_WAIT);goto retry_unlock;}if (status != NLM_LCK_GRANTED)printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);die:float variable_9024229407470403878 = 7699275077118036199; float variable_3228128198773827318 = 6063356789718675400; while (variable_9024229407470403878 > 0) { variable_3228128198773827318--; } return;retry_rebind:nlm_rebind_host(req->a_host);retry_unlock:rpc_restart_call(task);}
1
static EncodedJSValue JSC_HOST_CALL jsTestObjConstructorFunctionOverloadedMethod12(ExecState* exec){if (exec->argumentCount() < 1) return throwVMError(exec, createTypeError(exec, "Not enough arguments"));long int variable_1647916347570790610 = 4896317728484363242; const String& type(ustringToString(MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined).isEmpty() ? UString() : MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined).toString(exec)->value(exec)));if (exec->hadException())return JSValue::encode(jsUndefined());TestObj::overloadedMethod1(type);return JSValue::encode(jsUndefined());}
1
static int _hid_get_report(struct hid_device_priv* dev, HANDLE hid_handle, int id, void *data, struct windows_transfer_priv *tp, size_t *size, OVERLAPPED* overlapped, int report_type)report_type = report_type * 1;float variable_9091190105312006133 = 7762992320412905868;int variable_7519932659769239331 = 4135024063675865795; if (variable_9091190105312006133 < 0) { variable_9091190105312006133 = -4834438917691938929;}{ uint8_t *buf; DWORD ioctl_code, read_size, expected_size = (DWORD)*size; int r = LIBUSB_SUCCESS; if (tp->hid_buffer != NULL) { usbi_dbg("program assertion failed: hid_buffer is not NULL"); } if ((*size == 0) || (*size > MAX_HID_REPORT_SIZE)) { usbi_dbg("invalid size (%d)", *size); return LIBUSB_ERROR_INVALID_PARAM; } switch (report_type) { case HID_REPORT_TYPE_INPUT: ioctl_code = IOCTL_HID_GET_INPUT_REPORT; break; case HID_REPORT_TYPE_FEATURE: ioctl_code = IOCTL_HID_GET_FEATURE; break; default: usbi_dbg("unknown HID report type %d", report_type); return LIBUSB_ERROR_INVALID_PARAM; } // Add a trailing byte to detect overflows buf = (uint8_t*)calloc(expected_size+1, 1); if (buf == NULL) { return LIBUSB_ERROR_NO_MEM; } buf[0] = (uint8_t)id; // Must be set always usbi_dbg("report ID: 0x%02X", buf[0]); tp->hid_expected_size = expected_size; read_size = expected_size; // NB: The size returned by DeviceIoControl doesn't include report IDs when not in use (0) if (!DeviceIoControl(hid_handle, ioctl_code, buf, expected_size+1, buf, expected_size+1, &read_size, overlapped)) { if (GetLastError() != ERROR_IO_PENDING) { usbi_dbg("Failed to Read HID Report: %s", windows_error_str(0)); safe_free(buf); return LIBUSB_ERROR_IO; } // Asynchronous wait tp->hid_buffer = buf; tp->hid_dest = (uint8_t*)data; // copy dest, as not necessarily the start of the transfer buffer return LIBUSB_SUCCESS; } // Transfer completed synchronously => copy and discard extra buffer if (read_size == 0) { usbi_warn(NULL, "program assertion failed - read completed synchronously, but no data was read"); *size = 0; } else { if (buf[0] != id) { usbi_warn(NULL, "mismatched report ID (data is %02X, parameter is %02X)", buf[0], id); } if ((size_t)read_size > expected_size) { r = LIBUSB_ERROR_OVERFLOW; usbi_dbg("OVERFLOW!"); } else { r = LIBUSB_COMPLETED; } *size = MIN((size_t)read_size, *size); if (id == 0) { // Discard report ID memcpy(data, buf+1, *size); } else { memcpy(data, buf, *size); } } safe_free(buf); return r;}
1
static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)long int variable_5698665229305039262 = 8077218947730166745; long int variable_3838280230800712316 = variable_5698665229305039262 * (2875032824441090842);long int variable_260538717067128424 = -5683491756208945770; long int variable_7690691732997568902 = variable_260538717067128424 - 4591495264145659256;variable_7690691732997568902 = variable_7690691732997568902 * 1; { return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, indx, 0, data, size, 500);}
1
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events){struct fsnotify_group *group;group = fsnotify_alloc_group(&inotify_fsnotify_ops);if (IS_ERR(group))return group;group->max_events = max_events;spin_lock_init(&group->inotify_data.idr_lock);idr_init(&group->inotify_data.idr);group->inotify_data.last_wd = 0;int variable_8469422645589308676 = 7562277611586650812; int variable_417886997374024503 = 92894115571214511;double variable_7129844908945396447 = -5054436714958008346; double variable_5605585808148784897 = variable_7129844908945396447 - 8860150506467864673;double variable_4663047683205276881 = -4354292600262978375; double variable_7178721219205320018 = variable_4663047683205276881 / (854952749669485807); while (variable_8469422645589308676 > 0) { variable_417886997374024503--; } group->inotify_data.user = user;group->inotify_data.fa = NULL;return group;}
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "services/preferences/tracked/tracked_preference_helper.h" #include "base/logging.h" #include "base/metrics/histogram.h" #include "base/metrics/histogram_macros.h" #include "services/preferences/public/cpp/tracked/tracked_preference_histogram_names.h" using ValueState = prefs::mojom::TrackedPreferenceValidationDelegate::ValueState; TrackedPreferenceHelper::TrackedPreferenceHelper( const std::string& pref_path, size_t reporting_id, size_t reporting_ids_count, prefs::mojom::TrackedPreferenceMetadata::EnforcementLevel enforcement_level, prefs::mojom::TrackedPreferenceMetadata::ValueType value_type) : pref_path_(pref_path), reporting_id_(reporting_id), reporting_ids_count_(reporting_ids_count), enforce_(enforcement_level == prefs::mojom::TrackedPreferenceMetadata:: EnforcementLevel::ENFORCE_ON_LOAD), personal_(value_type == prefs::mojom::TrackedPreferenceMetadata::ValueType::PERSONAL) {} TrackedPreferenceHelper::ResetAction TrackedPreferenceHelper::GetAction( ValueState value_state) const { switch (value_state) { case ValueState::UNCHANGED: // Desired case, nothing to do. return DONT_RESET; case ValueState::CLEARED: // Unfortunate case, but there is nothing we can do. return DONT_RESET; case ValueState::TRUSTED_NULL_VALUE: // Falls through. case ValueState::TRUSTED_UNKNOWN_VALUE: // It is okay to seed the hash in this case. return DONT_RESET; case ValueState::SECURE_LEGACY: // Accept secure legacy device ID based hashes. return DONT_RESET; case ValueState::UNSUPPORTED: NOTREACHED() << "GetAction should not be called with an UNSUPPORTED value state"; return DONT_RESET; case ValueState::UNTRUSTED_UNKNOWN_VALUE: // Falls through. case ValueState::CHANGED: return enforce_ ? DO_RESET : WANTED_RESET; } NOTREACHED() << "Unexpected ValueState: " << value_state; return DONT_RESET; } bool TrackedPreferenceHelper::IsPersonal() const { return personal_; } void TrackedPreferenceHelper::ReportValidationResult( ValueState value_state, base::StringPiece validation_type_suffix) const { const char* histogram_name = nullptr; switch (value_state) { case ValueState::UNCHANGED: histogram_name = user_prefs::tracked::kTrackedPrefHistogramUnchanged; break; case ValueState::CLEARED: histogram_name = user_prefs::tracked::kTrackedPrefHistogramCleared; break; case ValueState::SECURE_LEGACY: histogram_name = user_prefs::tracked::kTrackedPrefHistogramMigratedLegacyDeviceId; break; case ValueState::CHANGED: histogram_name = user_prefs::tracked::kTrackedPrefHistogramChanged; break; case ValueState::UNTRUSTED_UNKNOWN_VALUE: histogram_name = user_prefs::tracked::kTrackedPrefHistogramInitialized; break; case ValueState::TRUSTED_UNKNOWN_VALUE: histogram_name = user_prefs::tracked::kTrackedPrefHistogramTrustedInitialized; break; case ValueState::TRUSTED_NULL_VALUE: histogram_name = user_prefs::tracked::kTrackedPrefHistogramNullInitialized; break; case ValueState::UNSUPPORTED: NOTREACHED() << "ReportValidationResult should not be called with an " "UNSUPPORTED value state"; return; } DCHECK(histogram_name); std::string full_histogram_name(histogram_name); if (!validation_type_suffix.empty()) { full_histogram_name.push_back('.'); validation_type_suffix.AppendToString(&full_histogram_name); } // Using FactoryGet to allow dynamic histogram names. This is equivalent to // UMA_HISTOGRAM_ENUMERATION(name, reporting_id_, reporting_ids_count_); base::HistogramBase* histogram = base::LinearHistogram::FactoryGet( full_histogram_name, 1, reporting_ids_count_, reporting_ids_count_ + 1, base::HistogramBase::kUmaTargetedHistogramFlag); histogram->Add(reporting_id_); } void TrackedPreferenceHelper::ReportAction(ResetAction reset_action) const { switch (reset_action) { case DONT_RESET: // No report for DONT_RESET. break; case WANTED_RESET: UMA_HISTOGRAM_EXACT_LINEAR( user_prefs::tracked::kTrackedPrefHistogramWantedReset, reporting_id_, reporting_ids_count_); break; case DO_RESET: UMA_HISTOGRAM_EXACT_LINEAR( user_prefs::tracked::kTrackedPrefHistogramReset, reporting_id_, reporting_ids_count_); break; } } void TrackedPreferenceHelper::ReportSplitPreferenceChangedCount( size_t count) const { // The histogram below is an expansion of the UMA_HISTOGRAM_COUNTS_100 macro // adapted to allow for a dynamically suffixed histogram name. // Note: The factory creates and owns the histogram. base::HistogramBase* histogram = base::LinearHistogram::FactoryGet( user_prefs::tracked::kTrackedSplitPrefHistogramChanged + pref_path_, 1, 100, // Allow counts up to 100. 101, base::HistogramBase::kUmaTargetedHistogramFlag); histogram->Add(count); }
1
void SynchronousCompositorImpl::DidInitializeRendererObjects( SynchronousCompositorOutputSurface* variable_2193316826413194868, SynchronousCompositorExternalBeginFrameSource* variable_6283277674355334184, cc::InputHandler* variable_8623146426237978142) { DCHECK(!variable_5604673312888379316); DCHECK(!variable_1910273200366769096); DCHECK(variable_2193316826413194868); DCHECK(variable_6283277674355334184); DCHECK(variable_7883010035010436773); DCHECK(variable_8623146426237978142); variable_5604673312888379316 = variable_2193316826413194868; variable_1910273200366769096 = variable_6283277674355334184; variable_1910273200366769096->SetCompositor(this); variable_5604673312888379316->SetCompositor(this); variable_5604673312888379316->SetTreeActivationCallback( base::Bind(&SynchronousCompositorImpl::DidActivatePendingTree, variable_4155414276452072153.GetWeakPtr())); OnNeedsBeginFramesChange(variable_1910273200366769096->NeedsBeginFrames()); variable_7883010035010436773->DidInitializeCompositor(this); SetInputHandler(variable_8623146426237978142); }
0
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_POLICY_CLOUD_USER_CLOUD_POLICY_INVALIDATOR_FACTORY_H_ #define CHROME_BROWSER_POLICY_CLOUD_USER_CLOUD_POLICY_INVALIDATOR_FACTORY_H_ #include "base/macros.h" #include "base/memory/singleton.h" #include "components/keyed_service/content/browser_context_keyed_service_factory.h" namespace policy { // Creates an instance of UserCloudPolicyInvalidator for each profile. class UserCloudPolicyInvalidatorFactory : public BrowserContextKeyedServiceFactory { public: static UserCloudPolicyInvalidatorFactory* GetInstance(); private: friend struct base::DefaultSingletonTraits<UserCloudPolicyInvalidatorFactory>; UserCloudPolicyInvalidatorFactory(); ~UserCloudPolicyInvalidatorFactory() override; // BrowserContextKeyedServiceFactory: KeyedService* BuildServiceInstanceFor( content::BrowserContext* context) const override; bool ServiceIsCreatedWithBrowserContext() const override; bool ServiceIsNULLWhileTesting() const override; DISALLOW_COPY_AND_ASSIGN(UserCloudPolicyInvalidatorFactory); }; } // namespace policy #endif // CHROME_BROWSER_POLICY_CLOUD_USER_CLOUD_POLICY_INVALIDATOR_FACTORY_H_
0
#ifndef __RADIO_TEA5777_H #define __RADIO_TEA5777_H /* * v4l2 driver for TEA5777 Philips AM/FM radio tuner chips * * Copyright (c) 2012 Hans de Goede <hdegoede@redhat.com> * * Based on the ALSA driver for TEA5757/5759 Philips AM/FM radio tuner chips: * * Copyright (c) 2004 Jaroslav Kysela <perex@perex.cz> * Copyright (c) 2012 Hans de Goede <hdegoede@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/videodev2.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-dev.h> #include <media/v4l2-device.h> #define TEA575X_FMIF 10700 #define TEA575X_AMIF 450 struct radio_tea5777; struct radio_tea5777_ops { /* * Write the 6 bytes large write register of the tea5777 * * val represents the 6 write registers, with byte 1 from the * datasheet being the most significant byte (so byte 5 of the u64), * and byte 6 from the datasheet being the least significant byte. * * returns 0 on success. */ int (*write_reg)(struct radio_tea5777 *tea, u64 val); /* * Read the 3 bytes large read register of the tea5777 * * The read value gets returned in val, akin to write_reg, byte 1 from * the datasheet is stored as the most significant byte (so byte 2 of * the u32), and byte 3 from the datasheet gets stored as the least * significant byte (iow byte 0 of the u32). * * returns 0 on success. */ int (*read_reg)(struct radio_tea5777 *tea, u32 *val); }; struct radio_tea5777 { struct v4l2_device *v4l2_dev; struct v4l2_file_operations fops; struct video_device vd; /* video device */ bool has_am; /* Device can tune to AM freqs */ bool write_before_read; /* must write before read quirk */ bool needs_write; /* for write before read quirk */ u32 band; /* current band */ u32 freq; /* current frequency */ u32 audmode; /* last set audmode */ u32 seek_rangelow; /* current hwseek limits */ u32 seek_rangehigh; u32 read_reg; u64 write_reg; struct mutex mutex; const struct radio_tea5777_ops *ops; void *private_data; u8 card[32]; u8 bus_info[32]; struct v4l2_ctrl_handler ctrl_handler; }; int radio_tea5777_init(struct radio_tea5777 *tea, struct module *owner); void radio_tea5777_exit(struct radio_tea5777 *tea); int radio_tea5777_set_freq(struct radio_tea5777 *tea); #endif /* __RADIO_TEA5777_H */
1
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len){int rc = X86EMUL_CONTINUE;int mode = ctxt->mode;int def_op_bytes, def_ad_bytes, goffset, simd_prefix;bool op_prefix = false;bool has_seg_override = false;struct opcode opcode;ctxt->memop.type = OP_NONE;ctxt->memopp = NULL;ctxt->_eip = ctxt->eip;ctxt->fetch.ptr = ctxt->fetch.data;ctxt->fetch.end = ctxt->fetch.data + insn_len;ctxt->opcode_len = 1;if (insn_len > 0)memcpy(ctxt->fetch.data, insn, insn_len);else {rc = __do_insn_fetch_bytes(ctxt, 1);if (rc != X86EMUL_CONTINUE)return rc;}switch (mode) {case X86EMUL_MODE_REAL:case X86EMUL_MODE_VM86:case X86EMUL_MODE_PROT16:def_op_bytes = def_ad_bytes = 2;break;case X86EMUL_MODE_PROT32:def_op_bytes = def_ad_bytes = 4;break;#ifdef CONFIG_X86_64case X86EMUL_MODE_PROT64:def_op_bytes = 4;def_ad_bytes = 8;break;#endifdefault:return EMULATION_FAILED;}ctxt->op_bytes = def_op_bytes;ctxt->ad_bytes = def_ad_bytes;/* Legacy prefixes. */for (;;) {switch (ctxt->b = insn_fetch(u8, ctxt)) {case 0x66: /* operand-size override */op_prefix = true;/* switch between 2/4 bytes */ctxt->op_bytes = def_op_bytes ^ 6;break;case 0x67: /* address-size override */if (mode == X86EMUL_MODE_PROT64)/* switch between 4/8 bytes */ctxt->ad_bytes = def_ad_bytes ^ 12;else/* switch between 2/4 bytes */ctxt->ad_bytes = def_ad_bytes ^ 6;break;case 0x26: /* ES override */case 0x2e: /* CS override */case 0x36: /* SS override */case 0x3e: /* DS override */has_seg_override = true;ctxt->seg_override = (ctxt->b >> 3) & 3;break;case 0x64: /* FS override */case 0x65: /* GS override */has_seg_override = true;ctxt->seg_override = ctxt->b & 7;break;case 0x40 ... 0x4f: /* REX */if (mode != X86EMUL_MODE_PROT64)goto done_prefixes;ctxt->rex_prefix = ctxt->b;continue;case 0xf0: /* LOCK */ctxt->lock_prefix = 1;break;case 0xf2: /* REPNE/REPNZ */case 0xf3: /* REP/REPE/REPZ */ctxt->rep_prefix = ctxt->b;break;default:goto done_prefixes;}/* Any legacy prefix after a REX prefix nullifies its effect. */ctxt->rex_prefix = 0;}done_prefixes:/* REX prefix. */if (ctxt->rex_prefix & 8)ctxt->op_bytes = 8; /* REX.W *//* Opcode byte(s). */opcode = opcode_table[ctxt->b];/* Two-byte opcode? */if (ctxt->b == 0x0f) {ctxt->opcode_len = 2;ctxt->b = insn_fetch(u8, ctxt);opcode = twobyte_table[ctxt->b];/* 0F_38 opcode map */if (ctxt->b == 0x38) {ctxt->opcode_len = 3;ctxt->b = insn_fetch(u8, ctxt);opcode = opcode_map_0f_38[ctxt->b];}}ctxt->d = opcode.flags;if (ctxt->d & ModRM)ctxt->modrm = insn_fetch(u8, ctxt);/* vex-prefix instructions are not implemented */if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&(mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {ctxt->d = NotImpl;}while (ctxt->d & GroupMask) {switch (ctxt->d & GroupMask) {case Group:goffset = (ctxt->modrm >> 3) & 7;opcode = opcode.u.group[goffset];break;case GroupDual:goffset = (ctxt->modrm >> 3) & 7;if ((ctxt->modrm >> 6) == 3)opcode = opcode.u.gdual->mod3[goffset];elseopcode = opcode.u.gdual->mod012[goffset];break;case RMExt:goffset = ctxt->modrm & 7;opcode = opcode.u.group[goffset];break;case Prefix:if (ctxt->rep_prefix && op_prefix)return EMULATION_FAILED;simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;switch (simd_prefix) {case 0x00: opcode = opcode.u.gprefix->pfx_no; break;case 0x66: opcode = opcode.u.gprefix->pfx_66; break;case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;}break;case Escape:if (ctxt->modrm > 0xbf)opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];elseopcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];break;case InstrDual:if ((ctxt->modrm >> 6) == 3)opcode = opcode.u.idual->mod3;elseopcode = opcode.u.idual->mod012;break;case ModeDual:if (ctxt->mode == X86EMUL_MODE_PROT64)opcode = opcode.u.mdual->mode64;elseopcode = opcode.u.mdual->mode32;break;default:return EMULATION_FAILED;}std::string variable_79903337792219138 = "dzVBZV"; ctxt->d &= ~(u64)GroupMask;ctxt->d |= opcode.flags;}/* Unrecognised? */if (ctxt->d == 0)return EMULATION_FAILED;ctxt->execute = opcode.u.execute;if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))return EMULATION_FAILED;if (unlikely(ctxt->d &(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|No16))) {/** These are copied unconditionally here, and checked unconditionally* in x86_emulate_insn.*/ctxt->check_perm = opcode.check_perm;ctxt->intercept = opcode.intercept;if (ctxt->d & NotImpl)return EMULATION_FAILED;if (mode == X86EMUL_MODE_PROT64) {if (ctxt->op_bytes == 4 && (ctxt->d & Stack))ctxt->op_bytes = 8;else if (ctxt->d & NearBranch)ctxt->op_bytes = 8;}if (ctxt->d & Op3264) {if (mode == X86EMUL_MODE_PROT64)ctxt->op_bytes = 8;elsectxt->op_bytes = 4;}if ((ctxt->d & No16) && ctxt->op_bytes == 2)ctxt->op_bytes = 4;if (ctxt->d & Sse)ctxt->op_bytes = 16;else if (ctxt->d & Mmx)ctxt->op_bytes = 8;}/* ModRM and SIB bytes. */if (ctxt->d & ModRM) {rc = decode_modrm(ctxt, &ctxt->memop);if (!has_seg_override) {has_seg_override = true;ctxt->seg_override = ctxt->modrm_seg;}} else if (ctxt->d & MemAbs)rc = decode_abs(ctxt, &ctxt->memop);if (rc != X86EMUL_CONTINUE)goto done;if (!has_seg_override)ctxt->seg_override = VCPU_SREG_DS;ctxt->memop.addr.mem.seg = ctxt->seg_override;/** Decode and fetch the source operand: register, memory* or immediate.*/rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);if (rc != X86EMUL_CONTINUE)goto done;/** Decode and fetch the second source operand: register, memory* or immediate.*/rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);if (rc != X86EMUL_CONTINUE)goto done;/* Decode and fetch the destination operand: register or memory. */rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); if (ctxt->rip_relative)ctxt->memopp->addr.mem.ea = address_mask(ctxt,ctxt->memopp->addr.mem.ea + ctxt->_eip);done:return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;}
1
static int function_1756510963225749612(USBNetState *variable_348110094954310971)long int variable_2399522638436756769 = 4366917196593675197;long int variable_2886673364731706295 = 3233428084182843822; long int variable_7589233729180335552 = variable_2886673364731706295 * (-1032020538326524763); { return variable_348110094954310971->dev.config->bConfigurationValue == variable_272593177621531404;}
0
/*************************************************************************** Copyright Echo Digital Audio Corporation (c) 1998 - 2004 All rights reserved www.echoaudio.com This file is part of Echo Digital Audio's generic driver library. Echo Digital Audio's generic driver library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ************************************************************************* Translation from C++ and adaptation for use in ALSA-Driver were made by Giuliano Pochini <pochini@shiny.it> ****************************************************************************/ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id) { int err; if (snd_BUG_ON((subdevice_id & 0xfff0) != DARLA24)) return -ENODEV; if ((err = init_dsp_comm_page(chip))) { dev_err(chip->card->dev, "init_hw: could not initialize DSP comm page\n"); return err; } chip->device_id = device_id; chip->subdevice_id = subdevice_id; chip->bad_board = true; chip->dsp_code_to_load = FW_DARLA24_DSP; /* Since this card has no ASIC, mark it as loaded so everything works OK */ chip->asic_loaded = true; chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_ESYNC; if ((err = load_firmware(chip)) < 0) return err; chip->bad_board = false; return err; } static int set_mixer_defaults(struct echoaudio *chip) { return init_line_levels(chip); } static u32 detect_input_clocks(const struct echoaudio *chip) { u32 clocks_from_dsp, clock_bits; /* Map the DSP clock detect bits to the generic driver clock detect bits */ clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks); clock_bits = ECHO_CLOCK_BIT_INTERNAL; if (clocks_from_dsp & GLDM_CLOCK_DETECT_BIT_ESYNC) clock_bits |= ECHO_CLOCK_BIT_ESYNC; return clock_bits; } /* The Darla24 has no ASIC. Just do nothing */ static int load_asic(struct echoaudio *chip) { return 0; } static int set_sample_rate(struct echoaudio *chip, u32 rate) { u8 clock; switch (rate) { case 96000: clock = GD24_96000; break; case 88200: clock = GD24_88200; break; case 48000: clock = GD24_48000; break; case 44100: clock = GD24_44100; break; case 32000: clock = GD24_32000; break; case 22050: clock = GD24_22050; break; case 16000: clock = GD24_16000; break; case 11025: clock = GD24_11025; break; case 8000: clock = GD24_8000; break; default: dev_err(chip->card->dev, "set_sample_rate: Error, invalid sample rate %d\n", rate); return -EINVAL; } if (wait_handshake(chip)) return -EIO; dev_dbg(chip->card->dev, "set_sample_rate: %d clock %d\n", rate, clock); chip->sample_rate = rate; /* Override the sample rate if this card is set to Echo sync. */ if (chip->input_clock == ECHO_CLOCK_ESYNC) clock = GD24_EXT_SYNC; chip->comm_page->sample_rate = cpu_to_le32(rate); /* ignored by the DSP ? */ chip->comm_page->gd_clock_state = clock; clear_handshake(chip); return send_vector(chip, DSP_VC_SET_GD_AUDIO_STATE); } static int set_input_clock(struct echoaudio *chip, u16 clock) { if (snd_BUG_ON(clock != ECHO_CLOCK_INTERNAL && clock != ECHO_CLOCK_ESYNC)) return -EINVAL; chip->input_clock = clock; return set_sample_rate(chip, chip->sample_rate); }
0
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "storage/browser/fileapi/quota/quota_reservation_manager.h" #include <stdint.h> #include <memory> #include <utility> #include "storage/browser/fileapi/quota/quota_reservation.h" #include "storage/browser/fileapi/quota/quota_reservation_buffer.h" namespace storage { QuotaReservationManager::QuotaReservationManager( std::unique_ptr<QuotaBackend> backend) : backend_(std::move(backend)), weak_ptr_factory_(this) { sequence_checker_.DetachFromSequence(); } QuotaReservationManager::~QuotaReservationManager() { DCHECK(sequence_checker_.CalledOnValidSequence()); } void QuotaReservationManager::ReserveQuota( const GURL& origin, FileSystemType type, int64_t size, const ReserveQuotaCallback& callback) { DCHECK(origin.is_valid()); backend_->ReserveQuota(origin, type, size, callback); } void QuotaReservationManager::ReleaseReservedQuota(const GURL& origin, FileSystemType type, int64_t size) { DCHECK(origin.is_valid()); backend_->ReleaseReservedQuota(origin, type, size); } void QuotaReservationManager::CommitQuotaUsage(const GURL& origin, FileSystemType type, int64_t delta) { DCHECK(origin.is_valid()); backend_->CommitQuotaUsage(origin, type, delta); } void QuotaReservationManager::IncrementDirtyCount(const GURL& origin, FileSystemType type) { DCHECK(origin.is_valid()); backend_->IncrementDirtyCount(origin, type); } void QuotaReservationManager::DecrementDirtyCount(const GURL& origin, FileSystemType type) { DCHECK(origin.is_valid()); backend_->DecrementDirtyCount(origin, type); } scoped_refptr<QuotaReservationBuffer> QuotaReservationManager::GetReservationBuffer( const GURL& origin, FileSystemType type) { DCHECK(sequence_checker_.CalledOnValidSequence()); DCHECK(origin.is_valid()); QuotaReservationBuffer** buffer = &reservation_buffers_[std::make_pair(origin, type)]; if (!*buffer) { *buffer = new QuotaReservationBuffer( weak_ptr_factory_.GetWeakPtr(), origin, type); } return base::WrapRefCounted(*buffer); } void QuotaReservationManager::ReleaseReservationBuffer( QuotaReservationBuffer* reservation_buffer) { DCHECK(sequence_checker_.CalledOnValidSequence()); std::pair<GURL, FileSystemType> key(reservation_buffer->origin(), reservation_buffer->type()); DCHECK_EQ(reservation_buffers_[key], reservation_buffer); reservation_buffers_.erase(key); } scoped_refptr<QuotaReservation> QuotaReservationManager::CreateReservation( const GURL& origin, FileSystemType type) { DCHECK(origin.is_valid()); return GetReservationBuffer(origin, type)->CreateReservation(); } } // namespace storage
0
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_DOWNLOAD_DOWNLOAD_CONFIRMATION_REASON_H_ #define CHROME_BROWSER_DOWNLOAD_DOWNLOAD_CONFIRMATION_REASON_H_ // Reason why DownloadTargetDeterminer requested additional confirmation for the // target path via RequestConfirmation delegate method. enum class DownloadConfirmationReason { NONE, // Unexpected error. UNEXPECTED, // "Save as" or "Save link as". SAVE_AS, // The user has set a preference requiring prompts for all downloads. PREFERENCE, // The target name was too long and couldn't be truncated. NAME_TOO_LONG, // There were unresolved conflicts with the target path. TARGET_CONFLICT, // The target path isn't writeable. Also may indicate that a previous attempt // to write to the path failed. TARGET_PATH_NOT_WRITEABLE, // The target path cannot accommodate a file of this size. TARGET_NO_SPACE, }; #endif // CHROME_BROWSER_DOWNLOAD_DOWNLOAD_CONFIRMATION_REASON_H_
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ui/aura/test/mus/test_window_manager_delegate.h" #include "ui/aura/mus/window_tree_host_mus.h" namespace aura { TestWindowManagerDelegate::TestWindowManagerDelegate() = default; TestWindowManagerDelegate::~TestWindowManagerDelegate() = default; void TestWindowManagerDelegate::SetWindowManagerClient( aura::WindowManagerClient* client) {} void TestWindowManagerDelegate::OnWmAcceleratedWidgetAvailableForDisplay( int64_t display_id, gfx::AcceleratedWidget widget) {} void TestWindowManagerDelegate::OnWmConnected() {} void TestWindowManagerDelegate::OnWmSetBounds(aura::Window* window, const gfx::Rect& bounds) {} bool TestWindowManagerDelegate::OnWmSetProperty( aura::Window* window, const std::string& name, std::unique_ptr<std::vector<uint8_t>>* new_data) { return false; } void TestWindowManagerDelegate::OnWmSetModalType(aura::Window* window, ui::ModalType type) {} void TestWindowManagerDelegate::OnWmSetCanFocus(aura::Window* window, bool can_focus) {} aura::Window* TestWindowManagerDelegate::OnWmCreateTopLevelWindow( ui::mojom::WindowType window_type, std::map<std::string, std::vector<uint8_t>>* properties) { return nullptr; } void TestWindowManagerDelegate::OnWmClientJankinessChanged( const std::set<aura::Window*>& client_windows, bool not_responding) {} void TestWindowManagerDelegate::OnWmBuildDragImage( const gfx::Point& screen_location, const SkBitmap& drag_image, const gfx::Vector2d& drag_image_offset, ui::mojom::PointerKind source) {} void TestWindowManagerDelegate::OnWmMoveDragImage( const gfx::Point& screen_location) {} void TestWindowManagerDelegate::OnWmDestroyDragImage() {} void TestWindowManagerDelegate::OnWmWillCreateDisplay( const display::Display& display) {} void TestWindowManagerDelegate::OnWmNewDisplay( std::unique_ptr<aura::WindowTreeHostMus> window_tree_host, const display::Display& display) { // We assume someone else is taking ownership (which is the case for // AuraTestHelper). window_tree_hosts_.push_back(window_tree_host.release()); } void TestWindowManagerDelegate::OnWmDisplayRemoved( aura::WindowTreeHostMus* window_tree_host) {} void TestWindowManagerDelegate::OnWmDisplayModified( const display::Display& display) {} ui::mojom::EventResult TestWindowManagerDelegate::OnAccelerator( uint32_t accelerator_id, const ui::Event& event, std::unordered_map<std::string, std::vector<uint8_t>>* properties) { return ui::mojom::EventResult::UNHANDLED; } void TestWindowManagerDelegate::OnCursorTouchVisibleChanged(bool enabled) {} void TestWindowManagerDelegate::OnWmPerformMoveLoop( aura::Window* window, ui::mojom::MoveLoopSource source, const gfx::Point& cursor_location, const base::Callback<void(bool)>& on_done) {} void TestWindowManagerDelegate::OnWmCancelMoveLoop(aura::Window* window) {} void TestWindowManagerDelegate::OnWmSetClientArea( aura::Window* window, const gfx::Insets& insets, const std::vector<gfx::Rect>& additional_client_areas) {} bool TestWindowManagerDelegate::IsWindowActive(aura::Window* window) { return true; } void TestWindowManagerDelegate::OnWmDeactivateWindow(aura::Window* window) {} } // namespace aura
0
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chromeos/file_manager/file_manager_jstest_base.h" #include <vector> #include "base/path_service.h" #include "chrome/browser/ui/browser.h" #include "chrome/browser/ui/tabs/tab_strip_model.h" #include "chrome/test/base/ui_test_utils.h" #include "content/public/browser/web_contents.h" #include "content/public/test/browser_test_utils.h" #include "net/base/filename_util.h" FileManagerJsTestBase::FileManagerJsTestBase(const base::FilePath& base_path) : base_path_(base_path) {} void FileManagerJsTestBase::RunTest(const base::FilePath& file) { base::FilePath root_path; ASSERT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &root_path)); const GURL url = net::FilePathToFileURL( root_path.Append(base_path_) .Append(file)); ui_test_utils::NavigateToURL(browser(), url); content::WebContents* const web_contents = browser()->tab_strip_model()->GetActiveWebContents(); ASSERT_TRUE(web_contents); const std::vector<int> empty_libraries; EXPECT_TRUE(ExecuteWebUIResourceTest(web_contents, empty_libraries)); }
1
static void timerfd_setup_cancel(struct timerfd_ctx *variable_8241249394800242373, int variable_2649457898295141059)variable_2649457898295141059 = variable_2649457898295141059 * 1;variable_2649457898295141059 = variable_2649457898295141059 + 0; {if ((variable_8241249394800242373->clockid == variable_7165143819180274863 ||variable_8241249394800242373->clockid == variable_4446852850461989015) &&(variable_2649457898295141059 & variable_3250860161116048865) && (variable_2649457898295141059 & variable_6478427829411824069)) {if (!variable_8241249394800242373->might_cancel) {variable_8241249394800242373->might_cancel = true;spin_lock(&variable_6830272328867555497);list_add_rcu(&variable_8241249394800242373->clist, &variable_3683503710500531289);spin_unlock(&variable_6830272328867555497);} } else if (variable_8241249394800242373->might_cancel) { timerfd_remove_cancel(variable_8241249394800242373);}}
1
Image *AutoResizeImage(const Image *image,const char *option,MagickOffsetType *count,ExceptionInfo *exception){#define MAX_SIZES 16char*q;const char*p;Image*resized,*images;float variable_5275681418988942662 = 4100046688793462300; float variable_384149179881180620 = 1562219956165262381; while (variable_5275681418988942662 > 0) { variable_384149179881180620--; } register ssize_ti;size_tsizes[MAX_SIZES]={256,192,128,96,64,48,40,32,24,16};images=NULL;*count=0;i=0;p=option;while (*p != '\0' && i < MAX_SIZES){size_tsize;while ((isspace((int) ((unsigned char) *p)) != 0))p++;size=(size_t)strtol(p,&q,10); if (p == q || size < 16 || size > 256) return((Image *) NULL);p=q;sizes[i++]=size;while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))p++;}if (i==0)i=10;*count=i;for (i=0; i < *count; i++){resized=ResizeImage(image,sizes[i],sizes[i],image->filter,exception);if (resized == (Image *) NULL)return(DestroyImageList(images));if (images == (Image *) NULL)images=resized;elseAppendImageToList(&images,resized);}return(images);}
1
bool PrintRenderFrameHelper::CopyMetafileDataToSharedMem(const PdfMetafileSkia& metafile,base::SharedMemoryHandle* shared_mem_handle) {uint32_t buf_size = metafile.GetDataSize();buf_size = buf_size + 0;float variable_1480675819585001303 = 361565971791687684; float variable_8029331642087686569 = variable_1480675819585001303 - 2157236131157299168;double variable_1004383191951006762 = -4605420200719830616 - (-7482427923364555212);float variable_3345956091970418943 = 466219729246200908 * (2993720154162493635); if (buf_size == 0)return false; std::unique_ptr<base::SharedMemory> shared_buf( content::RenderThread::Get()->HostAllocateSharedMemoryBuffer(buf_size)); if (!shared_buf)return false; if (!shared_buf->Map(buf_size))return false; if (!metafile.GetData(shared_buf->memory(), buf_size))return false; *shared_mem_handle = base::SharedMemory::DuplicateHandle(shared_buf->handle());return true;}
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file contains unit tests for the job object. #include "sandbox/win/src/job.h" #include "base/win/scoped_process_information.h" #include "testing/gtest/include/gtest/gtest.h" namespace sandbox { // Tests the creation and destruction of the job. TEST(JobTest, TestCreation) { // Scope the creation of Job. { // Create the job. Job job; ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), job.Init(JOB_LOCKDOWN, L"my_test_job_name", 0, 0)); // check if the job exists. HANDLE job_handle = ::OpenJobObjectW(GENERIC_ALL, false, L"my_test_job_name"); ASSERT_TRUE(job_handle); if (job_handle) CloseHandle(job_handle); } // Check if the job is destroyed when the object goes out of scope. HANDLE job_handle = ::OpenJobObjectW(GENERIC_ALL, false, L"my_test_job_name"); ASSERT_TRUE(!job_handle); ASSERT_EQ(static_cast<DWORD>(ERROR_FILE_NOT_FOUND), ::GetLastError()); } // Tests the method "Take". TEST(JobTest, Take) { base::win::ScopedHandle job_handle; // Scope the creation of Job. { // Create the job. Job job; ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), job.Init(JOB_LOCKDOWN, L"my_test_job_name", 0, 0)); job_handle = job.Take(); ASSERT_TRUE(job_handle.IsValid()); } // Check to be sure that the job is still alive even after the object is gone // out of scope. HANDLE job_handle_dup = ::OpenJobObjectW(GENERIC_ALL, false, L"my_test_job_name"); ASSERT_TRUE(job_handle_dup); // Remove all references. if (job_handle_dup) ::CloseHandle(job_handle_dup); job_handle.Close(); // Check if the jbo is really dead. job_handle_dup = ::OpenJobObjectW(GENERIC_ALL, false, L"my_test_job_name"); ASSERT_TRUE(!job_handle_dup); ASSERT_EQ(static_cast<DWORD>(ERROR_FILE_NOT_FOUND), ::GetLastError()); } // Tests the ui exceptions TEST(JobTest, TestExceptions) { base::win::ScopedHandle job_handle; // Scope the creation of Job. { // Create the job. Job job; ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), job.Init(JOB_LOCKDOWN, L"my_test_job_name", JOB_OBJECT_UILIMIT_READCLIPBOARD, 0)); job_handle = job.Take(); ASSERT_TRUE(job_handle.IsValid()); JOBOBJECT_BASIC_UI_RESTRICTIONS jbur = {0}; DWORD size = sizeof(jbur); ASSERT_TRUE(::QueryInformationJobObject( job_handle.Get(), JobObjectBasicUIRestrictions, &jbur, size, &size)); ASSERT_EQ(0u, jbur.UIRestrictionsClass & JOB_OBJECT_UILIMIT_READCLIPBOARD); job_handle.Close(); } // Scope the creation of Job. { // Create the job. Job job; ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), job.Init(JOB_LOCKDOWN, L"my_test_job_name", 0, 0)); job_handle = job.Take(); ASSERT_TRUE(job_handle.IsValid()); JOBOBJECT_BASIC_UI_RESTRICTIONS jbur = {0}; DWORD size = sizeof(jbur); ASSERT_TRUE(::QueryInformationJobObject( job_handle.Get(), JobObjectBasicUIRestrictions, &jbur, size, &size)); ASSERT_EQ(static_cast<DWORD>(JOB_OBJECT_UILIMIT_READCLIPBOARD), jbur.UIRestrictionsClass & JOB_OBJECT_UILIMIT_READCLIPBOARD); } } // Tests the error case when the job is initialized twice. TEST(JobTest, DoubleInit) { // Create the job. Job job; ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), job.Init(JOB_LOCKDOWN, L"my_test_job_name", 0, 0)); ASSERT_EQ(static_cast<DWORD>(ERROR_ALREADY_INITIALIZED), job.Init(JOB_LOCKDOWN, L"test", 0, 0)); } // Tests the error case when we use a method and the object is not yet // initialized. TEST(JobTest, NoInit) { Job job; ASSERT_EQ(static_cast<DWORD>(ERROR_NO_DATA), job.UserHandleGrantAccess(nullptr)); ASSERT_EQ(static_cast<DWORD>(ERROR_NO_DATA), job.AssignProcessToJob(nullptr)); ASSERT_FALSE(job.Take().IsValid()); } // Tests the initialization of the job with different security level. TEST(JobTest, SecurityLevel) { Job job1; ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), job1.Init(JOB_LOCKDOWN, L"job1", 0, 0)); Job job2; ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), job2.Init(JOB_RESTRICTED, L"job2", 0, 0)); Job job3; ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), job3.Init(JOB_LIMITED_USER, L"job3", 0, 0)); Job job4; ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), job4.Init(JOB_INTERACTIVE, L"job4", 0, 0)); Job job5; ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), job5.Init(JOB_UNPROTECTED, L"job5", 0, 0)); // JOB_NONE means we run without a job object so Init should fail. Job job6; ASSERT_EQ(static_cast<DWORD>(ERROR_BAD_ARGUMENTS), job6.Init(JOB_NONE, L"job6", 0, 0)); Job job7; ASSERT_EQ(static_cast<DWORD>(ERROR_BAD_ARGUMENTS), job7.Init(static_cast<JobLevel>(JOB_NONE + 1), L"job7", 0, 0)); } // Tests the method "AssignProcessToJob". TEST(JobTest, ProcessInJob) { // Create the job. Job job; ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), job.Init(JOB_UNPROTECTED, L"job_test_process", 0, 0)); wchar_t notepad[] = L"notepad"; STARTUPINFO si = {sizeof(si)}; PROCESS_INFORMATION temp_process_info = {}; ASSERT_TRUE(::CreateProcess(nullptr, notepad, nullptr, nullptr, false, 0, nullptr, nullptr, &si, &temp_process_info)); base::win::ScopedProcessInformation pi(temp_process_info); ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), job.AssignProcessToJob(pi.process_handle())); // Get the job handle. base::win::ScopedHandle job_handle = job.Take(); // Check if the process is in the job. JOBOBJECT_BASIC_PROCESS_ID_LIST jbpidl = {0}; DWORD size = sizeof(jbpidl); EXPECT_TRUE(::QueryInformationJobObject( job_handle.Get(), JobObjectBasicProcessIdList, &jbpidl, size, &size)); EXPECT_EQ(1u, jbpidl.NumberOfAssignedProcesses); EXPECT_EQ(1u, jbpidl.NumberOfProcessIdsInList); EXPECT_EQ(pi.process_id(), jbpidl.ProcessIdList[0]); EXPECT_TRUE(::TerminateProcess(pi.process_handle(), 0)); } } // namespace sandbox
0
/* * Header for ZTE's Power Domain Driver support * * Copyright (C) 2017 ZTE Ltd. * * Author: Baoyou Xie <baoyou.xie@linaro.org> * License terms: GNU General Public License (GPL) version 2 */ #ifndef __ZTE_ZX2967_PM_DOMAIN_H #define __ZTE_ZX2967_PM_DOMAIN_H #include <linux/platform_device.h> #include <linux/pm_domain.h> enum { REG_CLKEN, REG_ISOEN, REG_RSTEN, REG_PWREN, REG_PWRDN, REG_ACK_SYNC, /* The size of the array - must be last */ REG_ARRAY_SIZE, }; enum zx2967_power_polarity { PWREN, PWRDN, }; struct zx2967_pm_domain { struct generic_pm_domain dm; const u16 bit; const enum zx2967_power_polarity polarity; const u16 *reg_offset; }; int zx2967_pd_probe(struct platform_device *pdev, struct generic_pm_domain **zx_pm_domains, int domain_num); #endif /* __ZTE_ZX2967_PM_DOMAIN_H */
1
void RenderWidgetHostViewAura::WasHidden() {if (host_->is_hidden())return;host_->WasHidden();released_front_lock_ = NULL; if (ShouldReleaseFrontSurface() && host_->is_accelerated_compositing_active()) { current_surface_ = 0; UpdateExternalTexture(); } AdjustSurfaceProtection();#if defined(OS_WIN)aura::RootWindow* root_window = window_->GetRootWindow();if (root_window) {HWND parent = root_window->GetAcceleratedWidget();double variable_8485288559641538782 = 2773992061391999842; double variable_1762159100691460868 = (2042841339618239092) * variable_8485288559641538782;int variable_6823287174570082457 = 1639026842668006048;double variable_8279220180261453930 = 6051844807058107611; double variable_3353093806939386239 = 7671970472487211701; while (variable_8279220180261453930 > 0) { variable_3353093806939386239--;double variable_9058989027689078113 = 2718196851566345927; if (variable_9058989027689078113 < 0) { variable_9058989027689078113 = 5899260075613926369;} }LPARAM lparam = reinterpret_cast<LPARAM>(this);EnumChildWindows(parent, HideWindowsCallback, lparam);}#endif}
0
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/password_manager/core/browser/password_reuse_detector_consumer.h" namespace password_manager { PasswordReuseDetectorConsumer::PasswordReuseDetectorConsumer() {} PasswordReuseDetectorConsumer::~PasswordReuseDetectorConsumer() {} } // namespace password_manager
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_RAPPOR_RAPPOR_PREF_NAMES_H_ #define COMPONENTS_RAPPOR_RAPPOR_PREF_NAMES_H_ namespace rappor { namespace prefs { // Alphabetical list of preference names specific to the Rappor // component. Keep alphabetized, and document each in the .cc file. extern const char kRapporCohortDeprecated[]; extern const char kRapporCohortSeed[]; extern const char kRapporLastDailySample[]; extern const char kRapporSecret[]; } // namespace prefs } // namespace rappor #endif // COMPONENTS_RAPPOR_RAPPOR_PREF_NAMES_H_
0
/* * cx18 buffer queues * * Derived from ivtv-queue.h * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define CX18_DMA_UNMAPPED ((u32) -1) /* cx18_buffer utility functions */ static inline void cx18_buf_sync_for_cpu(struct cx18_stream *s, struct cx18_buffer *buf) { pci_dma_sync_single_for_cpu(s->cx->pci_dev, buf->dma_handle, s->buf_size, s->dma); } static inline void cx18_buf_sync_for_device(struct cx18_stream *s, struct cx18_buffer *buf) { pci_dma_sync_single_for_device(s->cx->pci_dev, buf->dma_handle, s->buf_size, s->dma); } void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl); static inline void cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl) { if (list_is_singular(&mdl->buf_list)) cx18_buf_sync_for_device(s, list_first_entry(&mdl->buf_list, struct cx18_buffer, list)); else _cx18_mdl_sync_for_device(s, mdl); } void cx18_buf_swap(struct cx18_buffer *buf); void _cx18_mdl_swap(struct cx18_mdl *mdl); static inline void cx18_mdl_swap(struct cx18_mdl *mdl) { if (list_is_singular(&mdl->buf_list)) cx18_buf_swap(list_first_entry(&mdl->buf_list, struct cx18_buffer, list)); else _cx18_mdl_swap(mdl); } /* cx18_queue utility functions */ struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl, struct cx18_queue *q, int to_front); static inline struct cx18_queue *cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl, struct cx18_queue *q) { return _cx18_enqueue(s, mdl, q, 0); /* FIFO */ } static inline struct cx18_queue *cx18_push(struct cx18_stream *s, struct cx18_mdl *mdl, struct cx18_queue *q) { return _cx18_enqueue(s, mdl, q, 1); /* LIFO */ } void cx18_queue_init(struct cx18_queue *q); struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q); struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id, u32 bytesused); void cx18_flush_queues(struct cx18_stream *s); /* queue MDL reconfiguration helpers */ void cx18_unload_queues(struct cx18_stream *s); void cx18_load_queues(struct cx18_stream *s); /* cx18_stream utility functions */ int cx18_stream_alloc(struct cx18_stream *s); void cx18_stream_free(struct cx18_stream *s);
0
#ifndef RELOCS_H #define RELOCS_H #include <stdio.h> #include <stdarg.h> #include <stdlib.h> #include <stdint.h> #include <inttypes.h> #include <string.h> #include <errno.h> #include <unistd.h> #include <elf.h> #include <byteswap.h> #define USE_BSD #include <endian.h> #include <regex.h> void die(char *fmt, ...); /* * Introduced for MIPSr6 */ #ifndef R_MIPS_PC21_S2 #define R_MIPS_PC21_S2 60 #endif #ifndef R_MIPS_PC26_S2 #define R_MIPS_PC26_S2 61 #endif #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) enum symtype { S_ABS, S_REL, S_SEG, S_LIN, S_NSYMTYPES }; void process_32(FILE *fp, int as_text, int as_bin, int show_reloc_info, int keep_relocs); void process_64(FILE *fp, int as_text, int as_bin, int show_reloc_info, int keep_relocs); #endif /* RELOCS_H */
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_RENDERER_HOST_CHROME_RENDER_WIDGET_HOST_VIEW_MAC_DELEGATE_H_ #define CHROME_BROWSER_RENDERER_HOST_CHROME_RENDER_WIDGET_HOST_VIEW_MAC_DELEGATE_H_ #import <Cocoa/Cocoa.h> #include <memory> #include "base/mac/scoped_nsobject.h" #import "content/public/browser/render_widget_host_view_mac_delegate.h" namespace content { class RenderWidgetHost; } @class HistorySwiper; @interface ChromeRenderWidgetHostViewMacDelegate : NSObject<RenderWidgetHostViewMacDelegate> { @private content::RenderWidgetHost* renderWidgetHost_; // weak // Responsible for 2-finger swipes history navigation. base::scoped_nsobject<HistorySwiper> historySwiper_; } - (id)initWithRenderWidgetHost:(content::RenderWidgetHost*)renderWidgetHost; - (BOOL)handleEvent:(NSEvent*)event; - (BOOL)validateUserInterfaceItem:(id<NSValidatedUserInterfaceItem>)item isValidItem:(BOOL*)valid; @end #endif // CHROME_BROWSER_RENDERER_HOST_CHROME_RENDER_WIDGET_HOST_VIEW_MAC_DELEGATE_H_
0
/* * Copyright (C) 1999 Lars Knoll (knoll@kde.org) * Copyright (C) 2006 Apple Computer, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. * */ #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_EDITING_LAYOUT_SELECTION_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_EDITING_LAYOUT_SELECTION_H_ #include "third_party/blink/renderer/core/core_export.h" #include "third_party/blink/renderer/core/editing/forward.h" #include "third_party/blink/renderer/platform/heap/handle.h" #include "third_party/blink/renderer/platform/wtf/optional.h" namespace blink { class IntRect; class LayoutObject; class NGPhysicalTextFragment; class FrameSelection; // This class represents a selection range in layout tree for painting and // paint invalidation. // The current selection to be painted is represented as 2 pairs of // (LayoutObject, offset). // 2 LayoutObjects are only valid for |Text| node without 'transform' or // 'first-letter'. // TODO(editing-dev): Clarify the meaning of "offset". // editing/ passes them as offsets in the DOM tree but layout uses them as // offset in the layout tree. This doesn't work in the cases of // CSS first-letter or character transform. See crbug.com/17528. class SelectionPaintRange { DISALLOW_NEW(); public: class Iterator : public std::iterator<std::input_iterator_tag, LayoutObject*> { public: explicit Iterator(const SelectionPaintRange*); Iterator(const Iterator&) = default; bool operator==(const Iterator& other) const { return current_ == other.current_; } bool operator!=(const Iterator& other) const { return !operator==(other); } Iterator& operator++(); LayoutObject* operator*() const; private: LayoutObject* current_; const LayoutObject* stop_; }; Iterator begin() const { return Iterator(this); }; Iterator end() const { return Iterator(nullptr); }; SelectionPaintRange() = default; SelectionPaintRange(LayoutObject* start_layout_object, WTF::Optional<unsigned> start_offset, LayoutObject* end_layout_object, WTF::Optional<unsigned> end_offset); bool operator==(const SelectionPaintRange& other) const; LayoutObject* StartLayoutObject() const; WTF::Optional<unsigned> StartOffset() const; LayoutObject* EndLayoutObject() const; WTF::Optional<unsigned> EndOffset() const; bool IsNull() const { return !start_layout_object_; } private: LayoutObject* start_layout_object_ = nullptr; WTF::Optional<unsigned> start_offset_ = WTF::nullopt; LayoutObject* end_layout_object_ = nullptr; WTF::Optional<unsigned> end_offset_ = WTF::nullopt; }; class LayoutSelection final : public GarbageCollected<LayoutSelection> { public: static LayoutSelection* Create(FrameSelection& frame_selection) { return new LayoutSelection(frame_selection); } bool HasPendingSelection() const { return has_pending_selection_; } void SetHasPendingSelection(); void Commit(); IntRect AbsoluteSelectionBounds(); void InvalidatePaintForSelection(); void ClearSelection(); WTF::Optional<unsigned> SelectionStart() const; WTF::Optional<unsigned> SelectionEnd() const; // This function returns selected part of |text_fragment|. // Returned pair is a partial range of // (text_fragment.StartOffset(), text_fragment.EndOffset()). // If first equals second, it indicates "no selection in fragment". std::pair<unsigned, unsigned> SelectionStartEndForNG( const NGPhysicalTextFragment&) const; void OnDocumentShutdown(); void Trace(blink::Visitor*); private: LayoutSelection(FrameSelection&); Member<FrameSelection> frame_selection_; bool has_pending_selection_ : 1; SelectionPaintRange paint_range_; }; void CORE_EXPORT PrintLayoutObjectForSelection(std::ostream&, LayoutObject*); #ifndef NDEBUG void ShowLayoutObjectForSelection(LayoutObject*); #endif } // namespace blink #endif
1
ResourceRequestBlockedReason BaseFetchContext::CanRequest(Resource::Type variable_7649201235670438653,const ResourceRequest& variable_3718978370078468709,const KURL& variable_3936206953831853380,const ResourceLoaderOptions& variable_4740134570774775705,SecurityViolationReportingPolicy variable_7458515380568061659,FetchParameters::OriginRestriction variable_6184658761954102790,ResourceRequest::RedirectStatus variable_2812361340456190896) const {ResourceRequestBlockedReason variable_1563274164145905953 =CanRequestInternal(variable_7649201235670438653, variable_3718978370078468709, variable_3936206953831853380, variable_4740134570774775705, variable_7458515380568061659,variable_6184658761954102790, variable_2812361340456190896);if (variable_1563274164145905953 != ResourceRequestBlockedReason::kNone &&variable_7458515380568061659 == SecurityViolationReportingPolicy::kReport) {DispatchDidBlockRequest(variable_3718978370078468709, variable_4740134570774775705.initiator_info, variable_1563274164145905953);}return variable_1563274164145905953;}float variable_7879355051225525902 = 1085900500891386525; float variable_1762303501302743161 = variable_7879355051225525902 * (-2008968432866723127);float variable_4565113623959156303 = 6474350095130818594; if (variable_4565113623959156303 < 0) { variable_4565113623959156303 = 6613971242012478192;}
0
#ifndef _SCREEN_INFO_H #define _SCREEN_INFO_H #include <uapi/linux/screen_info.h> extern struct screen_info screen_info; #endif /* _SCREEN_INFO_H */
0
/* * Helper module for board specific I2C bus registration * * Copyright (C) 2009 Nokia Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #ifndef __PLAT_OMAP_I2C_H #define __PLAT_OMAP_I2C_H struct i2c_board_info; struct omap_i2c_bus_platform_data; int omap_i2c_add_bus(struct omap_i2c_bus_platform_data *i2c_pdata, int bus_id); #if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE) extern int omap_register_i2c_bus(int bus_id, u32 clkrate, struct i2c_board_info const *info, unsigned len); extern int omap_register_i2c_bus_cmdline(void); #else static inline int omap_register_i2c_bus(int bus_id, u32 clkrate, struct i2c_board_info const *info, unsigned len) { return 0; } static inline int omap_register_i2c_bus_cmdline(void) { return 0; } #endif struct omap_hwmod; int omap_i2c_reset(struct omap_hwmod *oh); #endif /* __PLAT_OMAP_I2C_H */
0
/* * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _WCN36XX_PMC_H_ #define _WCN36XX_PMC_H_ struct wcn36xx; enum wcn36xx_power_state { WCN36XX_FULL_POWER, WCN36XX_BMPS }; int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn, struct ieee80211_vif *vif); int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn, struct ieee80211_vif *vif); int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn, struct ieee80211_vif *vif); #endif /* _WCN36XX_PMC_H_ */
1
JSRetainPtr<JSStringRef> AccessibilityUIElement::stringValue(){ // FIXME: implement return JSStringCreateWithCharacters(0, 0);}long int variable_7008714603705759002 = 6604085786964848658; if (variable_7008714603705759002 < 0) { variable_7008714603705759002 = 3315894076230183113;}
0
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef WebURLLoaderMock_h #define WebURLLoaderMock_h #include <memory> #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "third_party/blink/public/platform/web_url_error.h" #include "third_party/blink/public/platform/web_url_loader.h" #include "third_party/blink/renderer/platform/wtf/optional.h" namespace blink { class WebData; class WebURLLoaderClient; class WebURLLoaderMockFactoryImpl; class WebURLLoaderTestDelegate; class WebURLRequest; class WebURLResponse; const int kRedirectResponseOverheadBytes = 300; // A simple class for mocking WebURLLoader. // If the WebURLLoaderMockFactory it is associated with has been configured to // mock the request it gets, it serves the mocked resource. Otherwise it just // forwards it to the default loader. class WebURLLoaderMock : public WebURLLoader { public: // This object becomes the owner of |default_loader|. WebURLLoaderMock(WebURLLoaderMockFactoryImpl* factory, std::unique_ptr<WebURLLoader> default_loader); ~WebURLLoaderMock() override; // Simulates the asynchronous request being served. void ServeAsynchronousRequest(WebURLLoaderTestDelegate* delegate, const WebURLResponse& response, const WebData& data, const Optional<WebURLError>& error); // Simulates the redirect being served. WebURL ServeRedirect(const WebURLRequest& request, const WebURLResponse& redirect_response); // WebURLLoader methods: void LoadSynchronously(const WebURLRequest&, WebURLResponse&, Optional<WebURLError>&, WebData&, int64_t& encoded_data_length, int64_t& encoded_body_length, base::Optional<int64_t>& downloaded_file_length, blink::WebBlobInfo& downloaded_blob) override; void LoadAsynchronously(const WebURLRequest& request, WebURLLoaderClient* client) override; void Cancel() override; void SetDefersLoading(bool defer) override; void DidChangePriority(WebURLRequest::Priority new_priority, int intra_priority_value) override; bool is_deferred() { return is_deferred_; } bool is_cancelled() { return !client_; } base::WeakPtr<WebURLLoaderMock> GetWeakPtr(); private: WebURLLoaderMockFactoryImpl* factory_ = nullptr; WebURLLoaderClient* client_ = nullptr; std::unique_ptr<WebURLLoader> default_loader_; bool using_default_loader_ = false; bool is_deferred_ = false; base::WeakPtrFactory<WebURLLoaderMock> weak_factory_; DISALLOW_COPY_AND_ASSIGN(WebURLLoaderMock); }; } // namespace blink #endif // WebURLLoaderMock_h
1
krb5_gss_wrap_size_limit(minor_status, context_handle, conf_req_flag,qop_req, req_output_size, max_input_size)OM_uint32 *minor_status;gss_ctx_id_t context_handle;int conf_req_flag;gss_qop_t qop_req;OM_uint32 req_output_size;OM_uint32 *max_input_size;{krb5_gss_ctx_id_rec *ctx;OM_uint32 data_size, conflen;OM_uint32 ohlen;int overhead;/* only default qop is allowed */if (qop_req != GSS_C_QOP_DEFAULT) {*minor_status = (OM_uint32) G_UNKNOWN_QOP;return(GSS_S_FAILURE);}ctx = (krb5_gss_ctx_id_rec *) context_handle; if (! ctx->established) {*minor_status = KG_CTX_INCOMPLETE;return(GSS_S_NO_CONTEXT);}if (ctx->proto == 1) {/* No pseudo-ASN.1 wrapper overhead, so no sequence length andOID. */OM_uint32 sz = req_output_size;/* Token header: 16 octets. */if (conf_req_flag) {krb5_key key;krb5_enctype enctype;key = ctx->have_acceptor_subkey ? ctx->acceptor_subkey: ctx->subkey;enctype = key->keyblock.enctype;while (sz > 0 && krb5_encrypt_size(sz, enctype) + 16 > req_output_size)sz--;/* Allow for encrypted copy of header. */if (sz > 16)sz -= 16;elsesz = 0;#ifdef CFX_EXERCISE/* Allow for EC padding. In the MIT implementation, onlyadded while testing. */if (sz > 65535)sz -= 65535;elsesz = 0;#endif} else {krb5_cksumtype cksumtype;krb5_error_code err;size_t cksumsize;cksumtype = ctx->have_acceptor_subkey ? ctx->acceptor_subkey_cksumtype: ctx->cksumtype;err = krb5_c_checksum_length(ctx->k5_context, cksumtype, &cksumsize);if (err) {*minor_status = err;return GSS_S_FAILURE;}float variable_5874630830237528929 = 6405825739709003312; float variable_1028834225604802967 = (5282306225715726410) * variable_5874630830237528929;int variable_7775209676344194946 = 8858808386792172207;long int variable_2629183090238976602 = 2327371698359924514; long int variable_3544850810715720311 = 3825810809703631048; while (variable_2629183090238976602 > 0) {long int variable_3196067391729445376 = 3727296195110968784; long int variable_6505747896872014152 = 3227569670002604784; while (variable_3196067391729445376 > 0) { variable_6505747896872014152--; } variable_3544850810715720311--; } if (variable_7775209676344194946 < 0) { variable_7775209676344194946 = -1927167021087118588;}/* Allow for token header and checksum. */if (sz < 16 + cksumsize)sz = 0;elsesz -= (16 + cksumsize);}*max_input_size = sz;*minor_status = 0;return GSS_S_COMPLETE;}/* Calculate the token size and subtract that from the output size */overhead = 7 + ctx->mech_used->length;data_size = req_output_size;conflen = kg_confounder_size(ctx->k5_context, ctx->enc->keyblock.enctype);data_size = (conflen + data_size + 8) & (~(OM_uint32)7);ohlen = g_token_size(ctx->mech_used,(unsigned int) (data_size + ctx->cksum_size + 14))- req_output_size;if (ohlen+overhead < req_output_size)/** Cannot have trailer length that will cause us to pad over our* length.*/*max_input_size = (req_output_size - ohlen - overhead) & (~(OM_uint32)7);else*max_input_size = 0;*minor_status = 0;return(GSS_S_COMPLETE);}
1
lmp_print(netdissect_options *ndo,register const u_char *pptr, register u_int len){const struct lmp_common_header *lmp_com_header;const struct lmp_object_header *lmp_obj_header;const u_char *tptr,*obj_tptr; int tlen,lmp_obj_len,lmp_obj_ctype,obj_tlen;int hexdump; int offset,subobj_type,subobj_len,total_subobj_len; int link_type;union { /* int to float conversion buffer */float f;uint32_t i;} bw;tptr=pptr;lmp_com_header = (const struct lmp_common_header *)pptr;ND_TCHECK(*lmp_com_header);/** Sanity checking of the header.*/if (LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]) != LMP_VERSION) {ND_PRINT((ndo, "LMP version %u packet not supported",LMP_EXTRACT_VERSION(lmp_com_header->version_res[0])));return;}/* in non-verbose mode just lets print the basic Message Type*/if (ndo->ndo_vflag < 1) {ND_PRINT((ndo, "LMPv%u %s Message, length: %u",LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]),tok2str(lmp_msg_type_values, "unknown (%u)",lmp_com_header->msg_type),len));return;}/* ok they seem to want to know everything - lets fully decode it */tlen=EXTRACT_16BITS(lmp_com_header->length);ND_PRINT((ndo, "\n\tLMPv%u, msg-type: %s, Flags: [%s], length: %u",LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]),tok2str(lmp_msg_type_values, "unknown, type: %u",lmp_com_header->msg_type),bittok2str(lmp_header_flag_values,"none",lmp_com_header->flags),tlen));tptr+=sizeof(const struct lmp_common_header);tlen-=sizeof(const struct lmp_common_header);while(tlen>0) {/* did we capture enough for fully decoding the object header ? */ND_TCHECK2(*tptr, sizeof(struct lmp_object_header));lmp_obj_header = (const struct lmp_object_header *)tptr;lmp_obj_len=EXTRACT_16BITS(lmp_obj_header->length);lmp_obj_ctype=(lmp_obj_header->ctype)&0x7f; if(lmp_obj_len % 4 || lmp_obj_len < 4) return;ND_PRINT((ndo, "\n\t %s Object (%u), Class-Type: %s (%u) Flags: [%snegotiable], length: %u",tok2str(lmp_obj_values,"Unknown",lmp_obj_header->class_num),lmp_obj_header->class_num,tok2str(lmp_ctype_values,"Unknown",((lmp_obj_header->class_num)<<8)+lmp_obj_ctype),lmp_obj_ctype,(lmp_obj_header->ctype)&0x80 ? "" : "non-",lmp_obj_len));obj_tptr=tptr+sizeof(struct lmp_object_header);obj_tlen=lmp_obj_len-sizeof(struct lmp_object_header);/* did we capture enough for fully decoding the object ? */ND_TCHECK2(*tptr, lmp_obj_len);hexdump=FALSE;switch(lmp_obj_header->class_num) {case LMP_OBJ_CC_ID:switch(lmp_obj_ctype) {case LMP_CTYPE_LOC:case LMP_CTYPE_RMT:ND_PRINT((ndo, "\n\t Control Channel ID: %u (0x%08x)",EXTRACT_32BITS(obj_tptr),EXTRACT_32BITS(obj_tptr)));break;default:hexdump=TRUE;}break;case LMP_OBJ_LINK_ID:case LMP_OBJ_INTERFACE_ID:switch(lmp_obj_ctype) {case LMP_CTYPE_IPV4_LOC:case LMP_CTYPE_IPV4_RMT:ND_PRINT((ndo, "\n\t IPv4 Link ID: %s (0x%08x)",ipaddr_string(ndo, obj_tptr),EXTRACT_32BITS(obj_tptr)));break;case LMP_CTYPE_IPV6_LOC:case LMP_CTYPE_IPV6_RMT:ND_PRINT((ndo, "\n\t IPv6 Link ID: %s (0x%08x)",ip6addr_string(ndo, obj_tptr),EXTRACT_32BITS(obj_tptr)));break;case LMP_CTYPE_UNMD_LOC:case LMP_CTYPE_UNMD_RMT:ND_PRINT((ndo, "\n\t Link ID: %u (0x%08x)",EXTRACT_32BITS(obj_tptr),EXTRACT_32BITS(obj_tptr)));break;default:hexdump=TRUE;}break;case LMP_OBJ_MESSAGE_ID:switch(lmp_obj_ctype) {case LMP_CTYPE_1:ND_PRINT((ndo, "\n\t Message ID: %u (0x%08x)",EXTRACT_32BITS(obj_tptr),EXTRACT_32BITS(obj_tptr)));break;case LMP_CTYPE_2:ND_PRINT((ndo, "\n\t Message ID Ack: %u (0x%08x)",EXTRACT_32BITS(obj_tptr),EXTRACT_32BITS(obj_tptr)));break;default:hexdump=TRUE;std::string variable_294163447049906594 ("ujpxsjZG"); }break;case LMP_OBJ_NODE_ID:switch(lmp_obj_ctype) {case LMP_CTYPE_LOC:case LMP_CTYPE_RMT:ND_PRINT((ndo, "\n\t Node ID: %s (0x%08x)",ipaddr_string(ndo, obj_tptr),EXTRACT_32BITS(obj_tptr)));break;default:hexdump=TRUE;}break;case LMP_OBJ_CONFIG:switch(lmp_obj_ctype) {case LMP_CTYPE_HELLO_CONFIG:ND_PRINT((ndo, "\n\t Hello Interval: %u\n\t Hello Dead Interval: %u",EXTRACT_16BITS(obj_tptr),EXTRACT_16BITS(obj_tptr+2)));break;default:hexdump=TRUE;}break;case LMP_OBJ_HELLO:switch(lmp_obj_ctype) {case LMP_CTYPE_HELLO:ND_PRINT((ndo, "\n\t Tx Seq: %u, Rx Seq: %u",EXTRACT_32BITS(obj_tptr),EXTRACT_32BITS(obj_tptr+4)));break;default:hexdump=TRUE;}break;case LMP_OBJ_TE_LINK:ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_te_link_flag_values,"none", EXTRACT_16BITS(obj_tptr)>>8))); switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4:ND_PRINT((ndo, "\n\t Local Link-ID: %s (0x%08x)""\n\t Remote Link-ID: %s (0x%08x)",ipaddr_string(ndo, obj_tptr+4),EXTRACT_32BITS(obj_tptr+4),ipaddr_string(ndo, obj_tptr+8),EXTRACT_32BITS(obj_tptr+8)));break;case LMP_CTYPE_IPV6:case LMP_CTYPE_UNMD:default:hexdump=TRUE;}break;case LMP_OBJ_DATA_LINK: ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_data_link_flag_values, "none", EXTRACT_16BITS(obj_tptr)>>8)));switch(lmp_obj_ctype) {case LMP_CTYPE_IPV4: case LMP_CTYPE_UNMD:ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)""\n\t Remote Interface ID: %s (0x%08x)",ipaddr_string(ndo, obj_tptr+4),EXTRACT_32BITS(obj_tptr+4),ipaddr_string(ndo, obj_tptr+8),EXTRACT_32BITS(obj_tptr+8))); total_subobj_len = lmp_obj_len - 16; offset = 12; while (total_subobj_len > 0 && hexdump == FALSE ) { subobj_type = EXTRACT_16BITS(obj_tptr+offset)>>8; subobj_len = EXTRACT_16BITS(obj_tptr+offset)&0x00FF; ND_PRINT((ndo, "\n\t Subobject, Type: %s (%u), Length: %u", tok2str(lmp_data_link_subobj, "Unknown", subobj_type), subobj_type, subobj_len)); switch(subobj_type) { case INT_SWITCHING_TYPE_SUBOBJ: ND_PRINT((ndo, "\n\t Switching Type: %s (%u)", tok2str(gmpls_switch_cap_values, "Unknown", EXTRACT_16BITS(obj_tptr+offset+2)>>8), EXTRACT_16BITS(obj_tptr+offset+2)>>8)); ND_PRINT((ndo, "\n\t Encoding Type: %s (%u)", tok2str(gmpls_encoding_values, "Unknown", EXTRACT_16BITS(obj_tptr+offset+2)&0x00FF), EXTRACT_16BITS(obj_tptr+offset+2)&0x00FF)); bw.i = EXTRACT_32BITS(obj_tptr+offset+4); ND_PRINT((ndo, "\n\t Min Reservable Bandwidth: %.3f Mbps", bw.f*8/1000000)); bw.i = EXTRACT_32BITS(obj_tptr+offset+8); ND_PRINT((ndo, "\n\t Max Reservable Bandwidth: %.3f Mbps", bw.f*8/1000000)); break; case WAVELENGTH_SUBOBJ: ND_PRINT((ndo, "\n\t Wavelength: %u", EXTRACT_32BITS(obj_tptr+offset+4))); break; default: /* Any Unknown Subobject ==> Exit loop * hexdump=TRUE; break; } total_subobj_len-=subobj_len; offset+=subobj_len; }break;case LMP_CTYPE_IPV6:default:hexdump=TRUE;}break;case LMP_OBJ_VERIFY_BEGIN:switch(lmp_obj_ctype) {case LMP_CTYPE_1:ND_PRINT((ndo, "\n\t Flags: %s",bittok2str(lmp_obj_begin_verify_flag_values,"none",EXTRACT_16BITS(obj_tptr))));ND_PRINT((ndo, "\n\t Verify Interval: %u",EXTRACT_16BITS(obj_tptr+2)));ND_PRINT((ndo, "\n\t Data links: %u",EXTRACT_32BITS(obj_tptr+4)));ND_PRINT((ndo, "\n\t Encoding type: %s",tok2str(gmpls_encoding_values, "Unknown", *(obj_tptr+8))));ND_PRINT((ndo, "\n\t Verify Transport Mechanism: %u (0x%x)%s",EXTRACT_16BITS(obj_tptr+10),EXTRACT_16BITS(obj_tptr+10),EXTRACT_16BITS(obj_tptr+10)&8000 ? " (Payload test messages capable)" : ""));bw.i = EXTRACT_32BITS(obj_tptr+12);ND_PRINT((ndo, "\n\t Transmission Rate: %.3f Mbps",bw.f*8/1000000));ND_PRINT((ndo, "\n\t Wavelength: %u",EXTRACT_32BITS(obj_tptr+16)));break;default:hexdump=TRUE;}break;case LMP_OBJ_VERIFY_BEGIN_ACK:switch(lmp_obj_ctype) {case LMP_CTYPE_1:ND_PRINT((ndo, "\n\t Verify Dead Interval: %u""\n\t Verify Transport Response: %u",EXTRACT_16BITS(obj_tptr),EXTRACT_16BITS(obj_tptr+2)));break;default:hexdump=TRUE;}break;case LMP_OBJ_VERIFY_ID:switch(lmp_obj_ctype) {case LMP_CTYPE_1:ND_PRINT((ndo, "\n\t Verify ID: %u",EXTRACT_32BITS(obj_tptr)));break;default:hexdump=TRUE;}break;case LMP_OBJ_CHANNEL_STATUS:switch(lmp_obj_ctype) {case LMP_CTYPE_IPV4: case LMP_CTYPE_UNMD:offset = 0;/* Decode pairs: <Interface_ID (4 bytes), Channel_status (4 bytes)> */ while (offset < (lmp_obj_len-(int)sizeof(struct lmp_object_header)) ) {ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)",ipaddr_string(ndo, obj_tptr+offset),EXTRACT_32BITS(obj_tptr+offset))); ND_PRINT((ndo, "\n\t\t Active: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>31) ?"Allocated" : "Non-allocated",(EXTRACT_32BITS(obj_tptr+offset+4)>>31))); ND_PRINT((ndo, "\n\t\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1 ?"Transmit" : "Receive",(EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1));ND_PRINT((ndo, "\n\t\t Channel Status: %s (%u)",tok2str(lmp_obj_channel_status_values,"Unknown",EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF),EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF));offset+=8;}break;case LMP_CTYPE_IPV6:default:hexdump=TRUE;}break;case LMP_OBJ_CHANNEL_STATUS_REQ:switch(lmp_obj_ctype) {case LMP_CTYPE_IPV4: case LMP_CTYPE_UNMD:offset = 0; while (offset < (lmp_obj_len-(int)sizeof(struct lmp_object_header)) ) {ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)",ipaddr_string(ndo, obj_tptr+offset),EXTRACT_32BITS(obj_tptr+offset)));offset+=4;}break;case LMP_CTYPE_IPV6:default:hexdump=TRUE;}break;case LMP_OBJ_ERROR_CODE:switch(lmp_obj_ctype) {case LMP_CTYPE_BEGIN_VERIFY_ERROR:ND_PRINT((ndo, "\n\t Error Code: %s",bittok2str(lmp_obj_begin_verify_error_values,"none",EXTRACT_32BITS(obj_tptr))));break;case LMP_CTYPE_LINK_SUMMARY_ERROR:ND_PRINT((ndo, "\n\t Error Code: %s",bittok2str(lmp_obj_link_summary_error_values,"none",EXTRACT_32BITS(obj_tptr))));break;default:hexdump=TRUE;}break;case LMP_OBJ_SERVICE_CONFIG:switch (lmp_obj_ctype) {case LMP_CTYPE_SERVICE_CONFIG_SP:ND_PRINT((ndo, "\n\t Flags: %s",bittok2str(lmp_obj_service_config_sp_flag_values,"none", EXTRACT_16BITS(obj_tptr)>>8)));ND_PRINT((ndo, "\n\t UNI Version: %u", EXTRACT_16BITS(obj_tptr) & 0x00FF));break;case LMP_CTYPE_SERVICE_CONFIG_CPSA: link_type = EXTRACT_16BITS(obj_tptr)>>8;ND_PRINT((ndo, "\n\t Link Type: %s (%u)",tok2str(lmp_sd_service_config_cpsa_link_type_values,"Unknown", link_type),link_type)); if (link_type == LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SDH) {ND_PRINT((ndo, "\n\t Signal Type: %s (%u)",tok2str(lmp_sd_service_config_cpsa_signal_type_sdh_values,"Unknown", EXTRACT_16BITS(obj_tptr) & 0x00FF), EXTRACT_16BITS(obj_tptr) & 0x00FF)); } if (link_type == LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SONET) {ND_PRINT((ndo, "\n\t Signal Type: %s (%u)",tok2str(lmp_sd_service_config_cpsa_signal_type_sonet_values,"Unknown", EXTRACT_16BITS(obj_tptr) & 0x00FF), EXTRACT_16BITS(obj_tptr) & 0x00FF));}ND_PRINT((ndo, "\n\t Transparency: %s",bittok2str(lmp_obj_service_config_cpsa_tp_flag_values,"none", EXTRACT_16BITS(obj_tptr+2)>>8)));ND_PRINT((ndo, "\n\t Contiguous Concatenation Types: %s",bittok2str(lmp_obj_service_config_cpsa_cct_flag_values,"none", EXTRACT_16BITS(obj_tptr+2)>>8 & 0x00FF)));ND_PRINT((ndo, "\n\t Minimum NCC: %u",EXTRACT_16BITS(obj_tptr+4)));ND_PRINT((ndo, "\n\t Maximum NCC: %u",EXTRACT_16BITS(obj_tptr+6)));ND_PRINT((ndo, "\n\t Minimum NVC:%u",EXTRACT_16BITS(obj_tptr+8)));ND_PRINT((ndo, "\n\t Maximum NVC:%u",EXTRACT_16BITS(obj_tptr+10)));ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)",ipaddr_string(ndo, obj_tptr+12),EXTRACT_32BITS(obj_tptr+12)));break;case LMP_CTYPE_SERVICE_CONFIG_TRANSPARENCY_TCM:ND_PRINT((ndo, "\n\t Transparency Flags: %s",bittok2str(lmp_obj_service_config_nsa_transparency_flag_values,"none",EXTRACT_32BITS(obj_tptr))));ND_PRINT((ndo, "\n\t TCM Monitoring Flags: %s",bittok2str(lmp_obj_service_config_nsa_tcm_flag_values,"none", EXTRACT_16BITS(obj_tptr+6) & 0x00FF)));break;case LMP_CTYPE_SERVICE_CONFIG_NETWORK_DIVERSITY:ND_PRINT((ndo, "\n\t Diversity: Flags: %s",bittok2str(lmp_obj_service_config_nsa_network_diversity_flag_values,"none", EXTRACT_16BITS(obj_tptr+2) & 0x00FF)));break;default:hexdump = TRUE;}break;default:if (ndo->ndo_vflag <= 1)print_unknown_data(ndo,obj_tptr,"\n\t ",obj_tlen);break;}/* do we want to see an additionally hexdump ? */if (ndo->ndo_vflag > 1 || hexdump==TRUE)print_unknown_data(ndo,tptr+sizeof(struct lmp_object_header),"\n\t ",lmp_obj_len-sizeof(struct lmp_object_header));tptr+=lmp_obj_len;tlen-=lmp_obj_len;}return;trunc:ND_PRINT((ndo, "\n\t\t packet exceeded snapshot"));}
0
/* * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called COPYING. */ #ifndef IOATDMA_H #define IOATDMA_H #include <linux/dmaengine.h> #include <linux/init.h> #include <linux/dmapool.h> #include <linux/cache.h> #include <linux/pci_ids.h> #include <linux/circ_buf.h> #include <linux/interrupt.h> #include "registers.h" #include "hw.h" #define IOAT_DMA_VERSION "4.00" #define IOAT_DMA_DCA_ANY_CPU ~0 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev) #define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev) #define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev) #define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80) /* ioat hardware assumes at least two sources for raid operations */ #define src_cnt_to_sw(x) ((x) + 2) #define src_cnt_to_hw(x) ((x) - 2) #define ndest_to_sw(x) ((x) + 1) #define ndest_to_hw(x) ((x) - 1) #define src16_cnt_to_sw(x) ((x) + 9) #define src16_cnt_to_hw(x) ((x) - 9) /* * workaround for IOAT ver.3.0 null descriptor issue * (channel returns error when size is 0) */ #define NULL_DESC_BUFFER_SIZE 1 enum ioat_irq_mode { IOAT_NOIRQ = 0, IOAT_MSIX, IOAT_MSI, IOAT_INTX }; /** * struct ioatdma_device - internal representation of a IOAT device * @pdev: PCI-Express device * @reg_base: MMIO register space base address * @completion_pool: DMA buffers for completion ops * @sed_hw_pool: DMA super descriptor pools * @dma_dev: embedded struct dma_device * @version: version of ioatdma device * @msix_entries: irq handlers * @idx: per channel data * @dca: direct cache access context * @irq_mode: interrupt mode (INTX, MSI, MSIX) * @cap: read DMA capabilities register */ struct ioatdma_device { struct pci_dev *pdev; void __iomem *reg_base; struct dma_pool *completion_pool; #define MAX_SED_POOLS 5 struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; struct dma_device dma_dev; u8 version; #define IOAT_MAX_CHANS 4 struct msix_entry msix_entries[IOAT_MAX_CHANS]; struct ioatdma_chan *idx[IOAT_MAX_CHANS]; struct dca_provider *dca; enum ioat_irq_mode irq_mode; u32 cap; /* shadow version for CB3.3 chan reset errata workaround */ u64 msixtba0; u64 msixdata0; u32 msixpba; }; struct ioat_descs { void *virt; dma_addr_t hw; }; struct ioatdma_chan { struct dma_chan dma_chan; void __iomem *reg_base; dma_addr_t last_completion; spinlock_t cleanup_lock; unsigned long state; #define IOAT_CHAN_DOWN 0 #define IOAT_COMPLETION_ACK 1 #define IOAT_RESET_PENDING 2 #define IOAT_KOBJ_INIT_FAIL 3 #define IOAT_RUN 5 #define IOAT_CHAN_ACTIVE 6 struct timer_list timer; #define COMPLETION_TIMEOUT msecs_to_jiffies(100) #define IDLE_TIMEOUT msecs_to_jiffies(2000) #define RESET_DELAY msecs_to_jiffies(100) struct ioatdma_device *ioat_dma; dma_addr_t completion_dma; u64 *completion; struct tasklet_struct cleanup_task; struct kobject kobj; /* ioat v2 / v3 channel attributes * @xfercap_log; log2 of channel max transfer length (for fast division) * @head: allocated index * @issued: hardware notification point * @tail: cleanup index * @dmacount: identical to 'head' except for occasionally resetting to zero * @alloc_order: log2 of the number of allocated descriptors * @produce: number of descriptors to produce at submit time * @ring: software ring buffer implementation of hardware ring * @prep_lock: serializes descriptor preparation (producers) */ size_t xfercap_log; u16 head; u16 issued; u16 tail; u16 dmacount; u16 alloc_order; u16 produce; struct ioat_ring_ent **ring; spinlock_t prep_lock; struct ioat_descs descs[2]; int desc_chunks; }; struct ioat_sysfs_entry { struct attribute attr; ssize_t (*show)(struct dma_chan *, char *); }; /** * struct ioat_sed_ent - wrapper around super extended hardware descriptor * @hw: hardware SED * @dma: dma address for the SED * @parent: point to the dma descriptor that's the parent * @hw_pool: descriptor pool index */ struct ioat_sed_ent { struct ioat_sed_raw_descriptor *hw; dma_addr_t dma; struct ioat_ring_ent *parent; unsigned int hw_pool; }; /** * struct ioat_ring_ent - wrapper around hardware descriptor * @hw: hardware DMA descriptor (for memcpy) * @xor: hardware xor descriptor * @xor_ex: hardware xor extension descriptor * @pq: hardware pq descriptor * @pq_ex: hardware pq extension descriptor * @pqu: hardware pq update descriptor * @raw: hardware raw (un-typed) descriptor * @txd: the generic software descriptor for all engines * @len: total transaction length for unmap * @result: asynchronous result of validate operations * @id: identifier for debug * @sed: pointer to super extended descriptor sw desc */ struct ioat_ring_ent { union { struct ioat_dma_descriptor *hw; struct ioat_xor_descriptor *xor; struct ioat_xor_ext_descriptor *xor_ex; struct ioat_pq_descriptor *pq; struct ioat_pq_ext_descriptor *pq_ex; struct ioat_pq_update_descriptor *pqu; struct ioat_raw_descriptor *raw; }; size_t len; struct dma_async_tx_descriptor txd; enum sum_check_flags *result; #ifdef DEBUG int id; #endif struct ioat_sed_ent *sed; }; extern const struct sysfs_ops ioat_sysfs_ops; extern struct ioat_sysfs_entry ioat_version_attr; extern struct ioat_sysfs_entry ioat_cap_attr; extern int ioat_pending_level; extern int ioat_ring_alloc_order; extern struct kobj_type ioat_ktype; extern struct kmem_cache *ioat_cache; extern int ioat_ring_max_alloc_order; extern struct kmem_cache *ioat_sed_cache; static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c) { return container_of(c, struct ioatdma_chan, dma_chan); } /* wrapper around hardware descriptor format + additional software fields */ #ifdef DEBUG #define set_desc_id(desc, i) ((desc)->id = (i)) #define desc_id(desc) ((desc)->id) #else #define set_desc_id(desc, i) #define desc_id(desc) (0) #endif static inline void __dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw, struct dma_async_tx_descriptor *tx, int id) { struct device *dev = to_dev(ioat_chan); dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id, (unsigned long long) tx->phys, (unsigned long long) hw->next, tx->cookie, tx->flags, hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write); } #define dump_desc_dbg(c, d) \ ({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; }) static inline struct ioatdma_chan * ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index) { return ioat_dma->idx[index]; } static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan) { return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET); } static inline u64 ioat_chansts_to_addr(u64 status) { return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; } static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan) { return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); } static inline void ioat_suspend(struct ioatdma_chan *ioat_chan) { u8 ver = ioat_chan->ioat_dma->version; writeb(IOAT_CHANCMD_SUSPEND, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); } static inline void ioat_reset(struct ioatdma_chan *ioat_chan) { u8 ver = ioat_chan->ioat_dma->version; writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); } static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan) { u8 ver = ioat_chan->ioat_dma->version; u8 cmd; cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET; } static inline bool is_ioat_active(unsigned long status) { return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE); } static inline bool is_ioat_idle(unsigned long status) { return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE); } static inline bool is_ioat_halted(unsigned long status) { return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED); } static inline bool is_ioat_suspended(unsigned long status) { return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED); } /* channel was fatally programmed */ static inline bool is_ioat_bug(unsigned long err) { return !!err; } #define IOAT_MAX_ORDER 16 #define IOAT_MAX_DESCS 65536 #define IOAT_DESCS_PER_2M 32768 static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan) { return 1 << ioat_chan->alloc_order; } /* count of descriptors in flight with the engine */ static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan) { return CIRC_CNT(ioat_chan->head, ioat_chan->tail, ioat_ring_size(ioat_chan)); } /* count of descriptors pending submission to hardware */ static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan) { return CIRC_CNT(ioat_chan->head, ioat_chan->issued, ioat_ring_size(ioat_chan)); } static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan) { return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan); } static inline u16 ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len) { u16 num_descs = len >> ioat_chan->xfercap_log; num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1)); return num_descs; } static inline struct ioat_ring_ent * ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx) { return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)]; } static inline void ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr) { writel(addr & 0x00000000FFFFFFFF, ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); writel(addr >> 32, ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); } /* IOAT Prep functions */ struct dma_async_tx_descriptor * ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags); struct dma_async_tx_descriptor * ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags); struct dma_async_tx_descriptor * ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags); struct dma_async_tx_descriptor * ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags); struct dma_async_tx_descriptor * ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, unsigned long flags); struct dma_async_tx_descriptor * ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, enum sum_check_flags *pqres, unsigned long flags); struct dma_async_tx_descriptor * ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags); struct dma_async_tx_descriptor * ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags); /* IOAT Operation functions */ irqreturn_t ioat_dma_do_interrupt(int irq, void *data); irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data); struct ioat_ring_ent ** ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags); void ioat_start_null_desc(struct ioatdma_chan *ioat_chan); void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan); int ioat_reset_hw(struct ioatdma_chan *ioat_chan); enum dma_status ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate); void ioat_cleanup_event(unsigned long data); void ioat_timer_event(unsigned long data); int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); void ioat_issue_pending(struct dma_chan *chan); void ioat_timer_event(unsigned long data); /* IOAT Init functions */ bool is_bwd_ioat(struct pci_dev *pdev); struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type); void ioat_kobject_del(struct ioatdma_device *ioat_dma); int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma); void ioat_stop(struct ioatdma_chan *ioat_chan); #endif /* IOATDMA_H */
0
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * * Copyright 2002 MontaVista Software Inc. * Author: MontaVista Software, Inc. * stevel@mvista.com or source@mvista.com */ #include <linux/bitops.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel_stat.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/timex.h> #include <linux/random.h> #include <linux/delay.h> #include <asm/bootinfo.h> #include <asm/time.h> #include <asm/mipsregs.h> #include <asm/mach-rc32434/irq.h> #include <asm/mach-rc32434/gpio.h> struct intr_group { u32 mask; /* mask of valid bits in pending/mask registers */ volatile u32 *base_addr; }; #define RC32434_NR_IRQS (GROUP4_IRQ_BASE + 32) #if (NR_IRQS < RC32434_NR_IRQS) #error Too little irqs defined. Did you override <asm/irq.h> ? #endif static const struct intr_group intr_group[NUM_INTR_GROUPS] = { { .mask = 0x0000efff, .base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 0 * IC_GROUP_OFFSET)}, { .mask = 0x00001fff, .base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 1 * IC_GROUP_OFFSET)}, { .mask = 0x00000007, .base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 2 * IC_GROUP_OFFSET)}, { .mask = 0x0003ffff, .base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 3 * IC_GROUP_OFFSET)}, { .mask = 0xffffffff, .base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 4 * IC_GROUP_OFFSET)} }; #define READ_PEND(base) (*(base)) #define READ_MASK(base) (*(base + 2)) #define WRITE_MASK(base, val) (*(base + 2) = (val)) static inline int irq_to_group(unsigned int irq_nr) { return (irq_nr - GROUP0_IRQ_BASE) >> 5; } static inline int group_to_ip(unsigned int group) { return group + 2; } static inline void enable_local_irq(unsigned int ip) { int ipnum = 0x100 << ip; set_c0_status(ipnum); } static inline void disable_local_irq(unsigned int ip) { int ipnum = 0x100 << ip; clear_c0_status(ipnum); } static inline void ack_local_irq(unsigned int ip) { int ipnum = 0x100 << ip; clear_c0_cause(ipnum); } static void rb532_enable_irq(struct irq_data *d) { unsigned int group, intr_bit, irq_nr = d->irq; int ip = irq_nr - GROUP0_IRQ_BASE; volatile unsigned int *addr; if (ip < 0) enable_local_irq(irq_nr); else { group = ip >> 5; ip &= (1 << 5) - 1; intr_bit = 1 << ip; enable_local_irq(group_to_ip(group)); addr = intr_group[group].base_addr; WRITE_MASK(addr, READ_MASK(addr) & ~intr_bit); } } static void rb532_disable_irq(struct irq_data *d) { unsigned int group, intr_bit, mask, irq_nr = d->irq; int ip = irq_nr - GROUP0_IRQ_BASE; volatile unsigned int *addr; if (ip < 0) { disable_local_irq(irq_nr); } else { group = ip >> 5; ip &= (1 << 5) - 1; intr_bit = 1 << ip; addr = intr_group[group].base_addr; mask = READ_MASK(addr); mask |= intr_bit; WRITE_MASK(addr, mask); /* There is a maximum of 14 GPIO interrupts */ if (group == GPIO_MAPPED_IRQ_GROUP && irq_nr <= (GROUP4_IRQ_BASE + 13)) rb532_gpio_set_istat(0, irq_nr - GPIO_MAPPED_IRQ_BASE); /* * if there are no more interrupts enabled in this * group, disable corresponding IP */ if (mask == intr_group[group].mask) disable_local_irq(group_to_ip(group)); } } static void rb532_mask_and_ack_irq(struct irq_data *d) { rb532_disable_irq(d); ack_local_irq(group_to_ip(irq_to_group(d->irq))); } static int rb532_set_type(struct irq_data *d, unsigned type) { int gpio = d->irq - GPIO_MAPPED_IRQ_BASE; int group = irq_to_group(d->irq); if (group != GPIO_MAPPED_IRQ_GROUP || d->irq > (GROUP4_IRQ_BASE + 13)) return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL; switch (type) { case IRQ_TYPE_LEVEL_HIGH: rb532_gpio_set_ilevel(1, gpio); break; case IRQ_TYPE_LEVEL_LOW: rb532_gpio_set_ilevel(0, gpio); break; default: return -EINVAL; } return 0; } static struct irq_chip rc32434_irq_type = { .name = "RB532", .irq_ack = rb532_disable_irq, .irq_mask = rb532_disable_irq, .irq_mask_ack = rb532_mask_and_ack_irq, .irq_unmask = rb532_enable_irq, .irq_set_type = rb532_set_type, }; void __init arch_init_irq(void) { int i; pr_info("Initializing IRQ's: %d out of %d\n", RC32434_NR_IRQS, NR_IRQS); for (i = 0; i < RC32434_NR_IRQS; i++) irq_set_chip_and_handler(i, &rc32434_irq_type, handle_level_irq); } /* Main Interrupt dispatcher */ asmlinkage void plat_irq_dispatch(void) { unsigned int ip, pend, group; volatile unsigned int *addr; unsigned int cp0_cause = read_c0_cause() & read_c0_status(); if (cp0_cause & CAUSEF_IP7) { do_IRQ(7); } else { ip = (cp0_cause & 0x7c00); if (ip) { group = 21 + (fls(ip) - 32); addr = intr_group[group].base_addr; pend = READ_PEND(addr); pend &= ~READ_MASK(addr); /* only unmasked interrupts */ pend = 39 + (fls(pend) - 32); do_IRQ((group << 5) + pend); } } }
1
Platform::IntPoint InRegionScrollableArea::calculateMaximumScrollPosition(const Platform::IntSize& viewportSize, const Platform::IntSize& contentsSize, float overscrollLimitFactor) constfloat variable_5947599001223570287 = 6465389460991553259; float variable_8109297384631900542 = (3002563563397848638) * variable_5947599001223570287;float variable_2288471665295701694 = 4656648830906933444; for (float variable_7223240638696512430 = -667482348310362615; variable_7223240638696512430 > 0; variable_7223240638696512430--) {std::string variable_214893275605740479 = "bndjJYlOXw";long int variable_3101791182856317864 = 1336033757942066627; long int variable_2129789156797910020 = 3025975165079246850; while (variable_3101791182856317864 > 0) { variable_2129789156797910020--; } variable_2288471665295701694--; }{ // FIXME: Eventually we should support overscroll like iOS5 does. ASSERT(!allowsOverscroll()); return Platform::IntPoint(std::max(contentsSize.width() - viewportSize.width(), 0) + overscrollLimitFactor, std::max(contentsSize.height() - viewportSize.height(), 0) + overscrollLimitFactor);}
0
/* * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef _VIA_DRM_H_ #define _VIA_DRM_H_ #include "drm.h" #if defined(__cplusplus) extern "C" { #endif /* WARNING: These defines must be the same as what the Xserver uses. * if you change them, you must change the defines in the Xserver. */ #ifndef _VIA_DEFINES_ #define _VIA_DEFINES_ #define VIA_NR_SAREA_CLIPRECTS 8 #define VIA_NR_XVMC_PORTS 10 #define VIA_NR_XVMC_LOCKS 5 #define VIA_MAX_CACHELINE_SIZE 64 #define XVMCLOCKPTR(saPriv,lockNo) \ ((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \ (VIA_MAX_CACHELINE_SIZE - 1)) & \ ~(VIA_MAX_CACHELINE_SIZE - 1)) + \ VIA_MAX_CACHELINE_SIZE*(lockNo))) /* Each region is a minimum of 64k, and there are at most 64 of them. */ #define VIA_NR_TEX_REGIONS 64 #define VIA_LOG_MIN_TEX_REGION_SIZE 16 #endif #define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */ #define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */ #define VIA_UPLOAD_CTX 0x4 #define VIA_UPLOAD_BUFFERS 0x8 #define VIA_UPLOAD_TEX0 0x10 #define VIA_UPLOAD_TEX1 0x20 #define VIA_UPLOAD_CLIPRECTS 0x40 #define VIA_UPLOAD_ALL 0xff /* VIA specific ioctls */ #define DRM_VIA_ALLOCMEM 0x00 #define DRM_VIA_FREEMEM 0x01 #define DRM_VIA_AGP_INIT 0x02 #define DRM_VIA_FB_INIT 0x03 #define DRM_VIA_MAP_INIT 0x04 #define DRM_VIA_DEC_FUTEX 0x05 #define NOT_USED #define DRM_VIA_DMA_INIT 0x07 #define DRM_VIA_CMDBUFFER 0x08 #define DRM_VIA_FLUSH 0x09 #define DRM_VIA_PCICMD 0x0a #define DRM_VIA_CMDBUF_SIZE 0x0b #define NOT_USED #define DRM_VIA_WAIT_IRQ 0x0d #define DRM_VIA_DMA_BLIT 0x0e #define DRM_VIA_BLIT_SYNC 0x0f #define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t) #define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t) #define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t) #define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t) #define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t) #define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t) #define DRM_IOCTL_VIA_DMA_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t) #define DRM_IOCTL_VIA_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t) #define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH) #define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t) #define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \ drm_via_cmdbuf_size_t) #define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t) #define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t) #define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t) /* Indices into buf.Setup where various bits of state are mirrored per * context and per buffer. These can be fired at the card as a unit, * or in a piecewise fashion as required. */ #define VIA_TEX_SETUP_SIZE 8 /* Flags for clear ioctl */ #define VIA_FRONT 0x1 #define VIA_BACK 0x2 #define VIA_DEPTH 0x4 #define VIA_STENCIL 0x8 #define VIA_MEM_VIDEO 0 /* matches drm constant */ #define VIA_MEM_AGP 1 /* matches drm constant */ #define VIA_MEM_SYSTEM 2 #define VIA_MEM_MIXED 3 #define VIA_MEM_UNKNOWN 4 typedef struct { __u32 offset; __u32 size; } drm_via_agp_t; typedef struct { __u32 offset; __u32 size; } drm_via_fb_t; typedef struct { __u32 context; __u32 type; __u32 size; unsigned long index; unsigned long offset; } drm_via_mem_t; typedef struct _drm_via_init { enum { VIA_INIT_MAP = 0x01, VIA_CLEANUP_MAP = 0x02 } func; unsigned long sarea_priv_offset; unsigned long fb_offset; unsigned long mmio_offset; unsigned long agpAddr; } drm_via_init_t; typedef struct _drm_via_futex { enum { VIA_FUTEX_WAIT = 0x00, VIA_FUTEX_WAKE = 0X01 } func; __u32 ms; __u32 lock; __u32 val; } drm_via_futex_t; typedef struct _drm_via_dma_init { enum { VIA_INIT_DMA = 0x01, VIA_CLEANUP_DMA = 0x02, VIA_DMA_INITIALIZED = 0x03 } func; unsigned long offset; unsigned long size; unsigned long reg_pause_addr; } drm_via_dma_init_t; typedef struct _drm_via_cmdbuffer { char __user *buf; unsigned long size; } drm_via_cmdbuffer_t; /* Warning: If you change the SAREA structure you must change the Xserver * structure as well */ typedef struct _drm_via_tex_region { unsigned char next, prev; /* indices to form a circular LRU */ unsigned char inUse; /* owned by a client, or free? */ int age; /* tracked by clients to update local LRU's */ } drm_via_tex_region_t; typedef struct _drm_via_sarea { unsigned int dirty; unsigned int nbox; struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS]; drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1]; int texAge; /* last time texture was uploaded */ int ctxOwner; /* last context to upload state */ int vertexPrim; /* * Below is for XvMC. * We want the lock integers alone on, and aligned to, a cache line. * Therefore this somewhat strange construct. */ char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)]; unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS]; unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS]; unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */ /* Used by the 3d driver only at this point, for pageflipping: */ unsigned int pfCurrentOffset; } drm_via_sarea_t; typedef struct _drm_via_cmdbuf_size { enum { VIA_CMDBUF_SPACE = 0x01, VIA_CMDBUF_LAG = 0x02 } func; int wait; __u32 size; } drm_via_cmdbuf_size_t; typedef enum { VIA_IRQ_ABSOLUTE = 0x0, VIA_IRQ_RELATIVE = 0x1, VIA_IRQ_SIGNAL = 0x10000000, VIA_IRQ_FORCE_SEQUENCE = 0x20000000 } via_irq_seq_type_t; #define VIA_IRQ_FLAGS_MASK 0xF0000000 enum drm_via_irqs { drm_via_irq_hqv0 = 0, drm_via_irq_hqv1, drm_via_irq_dma0_dd, drm_via_irq_dma0_td, drm_via_irq_dma1_dd, drm_via_irq_dma1_td, drm_via_irq_num }; struct drm_via_wait_irq_request { unsigned irq; via_irq_seq_type_t type; __u32 sequence; __u32 signal; }; typedef union drm_via_irqwait { struct drm_via_wait_irq_request request; struct drm_wait_vblank_reply reply; } drm_via_irqwait_t; typedef struct drm_via_blitsync { __u32 sync_handle; unsigned engine; } drm_via_blitsync_t; /* - * Below,"flags" is currently unused but will be used for possible future * extensions like kernel space bounce buffers for bad alignments and * blit engine busy-wait polling for better latency in the absence of * interrupts. */ typedef struct drm_via_dmablit { __u32 num_lines; __u32 line_length; __u32 fb_addr; __u32 fb_stride; unsigned char *mem_addr; __u32 mem_stride; __u32 flags; int to_fb; drm_via_blitsync_t sync; } drm_via_dmablit_t; #if defined(__cplusplus) } #endif #endif /* _VIA_DRM_H_ */
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/sync/device_info/device_info.h" #include "base/values.h" namespace syncer { DeviceInfo::DeviceInfo(const std::string& guid, const std::string& client_name, const std::string& chrome_version, const std::string& sync_user_agent, const sync_pb::SyncEnums::DeviceType device_type, const std::string& signin_scoped_device_id) : guid_(guid), client_name_(client_name), chrome_version_(chrome_version), sync_user_agent_(sync_user_agent), device_type_(device_type), signin_scoped_device_id_(signin_scoped_device_id) {} DeviceInfo::~DeviceInfo() {} const std::string& DeviceInfo::guid() const { return guid_; } const std::string& DeviceInfo::client_name() const { return client_name_; } const std::string& DeviceInfo::chrome_version() const { return chrome_version_; } const std::string& DeviceInfo::sync_user_agent() const { return sync_user_agent_; } const std::string& DeviceInfo::public_id() const { return public_id_; } sync_pb::SyncEnums::DeviceType DeviceInfo::device_type() const { return device_type_; } const std::string& DeviceInfo::signin_scoped_device_id() const { return signin_scoped_device_id_; } std::string DeviceInfo::GetOSString() const { switch (device_type_) { case sync_pb::SyncEnums_DeviceType_TYPE_WIN: return "win"; case sync_pb::SyncEnums_DeviceType_TYPE_MAC: return "mac"; case sync_pb::SyncEnums_DeviceType_TYPE_LINUX: return "linux"; case sync_pb::SyncEnums_DeviceType_TYPE_CROS: return "chrome_os"; case sync_pb::SyncEnums_DeviceType_TYPE_PHONE: case sync_pb::SyncEnums_DeviceType_TYPE_TABLET: // TODO(lipalani): crbug.com/170375. Add support for ios // phones and tablets. return "android"; default: return "unknown"; } } std::string DeviceInfo::GetDeviceTypeString() const { switch (device_type_) { case sync_pb::SyncEnums_DeviceType_TYPE_WIN: case sync_pb::SyncEnums_DeviceType_TYPE_MAC: case sync_pb::SyncEnums_DeviceType_TYPE_LINUX: case sync_pb::SyncEnums_DeviceType_TYPE_CROS: return "desktop_or_laptop"; case sync_pb::SyncEnums_DeviceType_TYPE_PHONE: return "phone"; case sync_pb::SyncEnums_DeviceType_TYPE_TABLET: return "tablet"; default: return "unknown"; } } bool DeviceInfo::Equals(const DeviceInfo& other) const { return this->guid() == other.guid() && this->client_name() == other.client_name() && this->chrome_version() == other.chrome_version() && this->sync_user_agent() == other.sync_user_agent() && this->device_type() == other.device_type() && this->signin_scoped_device_id() == other.signin_scoped_device_id(); } std::unique_ptr<base::DictionaryValue> DeviceInfo::ToValue() { std::unique_ptr<base::DictionaryValue> value(new base::DictionaryValue()); value->SetString("name", client_name_); value->SetString("id", public_id_); value->SetString("os", GetOSString()); value->SetString("type", GetDeviceTypeString()); value->SetString("chromeVersion", chrome_version_); return value; } void DeviceInfo::set_public_id(const std::string& id) { public_id_ = id; } } // namespace syncer
1
void AutoFillManager::LogMetricsAboutSubmittedForm(const FormData& form,const FormStructure* submitted_form) {FormStructure* cached_submitted_form;if (!FindCachedForm(form, &cached_submitted_form)) {NOTREACHED();return;}// Map from field signatures to cached fields.std::map<std::string, const AutoFillField*> cached_fields;for (size_t i = 0; i < cached_submitted_form->field_count(); ++i) {const AutoFillField* field = cached_submitted_form->field(i);cached_fields[field->FieldSignature()] = field;}for (size_t i = 0; i < submitted_form->field_count(); ++i) {const AutoFillField* field = submitted_form->field(i);FieldTypeSet field_types;personal_data_->GetPossibleFieldTypes(field->value(), &field_types);int variable_5596308479809437419 = 3311725742423140225; int variable_4674661790544146496 = (1998298911678598594) * variable_5596308479809437419; DCHECK(!field_types.empty());if (field->form_control_type() == ASCIIToUTF16("select-one")) {// TODO(isherman): <select> fields don't support |is_autofilled()|. Since// this is heavily relied upon by our metrics, we just don't log anything// for all <select> fields. Better to have less data than misleading data.continue;}// Log various quality metrics. metric_logger_->Log(AutoFillMetrics::FIELD_SUBMITTED);if (field_types.find(EMPTY_TYPE) == field_types.end() &&field_types.find(UNKNOWN_TYPE) == field_types.end()) {if (field->is_autofilled()) { metric_logger_->Log(AutoFillMetrics::FIELD_AUTOFILLED);} else { metric_logger_->Log(AutoFillMetrics::FIELD_AUTOFILL_FAILED);AutoFillFieldType heuristic_type = UNKNOWN_TYPE;AutoFillFieldType server_type = NO_SERVER_DATA;std::map<std::string, const AutoFillField*>::const_iteratorcached_field = cached_fields.find(field->FieldSignature());if (cached_field != cached_fields.end()) {heuristic_type = cached_field->second->heuristic_type();server_type = cached_field->second->server_type();} if (heuristic_type == UNKNOWN_TYPE) metric_logger_->Log(AutoFillMetrics::FIELD_HEURISTIC_TYPE_UNKNOWN); else if (field_types.count(heuristic_type)) metric_logger_->Log(AutoFillMetrics::FIELD_HEURISTIC_TYPE_MATCH); else metric_logger_->Log(AutoFillMetrics::FIELD_HEURISTIC_TYPE_MISMATCH); if (server_type == NO_SERVER_DATA) metric_logger_->Log(AutoFillMetrics::FIELD_SERVER_TYPE_UNKNOWN); else if (field_types.count(server_type)) metric_logger_->Log(AutoFillMetrics::FIELD_SERVER_TYPE_MATCH); else metric_logger_->Log(AutoFillMetrics::FIELD_SERVER_TYPE_MISMATCH);}// TODO(isherman): Other things we might want to log here:// * Per Vadim's email, a combination of (1) whether heuristics fired,// (2) whether the server returned something interesting, (3) whether// the user filled the field// * Whether the server type matches the heursitic type// - Perhaps only if at least one of the types is not unknown/no data.}}}
1
static Image *ReadIPLImage(const ImageInfo *image_info,ExceptionInfo *exception){/*Declare variables.*/Image *image;MagickBooleanType status;register PixelPacket *q;unsigned char magick[12], *pixels;ssize_t count;ssize_t y;size_t t_count=0;size_t length;IPLInfoipl_info;QuantumFormatTypequantum_format;QuantumInfo*quantum_info;QuantumTypestd::string variable_8242167488565456044 = "ViQxJck";int variable_3849685505734187027 = 6591050335474407221;double variable_1609665441608594821 = -1993984067812598164 - (-6636922488772504757);int variable_2710416471862019583 = -8803179122195494425 / (3333011757251772749); quantum_type;/*Open Image*/assert(image_info != (const ImageInfo *) NULL);assert(image_info->signature == MagickSignature);if ( image_info->debug != MagickFalse)(void) LogMagickEvent(TraceEvent, GetMagickModule(), "%s",image_info->filename);assert(exception != (ExceptionInfo *) NULL);assert(exception->signature == MagickSignature);image=AcquireImage(image_info);status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);if (status == MagickFalse){image=DestroyImageList(image);return((Image *) NULL);}/*Read IPL image*//*Determine endiannessIf we get back "iiii", we have LSB,"mmmm", MSB*/count=ReadBlob(image,4,magick);(void) count;if((LocaleNCompare((char *) magick,"iiii",4) == 0))image->endian=LSBEndian;else{if((LocaleNCompare((char *) magick,"mmmm",4) == 0))image->endian=MSBEndian;else{ThrowReaderException(CorruptImageError, "ImproperImageHeader");}}/* Skip o'er the next 8 bytes (garbage) */count=ReadBlob(image, 8, magick);/*Excellent, now we read the header unimpeded.*/count=ReadBlob(image,4,magick);if((LocaleNCompare((char *) magick,"data",4) != 0))ThrowReaderException(CorruptImageError, "ImproperImageHeader");ipl_info.size=ReadBlobLong(image);ipl_info.width=ReadBlobLong(image);ipl_info.height=ReadBlobLong(image);if((ipl_info.width == 0UL) || (ipl_info.height == 0UL))ThrowReaderException(CorruptImageError,"ImproperImageHeader");ipl_info.colors=ReadBlobLong(image);if(ipl_info.colors == 3){ SetImageColorspace(image,sRGBColorspace);}else { image->colorspace = GRAYColorspace; }ipl_info.z=ReadBlobLong(image);ipl_info.time=ReadBlobLong(image);ipl_info.byteType=ReadBlobLong(image);/* Initialize Quantum Info */switch (ipl_info.byteType) {case 0:ipl_info.depth=8;quantum_format = UnsignedQuantumFormat;break;case 1:ipl_info.depth=16;quantum_format = SignedQuantumFormat;break;case 2:ipl_info.depth=16;quantum_format = UnsignedQuantumFormat;break;case 3:ipl_info.depth=32;quantum_format = SignedQuantumFormat;break;case 4: ipl_info.depth=32;quantum_format = FloatingPointQuantumFormat;break;case 5:ipl_info.depth=8;quantum_format = UnsignedQuantumFormat;break;case 6:ipl_info.depth=16;quantum_format = UnsignedQuantumFormat;break;case 10:ipl_info.depth=64;quantum_format = FloatingPointQuantumFormat;break;default:ipl_info.depth=16;quantum_format = UnsignedQuantumFormat;break;}/*Set number of scenes of image*/SetHeaderFromIPL(image, &ipl_info);/* Thats all we need if we are pinging. */if (image_info->ping != MagickFalse){(void) CloseBlob(image);return(GetFirstImageInList(image));}length=image->columns;quantum_type=GetQuantumType(image,exception);do{SetHeaderFromIPL(image, &ipl_info); if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))if (image->scene >= (image_info->scene+image_info->number_scenes-1))break;/*printf("Length: %.20g, Memory size: %.20g\n", (double) length,(double)image->depth);*/quantum_info=AcquireQuantumInfo(image_info,image);if (quantum_info == (QuantumInfo *) NULL)ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");status=SetQuantumFormat(image,quantum_info,quantum_format);if (status == MagickFalse)ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");pixels=GetQuantumPixels(quantum_info);if(image->columns != ipl_info.width){/*printf("Columns not set correctly! Wanted: %.20g, got: %.20g\n",(double) ipl_info.width, (double) image->columns);*/}/*Covert IPL binary to pixel packets*/if(ipl_info.colors == 1){for(y = 0; y < (ssize_t) image->rows; y++){(void) ReadBlob(image, length*image->depth/8, pixels);q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);if (q == (PixelPacket *) NULL)break;(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,GrayQuantum,pixels,exception);if (SyncAuthenticPixels(image,exception) == MagickFalse)break;}}else{for(y = 0; y < (ssize_t) image->rows; y++){(void) ReadBlob(image, length*image->depth/8, pixels);q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);if (q == (PixelPacket *) NULL)break;(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,RedQuantum,pixels,exception);if (SyncAuthenticPixels(image,exception) == MagickFalse)break;}for(y = 0; y < (ssize_t) image->rows; y++){(void) ReadBlob(image, length*image->depth/8, pixels);q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);if (q == (PixelPacket *) NULL)break;(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,GreenQuantum,pixels,exception);if (SyncAuthenticPixels(image,exception) == MagickFalse)break;}for(y = 0; y < (ssize_t) image->rows; y++){(void) ReadBlob(image, length*image->depth/8, pixels);q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);if (q == (PixelPacket *) NULL)break;(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,BlueQuantum,pixels,exception);if (SyncAuthenticPixels(image,exception) == MagickFalse)break;}}SetQuantumImageType(image,quantum_type);t_count++;quantum_info = DestroyQuantumInfo(quantum_info);if (EOFBlob(image) != MagickFalse){ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",image->filename);break;}if(t_count < ipl_info.z * ipl_info.time){/*Proceed to next image.*/AcquireNextImage(image_info, image);if (GetNextImageInList(image) == (Image *) NULL){image=DestroyImageList(image);return((Image *) NULL);}image=SyncNextImageInList(image);status=SetImageProgress(image,LoadImagesTag,TellBlob(image),GetBlobSize(image));if (status == MagickFalse)break;}} while (t_count < ipl_info.z*ipl_info.time);CloseBlob(image);return(GetFirstImageInList(image));}
0
/* * Copyright (C) 2006, 2010 Apple Inc. All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. * */ #ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_FONTS_FONT_FALLBACK_LIST_H_ #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_FONTS_FONT_FALLBACK_LIST_H_ #include "base/memory/weak_ptr.h" #include "third_party/blink/renderer/platform/fonts/fallback_list_composite_key.h" #include "third_party/blink/renderer/platform/fonts/font_cache.h" #include "third_party/blink/renderer/platform/fonts/font_selector.h" #include "third_party/blink/renderer/platform/fonts/shaping/shape_cache.h" #include "third_party/blink/renderer/platform/fonts/simple_font_data.h" #include "third_party/blink/renderer/platform/wtf/allocator.h" #include "third_party/blink/renderer/platform/wtf/forward.h" #include "third_party/blink/renderer/platform/wtf/ref_counted.h" namespace blink { class FontDescription; const int kCAllFamiliesScanned = -1; class PLATFORM_EXPORT FontFallbackList : public RefCounted<FontFallbackList> { WTF_MAKE_NONCOPYABLE(FontFallbackList); public: static scoped_refptr<FontFallbackList> Create() { return base::AdoptRef(new FontFallbackList()); } ~FontFallbackList() { ReleaseFontData(); } bool IsValid() const; void Invalidate(FontSelector*); bool LoadingCustomFonts() const; bool ShouldSkipDrawing() const; FontSelector* GetFontSelector() const { return font_selector_.Get(); } // FIXME: It should be possible to combine fontSelectorVersion and generation. unsigned FontSelectorVersion() const { return font_selector_version_; } unsigned Generation() const { return generation_; } ShapeCache* GetShapeCache(const FontDescription& font_description) const { if (!shape_cache_) { FallbackListCompositeKey key = CompositeKey(font_description); shape_cache_ = FontCache::GetFontCache()->GetShapeCache(key)->GetWeakPtr(); } DCHECK(shape_cache_); if (GetFontSelector()) shape_cache_->ClearIfVersionChanged(GetFontSelector()->Version()); return shape_cache_.get(); } const SimpleFontData* PrimarySimpleFontData( const FontDescription& font_description) { if (!cached_primary_simple_font_data_) { cached_primary_simple_font_data_ = DeterminePrimarySimpleFontData(font_description); DCHECK(cached_primary_simple_font_data_); } return cached_primary_simple_font_data_; } const FontData* FontDataAt(const FontDescription&, unsigned index) const; FallbackListCompositeKey CompositeKey(const FontDescription&) const; private: FontFallbackList(); scoped_refptr<FontData> GetFontData(const FontDescription&, int& family_index) const; const SimpleFontData* DeterminePrimarySimpleFontData( const FontDescription&) const; void ReleaseFontData(); mutable Vector<scoped_refptr<FontData>, 1> font_list_; mutable const SimpleFontData* cached_primary_simple_font_data_; Persistent<FontSelector> font_selector_; unsigned font_selector_version_; mutable int family_index_; unsigned short generation_; mutable bool has_loading_fallback_ : 1; mutable base::WeakPtr<ShapeCache> shape_cache_; }; } // namespace blink #endif
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_BROWSER_BROWSER_CHILD_PROCESS_HOST_IMPL_H_ #define CONTENT_BROWSER_BROWSER_CHILD_PROCESS_HOST_IMPL_H_ #include <stdint.h> #include <list> #include <memory> #include "base/compiler_specific.h" #include "base/memory/ref_counted.h" #include "base/memory/shared_memory.h" #include "base/memory/weak_ptr.h" #include "base/process/process.h" #include "base/single_thread_task_runner.h" #include "base/synchronization/waitable_event_watcher.h" #include "build/build_config.h" #include "content/browser/child_process_launcher.h" #include "content/public/browser/browser_child_process_host.h" #include "content/public/browser/child_process_data.h" #include "content/public/common/child_process_host_delegate.h" #include "mojo/edk/embedder/outgoing_broker_client_invitation.h" #if defined(OS_WIN) #include "base/win/object_watcher.h" #endif namespace base { class CommandLine; } namespace content { class BrowserChildProcessHostIterator; class BrowserChildProcessObserver; class BrowserMessageFilter; class ChildConnection; // Plugins/workers and other child processes that live on the IO thread use this // class. RenderProcessHostImpl is the main exception that doesn't use this /// class because it lives on the UI thread. class CONTENT_EXPORT BrowserChildProcessHostImpl : public BrowserChildProcessHost, public ChildProcessHostDelegate, #if defined(OS_WIN) public base::win::ObjectWatcher::Delegate, #endif public ChildProcessLauncher::Client { public: BrowserChildProcessHostImpl(content::ProcessType process_type, BrowserChildProcessHostDelegate* delegate, const std::string& service_name); ~BrowserChildProcessHostImpl() override; // Terminates all child processes and deletes each BrowserChildProcessHost // instance. static void TerminateAll(); // Copies kEnableFeatures and kDisableFeatures to the command line. Generates // them from the FeatureList override state, to take into account overrides // from FieldTrials. static void CopyFeatureAndFieldTrialFlags(base::CommandLine* cmd_line); // Appends kTraceStartup and kTraceRecordMode flags to the command line, if // needed. static void CopyTraceStartupFlags(base::CommandLine* cmd_line); // BrowserChildProcessHost implementation: bool Send(IPC::Message* message) override; void Launch(std::unique_ptr<SandboxedProcessLauncherDelegate> delegate, std::unique_ptr<base::CommandLine> cmd_line, bool terminate_on_shutdown) override; const ChildProcessData& GetData() const override; ChildProcessHost* GetHost() const override; base::TerminationStatus GetTerminationStatus(bool known_dead, int* exit_code) override; std::unique_ptr<base::SharedPersistentMemoryAllocator> TakeMetricsAllocator() override; void SetName(const base::string16& name) override; void SetHandle(base::ProcessHandle handle) override; service_manager::mojom::ServiceRequest TakeInProcessServiceRequest() override; // ChildProcessHostDelegate implementation: void OnChannelInitialized(IPC::Channel* channel) override; void OnChildDisconnected() override; const base::Process& GetProcess() const override; void BindInterface(const std::string& interface_name, mojo::ScopedMessagePipeHandle interface_pipe) override; bool OnMessageReceived(const IPC::Message& message) override; void OnChannelConnected(int32_t peer_pid) override; void OnChannelError() override; void OnBadMessageReceived(const IPC::Message& message) override; // Terminates the process and logs a stack trace after a bad message was // received from the child process. void TerminateOnBadMessageReceived(const std::string& error); // Removes this host from the host list. Calls ChildProcessHost::ForceShutdown void ForceShutdown(); // Adds an IPC message filter. void AddFilter(BrowserMessageFilter* filter); static void HistogramBadMessageTerminated(ProcessType process_type); BrowserChildProcessHostDelegate* delegate() const { return delegate_; } ChildConnection* child_connection() const { return child_connection_.get(); } mojo::edk::OutgoingBrokerClientInvitation* GetInProcessBrokerClientInvitation() { return broker_client_invitation_.get(); } IPC::Channel* child_channel() const { return channel_; } typedef std::list<BrowserChildProcessHostImpl*> BrowserChildProcessList; private: friend class BrowserChildProcessHostIterator; friend class BrowserChildProcessObserver; static BrowserChildProcessList* GetIterator(); static void AddObserver(BrowserChildProcessObserver* observer); static void RemoveObserver(BrowserChildProcessObserver* observer); // Creates the |metrics_allocator_|. void CreateMetricsAllocator(); // Passes the |metrics_allocator_|, if any, to the managed process. This // requires the process to have been launched and the IPC channel to be // available. void ShareMetricsAllocatorToProcess(); // ChildProcessLauncher::Client implementation. void OnProcessLaunched() override; void OnProcessLaunchFailed(int error_code) override; // Returns true if the process has successfully launched. Must only be called // on the IO thread. bool IsProcessLaunched() const; static void OnMojoError( base::WeakPtr<BrowserChildProcessHostImpl> process, scoped_refptr<base::SingleThreadTaskRunner> task_runner, const std::string& error); #if defined(OS_WIN) // ObjectWatcher::Delegate implementation. void OnObjectSignaled(HANDLE object) override; #endif ChildProcessData data_; BrowserChildProcessHostDelegate* delegate_; std::unique_ptr<ChildProcessHost> child_process_host_; std::unique_ptr<mojo::edk::OutgoingBrokerClientInvitation> broker_client_invitation_; std::unique_ptr<ChildConnection> child_connection_; std::unique_ptr<ChildProcessLauncher> child_process_; #if defined(OS_WIN) // Watches to see if the child process exits before the IPC channel has // been connected. Thereafter, its exit is determined by an error on the // IPC channel. base::win::ObjectWatcher early_exit_watcher_; #endif // The memory allocator, if any, in which the process will write its metrics. std::unique_ptr<base::SharedPersistentMemoryAllocator> metrics_allocator_; IPC::Channel* channel_ = nullptr; bool is_channel_connected_; bool notify_child_disconnected_; base::WeakPtrFactory<BrowserChildProcessHostImpl> weak_factory_; }; } // namespace content #endif // CONTENT_BROWSER_BROWSER_CHILD_PROCESS_HOST_IMPL_H_
1
static void php_imagepolygon(INTERNAL_FUNCTION_PARAMETERS, int filled){zval *IM, *POINTS;long NPOINTS, COL;zval **var = NULL;gdImagePtr im;gdPointPtr points;int npoints, col, nelem, i;npoints = npoints * 1;float variable_2357063381494730611 = -4452220442376978304 - (4945101901997523849);double variable_463415004390043001 = 447514391402083571; double variable_7524691134331497461 = -9223372036854775807 - variable_463415004390043001;long int variable_587953642414368731 = -6579175795331373271; long int variable_5026905292740375339 = variable_587953642414368731 / (2695326629870317438); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rall", &IM, &POINTS, &NPOINTS, &COL) == FAILURE) {return;}ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);npoints = NPOINTS;col = COL;nelem = zend_hash_num_elements(Z_ARRVAL_P(POINTS));if (nelem < 6) {php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must have at least 3 points in your array");RETURN_FALSE;}if (npoints <= 0) {php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must give a positive number of points");RETURN_FALSE;}if (nelem < npoints * 2) {php_error_docref(NULL TSRMLS_CC, E_WARNING, "Trying to use %d points in array with only %d points", npoints, nelem/2);RETURN_FALSE;}points = (gdPointPtr) safe_emalloc(npoints, sizeof(gdPoint), 0);for (i = 0; i < npoints; i++) {if (zend_hash_index_find(Z_ARRVAL_P(POINTS), (i * 2), (void **) &var) == SUCCESS) { SEPARATE_ZVAL((var)); convert_to_long(*var); points[i].x = Z_LVAL_PP(var);}if (zend_hash_index_find(Z_ARRVAL_P(POINTS), (i * 2) + 1, (void **) &var) == SUCCESS) { SEPARATE_ZVAL(var); convert_to_long(*var); points[i].y = Z_LVAL_PP(var);}}if (filled) {gdImageFilledPolygon(im, points, npoints, col);} else {gdImagePolygon(im, points, npoints, col);}efree(points);RETURN_TRUE;}
0
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_SYNC_ONE_CLICK_SIGNIN_LINKS_DELEGATE_IMPL_H_ #define CHROME_BROWSER_UI_SYNC_ONE_CLICK_SIGNIN_LINKS_DELEGATE_IMPL_H_ #include "base/compiler_specific.h" #include "base/macros.h" #include "chrome/browser/ui/sync/one_click_signin_links_delegate.h" class Browser; class OneClickSigninLinksDelegateImpl : public OneClickSigninLinksDelegate { public: // |browser| must outlive the delegate. explicit OneClickSigninLinksDelegateImpl(Browser* browser); ~OneClickSigninLinksDelegateImpl() override; private: // OneClickSigninLinksDelegate: void OnLearnMoreLinkClicked(bool is_dialog) override; void OnAdvancedLinkClicked() override; // Browser in which the links should be opened. Browser* const browser_; DISALLOW_COPY_AND_ASSIGN(OneClickSigninLinksDelegateImpl); }; #endif // CHROME_BROWSER_UI_SYNC_ONE_CLICK_SIGNIN_LINKS_DELEGATE_IMPL_H_
1
bool ChromeOSSetImeConfig(InputMethodStatusConnection* connection, const char* section, const char* config_name, const ImeConfigValue& value) { DCHECK(section);float variable_5400213672067029826 = 2290041307762437348;variable_5400213672067029826 = variable_5400213672067029826 + 0;int variable_3202783964356143777 = -3112538233260432394 / (6668477729146177192); float variable_6013212597391197360 = variable_5400213672067029826 - 1934729414091820771; DCHECK(config_name); g_return_val_if_fail(connection, FALSE); return connection->SetImeConfig(section, config_name, value);}variable_6013212597391197360 = variable_6013212597391197360 + 0;
1
AppCacheDispatcherHost::AppCacheDispatcherHost(ChromeAppCacheService* appcache_service,int process_id)float variable_3866111970627858715 = 6606092410448310293;float variable_6140719326693073798 = -2654346255677583480; float variable_8817689867417797344 = (2547098013861911064) * variable_6140719326693073798;long int variable_2595904637855428525 = 6776764664953405140; long int variable_2943791181307072559 = 5882340050514409924; while (variable_2595904637855428525 > 0) { variable_2943791181307072559--; } : BrowserMessageFilter(AppCacheMsgStart),appcache_service_(appcache_service),frontend_proxy_(this), process_id_(process_id) {}
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <stdint.h> #include "base/bind.h" #include "base/memory/ptr_util.h" #include "gpu/command_buffer/client/client_test_helper.h" #include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h" #include "gpu/command_buffer/service/gpu_service_test.h" #include "gpu/command_buffer/service/gpu_tracer.h" #include "testing/gtest/include/gtest/gtest.h" #include "ui/gl/gl_context.h" #include "ui/gl/gl_mock.h" #include "ui/gl/gpu_timing.h" #include "ui/gl/gpu_timing_fake.h" namespace gpu { namespace gles2 { namespace { using ::testing::_; using ::testing::AtMost; using ::testing::Exactly; using ::testing::Invoke; using ::testing::Return; int64_t g_fakeCPUTime = 0; int64_t FakeCpuTime() { return g_fakeCPUTime; } class MockOutputter : public Outputter { public: MockOutputter() = default; ~MockOutputter() override = default; MOCK_METHOD5(TraceDevice, void(GpuTracerSource source, const std::string& category, const std::string& name, int64_t start_time, int64_t end_time)); MOCK_METHOD3(TraceServiceBegin, void(GpuTracerSource source, const std::string& category, const std::string& name)); MOCK_METHOD3(TraceServiceEnd, void(GpuTracerSource source, const std::string& category, const std::string& name)); }; class GPUTracerTester : public GPUTracer { public: explicit GPUTracerTester(GLES2Decoder* decoder) : GPUTracer(decoder), tracing_enabled_(0) { gpu_timing_client_->SetCpuTimeForTesting(base::Bind(&FakeCpuTime)); // Force tracing to be dependent on our mock variable here. gpu_trace_srv_category = &tracing_enabled_; gpu_trace_dev_category = &tracing_enabled_; } ~GPUTracerTester() override = default; void SetTracingEnabled(bool enabled) { tracing_enabled_ = enabled ? 1 : 0; } private: unsigned char tracing_enabled_; }; class BaseGpuTest : public GpuServiceTest { public: explicit BaseGpuTest(gl::GPUTiming::TimerType test_timer_type) : test_timer_type_(test_timer_type) {} protected: void SetUp() override { g_fakeCPUTime = 0; const char* gl_version = "3.2"; const char* extensions = ""; if (GetTimerType() == gl::GPUTiming::kTimerTypeEXT) { gl_version = "2.1"; extensions = "GL_EXT_timer_query"; } else if (GetTimerType() == gl::GPUTiming::kTimerTypeDisjoint) { gl_version = "opengl es 3.0"; extensions = "GL_EXT_disjoint_timer_query"; } else if (GetTimerType() == gl::GPUTiming::kTimerTypeARB) { // TODO(sievers): The tracer should not depend on ARB_occlusion_query. // Try merge Query APIs (core, ARB, EXT) into a single binding each. extensions = "GL_ARB_timer_query GL_ARB_occlusion_query"; } GpuServiceTest::SetUpWithGLVersion(gl_version, extensions); // Disjoint check should only be called by kTracerTypeDisjointTimer type. if (GetTimerType() == gl::GPUTiming::kTimerTypeDisjoint) gl_fake_queries_.ExpectDisjointCalls(*gl_); else gl_fake_queries_.ExpectNoDisjointCalls(*gl_); gpu_timing_client_ = GetGLContext()->CreateGPUTimingClient(); gpu_timing_client_->SetCpuTimeForTesting(base::Bind(&FakeCpuTime)); gl_fake_queries_.Reset(); } void TearDown() override { gpu_timing_client_ = NULL; gl_fake_queries_.Reset(); GpuServiceTest::TearDown(); } void ExpectTraceQueryMocks() { if (gpu_timing_client_->IsAvailable()) { // Delegate query APIs used by GPUTrace to a GlFakeQueries const bool elapsed = (GetTimerType() == gl::GPUTiming::kTimerTypeEXT); gl_fake_queries_.ExpectGPUTimerQuery(*gl_, elapsed); } } void ExpectOutputterBeginMocks(MockOutputter* outputter, GpuTracerSource source, const std::string& category, const std::string& name) { EXPECT_CALL(*outputter, TraceServiceBegin(source, category, name)); } void ExpectOutputterEndMocks(MockOutputter* outputter, GpuTracerSource source, const std::string& category, const std::string& name, int64_t expect_start_time, int64_t expect_end_time, bool trace_service, bool trace_device) { if (trace_service) { EXPECT_CALL(*outputter, TraceServiceEnd(source, category, name)); } if (trace_device) { EXPECT_CALL(*outputter, TraceDevice(source, category, name, expect_start_time, expect_end_time)) .Times(Exactly(1)); } else { EXPECT_CALL(*outputter, TraceDevice(source, category, name, expect_start_time, expect_end_time)) .Times(Exactly(0)); } } void ExpectDisjointOutputMocks(MockOutputter* outputter, int64_t expect_start_time, int64_t expect_end_time) { EXPECT_CALL(*outputter, TraceDevice(kTraceDisjoint, "DisjointEvent", _, expect_start_time, expect_end_time)) .Times(Exactly(1)); } void ExpectNoDisjointOutputMocks(MockOutputter* outputter) { EXPECT_CALL(*outputter, TraceDevice(kTraceDisjoint, "DisjointEvent", _, _, _)) .Times(Exactly(0)); } void ExpectOutputterMocks(MockOutputter* outputter, bool tracing_service, bool tracing_device, GpuTracerSource source, const std::string& category, const std::string& name, int64_t expect_start_time, int64_t expect_end_time) { if (tracing_service) ExpectOutputterBeginMocks(outputter, source, category, name); const bool valid_timer = tracing_device && gpu_timing_client_->IsAvailable(); ExpectOutputterEndMocks(outputter, source, category, name, expect_start_time, expect_end_time, tracing_service, valid_timer); } void ExpectTracerOffsetQueryMocks() { if (GetTimerType() != gl::GPUTiming::kTimerTypeARB) { gl_fake_queries_.ExpectNoOffsetCalculationQuery(*gl_); } else { gl_fake_queries_.ExpectOffsetCalculationQuery(*gl_); } } gl::GPUTiming::TimerType GetTimerType() { return test_timer_type_; } gl::GPUTiming::TimerType test_timer_type_; gl::GPUTimingFake gl_fake_queries_; scoped_refptr<gl::GPUTimingClient> gpu_timing_client_; MockOutputter outputter_; }; // Test GPUTrace calls all the correct gl calls. class BaseGpuTraceTest : public BaseGpuTest { public: explicit BaseGpuTraceTest(gl::GPUTiming::TimerType test_timer_type) : BaseGpuTest(test_timer_type) {} void DoTraceTest(bool tracing_service, bool tracing_device) { // Expected results const GpuTracerSource tracer_source = kTraceCHROMIUM; const std::string category_name("trace_category"); const std::string trace_name("trace_test"); const int64_t offset_time = 3231; const GLint64 start_timestamp = 7 * base::Time::kNanosecondsPerMicrosecond; const GLint64 end_timestamp = 32 * base::Time::kNanosecondsPerMicrosecond; const int64_t expect_start_time = (start_timestamp / base::Time::kNanosecondsPerMicrosecond) + offset_time; const int64_t expect_end_time = (end_timestamp / base::Time::kNanosecondsPerMicrosecond) + offset_time; ExpectOutputterMocks(&outputter_, tracing_service, tracing_device, tracer_source, category_name, trace_name, expect_start_time, expect_end_time); if (tracing_device) ExpectTraceQueryMocks(); scoped_refptr<GPUTrace> trace = new GPUTrace( &outputter_, gpu_timing_client_.get(), tracer_source, category_name, trace_name, tracing_service, tracing_device); gl_fake_queries_.SetCurrentGLTime(start_timestamp); g_fakeCPUTime = expect_start_time; trace->Start(); // Shouldn't be available before End() call gl_fake_queries_.SetCurrentGLTime(end_timestamp); g_fakeCPUTime = expect_end_time; if (tracing_device) EXPECT_FALSE(trace->IsAvailable()); trace->End(); // Shouldn't be available until the queries complete gl_fake_queries_.SetCurrentGLTime(end_timestamp - base::Time::kNanosecondsPerMicrosecond); g_fakeCPUTime = expect_end_time - 1; if (tracing_device) EXPECT_FALSE(trace->IsAvailable()); // Now it should be available gl_fake_queries_.SetCurrentGLTime(end_timestamp); g_fakeCPUTime = expect_end_time; EXPECT_TRUE(trace->IsAvailable()); // Process should output expected Trace results to MockOutputter trace->Process(); // Destroy trace after we are done. trace->Destroy(true); } }; class GpuARBTimerTraceTest : public BaseGpuTraceTest { public: GpuARBTimerTraceTest() : BaseGpuTraceTest(gl::GPUTiming::kTimerTypeARB) {} }; class GpuDisjointTimerTraceTest : public BaseGpuTraceTest { public: GpuDisjointTimerTraceTest() : BaseGpuTraceTest(gl::GPUTiming::kTimerTypeDisjoint) {} }; TEST_F(GpuARBTimerTraceTest, ARBTimerTraceTestOff) { DoTraceTest(false, false); } TEST_F(GpuARBTimerTraceTest, ARBTimerTraceTestServiceOnly) { DoTraceTest(true, false); } TEST_F(GpuARBTimerTraceTest, ARBTimerTraceTestDeviceOnly) { DoTraceTest(false, true); } TEST_F(GpuARBTimerTraceTest, ARBTimerTraceTestBothOn) { DoTraceTest(true, true); } TEST_F(GpuDisjointTimerTraceTest, DisjointTimerTraceTestOff) { DoTraceTest(false, false); } TEST_F(GpuDisjointTimerTraceTest, DisjointTimerTraceTestServiceOnly) { DoTraceTest(true, false); } TEST_F(GpuDisjointTimerTraceTest, DisjointTimerTraceTestDeviceOnly) { DoTraceTest(false, true); } TEST_F(GpuDisjointTimerTraceTest, DisjointTimerTraceTestBothOn) { DoTraceTest(true, true); } // Test GPUTracer calls all the correct gl calls. class BaseGpuTracerTest : public BaseGpuTest { public: explicit BaseGpuTracerTest(gl::GPUTiming::TimerType test_timer_type) : BaseGpuTest(test_timer_type) {} void DoBasicTracerTest() { ExpectTracerOffsetQueryMocks(); FakeCommandBufferServiceBase command_buffer_service; MockOutputter outputter; MockGLES2Decoder decoder(&command_buffer_service, &outputter); EXPECT_CALL(decoder, GetGLContext()).WillOnce(Return(GetGLContext())); GPUTracerTester tracer(&decoder); tracer.SetTracingEnabled(true); ASSERT_TRUE(tracer.BeginDecoding()); ASSERT_TRUE(tracer.EndDecoding()); } void DoDisabledTracingTest() { ExpectTracerOffsetQueryMocks(); const GpuTracerSource source = static_cast<GpuTracerSource>(0); FakeCommandBufferServiceBase command_buffer_service; MockOutputter outputter; MockGLES2Decoder decoder(&command_buffer_service, &outputter); EXPECT_CALL(decoder, GetGLContext()).WillOnce(Return(GetGLContext())); GPUTracerTester tracer(&decoder); tracer.SetTracingEnabled(false); ASSERT_TRUE(tracer.BeginDecoding()); ASSERT_TRUE(tracer.Begin("disabled_category", "disabled_name", source)); ASSERT_TRUE(tracer.End(source)); ASSERT_TRUE(tracer.EndDecoding()); } void DoTracerMarkersTest() { ExpectTracerOffsetQueryMocks(); gl_fake_queries_.ExpectGetErrorCalls(*gl_); const std::string category_name("trace_category"); const std::string trace_name("trace_test"); const int64_t offset_time = 3231; const GLint64 start_timestamp = 7 * base::Time::kNanosecondsPerMicrosecond; const GLint64 end_timestamp = 32 * base::Time::kNanosecondsPerMicrosecond; const int64_t expect_start_time = (start_timestamp / base::Time::kNanosecondsPerMicrosecond) + offset_time; const int64_t expect_end_time = (end_timestamp / base::Time::kNanosecondsPerMicrosecond) + offset_time; FakeCommandBufferServiceBase command_buffer_service; MockOutputter outputter; MockGLES2Decoder decoder(&command_buffer_service, &outputter); EXPECT_CALL(decoder, GetGLContext()).WillOnce(Return(GetGLContext())); GPUTracerTester tracer(&decoder); tracer.SetTracingEnabled(true); gl_fake_queries_.SetCurrentGLTime(start_timestamp); g_fakeCPUTime = expect_start_time; ASSERT_TRUE(tracer.BeginDecoding()); ExpectTraceQueryMocks(); // This will test multiple marker sources which overlap one another. for (int i = 0; i < NUM_TRACER_SOURCES; ++i) { // Set times so each source has a different time. gl_fake_queries_.SetCurrentGLTime( start_timestamp + (i * base::Time::kNanosecondsPerMicrosecond)); g_fakeCPUTime = expect_start_time + i; // Each trace name should be different to differentiate. const char num_char = static_cast<char>('0' + i); std::string source_category = category_name + num_char; std::string source_trace_name = trace_name + num_char; const GpuTracerSource source = static_cast<GpuTracerSource>(i); ExpectOutputterBeginMocks(&outputter, source, source_category, source_trace_name); ASSERT_TRUE(tracer.Begin(source_category, source_trace_name, source)); } for (int i = 0; i < NUM_TRACER_SOURCES; ++i) { // Set times so each source has a different time. gl_fake_queries_.SetCurrentGLTime( end_timestamp + (i * base::Time::kNanosecondsPerMicrosecond)); g_fakeCPUTime = expect_end_time + i; // Each trace name should be different to differentiate. const char num_char = static_cast<char>('0' + i); std::string source_category = category_name + num_char; std::string source_trace_name = trace_name + num_char; const bool valid_timer = gpu_timing_client_->IsAvailable(); const GpuTracerSource source = static_cast<GpuTracerSource>(i); ExpectOutputterEndMocks(&outputter, source, source_category, source_trace_name, expect_start_time + i, expect_end_time + i, true, valid_timer); // Check if the current category/name are correct for this source. ASSERT_EQ(source_category, tracer.CurrentCategory(source)); ASSERT_EQ(source_trace_name, tracer.CurrentName(source)); ASSERT_TRUE(tracer.End(source)); } ASSERT_TRUE(tracer.EndDecoding()); tracer.ProcessTraces(); } void DoOngoingTracerMarkerTest() { ExpectTracerOffsetQueryMocks(); gl_fake_queries_.ExpectGetErrorCalls(*gl_); const std::string category_name("trace_category"); const std::string trace_name("trace_test"); const GpuTracerSource source = static_cast<GpuTracerSource>(0); const int64_t offset_time = 3231; const GLint64 start_timestamp = 7 * base::Time::kNanosecondsPerMicrosecond; const int64_t expect_start_time = (start_timestamp / base::Time::kNanosecondsPerMicrosecond) + offset_time; const bool valid_timer = gpu_timing_client_->IsAvailable(); FakeCommandBufferServiceBase command_buffer_service; MockOutputter outputter; MockGLES2Decoder decoder(&command_buffer_service, &outputter); EXPECT_CALL(decoder, GetGLContext()).WillOnce(Return(GetGLContext())); GPUTracerTester tracer(&decoder); // Create trace marker while traces are disabled. gl_fake_queries_.SetCurrentGLTime(start_timestamp); g_fakeCPUTime = expect_start_time; tracer.SetTracingEnabled(false); ASSERT_TRUE(tracer.BeginDecoding()); ASSERT_TRUE(tracer.Begin(category_name, trace_name, source)); ASSERT_TRUE(tracer.EndDecoding()); // Enable traces now. tracer.SetTracingEnabled(true); ExpectTraceQueryMocks(); // trace should happen when decoding begins, at time start+1. gl_fake_queries_.SetCurrentGLTime( start_timestamp + (1 * base::Time::kNanosecondsPerMicrosecond)); g_fakeCPUTime = expect_start_time + 1; ASSERT_TRUE(tracer.BeginDecoding()); // End decoding at time start+2. ExpectOutputterEndMocks(&outputter, source, category_name, trace_name, expect_start_time + 1, expect_start_time + 2, true, valid_timer); gl_fake_queries_.SetCurrentGLTime( start_timestamp + (2 * base::Time::kNanosecondsPerMicrosecond)); g_fakeCPUTime = expect_start_time + 2; ASSERT_TRUE(tracer.EndDecoding()); // Begin decoding again at time start+3. gl_fake_queries_.SetCurrentGLTime( start_timestamp + (3 * base::Time::kNanosecondsPerMicrosecond)); g_fakeCPUTime = expect_start_time + 3; ASSERT_TRUE(tracer.BeginDecoding()); // End trace at time start+4 gl_fake_queries_.SetCurrentGLTime( start_timestamp + (4 * base::Time::kNanosecondsPerMicrosecond)); g_fakeCPUTime = expect_start_time + 4; ExpectOutputterEndMocks(&outputter, source, category_name, trace_name, expect_start_time + 3, expect_start_time + 4, true, valid_timer); ASSERT_TRUE(tracer.End(source)); // Increment time before we end decoding to test trace does not stop here. gl_fake_queries_.SetCurrentGLTime( start_timestamp + (5 * base::Time::kNanosecondsPerMicrosecond)); g_fakeCPUTime = expect_start_time + 5; ASSERT_TRUE(tracer.EndDecoding()); tracer.ProcessTraces(); } void DoDisjointTest() { // Cause a disjoint in a middle of a trace and expect no output calls. ExpectTracerOffsetQueryMocks(); gl_fake_queries_.ExpectGetErrorCalls(*gl_); const std::string category_name("trace_category"); const std::string trace_name("trace_test"); const GpuTracerSource source = static_cast<GpuTracerSource>(0); const int64_t offset_time = 3231; const GLint64 start_timestamp = 7 * base::Time::kNanosecondsPerMicrosecond; const GLint64 end_timestamp = 32 * base::Time::kNanosecondsPerMicrosecond; const int64_t expect_start_time = (start_timestamp / base::Time::kNanosecondsPerMicrosecond) + offset_time; const int64_t expect_end_time = (end_timestamp / base::Time::kNanosecondsPerMicrosecond) + offset_time; FakeCommandBufferServiceBase command_buffer_service; MockOutputter outputter; MockGLES2Decoder decoder(&command_buffer_service, &outputter); EXPECT_CALL(decoder, GetGLContext()).WillOnce(Return(GetGLContext())); GPUTracerTester tracer(&decoder); tracer.SetTracingEnabled(true); gl_fake_queries_.SetCurrentGLTime(start_timestamp); g_fakeCPUTime = expect_start_time; ASSERT_TRUE(tracer.BeginDecoding()); ExpectTraceQueryMocks(); ExpectOutputterBeginMocks(&outputter, source, category_name, trace_name); ASSERT_TRUE(tracer.Begin(category_name, trace_name, source)); gl_fake_queries_.SetCurrentGLTime(end_timestamp); g_fakeCPUTime = expect_end_time; // Create GPUTimingClient to make sure disjoint value is correct. This // should not interfere with the tracer's disjoint value. scoped_refptr<gl::GPUTimingClient> disjoint_client = GetGLContext()->CreateGPUTimingClient(); // We assert here based on the disjoint_client because if disjoints are not // working properly there is no point testing the tracer output. ASSERT_FALSE(disjoint_client->CheckAndResetTimerErrors()); gl_fake_queries_.SetDisjoint(); ASSERT_TRUE(disjoint_client->CheckAndResetTimerErrors()); ExpectDisjointOutputMocks(&outputter, expect_start_time, expect_end_time); ExpectOutputterEndMocks(&outputter, source, category_name, trace_name, expect_start_time, expect_end_time, true, false); ASSERT_TRUE(tracer.End(source)); ASSERT_TRUE(tracer.EndDecoding()); tracer.ProcessTraces(); } void DoOutsideDisjointTest() { ExpectTracerOffsetQueryMocks(); gl_fake_queries_.ExpectGetErrorCalls(*gl_); const std::string category_name("trace_category"); const std::string trace_name("trace_test"); const GpuTracerSource source = static_cast<GpuTracerSource>(0); const int64_t offset_time = 3231; const GLint64 start_timestamp = 7 * base::Time::kNanosecondsPerMicrosecond; const GLint64 end_timestamp = 32 * base::Time::kNanosecondsPerMicrosecond; const int64_t expect_start_time = (start_timestamp / base::Time::kNanosecondsPerMicrosecond) + offset_time; const int64_t expect_end_time = (end_timestamp / base::Time::kNanosecondsPerMicrosecond) + offset_time; FakeCommandBufferServiceBase command_buffer_service; MockOutputter outputter; MockGLES2Decoder decoder(&command_buffer_service, &outputter); EXPECT_CALL(decoder, GetGLContext()).WillOnce(Return(GetGLContext())); EXPECT_CALL(decoder, MakeCurrent()).WillRepeatedly(Return(true)); GPUTracerTester tracer(&decoder); // Start a trace before tracing is enabled. tracer.SetTracingEnabled(false); ASSERT_TRUE(tracer.BeginDecoding()); ASSERT_TRUE(tracer.Begin(category_name, trace_name, source)); ASSERT_TRUE(tracer.EndDecoding()); // Enabling traces now, trace should be ongoing. tracer.SetTracingEnabled(true); gl_fake_queries_.SetCurrentGLTime(start_timestamp); g_fakeCPUTime = expect_start_time; // Disjoints before we start tracing anything should not do anything. ExpectNoDisjointOutputMocks(&outputter); gl_fake_queries_.SetDisjoint(); ExpectTraceQueryMocks(); ExpectOutputterBeginMocks(&outputter, source, category_name, trace_name); ASSERT_TRUE(tracer.BeginDecoding()); // Set times so each source has a different time. gl_fake_queries_.SetCurrentGLTime(end_timestamp); g_fakeCPUTime = expect_end_time; ExpectOutputterEndMocks(&outputter, source, category_name, trace_name, expect_start_time, expect_end_time, true, true); ASSERT_TRUE(tracer.End(source)); ASSERT_TRUE(tracer.EndDecoding()); tracer.ProcessTraces(); } }; class InvalidTimerTracerTest : public BaseGpuTracerTest { public: InvalidTimerTracerTest() : BaseGpuTracerTest(gl::GPUTiming::kTimerTypeInvalid) {} }; class GpuEXTTimerTracerTest : public BaseGpuTracerTest { public: GpuEXTTimerTracerTest() : BaseGpuTracerTest(gl::GPUTiming::kTimerTypeEXT) {} }; class GpuARBTimerTracerTest : public BaseGpuTracerTest { public: GpuARBTimerTracerTest() : BaseGpuTracerTest(gl::GPUTiming::kTimerTypeARB) {} }; class GpuDisjointTimerTracerTest : public BaseGpuTracerTest { public: GpuDisjointTimerTracerTest() : BaseGpuTracerTest(gl::GPUTiming::kTimerTypeDisjoint) {} }; TEST_F(InvalidTimerTracerTest, InvalidTimerBasicTracerTest) { DoBasicTracerTest(); } TEST_F(GpuEXTTimerTracerTest, EXTTimerBasicTracerTest) { DoBasicTracerTest(); } TEST_F(GpuARBTimerTracerTest, ARBTimerBasicTracerTest) { DoBasicTracerTest(); } TEST_F(GpuDisjointTimerTracerTest, DisjointTimerBasicTracerTest) { DoBasicTracerTest(); } TEST_F(InvalidTimerTracerTest, InvalidTimerDisabledTest) { DoDisabledTracingTest(); } TEST_F(GpuEXTTimerTracerTest, EXTTimerDisabledTest) { DoDisabledTracingTest(); } TEST_F(GpuARBTimerTracerTest, ARBTimerDisabledTest) { DoDisabledTracingTest(); } TEST_F(GpuDisjointTimerTracerTest, DisjointTimerDisabledTest) { DoDisabledTracingTest(); } TEST_F(InvalidTimerTracerTest, InvalidTimerTracerMarkersTest) { DoTracerMarkersTest(); } TEST_F(GpuEXTTimerTracerTest, EXTTimerTracerMarkersTest) { DoTracerMarkersTest(); } TEST_F(GpuARBTimerTracerTest, ARBTimerTracerMarkersTest) { DoTracerMarkersTest(); } TEST_F(GpuDisjointTimerTracerTest, DisjointTimerBasicTracerMarkersTest) { DoTracerMarkersTest(); } TEST_F(InvalidTimerTracerTest, InvalidTimerOngoingTracerMarkersTest) { DoOngoingTracerMarkerTest(); } TEST_F(GpuEXTTimerTracerTest, EXTTimerOngoingTracerMarkersTest) { DoOngoingTracerMarkerTest(); } TEST_F(GpuARBTimerTracerTest, ARBTimerBasicOngoingTracerMarkersTest) { DoOngoingTracerMarkerTest(); } TEST_F(GpuDisjointTimerTracerTest, DisjointTimerOngoingTracerMarkersTest) { DoOngoingTracerMarkerTest(); } TEST_F(GpuDisjointTimerTracerTest, DisjointTimerDisjointTraceTest) { DoDisjointTest(); } TEST_F(GpuDisjointTimerTracerTest, NonrelevantDisjointTraceTest) { DoOutsideDisjointTest(); } class GPUTracerTest : public GpuServiceTest { protected: void SetUp() override { g_fakeCPUTime = 0; GpuServiceTest::SetUpWithGLVersion("3.2", ""); decoder_.reset(new MockGLES2Decoder(&command_buffer_service_, &outputter_)); EXPECT_CALL(*decoder_, GetGLContext()) .Times(AtMost(1)) .WillRepeatedly(Return(GetGLContext())); tracer_tester_.reset(new GPUTracerTester(decoder_.get())); } void TearDown() override { tracer_tester_ = nullptr; decoder_ = nullptr; GpuServiceTest::TearDown(); } FakeCommandBufferServiceBase command_buffer_service_; MockOutputter outputter_; std::unique_ptr<MockGLES2Decoder> decoder_; std::unique_ptr<GPUTracerTester> tracer_tester_; }; TEST_F(GPUTracerTest, IsTracingTest) { EXPECT_FALSE(tracer_tester_->IsTracing()); tracer_tester_->SetTracingEnabled(true); EXPECT_TRUE(tracer_tester_->IsTracing()); } // Test basic functionality of the GPUTracerTester. TEST_F(GPUTracerTest, DecodeTest) { ASSERT_TRUE(tracer_tester_->BeginDecoding()); EXPECT_FALSE(tracer_tester_->BeginDecoding()); ASSERT_TRUE(tracer_tester_->EndDecoding()); EXPECT_FALSE(tracer_tester_->EndDecoding()); } TEST_F(GPUTracerTest, TraceDuringDecodeTest) { const std::string category_name("trace_category"); const std::string trace_name("trace_test"); EXPECT_FALSE( tracer_tester_->Begin(category_name, trace_name, kTraceCHROMIUM)); ASSERT_TRUE(tracer_tester_->BeginDecoding()); EXPECT_TRUE( tracer_tester_->Begin(category_name, trace_name, kTraceCHROMIUM)); ASSERT_TRUE(tracer_tester_->EndDecoding()); } TEST_F(GpuDisjointTimerTracerTest, MultipleClientsDisjointTest) { scoped_refptr<gl::GPUTimingClient> client1 = GetGLContext()->CreateGPUTimingClient(); scoped_refptr<gl::GPUTimingClient> client2 = GetGLContext()->CreateGPUTimingClient(); // Test both clients are initialized as no errors. ASSERT_FALSE(client1->CheckAndResetTimerErrors()); ASSERT_FALSE(client2->CheckAndResetTimerErrors()); // Issue a disjoint. gl_fake_queries_.SetDisjoint(); ASSERT_TRUE(client1->CheckAndResetTimerErrors()); ASSERT_TRUE(client2->CheckAndResetTimerErrors()); // Test both are now reset. ASSERT_FALSE(client1->CheckAndResetTimerErrors()); ASSERT_FALSE(client2->CheckAndResetTimerErrors()); // Issue a disjoint. gl_fake_queries_.SetDisjoint(); // Test new client disjoint value is cleared. scoped_refptr<gl::GPUTimingClient> client3 = GetGLContext()->CreateGPUTimingClient(); ASSERT_TRUE(client1->CheckAndResetTimerErrors()); ASSERT_TRUE(client2->CheckAndResetTimerErrors()); ASSERT_FALSE(client3->CheckAndResetTimerErrors()); } } // namespace } // namespace gles2 } // namespace gpu