Dataset schema:
  repo_name : string (length 5-85)
  path      : string (length 3-252)
  copies    : string (length 1-5)
  size      : string (length 4-6)
  content   : string (length 922-999k)
  license   : string (15 classes)
repo_name: tjwei/acer-stream-minimal-kernel
path: init/calibrate.c
copies: 266
size: 5121
content:
/* calibrate.c: default delay calibration
 *
 * Excised from init/main.c
 *  Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/smp.h>

unsigned long lpj_fine;
unsigned long preset_lpj;
static int __init lpj_setup(char *str)
{
        preset_lpj = simple_strtoul(str,NULL,0);
        return 1;
}

__setup("lpj=", lpj_setup);

#ifdef ARCH_HAS_READ_CURRENT_TIMER

/* This routine uses the read_current_timer() routine and gets the
 * loops per jiffy directly, instead of guessing it using delay().
 * Also, this code tries to handle non-maskable asynchronous events
 * (like SMIs)
 */
#define DELAY_CALIBRATION_TICKS                 ((HZ < 100) ? 1 : (HZ/100))
#define MAX_DIRECT_CALIBRATION_RETRIES          5

static unsigned long __cpuinit calibrate_delay_direct(void)
{
        unsigned long pre_start, start, post_start;
        unsigned long pre_end, end, post_end;
        unsigned long start_jiffies;
        unsigned long timer_rate_min, timer_rate_max;
        unsigned long good_timer_sum = 0;
        unsigned long good_timer_count = 0;
        int i;

        if (read_current_timer(&pre_start) < 0)
                return 0;

        /*
         * A simple loop like
         *      while ( jiffies < start_jiffies+1)
         *              start = read_current_timer();
         * will not do. As we don't really know whether jiffy switch
         * happened first or timer_value was read first. And some asynchronous
         * event can happen between these two events introducing errors in lpj.
         *
         * So, we do
         * 1. pre_start <- When we are sure that jiffy switch hasn't happened
         * 2. check jiffy switch
         * 3. start <- timer value before or after jiffy switch
         * 4. post_start <- When we are sure that jiffy switch has happened
         *
         * Note, we don't know anything about order of 2 and 3.
         * Now, by looking at post_start and pre_start difference, we can
         * check whether any asynchronous event happened or not
         */

        for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
                pre_start = 0;
                read_current_timer(&start);
                start_jiffies = jiffies;
                while (jiffies <= (start_jiffies + 1)) {
                        pre_start = start;
                        read_current_timer(&start);
                }
                read_current_timer(&post_start);

                pre_end = 0;
                end = post_start;
                while (jiffies <= (start_jiffies + 1 +
                                        DELAY_CALIBRATION_TICKS)) {
                        pre_end = end;
                        read_current_timer(&end);
                }
                read_current_timer(&post_end);

                timer_rate_max = (post_end - pre_start) /
                                        DELAY_CALIBRATION_TICKS;
                timer_rate_min = (pre_end - post_start) /
                                        DELAY_CALIBRATION_TICKS;

                /*
                 * If the upper limit and lower limit of the timer_rate is
                 * >= 12.5% apart, redo calibration.
                 */
                if (pre_start != 0 && pre_end != 0 &&
                    (timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) {
                        good_timer_count++;
                        good_timer_sum += timer_rate_max;
                }
        }

        if (good_timer_count)
                return (good_timer_sum/good_timer_count);

        printk(KERN_WARNING "calibrate_delay_direct() failed to get a good "
               "estimate for loops_per_jiffy.\nProbably due to long platform interrupts. Consider using \"lpj=\" boot option.\n");
        return 0;
}
#else
static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;}
#endif

/*
 * This is the number of bits of precision for the loops_per_jiffy. Each
 * bit takes on average 1.5/HZ seconds. This (like the original) is a little
 * better than 1%
 * For the boot cpu we can skip the delay calibration and assign it a value
 * calculated based on the timer frequency.
 * For the rest of the CPUs we cannot assume that the timer frequency is same as
 * the cpu frequency, hence do the calibration for those.
 */
#define LPS_PREC 8

void __cpuinit calibrate_delay(void)
{
        unsigned long ticks, loopbit;
        int lps_precision = LPS_PREC;

        if (preset_lpj) {
                loops_per_jiffy = preset_lpj;
                printk(KERN_INFO
                        "Calibrating delay loop (skipped) preset value.. ");
        } else if ((smp_processor_id() == 0) && lpj_fine) {
                loops_per_jiffy = lpj_fine;
                printk(KERN_INFO
                        "Calibrating delay loop (skipped), "
                        "value calculated using timer frequency.. ");
        } else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) {
                printk(KERN_INFO
                        "Calibrating delay using timer specific routine.. ");
        } else {
                loops_per_jiffy = (1<<12);

                printk(KERN_INFO "Calibrating delay loop... ");
                while ((loops_per_jiffy <<= 1) != 0) {
                        /* wait for "start of" clock tick */
                        ticks = jiffies;
                        while (ticks == jiffies)
                                /* nothing */;
                        /* Go .. */
                        ticks = jiffies;
                        __delay(loops_per_jiffy);
                        ticks = jiffies - ticks;
                        if (ticks)
                                break;
                }

                /*
                 * Do a binary approximation to get loops_per_jiffy set to
                 * equal one clock (up to lps_precision bits)
                 */
                loops_per_jiffy >>= 1;
                loopbit = loops_per_jiffy;
                while (lps_precision-- && (loopbit >>= 1)) {
                        loops_per_jiffy |= loopbit;
                        ticks = jiffies;
                        while (ticks == jiffies)
                                /* nothing */;
                        ticks = jiffies;
                        __delay(loops_per_jiffy);
                        if (jiffies != ticks)   /* longer than 1 tick */
                                loops_per_jiffy &= ~loopbit;
                }
        }
        printk(KERN_CONT "%lu.%02lu BogoMIPS (lpj=%lu)\n",
                        loops_per_jiffy/(500000/HZ),
                        (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy);
}
license: gpl-2.0
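The coarse-then-fine search in calibrate_delay() above is the heart of the row: loops_per_jiffy is doubled until one __delay() call spans a clock tick, then refined one bit at a time, LPS_PREC bits deep. A minimal user-space sketch of that successive-approximation idea follows; delay_spans_tick() is a hypothetical stand-in for the jiffies/__delay probe, and the "true" lpj value is assumed purely for the demo.

/*
 * Sketch of calibrate_delay()'s two-phase search, outside the kernel.
 * delay_spans_tick() is a hypothetical probe: "does a delay of lpj
 * loops last at least one tick?" Real code reads jiffies instead.
 */
#include <stdio.h>

#define LPS_PREC 8

static int delay_spans_tick(unsigned long lpj)
{
        const unsigned long true_lpj = 1193046; /* assumed, demo only */
        return lpj >= true_lpj;
}

int main(void)
{
        unsigned long lpj = 1 << 12;
        unsigned long bit;
        int prec = LPS_PREC;

        /* Coarse phase: double until one delay covers a whole tick. */
        while ((lpj <<= 1) != 0)
                if (delay_spans_tick(lpj))
                        break;

        /* Fine phase: binary-refine the low bits, as the kernel does. */
        lpj >>= 1;
        bit = lpj;
        while (prec-- && (bit >>= 1)) {
                lpj |= bit;
                if (delay_spans_tick(lpj))      /* longer than 1 tick */
                        lpj &= ~bit;
        }

        /* Same BogoMIPS arithmetic as the printk above, with HZ = 100. */
        printf("%lu.%02lu BogoMIPS (lpj=%lu)\n",
               lpj / (500000 / 100), (lpj / (5000 / 100)) % 100, lpj);
        return 0;
}

Per the file's own comment, each refinement bit costs about 1.5/HZ seconds of busy-waiting, which is why the kernel stops at LPS_PREC = 8 bits (already better than 1%) rather than searching to full word width.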
repo_name: AndroidDeveloperAlliance/kernel_samsung_d2
path: arch/powerpc/platforms/85xx/mpc85xx_cds.c
copies: 2314
size: 9046
content:
/*
 * MPC85xx setup and early boot code plus other random bits.
 *
 * Maintained by Kumar Gala (see MAINTAINERS for contact information)
 *
 * Copyright 2005 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
#include <linux/of_platform.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/atomic.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/pci-bridge.h>
#include <asm/irq.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/i8259.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>

/* CADMUS info */
/* xxx - galak, move into device tree */
#define CADMUS_BASE (0xf8004000)
#define CADMUS_SIZE (256)
#define CM_VER  (0)
#define CM_CSR  (1)
#define CM_RST  (2)

static int cds_pci_slot = 2;
static volatile u8 *cadmus;

#ifdef CONFIG_PCI

#define ARCADIA_HOST_BRIDGE_IDSEL       17
#define ARCADIA_2ND_BRIDGE_IDSEL        3

static int mpc85xx_exclude_device(struct pci_controller *hose,
                                  u_char bus, u_char devfn)
{
        /* We explicitly do not go past the Tundra 320 Bridge */
        if ((bus == 1) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL))
                return PCIBIOS_DEVICE_NOT_FOUND;
        if ((bus == 0) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL))
                return PCIBIOS_DEVICE_NOT_FOUND;
        else
                return PCIBIOS_SUCCESSFUL;
}

static void mpc85xx_cds_restart(char *cmd)
{
        struct pci_dev *dev;
        u_char tmp;

        if ((dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686,
                                        NULL))) {

                /* Use the VIA Super Southbridge to force a PCI reset */
                pci_read_config_byte(dev, 0x47, &tmp);
                pci_write_config_byte(dev, 0x47, tmp | 1);

                /* Flush the outbound PCI write queues */
                pci_read_config_byte(dev, 0x47, &tmp);

                /*
                 * At this point, the hardware reset should have triggered.
                 * However, if it doesn't work for some mysterious reason,
                 * just fall through to the default reset below.
                 */

                pci_dev_put(dev);
        }

        /*
         * If we can't find the VIA chip (maybe the P2P bridge is disabled)
         * or the VIA chip reset didn't work, just use the default reset.
         */
        fsl_rstcr_restart(NULL);
}

static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev)
{
        u_char c;
        if (dev->vendor == PCI_VENDOR_ID_VIA) {
                switch (dev->device) {
                case PCI_DEVICE_ID_VIA_82C586_1:
                        /*
                         * U-Boot does not set the enable bits
                         * for the IDE device. Force them on here.
                         */
                        pci_read_config_byte(dev, 0x40, &c);
                        c |= 0x03; /* IDE: Chip Enable Bits */
                        pci_write_config_byte(dev, 0x40, c);

                        /*
                         * Since only primary interface works, force the
                         * IDE function to standard primary IDE interrupt
                         * w/ 8259 offset
                         */
                        dev->irq = 14;
                        pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
                        break;
                /*
                 * Force legacy USB interrupt routing
                 */
                case PCI_DEVICE_ID_VIA_82C586_2:
                        /* There are two USB controllers.
                         * Identify them by function number
                         */
                        if (PCI_FUNC(dev->devfn) == 3)
                                dev->irq = 11;
                        else
                                dev->irq = 10;
                        pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
                default:
                        break;
                }
        }
}

static void __devinit skip_fake_bridge(struct pci_dev *dev)
{
        /* Make it an error to skip the fake bridge
         * in pci_setup_device() in probe.c */
        dev->hdr_type = 0x7f;
}
DECLARE_PCI_FIXUP_EARLY(0x1957, 0x3fff, skip_fake_bridge);
DECLARE_PCI_FIXUP_EARLY(0x3fff, 0x1957, skip_fake_bridge);
DECLARE_PCI_FIXUP_EARLY(0xff3f, 0x5719, skip_fake_bridge);

#ifdef CONFIG_PPC_I8259
static void mpc85xx_8259_cascade_handler(unsigned int irq,
                                         struct irq_desc *desc)
{
        unsigned int cascade_irq = i8259_irq();

        if (cascade_irq != NO_IRQ)
                /* handle an interrupt from the 8259 */
                generic_handle_irq(cascade_irq);

        /* check for any interrupts from the shared IRQ line */
        handle_fasteoi_irq(irq, desc);
}

static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static struct irqaction mpc85xxcds_8259_irqaction = {
        .handler = mpc85xx_8259_cascade_action,
        .flags = IRQF_SHARED,
        .name = "8259 cascade",
};
#endif /* PPC_I8259 */
#endif /* CONFIG_PCI */

static void __init mpc85xx_cds_pic_init(void)
{
        struct mpic *mpic;
        struct resource r;
        struct device_node *np = NULL;

        np = of_find_node_by_type(np, "open-pic");

        if (np == NULL) {
                printk(KERN_ERR "Could not find open-pic node\n");
                return;
        }

        if (of_address_to_resource(np, 0, &r)) {
                printk(KERN_ERR "Failed to map mpic register space\n");
                of_node_put(np);
                return;
        }

        mpic = mpic_alloc(np, r.start,
                        MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
                        0, 256, " OpenPIC ");
        BUG_ON(mpic == NULL);

        /* Return the mpic node */
        of_node_put(np);

        mpic_init(mpic);
}

#if defined(CONFIG_PPC_I8259) && defined(CONFIG_PCI)
static int mpc85xx_cds_8259_attach(void)
{
        int ret;
        struct device_node *np = NULL;
        struct device_node *cascade_node = NULL;
        int cascade_irq;

        /* Initialize the i8259 controller */
        for_each_node_by_type(np, "interrupt-controller")
                if (of_device_is_compatible(np, "chrp,iic")) {
                        cascade_node = np;
                        break;
                }

        if (cascade_node == NULL) {
                printk(KERN_DEBUG "Could not find i8259 PIC\n");
                return -ENODEV;
        }

        cascade_irq = irq_of_parse_and_map(cascade_node, 0);
        if (cascade_irq == NO_IRQ) {
                printk(KERN_ERR "Failed to map cascade interrupt\n");
                return -ENXIO;
        }

        i8259_init(cascade_node, 0);
        of_node_put(cascade_node);

        /*
         * Hook the interrupt to make sure desc->action is never NULL.
         * This is required to ensure that the interrupt does not get
         * disabled when the last user of the shared IRQ line frees their
         * interrupt.
         */
        if ((ret = setup_irq(cascade_irq, &mpc85xxcds_8259_irqaction))) {
                printk(KERN_ERR "Failed to setup cascade interrupt\n");
                return ret;
        }

        /* Success. Connect our low-level cascade handler. */
        irq_set_handler(cascade_irq, mpc85xx_8259_cascade_handler);

        return 0;
}
machine_device_initcall(mpc85xx_cds, mpc85xx_cds_8259_attach);
#endif /* CONFIG_PPC_I8259 */

/*
 * Setup the architecture
 */
static void __init mpc85xx_cds_setup_arch(void)
{
#ifdef CONFIG_PCI
        struct device_node *np;
#endif

        if (ppc_md.progress)
                ppc_md.progress("mpc85xx_cds_setup_arch()", 0);

        cadmus = ioremap(CADMUS_BASE, CADMUS_SIZE);
        cds_pci_slot = ((cadmus[CM_CSR] >> 6) & 0x3) + 1;

        if (ppc_md.progress) {
                char buf[40];
                snprintf(buf, 40, "CDS Version = 0x%x in slot %d\n",
                         cadmus[CM_VER], cds_pci_slot);
                ppc_md.progress(buf, 0);
        }

#ifdef CONFIG_PCI
        for_each_node_by_type(np, "pci") {
                if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
                    of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
                        struct resource rsrc;
                        of_address_to_resource(np, 0, &rsrc);
                        if ((rsrc.start & 0xfffff) == 0x8000)
                                fsl_add_bridge(np, 1);
                        else
                                fsl_add_bridge(np, 0);
                }
        }

        ppc_md.pci_irq_fixup = mpc85xx_cds_pci_irq_fixup;
        ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif
}

static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
{
        uint pvid, svid, phid1;

        pvid = mfspr(SPRN_PVR);
        svid = mfspr(SPRN_SVR);

        seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
        seq_printf(m, "Machine\t\t: MPC85xx CDS (0x%x)\n", cadmus[CM_VER]);
        seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
        seq_printf(m, "SVR\t\t: 0x%x\n", svid);

        /* Display cpu Pll setting */
        phid1 = mfspr(SPRN_HID1);
        seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}

/*
 * Called very early, device-tree isn't unflattened
 */
static int __init mpc85xx_cds_probe(void)
{
        unsigned long root = of_get_flat_dt_root();

        return of_flat_dt_is_compatible(root, "MPC85xxCDS");
}

static struct of_device_id __initdata of_bus_ids[] = {
        { .type = "soc", },
        { .compatible = "soc", },
        { .compatible = "simple-bus", },
        { .compatible = "gianfar", },
        {},
};

static int __init declare_of_platform_devices(void)
{
        return of_platform_bus_probe(NULL, of_bus_ids, NULL);
}
machine_device_initcall(mpc85xx_cds, declare_of_platform_devices);

define_machine(mpc85xx_cds) {
        .name           = "MPC85xx CDS",
        .probe          = mpc85xx_cds_probe,
        .setup_arch     = mpc85xx_cds_setup_arch,
        .init_IRQ       = mpc85xx_cds_pic_init,
        .show_cpuinfo   = mpc85xx_cds_show_cpuinfo,
        .get_irq        = mpic_get_irq,
#ifdef CONFIG_PCI
        .restart        = mpc85xx_cds_restart,
        .pcibios_fixup_bus      = fsl_pcibios_fixup_bus,
#else
        .restart        = fsl_rstcr_restart,
#endif
        .calibrate_decr = generic_calibrate_decr,
        .progress       = udbg_progress,
};
license: gpl-2.0
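One small but easy-to-miss detail in mpc85xx_cds_setup_arch() above is the Cadmus CSR decode: the board's PCI slot sits in bits 7:6 of the CM_CSR byte, and the trailing +1 suggests it is stored 0-based. A self-contained sketch of just that extraction; the register value here is invented for illustration.

/*
 * Sketch of the CM_CSR decode from mpc85xx_cds_setup_arch(): shift the
 * two-bit field down from bits 7:6, mask it, and convert to 1-based.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t cm_csr = 0x9a;  /* example value, not real hardware data */
        int cds_pci_slot = ((cm_csr >> 6) & 0x3) + 1;

        printf("CSR=0x%02x -> slot %d\n", cm_csr, cds_pci_slot); /* slot 3 */
        return 0;
}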
repo_name: quanghieu/linux-DFI
path: arch/mips/pci/ops-loongson2.c
copies: 2314
size: 5499
content:
/*
 * Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc.
 * All rights reserved.
 * Authors: Carsten Langgaard <carstenl@mips.com>
 *          Maciej W. Rozycki <macro@mips.com>
 *
 * Copyright (C) 2009 Lemote Inc.
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/export.h>

#include <loongson.h>

#ifdef CONFIG_CS5536
#include <cs5536/cs5536_pci.h>
#include <cs5536/cs5536.h>
#endif

#define PCI_ACCESS_READ  0
#define PCI_ACCESS_WRITE 1

#define CFG_SPACE_REG(offset) \
        (void *)CKSEG1ADDR(LOONGSON_PCICFG_BASE | (offset))
#define ID_SEL_BEGIN 11
#define MAX_DEV_NUM (31 - ID_SEL_BEGIN)

static int loongson_pcibios_config_access(unsigned char access_type,
                                          struct pci_bus *bus,
                                          unsigned int devfn, int where,
                                          u32 *data)
{
        u32 busnum = bus->number;
        u32 addr, type;
        u32 dummy;
        void *addrp;
        int device = PCI_SLOT(devfn);
        int function = PCI_FUNC(devfn);
        int reg = where & ~3;

        if (busnum == 0) {
                /* board-specific part; currently only fuloong2f and
                 * yeeloong2f use CS5536, fuloong2e uses via686b, and
                 * gdium has no south bridge */
#ifdef CONFIG_CS5536
                /* cs5536_pci_conf_read4/write4() will call _rdmsr/_wrmsr() to
                 * access the registers PCI_MSR_ADDR, PCI_MSR_DATA_LO and
                 * PCI_MSR_DATA_HI, which are bigger than PCI_MSR_CTRL, so
                 * those accesses will not take this branch but the others;
                 * there is no infinite call loop here.
                 */
                if ((PCI_IDSEL_CS5536 == device) && (reg < PCI_MSR_CTRL)) {
                        switch (access_type) {
                        case PCI_ACCESS_READ:
                                *data = cs5536_pci_conf_read4(function, reg);
                                break;
                        case PCI_ACCESS_WRITE:
                                cs5536_pci_conf_write4(function, reg, *data);
                                break;
                        }
                        return 0;
                }
#endif
                /* Type 0 configuration for onboard PCI bus */
                if (device > MAX_DEV_NUM)
                        return -1;

                addr = (1 << (device + ID_SEL_BEGIN)) | (function << 8) | reg;
                type = 0;
        } else {
                /* Type 1 configuration for offboard PCI bus */
                addr = (busnum << 16) | (device << 11) | (function << 8) | reg;
                type = 0x10000;
        }

        /* Clear aborts */
        LOONGSON_PCICMD |= LOONGSON_PCICMD_MABORT_CLR | \
                                LOONGSON_PCICMD_MTABORT_CLR;

        LOONGSON_PCIMAP_CFG = (addr >> 16) | type;

        /* Flush Bonito register block */
        dummy = LOONGSON_PCIMAP_CFG;
        mmiowb();

        addrp = CFG_SPACE_REG(addr & 0xffff);
        if (access_type == PCI_ACCESS_WRITE)
                writel(cpu_to_le32(*data), addrp);
        else
                *data = le32_to_cpu(readl(addrp));

        /* Detect Master/Target abort */
        if (LOONGSON_PCICMD & (LOONGSON_PCICMD_MABORT_CLR |
                               LOONGSON_PCICMD_MTABORT_CLR)) {
                /* Error occurred */

                /* Clear bits */
                LOONGSON_PCICMD |= (LOONGSON_PCICMD_MABORT_CLR |
                                    LOONGSON_PCICMD_MTABORT_CLR);

                return -1;
        }

        return 0;
}

/*
 * We can't address 8 and 16 bit words directly. Instead we have to
 * read/write a 32bit word and mask/modify the data we actually want.
 */
static int loongson_pcibios_read(struct pci_bus *bus, unsigned int devfn,
                                 int where, int size, u32 *val)
{
        u32 data = 0;

        if ((size == 2) && (where & 1))
                return PCIBIOS_BAD_REGISTER_NUMBER;
        else if ((size == 4) && (where & 3))
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (loongson_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
                                           &data))
                return -1;

        if (size == 1)
                *val = (data >> ((where & 3) << 3)) & 0xff;
        else if (size == 2)
                *val = (data >> ((where & 3) << 3)) & 0xffff;
        else
                *val = data;

        return PCIBIOS_SUCCESSFUL;
}

static int loongson_pcibios_write(struct pci_bus *bus, unsigned int devfn,
                                  int where, int size, u32 val)
{
        u32 data = 0;

        if ((size == 2) && (where & 1))
                return PCIBIOS_BAD_REGISTER_NUMBER;
        else if ((size == 4) && (where & 3))
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (size == 4)
                data = val;
        else {
                if (loongson_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
                                                   where, &data))
                        return -1;

                if (size == 1)
                        data = (data & ~(0xff << ((where & 3) << 3))) |
                               (val << ((where & 3) << 3));
                else if (size == 2)
                        data = (data & ~(0xffff << ((where & 3) << 3))) |
                               (val << ((where & 3) << 3));
        }

        if (loongson_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
                                           &data))
                return -1;

        return PCIBIOS_SUCCESSFUL;
}

struct pci_ops loongson_pci_ops = {
        .read = loongson_pcibios_read,
        .write = loongson_pcibios_write
};

#ifdef CONFIG_CS5536
DEFINE_RAW_SPINLOCK(msr_lock);

void _rdmsr(u32 msr, u32 *hi, u32 *lo)
{
        struct pci_bus bus = {
                .number = PCI_BUS_CS5536
        };
        u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0);
        unsigned long flags;

        raw_spin_lock_irqsave(&msr_lock, flags);
        loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr);
        loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_LO, 4, lo);
        loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_HI, 4, hi);
        raw_spin_unlock_irqrestore(&msr_lock, flags);
}
EXPORT_SYMBOL(_rdmsr);

void _wrmsr(u32 msr, u32 hi, u32 lo)
{
        struct pci_bus bus = {
                .number = PCI_BUS_CS5536
        };
        u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0);
        unsigned long flags;

        raw_spin_lock_irqsave(&msr_lock, flags);
        loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr);
        loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_LO, 4, lo);
        loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_HI, 4, hi);
        raw_spin_unlock_irqrestore(&msr_lock, flags);
}
EXPORT_SYMBOL(_wrmsr);
#endif
license: gpl-2.0
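The comment above loongson_pcibios_read()/loongson_pcibios_write() names the key trick of this row: the hardware only performs aligned 32-bit config accesses, so sub-word writes are emulated by read-modify-write on the enclosing word. A user-space sketch of just the masking arithmetic; cfg_word stands in for the real config register and its initial value is invented.

/*
 * Sketch of the read-modify-write lane masking used by
 * loongson_pcibios_write(): compute the bit offset of the target byte
 * lane, clear that lane in the 32-bit word, and OR in the new value.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t cfg_word = 0xdeadbeef;  /* fake 32-bit config register */

/* Write `size` (1 or 2) bytes of `val` at byte offset `where` (0..3). */
static void config_write(int where, int size, uint32_t val)
{
        int shift = (where & 3) << 3;   /* byte lane -> bit offset */
        uint32_t mask = (size == 1 ? 0xffu : 0xffffu) << shift;

        cfg_word = (cfg_word & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        config_write(1, 1, 0x42);       /* patch only byte 1 of the word */
        printf("0x%08x\n", cfg_word);   /* prints 0xdead42ef */
        return 0;
}

The kernel function does exactly this, except the initial read and final write go through loongson_pcibios_config_access() rather than a local variable, and misaligned requests are rejected first with PCIBIOS_BAD_REGISTER_NUMBER.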
repo_name: NamelessRom/android_kernel_oppo_msm8939
path: drivers/isdn/hisax/hfc_pci.c
copies: 2314
size: 54563
content:
/* $Id: hfc_pci.c,v 1.48.2.4 2004/02/11 13:21:33 keil Exp $ * * low level driver for CCD's hfc-pci based cards * * Author Werner Cornelius * based on existing driver for CCD hfc ISA cards * Copyright by Werner Cornelius <werner@isdn4linux.de> * by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * For changes and modifications please read * Documentation/isdn/HiSax.cert * */ #include <linux/init.h> #include "hisax.h" #include "hfc_pci.h" #include "isdnl1.h" #include <linux/pci.h> #include <linux/sched.h> #include <linux/interrupt.h> static const char *hfcpci_revision = "$Revision: 1.48.2.4 $"; /* table entry in the PCI devices list */ typedef struct { int vendor_id; int device_id; char *vendor_name; char *card_name; } PCI_ENTRY; #define NT_T1_COUNT 20 /* number of 3.125ms interrupts for G2 timeout */ #define CLKDEL_TE 0x0e /* CLKDEL in TE mode */ #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */ static const PCI_ENTRY id_list[] = { {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, "CCD/Billion/Asuscom", "2BD0"}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, "Billion", "B000"}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, "Billion", "B006"}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, "Billion", "B007"}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, "Billion", "B008"}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, "Billion", "B009"}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, "Billion", "B00A"}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, "Billion", "B00B"}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, "Billion", "B00C"}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, "Seyeon", "B100"}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, "Primux II S0", "B700"}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, "Primux II S0 NT", "B701"}, {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, "Abocom/Magitek", "2BD1"}, {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, "Asuscom/Askey", "675"}, {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, "German telekom", "T-Concept"}, {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, "German telekom", "A1T"}, {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, "Motorola MC145575", "MC145575"}, {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, "Zoltrix", "2BD0"}, {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E, "Digi International", "Digi DataFire Micro V IOM2 (Europe)"}, {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E, "Digi International", "Digi DataFire Micro V (Europe)"}, {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A, "Digi International", "Digi DataFire Micro V IOM2 (North America)"}, {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A, "Digi International", "Digi DataFire Micro V (North America)"}, {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"}, {0, 0, NULL, NULL}, }; /******************************************/ /* free hardware resources used by driver */ /******************************************/ static void release_io_hfcpci(struct IsdnCardState *cs) { printk(KERN_INFO "HiSax: release hfcpci at %p\n", cs->hw.hfcpci.pci_io); cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! 
*/ Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */ mdelay(10); Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */ mdelay(10); Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, 0); /* disable memory mapped ports + busmaster */ del_timer(&cs->hw.hfcpci.timer); pci_free_consistent(cs->hw.hfcpci.dev, 0x8000, cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma); cs->hw.hfcpci.fifos = NULL; iounmap((void *)cs->hw.hfcpci.pci_io); } /********************************************************************************/ /* function called to reset the HFC PCI chip. A complete software reset of chip */ /* and fifos is done. */ /********************************************************************************/ static void reset_hfcpci(struct IsdnCardState *cs) { pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */ cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */ Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); printk(KERN_INFO "HFC_PCI: resetting card\n"); pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO + PCI_ENA_MASTER); /* enable memory ports + busmaster */ Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */ mdelay(10); Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */ mdelay(10); if (Read_hfc(cs, HFCPCI_STATUS) & 2) printk(KERN_WARNING "HFC-PCI init bit busy\n"); cs->hw.hfcpci.fifo_en = 0x30; /* only D fifos enabled */ Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en); cs->hw.hfcpci.trm = 0 + HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */ Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm); Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_TE); /* ST-Bit delay for TE-Mode */ cs->hw.hfcpci.sctrl_e = HFCPCI_AUTO_AWAKE; Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e); /* S/T Auto awake */ cs->hw.hfcpci.bswapped = 0; /* no exchange */ cs->hw.hfcpci.nt_mode = 0; /* we are in TE mode */ cs->hw.hfcpci.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER; Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt); cs->hw.hfcpci.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC | HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER; Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1); /* Clear already pending ints */ if (Read_hfc(cs, HFCPCI_INT_S1)); Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 2); /* HFC ST 2 */ udelay(10); Write_hfc(cs, HFCPCI_STATES, 2); /* HFC ST 2 */ cs->hw.hfcpci.mst_m = HFCPCI_MASTER; /* HFC Master Mode */ Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m); cs->hw.hfcpci.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! 
*/ Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl); cs->hw.hfcpci.sctrl_r = 0; Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r); /* Init GCI/IOM2 in master mode */ /* Slots 0 and 1 are set for B-chan 1 and 2 */ /* D- and monitor/CI channel are not enabled */ /* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */ /* STIO2 is used as data input, B1+B2 from IOM->ST */ /* ST B-channel send disabled -> continuous 1s */ /* The IOM slots are always enabled */ cs->hw.hfcpci.conn = 0x36; /* set data flow directions */ Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn); Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* B1-Slot 0 STIO1 out enabled */ Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* B2-Slot 1 STIO1 out enabled */ Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* B1-Slot 0 STIO2 in enabled */ Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* B2-Slot 1 STIO2 in enabled */ /* Finally enable IRQ output */ cs->hw.hfcpci.int_m2 = HFCPCI_IRQ_ENABLE; Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); if (Read_hfc(cs, HFCPCI_INT_S1)); } /***************************************************/ /* Timer function called when kernel timer expires */ /***************************************************/ static void hfcpci_Timer(struct IsdnCardState *cs) { cs->hw.hfcpci.timer.expires = jiffies + 75; /* WD RESET */ /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcpci.ctmt | 0x80); add_timer(&cs->hw.hfcpci.timer); */ } /*********************************/ /* schedule a new D-channel task */ /*********************************/ static void sched_event_D_pci(struct IsdnCardState *cs, int event) { test_and_set_bit(event, &cs->event); schedule_work(&cs->tqueue); } /*********************************/ /* schedule a new b_channel task */ /*********************************/ static void hfcpci_sched_event(struct BCState *bcs, int event) { test_and_set_bit(event, &bcs->event); schedule_work(&bcs->tqueue); } /************************************************/ /* select a b-channel entry matching and active */ /************************************************/ static struct BCState * Sel_BCS(struct IsdnCardState *cs, int channel) { if (cs->bcs[0].mode && (cs->bcs[0].channel == channel)) return (&cs->bcs[0]); else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel)) return (&cs->bcs[1]); else return (NULL); } /***************************************/ /* clear the desired B-channel rx fifo */ /***************************************/ static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo) { u_char fifo_state; bzfifo_type *bzr; if (fifo) { bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2; fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2RX; } else { bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1; fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1RX; } if (fifo_state) cs->hw.hfcpci.fifo_en ^= fifo_state; Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en); cs->hw.hfcpci.last_bfifo_cnt[fifo] = 0; bzr->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1; bzr->za[MAX_B_FRAMES].z2 = bzr->za[MAX_B_FRAMES].z1; bzr->f1 = MAX_B_FRAMES; bzr->f2 = bzr->f1; /* init F pointers to remain constant */ if (fifo_state) cs->hw.hfcpci.fifo_en |= fifo_state; Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en); } /***************************************/ /* clear the desired B-channel tx fifo */ /***************************************/ static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo) { u_char fifo_state; bzfifo_type *bzt; if (fifo) { bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2; 
fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2TX; } else { bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1; fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1TX; } if (fifo_state) cs->hw.hfcpci.fifo_en ^= fifo_state; Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en); bzt->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1; bzt->za[MAX_B_FRAMES].z2 = bzt->za[MAX_B_FRAMES].z1; bzt->f1 = MAX_B_FRAMES; bzt->f2 = bzt->f1; /* init F pointers to remain constant */ if (fifo_state) cs->hw.hfcpci.fifo_en |= fifo_state; Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en); } /*********************************************/ /* read a complete B-frame out of the buffer */ /*********************************************/ static struct sk_buff * hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type *bz, u_char *bdata, int count) { u_char *ptr, *ptr1, new_f2; struct sk_buff *skb; struct IsdnCardState *cs = bcs->cs; int total, maxlen, new_z2; z_type *zp; if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) debugl1(cs, "hfcpci_empty_fifo"); zp = &bz->za[bz->f2]; /* point to Z-Regs */ new_z2 = zp->z2 + count; /* new position in fifo */ if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL)) new_z2 -= B_FIFO_SIZE; /* buffer wrap */ new_f2 = (bz->f2 + 1) & MAX_B_FRAMES; if ((count > HSCX_BUFMAX + 3) || (count < 4) || (*(bdata + (zp->z1 - B_SUB_VAL)))) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "hfcpci_empty_fifo: incoming packet invalid length %d or crc", count); #ifdef ERROR_STATISTIC bcs->err_inv++; #endif bz->za[new_f2].z2 = new_z2; bz->f2 = new_f2; /* next buffer */ skb = NULL; } else if (!(skb = dev_alloc_skb(count - 3))) printk(KERN_WARNING "HFCPCI: receive out of memory\n"); else { total = count; count -= 3; ptr = skb_put(skb, count); if (zp->z2 + count <= B_FIFO_SIZE + B_SUB_VAL) maxlen = count; /* complete transfer */ else maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */ ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */ memcpy(ptr, ptr1, maxlen); /* copy data */ count -= maxlen; if (count) { /* rest remaining */ ptr += maxlen; ptr1 = bdata; /* start of buffer */ memcpy(ptr, ptr1, count); /* rest */ } bz->za[new_f2].z2 = new_z2; bz->f2 = new_f2; /* next buffer */ } return (skb); } /*******************************/ /* D-channel receive procedure */ /*******************************/ static int receive_dmsg(struct IsdnCardState *cs) { struct sk_buff *skb; int maxlen; int rcnt, total; int count = 5; u_char *ptr, *ptr1; dfifo_type *df; z_type *zp; df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_rx; if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { debugl1(cs, "rec_dmsg blocked"); return (1); } while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) { zp = &df->za[df->f2 & D_FREG_MASK]; rcnt = zp->z1 - zp->z2; if (rcnt < 0) rcnt += D_FIFO_SIZE; rcnt++; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)", df->f1, df->f2, zp->z1, zp->z2, rcnt); if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) || (df->data[zp->z1])) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "empty_fifo hfcpci packet inv. 
len %d or crc %d", rcnt, df->data[zp->z1]); #ifdef ERROR_STATISTIC cs->err_rx++; #endif df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */ df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + rcnt) & (D_FIFO_SIZE - 1); } else if ((skb = dev_alloc_skb(rcnt - 3))) { total = rcnt; rcnt -= 3; ptr = skb_put(skb, rcnt); if (zp->z2 + rcnt <= D_FIFO_SIZE) maxlen = rcnt; /* complete transfer */ else maxlen = D_FIFO_SIZE - zp->z2; /* maximum */ ptr1 = df->data + zp->z2; /* start of data */ memcpy(ptr, ptr1, maxlen); /* copy data */ rcnt -= maxlen; if (rcnt) { /* rest remaining */ ptr += maxlen; ptr1 = df->data; /* start of buffer */ memcpy(ptr, ptr1, rcnt); /* rest */ } df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */ df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + total) & (D_FIFO_SIZE - 1); skb_queue_tail(&cs->rq, skb); sched_event_D_pci(cs, D_RCVBUFREADY); } else printk(KERN_WARNING "HFC-PCI: D receive out of memory\n"); } test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); return (1); } /*******************************************************************************/ /* check for transparent receive data and read max one threshold size if avail */ /*******************************************************************************/ static int hfcpci_empty_fifo_trans(struct BCState *bcs, bzfifo_type *bz, u_char *bdata) { unsigned short *z1r, *z2r; int new_z2, fcnt, maxlen; struct sk_buff *skb; u_char *ptr, *ptr1; z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */ z2r = z1r + 1; if (!(fcnt = *z1r - *z2r)) return (0); /* no data avail */ if (fcnt <= 0) fcnt += B_FIFO_SIZE; /* bytes actually buffered */ if (fcnt > HFCPCI_BTRANS_THRESHOLD) fcnt = HFCPCI_BTRANS_THRESHOLD; /* limit size */ new_z2 = *z2r + fcnt; /* new position in fifo */ if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL)) new_z2 -= B_FIFO_SIZE; /* buffer wrap */ if (!(skb = dev_alloc_skb(fcnt))) printk(KERN_WARNING "HFCPCI: receive out of memory\n"); else { ptr = skb_put(skb, fcnt); if (*z2r + fcnt <= B_FIFO_SIZE + B_SUB_VAL) maxlen = fcnt; /* complete transfer */ else maxlen = B_FIFO_SIZE + B_SUB_VAL - *z2r; /* maximum */ ptr1 = bdata + (*z2r - B_SUB_VAL); /* start of data */ memcpy(ptr, ptr1, maxlen); /* copy data */ fcnt -= maxlen; if (fcnt) { /* rest remaining */ ptr += maxlen; ptr1 = bdata; /* start of buffer */ memcpy(ptr, ptr1, fcnt); /* rest */ } skb_queue_tail(&bcs->rqueue, skb); hfcpci_sched_event(bcs, B_RCVBUFREADY); } *z2r = new_z2; /* new position */ return (1); } /* hfcpci_empty_fifo_trans */ /**********************************/ /* B-channel main receive routine */ /**********************************/ static void main_rec_hfcpci(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; int rcnt, real_fifo; int receive, count = 5; struct sk_buff *skb; bzfifo_type *bz; u_char *bdata; z_type *zp; if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) { bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2; bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2; real_fifo = 1; } else { bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1; bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b1; real_fifo = 0; } Begin: count--; if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { debugl1(cs, "rec_data %d blocked", bcs->channel); return; } if (bz->f1 != bz->f2) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfcpci rec %d f1(%d) f2(%d)", bcs->channel, bz->f1, bz->f2); zp = &bz->za[bz->f2]; rcnt = zp->z1 - zp->z2; if (rcnt < 0) rcnt += B_FIFO_SIZE; rcnt++; if (cs->debug 
& L1_DEB_HSCX) debugl1(cs, "hfcpci rec %d z1(%x) z2(%x) cnt(%d)", bcs->channel, zp->z1, zp->z2, rcnt); if ((skb = hfcpci_empty_fifo(bcs, bz, bdata, rcnt))) { skb_queue_tail(&bcs->rqueue, skb); hfcpci_sched_event(bcs, B_RCVBUFREADY); } rcnt = bz->f1 - bz->f2; if (rcnt < 0) rcnt += MAX_B_FRAMES + 1; if (cs->hw.hfcpci.last_bfifo_cnt[real_fifo] > rcnt + 1) { rcnt = 0; hfcpci_clear_fifo_rx(cs, real_fifo); } cs->hw.hfcpci.last_bfifo_cnt[real_fifo] = rcnt; if (rcnt > 1) receive = 1; else receive = 0; } else if (bcs->mode == L1_MODE_TRANS) receive = hfcpci_empty_fifo_trans(bcs, bz, bdata); else receive = 0; test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); if (count && receive) goto Begin; } /**************************/ /* D-channel send routine */ /**************************/ static void hfcpci_fill_dfifo(struct IsdnCardState *cs) { int fcnt; int count, new_z1, maxlen; dfifo_type *df; u_char *src, *dst, new_f1; if (!cs->tx_skb) return; if (cs->tx_skb->len <= 0) return; df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_tx; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "hfcpci_fill_Dfifo f1(%d) f2(%d) z1(f1)(%x)", df->f1, df->f2, df->za[df->f1 & D_FREG_MASK].z1); fcnt = df->f1 - df->f2; /* frame count actually buffered */ if (fcnt < 0) fcnt += (MAX_D_FRAMES + 1); /* if wrap around */ if (fcnt > (MAX_D_FRAMES - 1)) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "hfcpci_fill_Dfifo more as 14 frames"); #ifdef ERROR_STATISTIC cs->err_tx++; #endif return; } /* now determine free bytes in FIFO buffer */ count = df->za[df->f2 & D_FREG_MASK].z2 - df->za[df->f1 & D_FREG_MASK].z1 - 1; if (count <= 0) count += D_FIFO_SIZE; /* count now contains available bytes */ if (cs->debug & L1_DEB_ISAC) debugl1(cs, "hfcpci_fill_Dfifo count(%u/%d)", cs->tx_skb->len, count); if (count < cs->tx_skb->len) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "hfcpci_fill_Dfifo no fifo mem"); return; } count = cs->tx_skb->len; /* get frame len */ new_z1 = (df->za[df->f1 & D_FREG_MASK].z1 + count) & (D_FIFO_SIZE - 1); new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1); src = cs->tx_skb->data; /* source pointer */ dst = df->data + df->za[df->f1 & D_FREG_MASK].z1; maxlen = D_FIFO_SIZE - df->za[df->f1 & D_FREG_MASK].z1; /* end fifo */ if (maxlen > count) maxlen = count; /* limit size */ memcpy(dst, src, maxlen); /* first copy */ count -= maxlen; /* remaining bytes */ if (count) { dst = df->data; /* start of buffer */ src += maxlen; /* new position */ memcpy(dst, src, count); } df->za[new_f1 & D_FREG_MASK].z1 = new_z1; /* for next buffer */ df->za[df->f1 & D_FREG_MASK].z1 = new_z1; /* new pos actual buffer */ df->f1 = new_f1; /* next frame */ dev_kfree_skb_any(cs->tx_skb); cs->tx_skb = NULL; } /**************************/ /* B-channel send routine */ /**************************/ static void hfcpci_fill_fifo(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; int maxlen, fcnt; int count, new_z1; bzfifo_type *bz; u_char *bdata; u_char new_f1, *src, *dst; unsigned short *z1t, *z2t; if (!bcs->tx_skb) return; if (bcs->tx_skb->len <= 0) return; if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) { bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2; bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b2; } else { bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1; bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b1; } if (bcs->mode == L1_MODE_TRANS) { z1t = &bz->za[MAX_B_FRAMES].z1; z2t = z1t + 1; if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfcpci_fill_fifo_trans %d z1(%x) z2(%x)", bcs->channel, *z1t, 
*z2t); fcnt = *z2t - *z1t; if (fcnt <= 0) fcnt += B_FIFO_SIZE; /* fcnt contains available bytes in fifo */ fcnt = B_FIFO_SIZE - fcnt; /* remaining bytes to send */ while ((fcnt < 2 * HFCPCI_BTRANS_THRESHOLD) && (bcs->tx_skb)) { if (bcs->tx_skb->len < B_FIFO_SIZE - fcnt) { /* data is suitable for fifo */ count = bcs->tx_skb->len; new_z1 = *z1t + count; /* new buffer Position */ if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL)) new_z1 -= B_FIFO_SIZE; /* buffer wrap */ src = bcs->tx_skb->data; /* source pointer */ dst = bdata + (*z1t - B_SUB_VAL); maxlen = (B_FIFO_SIZE + B_SUB_VAL) - *z1t; /* end of fifo */ if (maxlen > count) maxlen = count; /* limit size */ memcpy(dst, src, maxlen); /* first copy */ count -= maxlen; /* remaining bytes */ if (count) { dst = bdata; /* start of buffer */ src += maxlen; /* new position */ memcpy(dst, src, count); } bcs->tx_cnt -= bcs->tx_skb->len; fcnt += bcs->tx_skb->len; *z1t = new_z1; /* now send data */ } else if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfcpci_fill_fifo_trans %d frame length %d discarded", bcs->channel, bcs->tx_skb->len); if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) && (PACKET_NOACK != bcs->tx_skb->pkt_type)) { u_long flags; spin_lock_irqsave(&bcs->aclock, flags); bcs->ackcnt += bcs->tx_skb->len; spin_unlock_irqrestore(&bcs->aclock, flags); schedule_event(bcs, B_ACKPENDING); } dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = skb_dequeue(&bcs->squeue); /* fetch next data */ } test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); return; } if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfcpci_fill_fifo_hdlc %d f1(%d) f2(%d) z1(f1)(%x)", bcs->channel, bz->f1, bz->f2, bz->za[bz->f1].z1); fcnt = bz->f1 - bz->f2; /* frame count actually buffered */ if (fcnt < 0) fcnt += (MAX_B_FRAMES + 1); /* if wrap around */ if (fcnt > (MAX_B_FRAMES - 1)) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfcpci_fill_Bfifo more as 14 frames"); return; } /* now determine free bytes in FIFO buffer */ count = bz->za[bz->f2].z2 - bz->za[bz->f1].z1 - 1; if (count <= 0) count += B_FIFO_SIZE; /* count now contains available bytes */ if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfcpci_fill_fifo %d count(%u/%d),%lx", bcs->channel, bcs->tx_skb->len, count, current->state); if (count < bcs->tx_skb->len) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "hfcpci_fill_fifo no fifo mem"); return; } count = bcs->tx_skb->len; /* get frame len */ new_z1 = bz->za[bz->f1].z1 + count; /* new buffer Position */ if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL)) new_z1 -= B_FIFO_SIZE; /* buffer wrap */ new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES); src = bcs->tx_skb->data; /* source pointer */ dst = bdata + (bz->za[bz->f1].z1 - B_SUB_VAL); maxlen = (B_FIFO_SIZE + B_SUB_VAL) - bz->za[bz->f1].z1; /* end fifo */ if (maxlen > count) maxlen = count; /* limit size */ memcpy(dst, src, maxlen); /* first copy */ count -= maxlen; /* remaining bytes */ if (count) { dst = bdata; /* start of buffer */ src += maxlen; /* new position */ memcpy(dst, src, count); } bcs->tx_cnt -= bcs->tx_skb->len; if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) && (PACKET_NOACK != bcs->tx_skb->pkt_type)) { u_long flags; spin_lock_irqsave(&bcs->aclock, flags); bcs->ackcnt += bcs->tx_skb->len; spin_unlock_irqrestore(&bcs->aclock, flags); schedule_event(bcs, B_ACKPENDING); } bz->za[new_f1].z1 = new_z1; /* for next buffer */ bz->f1 = new_f1; /* next frame */ dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); } /**********************************************/ /* D-channel l1 state call for leased NT-mode */ 
/**********************************************/ static void dch_nt_l2l1(struct PStack *st, int pr, void *arg) { struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware; switch (pr) { case (PH_DATA | REQUEST): case (PH_PULL | REQUEST): case (PH_PULL | INDICATION): st->l1.l1hw(st, pr, arg); break; case (PH_ACTIVATE | REQUEST): st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL); break; case (PH_TESTLOOP | REQUEST): if (1 & (long) arg) debugl1(cs, "PH_TEST_LOOP B1"); if (2 & (long) arg) debugl1(cs, "PH_TEST_LOOP B2"); if (!(3 & (long) arg)) debugl1(cs, "PH_TEST_LOOP DISABLED"); st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg); break; default: if (cs->debug) debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr); break; } } /***********************/ /* set/reset echo mode */ /***********************/ static int hfcpci_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) { u_long flags; int i = *(unsigned int *) ic->parm.num; if ((ic->arg == 98) && (!(cs->hw.hfcpci.int_m1 & (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC + HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC)))) { spin_lock_irqsave(&cs->lock, flags); Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_NT); /* ST-Bit delay for NT-Mode */ Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 0); /* HFC ST G0 */ udelay(10); cs->hw.hfcpci.sctrl |= SCTRL_MODE_NT; Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl); /* set NT-mode */ udelay(10); Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 1); /* HFC ST G1 */ udelay(10); Write_hfc(cs, HFCPCI_STATES, 1 | HFCPCI_ACTIVATE | HFCPCI_DO_ACTION); cs->dc.hfcpci.ph_state = 1; cs->hw.hfcpci.nt_mode = 1; cs->hw.hfcpci.nt_timer = 0; cs->stlist->l2.l2l1 = dch_nt_l2l1; spin_unlock_irqrestore(&cs->lock, flags); debugl1(cs, "NT mode activated"); return (0); } if ((cs->chanlimit > 1) || (cs->hw.hfcpci.bswapped) || (cs->hw.hfcpci.nt_mode) || (ic->arg != 12)) return (-EINVAL); spin_lock_irqsave(&cs->lock, flags); if (i) { cs->logecho = 1; cs->hw.hfcpci.trm |= 0x20; /* enable echo chan */ cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_B2REC; cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2RX; } else { cs->logecho = 0; cs->hw.hfcpci.trm &= ~0x20; /* disable echo chan */ cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_B2REC; cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2RX; } cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA; cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA; cs->hw.hfcpci.conn |= 0x10; /* B2-IOM -> B2-ST */ cs->hw.hfcpci.ctmt &= ~2; Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt); Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r); Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl); Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn); Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm); Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en); Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1); spin_unlock_irqrestore(&cs->lock, flags); return (0); } /* hfcpci_auxcmd */ /*****************************/ /* E-channel receive routine */ /*****************************/ static void receive_emsg(struct IsdnCardState *cs) { int rcnt; int receive, count = 5; bzfifo_type *bz; u_char *bdata; z_type *zp; u_char *ptr, *ptr1, new_f2; int total, maxlen, new_z2; u_char e_buffer[256]; bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2; bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2; Begin: count--; if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { debugl1(cs, "echo_rec_data blocked"); return; } if (bz->f1 != bz->f2) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "hfcpci e_rec f1(%d) f2(%d)", bz->f1, bz->f2); zp = &bz->za[bz->f2]; rcnt = zp->z1 - zp->z2; if (rcnt < 0) rcnt += B_FIFO_SIZE; 
rcnt++; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "hfcpci e_rec z1(%x) z2(%x) cnt(%d)", zp->z1, zp->z2, rcnt); new_z2 = zp->z2 + rcnt; /* new position in fifo */ if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL)) new_z2 -= B_FIFO_SIZE; /* buffer wrap */ new_f2 = (bz->f2 + 1) & MAX_B_FRAMES; if ((rcnt > 256 + 3) || (count < 4) || (*(bdata + (zp->z1 - B_SUB_VAL)))) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "hfcpci_empty_echan: incoming packet invalid length %d or crc", rcnt); bz->za[new_f2].z2 = new_z2; bz->f2 = new_f2; /* next buffer */ } else { total = rcnt; rcnt -= 3; ptr = e_buffer; if (zp->z2 <= B_FIFO_SIZE + B_SUB_VAL) maxlen = rcnt; /* complete transfer */ else maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */ ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */ memcpy(ptr, ptr1, maxlen); /* copy data */ rcnt -= maxlen; if (rcnt) { /* rest remaining */ ptr += maxlen; ptr1 = bdata; /* start of buffer */ memcpy(ptr, ptr1, rcnt); /* rest */ } bz->za[new_f2].z2 = new_z2; bz->f2 = new_f2; /* next buffer */ if (cs->debug & DEB_DLOG_HEX) { ptr = cs->dlog; if ((total - 3) < MAX_DLOG_SPACE / 3 - 10) { *ptr++ = 'E'; *ptr++ = 'C'; *ptr++ = 'H'; *ptr++ = 'O'; *ptr++ = ':'; ptr += QuickHex(ptr, e_buffer, total - 3); ptr--; *ptr++ = '\n'; *ptr = 0; HiSax_putstatus(cs, NULL, cs->dlog); } else HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3); } } rcnt = bz->f1 - bz->f2; if (rcnt < 0) rcnt += MAX_B_FRAMES + 1; if (rcnt > 1) receive = 1; else receive = 0; } else receive = 0; test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); if (count && receive) goto Begin; } /* receive_emsg */ /*********************/ /* Interrupt handler */ /*********************/ static irqreturn_t hfcpci_interrupt(int intno, void *dev_id) { u_long flags; struct IsdnCardState *cs = dev_id; u_char exval; struct BCState *bcs; int count = 15; u_char val, stat; if (!(cs->hw.hfcpci.int_m2 & 0x08)) { debugl1(cs, "HFC-PCI: int_m2 %x not initialised", cs->hw.hfcpci.int_m2); return IRQ_NONE; /* not initialised */ } spin_lock_irqsave(&cs->lock, flags); if (HFCPCI_ANYINT & (stat = Read_hfc(cs, HFCPCI_STATUS))) { val = Read_hfc(cs, HFCPCI_INT_S1); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFC-PCI: stat(%02x) s1(%02x)", stat, val); } else { spin_unlock_irqrestore(&cs->lock, flags); return IRQ_NONE; } if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFC-PCI irq %x %s", val, test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ? "locked" : "unlocked"); val &= cs->hw.hfcpci.int_m1; if (val & 0x40) { /* state machine irq */ exval = Read_hfc(cs, HFCPCI_STATES) & 0xf; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcpci.ph_state, exval); cs->dc.hfcpci.ph_state = exval; sched_event_D_pci(cs, D_L1STATECHANGE); val &= ~0x40; } if (val & 0x80) { /* timer irq */ if (cs->hw.hfcpci.nt_mode) { if ((--cs->hw.hfcpci.nt_timer) < 0) sched_event_D_pci(cs, D_L1STATECHANGE); } val &= ~0x80; Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER); } while (val) { if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { cs->hw.hfcpci.int_s1 |= val; spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } if (cs->hw.hfcpci.int_s1 & 0x18) { exval = val; val = cs->hw.hfcpci.int_s1; cs->hw.hfcpci.int_s1 = exval; } if (val & 0x08) { if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 
1 : 0))) { if (cs->debug) debugl1(cs, "hfcpci spurious 0x08 IRQ"); } else main_rec_hfcpci(bcs); } if (val & 0x10) { if (cs->logecho) receive_emsg(cs); else if (!(bcs = Sel_BCS(cs, 1))) { if (cs->debug) debugl1(cs, "hfcpci spurious 0x10 IRQ"); } else main_rec_hfcpci(bcs); } if (val & 0x01) { if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) { if (cs->debug) debugl1(cs, "hfcpci spurious 0x01 IRQ"); } else { if (bcs->tx_skb) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcpci_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "fill_data %d blocked", bcs->channel); } else { if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcpci_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "fill_data %d blocked", bcs->channel); } else { hfcpci_sched_event(bcs, B_XMTBUFREADY); } } } } if (val & 0x02) { if (!(bcs = Sel_BCS(cs, 1))) { if (cs->debug) debugl1(cs, "hfcpci spurious 0x02 IRQ"); } else { if (bcs->tx_skb) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcpci_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "fill_data %d blocked", bcs->channel); } else { if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcpci_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "fill_data %d blocked", bcs->channel); } else { hfcpci_sched_event(bcs, B_XMTBUFREADY); } } } } if (val & 0x20) { /* receive dframe */ receive_dmsg(cs); } if (val & 0x04) { /* dframe transmitted */ if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) sched_event_D_pci(cs, D_CLEARBUSY); if (cs->tx_skb) { if (cs->tx_skb->len) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcpci_fill_dfifo(cs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else { debugl1(cs, "hfcpci_fill_dfifo irq blocked"); } goto afterXPR; } else { dev_kfree_skb_irq(cs->tx_skb); cs->tx_cnt = 0; cs->tx_skb = NULL; } } if ((cs->tx_skb = skb_dequeue(&cs->sq))) { cs->tx_cnt = 0; if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcpci_fill_dfifo(cs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else { debugl1(cs, "hfcpci_fill_dfifo irq blocked"); } } else sched_event_D_pci(cs, D_XMTBUFREADY); } afterXPR: if (cs->hw.hfcpci.int_s1 && count--) { val = cs->hw.hfcpci.int_s1; cs->hw.hfcpci.int_s1 = 0; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFC-PCI irq %x loop %d", val, 15 - count); } else val = 0; } spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } /********************************************************************/ /* timer callback for D-chan busy resolution. 
Currently no function */ /********************************************************************/ static void hfcpci_dbusy_timer(struct IsdnCardState *cs) { } /*************************************/ /* Layer 1 D-channel hardware access */ /*************************************/ static void HFCPCI_l1hw(struct PStack *st, int pr, void *arg) { u_long flags; struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware; struct sk_buff *skb = arg; switch (pr) { case (PH_DATA | REQUEST): if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); spin_lock_irqsave(&cs->lock, flags); if (cs->tx_skb) { skb_queue_tail(&cs->sq, skb); #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA Queued", 0); #endif } else { cs->tx_skb = skb; cs->tx_cnt = 0; #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA", 0); #endif if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcpci_fill_dfifo(cs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "hfcpci_fill_dfifo blocked"); } spin_unlock_irqrestore(&cs->lock, flags); break; case (PH_PULL | INDICATION): spin_lock_irqsave(&cs->lock, flags); if (cs->tx_skb) { if (cs->debug & L1_DEB_WARN) debugl1(cs, " l2l1 tx_skb exist this shouldn't happen"); skb_queue_tail(&cs->sq, skb); spin_unlock_irqrestore(&cs->lock, flags); break; } if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); cs->tx_skb = skb; cs->tx_cnt = 0; #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA_PULLED", 0); #endif if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcpci_fill_dfifo(cs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "hfcpci_fill_dfifo blocked"); spin_unlock_irqrestore(&cs->lock, flags); break; case (PH_PULL | REQUEST): #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) debugl1(cs, "-> PH_REQUEST_PULL"); #endif if (!cs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (HW_RESET | REQUEST): spin_lock_irqsave(&cs->lock, flags); Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3); /* HFC ST 3 */ udelay(6); Write_hfc(cs, HFCPCI_STATES, 3); /* HFC ST 2 */ cs->hw.hfcpci.mst_m |= HFCPCI_MASTER; Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m); Write_hfc(cs, HFCPCI_STATES, HFCPCI_ACTIVATE | HFCPCI_DO_ACTION); spin_unlock_irqrestore(&cs->lock, flags); l1_msg(cs, HW_POWERUP | CONFIRM, NULL); break; case (HW_ENABLE | REQUEST): spin_lock_irqsave(&cs->lock, flags); Write_hfc(cs, HFCPCI_STATES, HFCPCI_DO_ACTION); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_DEACTIVATE | REQUEST): spin_lock_irqsave(&cs->lock, flags); cs->hw.hfcpci.mst_m &= ~HFCPCI_MASTER; Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_INFO3 | REQUEST): spin_lock_irqsave(&cs->lock, flags); cs->hw.hfcpci.mst_m |= HFCPCI_MASTER; Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_TESTLOOP | REQUEST): spin_lock_irqsave(&cs->lock, flags); switch ((long) arg) { case (1): Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* tx slot */ Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* rx slot */ cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~7) | 1; Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn); 
break; case (2): Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* tx slot */ Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* rx slot */ cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~0x38) | 0x08; Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn); break; default: spin_unlock_irqrestore(&cs->lock, flags); if (cs->debug & L1_DEB_WARN) debugl1(cs, "hfcpci_l1hw loop invalid %4lx", (long) arg); return; } cs->hw.hfcpci.trm |= 0x80; /* enable IOM-loop */ Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm); spin_unlock_irqrestore(&cs->lock, flags); break; default: if (cs->debug & L1_DEB_WARN) debugl1(cs, "hfcpci_l1hw unknown pr %4x", pr); break; } } /***********************************************/ /* called during init setting l1 stack pointer */ /***********************************************/ static void setstack_hfcpci(struct PStack *st, struct IsdnCardState *cs) { st->l1.l1hw = HFCPCI_l1hw; } /**************************************/ /* send B-channel data if not blocked */ /**************************************/ static void hfcpci_send_data(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcpci_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "send_data %d blocked", bcs->channel); } /***************************************************************/ /* activate/deactivate hardware for selected channels and mode */ /***************************************************************/ static void mode_hfcpci(struct BCState *bcs, int mode, int bc) { struct IsdnCardState *cs = bcs->cs; int fifo2; if (cs->debug & L1_DEB_HSCX) debugl1(cs, "HFCPCI bchannel mode %d bchan %d/%d", mode, bc, bcs->channel); bcs->mode = mode; bcs->channel = bc; fifo2 = bc; if (cs->chanlimit > 1) { cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */ cs->hw.hfcpci.sctrl_e &= ~0x80; } else { if (bc) { if (mode != L1_MODE_NULL) { cs->hw.hfcpci.bswapped = 1; /* B1 and B2 exchanged */ cs->hw.hfcpci.sctrl_e |= 0x80; } else { cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */ cs->hw.hfcpci.sctrl_e &= ~0x80; } fifo2 = 0; } else { cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */ cs->hw.hfcpci.sctrl_e &= ~0x80; } } switch (mode) { case (L1_MODE_NULL): if (bc) { cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA; cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA; } else { cs->hw.hfcpci.sctrl &= ~SCTRL_B1_ENA; cs->hw.hfcpci.sctrl_r &= ~SCTRL_B1_ENA; } if (fifo2) { cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2; cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC); } else { cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1; cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC); } break; case (L1_MODE_TRANS): hfcpci_clear_fifo_rx(cs, fifo2); hfcpci_clear_fifo_tx(cs, fifo2); if (bc) { cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA; cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA; } else { cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA; cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA; } if (fifo2) { cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2; cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC); cs->hw.hfcpci.ctmt |= 2; cs->hw.hfcpci.conn &= ~0x18; } else { cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1; cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC); cs->hw.hfcpci.ctmt |= 1; cs->hw.hfcpci.conn &= ~0x03; } break; case (L1_MODE_HDLC): hfcpci_clear_fifo_rx(cs, fifo2); hfcpci_clear_fifo_tx(cs, fifo2); if (bc) { cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA; cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA; } else { cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA; cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA; } if 
(fifo2) { cs->hw.hfcpci.last_bfifo_cnt[1] = 0; cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2; cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC); cs->hw.hfcpci.ctmt &= ~2; cs->hw.hfcpci.conn &= ~0x18; } else { cs->hw.hfcpci.last_bfifo_cnt[0] = 0; cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1; cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC); cs->hw.hfcpci.ctmt &= ~1; cs->hw.hfcpci.conn &= ~0x03; } break; case (L1_MODE_EXTRN): if (bc) { cs->hw.hfcpci.conn |= 0x10; cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA; cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA; cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2; cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC); } else { cs->hw.hfcpci.conn |= 0x02; cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA; cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA; cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1; cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC); } break; } Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e); Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1); Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en); Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl); Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r); Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt); Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn); } /******************************/ /* Layer2 -> Layer 1 Transfer */ /******************************/ static void hfcpci_l2l1(struct PStack *st, int pr, void *arg) { struct BCState *bcs = st->l1.bcs; u_long flags; struct sk_buff *skb = arg; switch (pr) { case (PH_DATA | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { skb_queue_tail(&bcs->squeue, skb); } else { bcs->tx_skb = skb; // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->cs->BC_Send_Data(bcs); } spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | INDICATION): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { spin_unlock_irqrestore(&bcs->cs->lock, flags); printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n"); break; } // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->tx_skb = skb; bcs->cs->BC_Send_Data(bcs); spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | REQUEST): if (!bcs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (PH_ACTIVATE | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag); mode_hfcpci(bcs, st->l1.mode, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | REQUEST): l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | CONFIRM): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag); test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); mode_hfcpci(bcs, 0, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL); break; } } /******************************************/ /* deactivate B-channel access and queues */ /******************************************/ static void close_hfcpci(struct BCState *bcs) { mode_hfcpci(bcs, 0, bcs->channel); if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) { skb_queue_purge(&bcs->rqueue); skb_queue_purge(&bcs->squeue); if (bcs->tx_skb) { dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); } } } /*************************************/ /* init B-channel queues and control */ /*************************************/ static int 
open_hfcpcistate(struct IsdnCardState *cs, struct BCState *bcs) { if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) { skb_queue_head_init(&bcs->rqueue); skb_queue_head_init(&bcs->squeue); } bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); bcs->event = 0; bcs->tx_cnt = 0; return (0); } /*********************************/ /* inits the stack for B-channel */ /*********************************/ static int setstack_2b(struct PStack *st, struct BCState *bcs) { bcs->channel = st->l1.bc; if (open_hfcpcistate(st->l1.hardware, bcs)) return (-1); st->l1.bcs = bcs; st->l2.l2l1 = hfcpci_l2l1; setstack_manager(st); bcs->st = st; setstack_l1_B(st); return (0); } /***************************/ /* handle L1 state changes */ /***************************/ static void hfcpci_bh(struct work_struct *work) { struct IsdnCardState *cs = container_of(work, struct IsdnCardState, tqueue); u_long flags; // struct PStack *stptr; if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { if (!cs->hw.hfcpci.nt_mode) switch (cs->dc.hfcpci.ph_state) { case (0): l1_msg(cs, HW_RESET | INDICATION, NULL); break; case (3): l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL); break; case (8): l1_msg(cs, HW_RSYNC | INDICATION, NULL); break; case (6): l1_msg(cs, HW_INFO2 | INDICATION, NULL); break; case (7): l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL); break; default: break; } else { spin_lock_irqsave(&cs->lock, flags); switch (cs->dc.hfcpci.ph_state) { case (2): if (cs->hw.hfcpci.nt_timer < 0) { cs->hw.hfcpci.nt_timer = 0; cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER; Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1); /* Clear already pending ints */ if (Read_hfc(cs, HFCPCI_INT_S1)); Write_hfc(cs, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE); udelay(10); Write_hfc(cs, HFCPCI_STATES, 4); cs->dc.hfcpci.ph_state = 4; } else { cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_TIMER; Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1); cs->hw.hfcpci.ctmt &= ~HFCPCI_AUTO_TIMER; cs->hw.hfcpci.ctmt |= HFCPCI_TIM3_125; Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER); Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER); cs->hw.hfcpci.nt_timer = NT_T1_COUNT; Write_hfc(cs, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3); /* allow G2 -> G3 transition */ } break; case (1): case (3): case (4): cs->hw.hfcpci.nt_timer = 0; cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER; Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1); break; default: break; } spin_unlock_irqrestore(&cs->lock, flags); } } if (test_and_clear_bit(D_RCVBUFREADY, &cs->event)) DChannel_proc_rcv(cs); if (test_and_clear_bit(D_XMTBUFREADY, &cs->event)) DChannel_proc_xmt(cs); } /********************************/ /* called for card init message */ /********************************/ static void inithfcpci(struct IsdnCardState *cs) { cs->bcs[0].BC_SetStack = setstack_2b; cs->bcs[1].BC_SetStack = setstack_2b; cs->bcs[0].BC_Close = close_hfcpci; cs->bcs[1].BC_Close = close_hfcpci; cs->dbusytimer.function = (void *) hfcpci_dbusy_timer; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); mode_hfcpci(cs->bcs, 0, 0); mode_hfcpci(cs->bcs + 1, 0, 1); } /*******************************************/ /* handle card messages from control layer */ /*******************************************/ static int hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFCPCI: card_msg %x", mt); switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_hfcpci(cs); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_RELEASE: 
release_io_hfcpci(cs); return (0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); inithfcpci(cs); reset_hfcpci(cs); spin_unlock_irqrestore(&cs->lock, flags); msleep(80); /* Timeout 80ms */ /* now switch timer interrupt off */ spin_lock_irqsave(&cs->lock, flags); cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER; Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1); /* reinit mode reg */ Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_TEST: return (0); } return (0); } /* this variable is used as card index when more than one cards are present */ static struct pci_dev *dev_hfcpci = NULL; int setup_hfcpci(struct IsdnCard *card) { u_long flags; struct IsdnCardState *cs = card->cs; char tmp[64]; int i; struct pci_dev *tmp_hfcpci = NULL; #ifdef __BIG_ENDIAN #error "not running on big endian machines now" #endif strcpy(tmp, hfcpci_revision); printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp)); cs->hw.hfcpci.int_s1 = 0; cs->dc.hfcpci.ph_state = 0; cs->hw.hfcpci.fifo = 255; if (cs->typ != ISDN_CTYPE_HFC_PCI) return (0); i = 0; while (id_list[i].vendor_id) { tmp_hfcpci = hisax_find_pci_device(id_list[i].vendor_id, id_list[i].device_id, dev_hfcpci); i++; if (tmp_hfcpci) { dma_addr_t dma_mask = DMA_BIT_MASK(32) & ~0x7fffUL; if (pci_enable_device(tmp_hfcpci)) continue; if (pci_set_dma_mask(tmp_hfcpci, dma_mask)) { printk(KERN_WARNING "HiSax hfc_pci: No suitable DMA available.\n"); continue; } if (pci_set_consistent_dma_mask(tmp_hfcpci, dma_mask)) { printk(KERN_WARNING "HiSax hfc_pci: No suitable consistent DMA available.\n"); continue; } pci_set_master(tmp_hfcpci); if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[0].start & PCI_BASE_ADDRESS_IO_MASK))) continue; else break; } } if (!tmp_hfcpci) { printk(KERN_WARNING "HFC-PCI: No PCI card found\n"); return (0); } i--; dev_hfcpci = tmp_hfcpci; /* old device */ cs->hw.hfcpci.dev = dev_hfcpci; cs->irq = dev_hfcpci->irq; if (!cs->irq) { printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n"); return (0); } cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start; printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name); if (!cs->hw.hfcpci.pci_io) { printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n"); return (0); } /* Allocate memory for FIFOS */ cs->hw.hfcpci.fifos = pci_alloc_consistent(cs->hw.hfcpci.dev, 0x8000, &cs->hw.hfcpci.dma); if (!cs->hw.hfcpci.fifos) { printk(KERN_WARNING "HFC-PCI: Error allocating FIFO memory!\n"); return 0; } if (cs->hw.hfcpci.dma & 0x7fff) { printk(KERN_WARNING "HFC-PCI: Error DMA memory not on 32K boundary (%lx)\n", (u_long)cs->hw.hfcpci.dma); pci_free_consistent(cs->hw.hfcpci.dev, 0x8000, cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma); return 0; } pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u32)cs->hw.hfcpci.dma); cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256); printk(KERN_INFO "HFC-PCI: defined at mem %p fifo %p(%lx) IRQ %d HZ %d\n", cs->hw.hfcpci.pci_io, cs->hw.hfcpci.fifos, (u_long)cs->hw.hfcpci.dma, cs->irq, HZ); spin_lock_irqsave(&cs->lock, flags); pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */ cs->hw.hfcpci.int_m2 = 0; /* disable alle interrupts */ cs->hw.hfcpci.int_m1 = 0; Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1); Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); /* At this point the needed PCI config is done */ /* fifos are 
still not enabled */
	INIT_WORK(&cs->tqueue, hfcpci_bh);
	cs->setstack_d = setstack_hfcpci;
	cs->BC_Send_Data = &hfcpci_send_data;
	cs->readisac = NULL;
	cs->writeisac = NULL;
	cs->readisacfifo = NULL;
	cs->writeisacfifo = NULL;
	cs->BC_Read_Reg = NULL;
	cs->BC_Write_Reg = NULL;
	cs->irq_func = &hfcpci_interrupt;
	cs->irq_flags |= IRQF_SHARED;
	cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
	cs->hw.hfcpci.timer.data = (long) cs;
	init_timer(&cs->hw.hfcpci.timer);
	cs->cardmsg = &hfcpci_card_msg;
	cs->auxcmd = &hfcpci_auxcmd;
	spin_unlock_irqrestore(&cs->lock, flags);
	return (1);
}
gpl-2.0
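The HFC-PCI driver above guards hfcpci_fill_dfifo()/hfcpci_fill_fifo() with test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) rather than a sleeping lock, so a second caller simply skips the fill (and logs "blocked") instead of waiting. A minimal portable sketch of that try-lock shape, using C11 atomic_flag in place of the kernel's bit operations; the flag and the fill callback here are invented for illustration, not the driver's:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag fifo_busy = ATOMIC_FLAG_INIT;

/* Run fill() only if no other context is already filling; never blocks. */
static bool fill_fifo_trylock(void (*fill)(void *), void *ctx)
{
	if (atomic_flag_test_and_set(&fifo_busy))
		return false;   /* lost the race: skip, as the driver does */
	fill(ctx);
	atomic_flag_clear(&fifo_busy);
	return true;
}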
AppliedMicro/ENGLinuxLatest
drivers/net/caif/caif_spi_slave.c
2314
6557
/* * Copyright (C) ST-Ericsson AB 2010 * Author: Daniel Martensson * License terms: GNU General Public License (GPL) version 2. */ #include <linux/module.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/string.h> #include <linux/semaphore.h> #include <linux/workqueue.h> #include <linux/completion.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/debugfs.h> #include <net/caif/caif_spi.h> #ifndef CONFIG_CAIF_SPI_SYNC #define SPI_DATA_POS 0 static inline int forward_to_spi_cmd(struct cfspi *cfspi) { return cfspi->rx_cpck_len; } #else #define SPI_DATA_POS SPI_CMD_SZ static inline int forward_to_spi_cmd(struct cfspi *cfspi) { return 0; } #endif int spi_frm_align = 2; /* * SPI padding options. * Warning: must be a base of 2 (& operation used) and can not be zero ! */ int spi_up_head_align = 1 << 1; int spi_up_tail_align = 1 << 0; int spi_down_head_align = 1 << 2; int spi_down_tail_align = 1 << 1; #ifdef CONFIG_DEBUG_FS static inline void debugfs_store_prev(struct cfspi *cfspi) { /* Store previous command for debugging reasons.*/ cfspi->pcmd = cfspi->cmd; /* Store previous transfer. */ cfspi->tx_ppck_len = cfspi->tx_cpck_len; cfspi->rx_ppck_len = cfspi->rx_cpck_len; } #else static inline void debugfs_store_prev(struct cfspi *cfspi) { } #endif void cfspi_xfer(struct work_struct *work) { struct cfspi *cfspi; u8 *ptr = NULL; unsigned long flags; int ret; cfspi = container_of(work, struct cfspi, work); /* Initialize state. */ cfspi->cmd = SPI_CMD_EOT; for (;;) { cfspi_dbg_state(cfspi, CFSPI_STATE_WAITING); /* Wait for master talk or transmit event. */ wait_event_interruptible(cfspi->wait, test_bit(SPI_XFER, &cfspi->state) || test_bit(SPI_TERMINATE, &cfspi->state)); if (test_bit(SPI_TERMINATE, &cfspi->state)) return; #if CFSPI_DBG_PREFILL /* Prefill buffers for easier debugging. */ memset(cfspi->xfer.va_tx, 0xFF, SPI_DMA_BUF_LEN); memset(cfspi->xfer.va_rx, 0xFF, SPI_DMA_BUF_LEN); #endif /* CFSPI_DBG_PREFILL */ cfspi_dbg_state(cfspi, CFSPI_STATE_AWAKE); /* Check whether we have a committed frame. */ if (cfspi->tx_cpck_len) { int len; cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT); /* Copy committed SPI frames after the SPI indication. */ ptr = (u8 *) cfspi->xfer.va_tx; ptr += SPI_IND_SZ; len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len); WARN_ON(len != cfspi->tx_cpck_len); } cfspi_dbg_state(cfspi, CFSPI_STATE_GET_NEXT); /* Get length of next frame to commit. */ cfspi->tx_npck_len = cfspi_xmitlen(cfspi); WARN_ON(cfspi->tx_npck_len > SPI_DMA_BUF_LEN); /* * Add indication and length at the beginning of the frame, * using little endian. */ ptr = (u8 *) cfspi->xfer.va_tx; *ptr++ = SPI_CMD_IND; *ptr++ = (SPI_CMD_IND & 0xFF00) >> 8; *ptr++ = cfspi->tx_npck_len & 0x00FF; *ptr++ = (cfspi->tx_npck_len & 0xFF00) >> 8; /* Calculate length of DMAs. */ cfspi->xfer.tx_dma_len = cfspi->tx_cpck_len + SPI_IND_SZ; cfspi->xfer.rx_dma_len = cfspi->rx_cpck_len + SPI_CMD_SZ; /* Add SPI TX frame alignment padding, if necessary. */ if (cfspi->tx_cpck_len && (cfspi->xfer.tx_dma_len % spi_frm_align)) { cfspi->xfer.tx_dma_len += spi_frm_align - (cfspi->xfer.tx_dma_len % spi_frm_align); } /* Add SPI RX frame alignment padding, if necessary. */ if (cfspi->rx_cpck_len && (cfspi->xfer.rx_dma_len % spi_frm_align)) { cfspi->xfer.rx_dma_len += spi_frm_align - (cfspi->xfer.rx_dma_len % spi_frm_align); } cfspi_dbg_state(cfspi, CFSPI_STATE_INIT_XFER); /* Start transfer. 
*/ ret = cfspi->dev->init_xfer(&cfspi->xfer, cfspi->dev); WARN_ON(ret); cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_ACTIVE); /* * TODO: We might be able to make an assumption if this is the * first loop. Make sure that minimum toggle time is respected. */ udelay(MIN_TRANSITION_TIME_USEC); cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE); /* Signal that we are ready to receive data. */ cfspi->dev->sig_xfer(true, cfspi->dev); cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE); /* Wait for transfer completion. */ wait_for_completion(&cfspi->comp); cfspi_dbg_state(cfspi, CFSPI_STATE_XFER_DONE); if (cfspi->cmd == SPI_CMD_EOT) { /* * Clear the master talk bit. A xfer is always at * least two bursts. */ clear_bit(SPI_SS_ON, &cfspi->state); } cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_INACTIVE); /* Make sure that the minimum toggle time is respected. */ if (SPI_XFER_TIME_USEC(cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz) < MIN_TRANSITION_TIME_USEC) { udelay(MIN_TRANSITION_TIME_USEC - SPI_XFER_TIME_USEC (cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz)); } cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_INACTIVE); /* De-assert transfer signal. */ cfspi->dev->sig_xfer(false, cfspi->dev); /* Check whether we received a CAIF packet. */ if (cfspi->rx_cpck_len) { int len; cfspi_dbg_state(cfspi, CFSPI_STATE_DELIVER_PKT); /* Parse SPI frame. */ ptr = ((u8 *)(cfspi->xfer.va_rx + SPI_DATA_POS)); len = cfspi_rxfrm(cfspi, ptr, cfspi->rx_cpck_len); WARN_ON(len != cfspi->rx_cpck_len); } /* Check the next SPI command and length. */ ptr = (u8 *) cfspi->xfer.va_rx; ptr += forward_to_spi_cmd(cfspi); cfspi->cmd = *ptr++; cfspi->cmd |= ((*ptr++) << 8) & 0xFF00; cfspi->rx_npck_len = *ptr++; cfspi->rx_npck_len |= ((*ptr++) << 8) & 0xFF00; WARN_ON(cfspi->rx_npck_len > SPI_DMA_BUF_LEN); WARN_ON(cfspi->cmd > SPI_CMD_EOT); debugfs_store_prev(cfspi); /* Check whether the master issued an EOT command. */ if (cfspi->cmd == SPI_CMD_EOT) { /* Reset state. */ cfspi->tx_cpck_len = 0; cfspi->rx_cpck_len = 0; } else { /* Update state. */ cfspi->tx_cpck_len = cfspi->tx_npck_len; cfspi->rx_cpck_len = cfspi->rx_npck_len; } /* * Check whether we need to clear the xfer bit. * Spin lock needed for packet insertion. * Test and clear of different bits * are not supported. */ spin_lock_irqsave(&cfspi->lock, flags); if (cfspi->cmd == SPI_CMD_EOT && !cfspi_xmitlen(cfspi) && !test_bit(SPI_SS_ON, &cfspi->state)) clear_bit(SPI_XFER, &cfspi->state); spin_unlock_irqrestore(&cfspi->lock, flags); } } struct platform_driver cfspi_spi_driver = { .probe = cfspi_spi_probe, .remove = cfspi_spi_remove, .driver = { .name = "cfspi_sspi", .owner = THIS_MODULE, }, };
gpl-2.0
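cfspi_xfer() above serializes the four-byte SPI indication header explicitly low-byte-first and then rounds both DMA lengths up to spi_frm_align. A standalone sketch of those two steps; the constant below is a placeholder (the driver's real values come from caif_spi.h), and per the driver's own warning the alignment must be a nonzero power of two:

#include <stddef.h>
#include <stdint.h>

#define SPI_FRM_ALIGN 2u   /* placeholder; must be a nonzero power of two */

/* Emit a 16-bit command and 16-bit length little-endian, as in cfspi_xfer(). */
static uint8_t *pack_spi_header(uint8_t *ptr, uint16_t cmd, uint16_t len)
{
	*ptr++ = cmd & 0x00FF;          /* low byte first */
	*ptr++ = (cmd & 0xFF00) >> 8;
	*ptr++ = len & 0x00FF;
	*ptr++ = (len & 0xFF00) >> 8;
	return ptr;
}

/* Round a DMA length up to the frame alignment, as done for tx/rx_dma_len. */
static size_t pad_to_frm_align(size_t dma_len)
{
	if (dma_len % SPI_FRM_ALIGN)
		dma_len += SPI_FRM_ALIGN - (dma_len % SPI_FRM_ALIGN);
	return dma_len;
}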
ClustyROM/Galaxy_Note
drivers/staging/octeon/ethernet-rgmii.c
3594
12762
/********************************************************************* * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2007 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information **********************************************************************/ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/phy.h> #include <net/dst.h> #include <asm/octeon/octeon.h> #include "ethernet-defines.h" #include "octeon-ethernet.h" #include "ethernet-util.h" #include "cvmx-helper.h" #include <asm/octeon/cvmx-ipd-defs.h> #include <asm/octeon/cvmx-npi-defs.h> #include "cvmx-gmxx-defs.h" DEFINE_SPINLOCK(global_register_lock); static int number_rgmii_ports; static void cvm_oct_rgmii_poll(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); unsigned long flags = 0; cvmx_helper_link_info_t link_info; int use_global_register_lock = (priv->phydev == NULL); BUG_ON(in_interrupt()); if (use_global_register_lock) { /* * Take the global register lock since we are going to * touch registers that affect more than one port. */ spin_lock_irqsave(&global_register_lock, flags); } else { mutex_lock(&priv->phydev->bus->mdio_lock); } link_info = cvmx_helper_link_get(priv->port); if (link_info.u64 == priv->link_info) { /* * If the 10Mbps preamble workaround is supported and we're * at 10Mbps we may need to do some special checking. */ if (USE_10MBPS_PREAMBLE_WORKAROUND && (link_info.s.speed == 10)) { /* * Read the GMXX_RXX_INT_REG[PCTERR] bit and * see if we are getting preamble errors. */ int interface = INTERFACE(priv->port); int index = INDEX(priv->port); union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg; gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG (index, interface)); if (gmxx_rxx_int_reg.s.pcterr) { /* * We are getting preamble errors at * 10Mbps. Most likely the PHY is * giving us packets with misaligned * preambles. In order to get these * packets we need to disable preamble * checking and do it in software.
*/ union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs; /* Disable preamble checking */ gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL (index, interface)); gmxx_rxx_frm_ctl.s.pre_chk = 0; cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL (index, interface), gmxx_rxx_frm_ctl.u64); /* Disable FCS stripping */ ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS); ipd_sub_port_fcs.s.port_bit &= 0xffffffffull ^ (1ull << priv->port); cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64); /* Clear any error bits */ cvmx_write_csr(CVMX_GMXX_RXX_INT_REG (index, interface), gmxx_rxx_int_reg.u64); DEBUGPRINT("%s: Using 10Mbps with software " "preamble removal\n", dev->name); } } if (use_global_register_lock) spin_unlock_irqrestore(&global_register_lock, flags); else mutex_unlock(&priv->phydev->bus->mdio_lock); return; } /* If the 10Mbps preamble workaround is allowed we need to turn on preamble checking and FCS stripping, and clear error bits on every speed change. If errors occur during 10Mbps operation the above code will change this stuff */ if (USE_10MBPS_PREAMBLE_WORKAROUND) { union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs; union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg; int interface = INTERFACE(priv->port); int index = INDEX(priv->port); /* Enable preamble checking */ gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface)); gmxx_rxx_frm_ctl.s.pre_chk = 1; cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface), gmxx_rxx_frm_ctl.u64); /* Enable FCS stripping */ ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS); ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port; cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64); /* Clear any error bits */ gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface)); cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), gmxx_rxx_int_reg.u64); } if (priv->phydev == NULL) { link_info = cvmx_helper_link_autoconf(priv->port); priv->link_info = link_info.u64; } if (use_global_register_lock) spin_unlock_irqrestore(&global_register_lock, flags); else { mutex_unlock(&priv->phydev->bus->mdio_lock); } if (priv->phydev == NULL) { /* Tell core. */ if (link_info.s.link_up) { if (!netif_carrier_ok(dev)) netif_carrier_on(dev); if (priv->queue != -1) DEBUGPRINT("%s: %u Mbps %s duplex, " "port %2d, queue %2d\n", dev->name, link_info.s.speed, (link_info.s.full_duplex) ? "Full" : "Half", priv->port, priv->queue); else DEBUGPRINT("%s: %u Mbps %s duplex, " "port %2d, POW\n", dev->name, link_info.s.speed, (link_info.s.full_duplex) ?
"Full" : "Half", priv->port); } else { if (netif_carrier_ok(dev)) netif_carrier_off(dev); DEBUGPRINT("%s: Link down\n", dev->name); } } } static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id) { union cvmx_npi_rsl_int_blocks rsl_int_blocks; int index; irqreturn_t return_status = IRQ_NONE; rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS); /* Check and see if this interrupt was caused by the GMX0 block */ if (rsl_int_blocks.s.gmx0) { int interface = 0; /* Loop through every port of this interface */ for (index = 0; index < cvmx_helper_ports_on_interface(interface); index++) { /* Read the GMX interrupt status bits */ union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg; gmx_rx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG (index, interface)); gmx_rx_int_reg.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN (index, interface)); /* Poll the port if inband status changed */ if (gmx_rx_int_reg.s.phy_dupx || gmx_rx_int_reg.s.phy_link || gmx_rx_int_reg.s.phy_spd) { struct net_device *dev = cvm_oct_device[cvmx_helper_get_ipd_port (interface, index)]; struct octeon_ethernet *priv = netdev_priv(dev); if (dev && !atomic_read(&cvm_oct_poll_queue_stopping)) queue_work(cvm_oct_poll_queue, &priv->port_work); gmx_rx_int_reg.u64 = 0; gmx_rx_int_reg.s.phy_dupx = 1; gmx_rx_int_reg.s.phy_link = 1; gmx_rx_int_reg.s.phy_spd = 1; cvmx_write_csr(CVMX_GMXX_RXX_INT_REG (index, interface), gmx_rx_int_reg.u64); return_status = IRQ_HANDLED; } } } /* Check and see if this interrupt was caused by the GMX1 block */ if (rsl_int_blocks.s.gmx1) { int interface = 1; /* Loop through every port of this interface */ for (index = 0; index < cvmx_helper_ports_on_interface(interface); index++) { /* Read the GMX interrupt status bits */ union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg; gmx_rx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG (index, interface)); gmx_rx_int_reg.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN (index, interface)); /* Poll the port if inband status changed */ if (gmx_rx_int_reg.s.phy_dupx || gmx_rx_int_reg.s.phy_link || gmx_rx_int_reg.s.phy_spd) { struct net_device *dev = cvm_oct_device[cvmx_helper_get_ipd_port (interface, index)]; struct octeon_ethernet *priv = netdev_priv(dev); if (dev && !atomic_read(&cvm_oct_poll_queue_stopping)) queue_work(cvm_oct_poll_queue, &priv->port_work); gmx_rx_int_reg.u64 = 0; gmx_rx_int_reg.s.phy_dupx = 1; gmx_rx_int_reg.s.phy_link = 1; gmx_rx_int_reg.s.phy_spd = 1; cvmx_write_csr(CVMX_GMXX_RXX_INT_REG (index, interface), gmx_rx_int_reg.u64); return_status = IRQ_HANDLED; } } } return return_status; } int cvm_oct_rgmii_open(struct net_device *dev) { union cvmx_gmxx_prtx_cfg gmx_cfg; struct octeon_ethernet *priv = netdev_priv(dev); int interface = INTERFACE(priv->port); int index = INDEX(priv->port); cvmx_helper_link_info_t link_info; gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); gmx_cfg.s.en = 1; cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); if (!octeon_is_simulation()) { link_info = cvmx_helper_link_get(priv->port); if (!link_info.s.link_up) netif_carrier_off(dev); } return 0; } int cvm_oct_rgmii_stop(struct net_device *dev) { union cvmx_gmxx_prtx_cfg gmx_cfg; struct octeon_ethernet *priv = netdev_priv(dev); int interface = INTERFACE(priv->port); int index = INDEX(priv->port); gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); gmx_cfg.s.en = 0; cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); return 0; } static void cvm_oct_rgmii_immediate_poll(struct work_struct *work) { struct octeon_ethernet 
*priv = container_of(work, struct octeon_ethernet, port_work); cvm_oct_rgmii_poll(cvm_oct_device[priv->port]); } int cvm_oct_rgmii_init(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); int r; cvm_oct_common_init(dev); dev->netdev_ops->ndo_stop(dev); INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll); /* * Due to GMX errata in CN3XXX series chips, it is necessary * to take the link down immediately when the PHY changes * state. In order to do this we call the poll function every * time the RGMII inband status changes. This may cause * problems if the PHY doesn't implement inband status * properly. */ if (number_rgmii_ports == 0) { r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt, IRQF_SHARED, "RGMII", &number_rgmii_ports); if (r != 0) return r; } number_rgmii_ports++; /* * Only true RGMII ports need to be polled. In GMII mode, port * 0 is really a RGMII port. */ if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII) && (priv->port == 0)) || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) { if (!octeon_is_simulation()) { union cvmx_gmxx_rxx_int_en gmx_rx_int_en; int interface = INTERFACE(priv->port); int index = INDEX(priv->port); /* * Enable interrupts on inband status changes * for this port. */ gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN (index, interface)); gmx_rx_int_en.s.phy_dupx = 1; gmx_rx_int_en.s.phy_link = 1; gmx_rx_int_en.s.phy_spd = 1; cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface), gmx_rx_int_en.u64); priv->poll = cvm_oct_rgmii_poll; } } return 0; } void cvm_oct_rgmii_uninit(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); cvm_oct_common_uninit(dev); /* * Only true RGMII ports need to be polled. In GMII mode, port * 0 is really a RGMII port. */ if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII) && (priv->port == 0)) || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) { if (!octeon_is_simulation()) { union cvmx_gmxx_rxx_int_en gmx_rx_int_en; int interface = INTERFACE(priv->port); int index = INDEX(priv->port); /* * Disable interrupts on inband status changes * for this port. */ gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN (index, interface)); gmx_rx_int_en.s.phy_dupx = 0; gmx_rx_int_en.s.phy_link = 0; gmx_rx_int_en.s.phy_spd = 0; cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface), gmx_rx_int_en.u64); } } /* Remove the interrupt handler when the last port is removed. */ number_rgmii_ports--; if (number_rgmii_ports == 0) free_irq(OCTEON_IRQ_RML, &number_rgmii_ports); cancel_work_sync(&priv->port_work); }
gpl-2.0
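cvm_oct_rgmii_rml_interrupt() above reads GMXX_RXX_INT_REG, masks it against GMXX_RXX_INT_EN so only enabled sources are considered, and writes the set bits back to acknowledge them, which on this hardware clears exactly those bits (write-one-to-clear). The same pattern reduced to a self-contained sketch over memory-mapped registers; the register pointers, widths, and W1C behavior are assumptions standing in for the Octeon CSRs:

#include <stdint.h>

/*
 * Ack and return the pending, enabled interrupt sources. Writing back
 * exactly the bits we read clears only those bits, so a source that
 * fires in between is not lost.
 */
static uint64_t ack_enabled_irqs(volatile uint64_t *status,
				 const volatile uint64_t *enable)
{
	uint64_t pending = *status & *enable;

	if (pending)
		*status = pending;   /* W1C acknowledgment */
	return pending;              /* caller services these sources */
}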
GustavoRD78/78Kernel-Android-N-Developer-Preview
sound/isa/msnd/msnd_pinnacle.c
4106
31356
/********************************************************************* * * Linux multisound pinnacle/fiji driver for ALSA. * * 2002/06/30 Karsten Wiese: * for now this is only used to build a pinnacle / fiji driver. * the OSS parent of this code is designed to also support * the multisound classic via the file msnd_classic.c. * to make it easier for some brave heart to implement classic * support in alsa, I left all the MSND_CLASSIC tokens in this file. * but for now this is untested & undone. * * * ripped from linux kernel 2.4.18 by Karsten Wiese. * * the following is a copy of the 2.4.18 OSS FREE file-heading comment: * * Turtle Beach MultiSound Sound Card Driver for Linux * msnd_pinnacle.c / msnd_classic.c * * -- If MSND_CLASSIC is defined: * * -> driver for Turtle Beach Classic/Monterey/Tahiti * * -- Else * * -> driver for Turtle Beach Pinnacle/Fiji * * 12-3-2000 Modified IO port validation Steve Sycamore * * Copyright (C) 1998 Andrew Veliath * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ********************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/firmware.h> #include <linux/isa.h> #include <linux/isapnp.h> #include <linux/irq.h> #include <linux/io.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/asound.h> #include <sound/pcm.h> #include <sound/mpu401.h> #ifdef MSND_CLASSIC # ifndef __alpha__ # define SLOWIO # endif #endif #include "msnd.h" #ifdef MSND_CLASSIC # include "msnd_classic.h" # define LOGNAME "msnd_classic" #else # include "msnd_pinnacle.h" # define LOGNAME "snd_msnd_pinnacle" #endif static void __devinit set_default_audio_parameters(struct snd_msnd *chip) { chip->play_sample_size = DEFSAMPLESIZE; chip->play_sample_rate = DEFSAMPLERATE; chip->play_channels = DEFCHANNELS; chip->capture_sample_size = DEFSAMPLESIZE; chip->capture_sample_rate = DEFSAMPLERATE; chip->capture_channels = DEFCHANNELS; } static void snd_msnd_eval_dsp_msg(struct snd_msnd *chip, u16 wMessage) { switch (HIBYTE(wMessage)) { case HIMT_PLAY_DONE: { if (chip->banksPlayed < 3) snd_printdd("%08X: HIMT_PLAY_DONE: %i\n", (unsigned)jiffies, LOBYTE(wMessage)); if (chip->last_playbank == LOBYTE(wMessage)) { snd_printdd("chip.last_playbank == LOBYTE(wMessage)\n"); break; } chip->banksPlayed++; if (test_bit(F_WRITING, &chip->flags)) snd_msnd_DAPQ(chip, 0); chip->last_playbank = LOBYTE(wMessage); chip->playDMAPos += chip->play_period_bytes; if (chip->playDMAPos > chip->playLimit) chip->playDMAPos = 0; snd_pcm_period_elapsed(chip->playback_substream); break; } case HIMT_RECORD_DONE: if (chip->last_recbank == LOBYTE(wMessage)) break; chip->last_recbank = LOBYTE(wMessage); chip->captureDMAPos += chip->capturePeriodBytes; if (chip->captureDMAPos > (chip->captureLimit)) chip->captureDMAPos = 0; if
(test_bit(F_READING, &chip->flags)) snd_msnd_DARQ(chip, chip->last_recbank); snd_pcm_period_elapsed(chip->capture_substream); break; case HIMT_DSP: switch (LOBYTE(wMessage)) { #ifndef MSND_CLASSIC case HIDSP_PLAY_UNDER: #endif case HIDSP_INT_PLAY_UNDER: snd_printd(KERN_WARNING LOGNAME ": Play underflow %i\n", chip->banksPlayed); if (chip->banksPlayed > 2) clear_bit(F_WRITING, &chip->flags); break; case HIDSP_INT_RECORD_OVER: snd_printd(KERN_WARNING LOGNAME ": Record overflow\n"); clear_bit(F_READING, &chip->flags); break; default: snd_printd(KERN_WARNING LOGNAME ": DSP message %d 0x%02x\n", LOBYTE(wMessage), LOBYTE(wMessage)); break; } break; case HIMT_MIDI_IN_UCHAR: if (chip->msndmidi_mpu) snd_msndmidi_input_read(chip->msndmidi_mpu); break; default: snd_printd(KERN_WARNING LOGNAME ": HIMT message %d 0x%02x\n", HIBYTE(wMessage), HIBYTE(wMessage)); break; } } static irqreturn_t snd_msnd_interrupt(int irq, void *dev_id) { struct snd_msnd *chip = dev_id; void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF; /* Send ack to DSP */ /* inb(chip->io + HP_RXL); */ /* Evaluate queued DSP messages */ while (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) { u16 wTmp; snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead))); wTmp = readw(chip->DSPQ + JQS_wHead) + 1; if (wTmp > readw(chip->DSPQ + JQS_wSize)) writew(0, chip->DSPQ + JQS_wHead); else writew(wTmp, chip->DSPQ + JQS_wHead); } /* Send ack to DSP */ inb(chip->io + HP_RXL); return IRQ_HANDLED; } static int snd_msnd_reset_dsp(long io, unsigned char *info) { int timeout = 100; outb(HPDSPRESET_ON, io + HP_DSPR); msleep(1); #ifndef MSND_CLASSIC if (info) *info = inb(io + HP_INFO); #endif outb(HPDSPRESET_OFF, io + HP_DSPR); msleep(1); while (timeout-- > 0) { if (inb(io + HP_CVR) == HP_CVR_DEF) return 0; msleep(1); } snd_printk(KERN_ERR LOGNAME ": Cannot reset DSP\n"); return -EIO; } static int __devinit snd_msnd_probe(struct snd_card *card) { struct snd_msnd *chip = card->private_data; unsigned char info; #ifndef MSND_CLASSIC char *xv, *rev = NULL; char *pin = "TB Pinnacle", *fiji = "TB Fiji"; char *pinfiji = "TB Pinnacle/Fiji"; #endif if (!request_region(chip->io, DSP_NUMIO, "probing")) { snd_printk(KERN_ERR LOGNAME ": I/O port conflict\n"); return -ENODEV; } if (snd_msnd_reset_dsp(chip->io, &info) < 0) { release_region(chip->io, DSP_NUMIO); return -ENODEV; } #ifdef MSND_CLASSIC strcpy(card->shortname, "Classic/Tahiti/Monterey"); strcpy(card->longname, "Turtle Beach Multisound"); printk(KERN_INFO LOGNAME ": %s, " "I/O 0x%lx-0x%lx, IRQ %d, memory mapped to 0x%lX-0x%lX\n", card->shortname, chip->io, chip->io + DSP_NUMIO - 1, chip->irq, chip->base, chip->base + 0x7fff); #else switch (info >> 4) { case 0xf: xv = "<= 1.15"; break; case 0x1: xv = "1.18/1.2"; break; case 0x2: xv = "1.3"; break; case 0x3: xv = "1.4"; break; default: xv = "unknown"; break; } switch (info & 0x7) { case 0x0: rev = "I"; strcpy(card->shortname, pin); break; case 0x1: rev = "F"; strcpy(card->shortname, pin); break; case 0x2: rev = "G"; strcpy(card->shortname, pin); break; case 0x3: rev = "H"; strcpy(card->shortname, pin); break; case 0x4: rev = "E"; strcpy(card->shortname, fiji); break; case 0x5: rev = "C"; strcpy(card->shortname, fiji); break; case 0x6: rev = "D"; strcpy(card->shortname, fiji); break; case 0x7: rev = "A-B (Fiji) or A-E (Pinnacle)"; strcpy(card->shortname, pinfiji); break; } strcpy(card->longname, "Turtle Beach Multisound Pinnacle"); printk(KERN_INFO LOGNAME ": %s revision %s, Xilinx version %s, " "I/O 0x%lx-0x%lx, IRQ 
%d, memory mapped to 0x%lX-0x%lX\n", card->shortname, rev, xv, chip->io, chip->io + DSP_NUMIO - 1, chip->irq, chip->base, chip->base + 0x7fff); #endif release_region(chip->io, DSP_NUMIO); return 0; } static int snd_msnd_init_sma(struct snd_msnd *chip) { static int initted; u16 mastVolLeft, mastVolRight; unsigned long flags; #ifdef MSND_CLASSIC outb(chip->memid, chip->io + HP_MEMM); #endif outb(HPBLKSEL_0, chip->io + HP_BLKS); /* Motorola 56k shared memory base */ chip->SMA = chip->mappedbase + SMA_STRUCT_START; if (initted) { mastVolLeft = readw(chip->SMA + SMA_wCurrMastVolLeft); mastVolRight = readw(chip->SMA + SMA_wCurrMastVolRight); } else mastVolLeft = mastVolRight = 0; memset_io(chip->mappedbase, 0, 0x8000); /* Critical section: bank 1 access */ spin_lock_irqsave(&chip->lock, flags); outb(HPBLKSEL_1, chip->io + HP_BLKS); memset_io(chip->mappedbase, 0, 0x8000); outb(HPBLKSEL_0, chip->io + HP_BLKS); spin_unlock_irqrestore(&chip->lock, flags); /* Digital audio play queue */ chip->DAPQ = chip->mappedbase + DAPQ_OFFSET; snd_msnd_init_queue(chip->DAPQ, DAPQ_DATA_BUFF, DAPQ_BUFF_SIZE); /* Digital audio record queue */ chip->DARQ = chip->mappedbase + DARQ_OFFSET; snd_msnd_init_queue(chip->DARQ, DARQ_DATA_BUFF, DARQ_BUFF_SIZE); /* MIDI out queue */ chip->MODQ = chip->mappedbase + MODQ_OFFSET; snd_msnd_init_queue(chip->MODQ, MODQ_DATA_BUFF, MODQ_BUFF_SIZE); /* MIDI in queue */ chip->MIDQ = chip->mappedbase + MIDQ_OFFSET; snd_msnd_init_queue(chip->MIDQ, MIDQ_DATA_BUFF, MIDQ_BUFF_SIZE); /* DSP -> host message queue */ chip->DSPQ = chip->mappedbase + DSPQ_OFFSET; snd_msnd_init_queue(chip->DSPQ, DSPQ_DATA_BUFF, DSPQ_BUFF_SIZE); /* Setup some DSP values */ #ifndef MSND_CLASSIC writew(1, chip->SMA + SMA_wCurrPlayFormat); writew(chip->play_sample_size, chip->SMA + SMA_wCurrPlaySampleSize); writew(chip->play_channels, chip->SMA + SMA_wCurrPlayChannels); writew(chip->play_sample_rate, chip->SMA + SMA_wCurrPlaySampleRate); #endif writew(chip->play_sample_rate, chip->SMA + SMA_wCalFreqAtoD); writew(mastVolLeft, chip->SMA + SMA_wCurrMastVolLeft); writew(mastVolRight, chip->SMA + SMA_wCurrMastVolRight); #ifndef MSND_CLASSIC writel(0x00010000, chip->SMA + SMA_dwCurrPlayPitch); writel(0x00000001, chip->SMA + SMA_dwCurrPlayRate); #endif writew(0x303, chip->SMA + SMA_wCurrInputTagBits); initted = 1; return 0; } static int upload_dsp_code(struct snd_card *card) { struct snd_msnd *chip = card->private_data; const struct firmware *init_fw = NULL, *perm_fw = NULL; int err; outb(HPBLKSEL_0, chip->io + HP_BLKS); err = request_firmware(&init_fw, INITCODEFILE, card->dev); if (err < 0) { printk(KERN_ERR LOGNAME ": Error loading " INITCODEFILE); goto cleanup1; } err = request_firmware(&perm_fw, PERMCODEFILE, card->dev); if (err < 0) { printk(KERN_ERR LOGNAME ": Error loading " PERMCODEFILE); goto cleanup; } memcpy_toio(chip->mappedbase, perm_fw->data, perm_fw->size); if (snd_msnd_upload_host(chip, init_fw->data, init_fw->size) < 0) { printk(KERN_WARNING LOGNAME ": Error uploading to DSP\n"); err = -ENODEV; goto cleanup; } printk(KERN_INFO LOGNAME ": DSP firmware uploaded\n"); err = 0; cleanup: release_firmware(perm_fw); cleanup1: release_firmware(init_fw); return err; } #ifdef MSND_CLASSIC static void reset_proteus(struct snd_msnd *chip) { outb(HPPRORESET_ON, chip->io + HP_PROR); msleep(TIME_PRO_RESET); outb(HPPRORESET_OFF, chip->io + HP_PROR); msleep(TIME_PRO_RESET_DONE); } #endif static int snd_msnd_initialize(struct snd_card *card) { struct snd_msnd *chip = card->private_data; int err, timeout; #ifdef MSND_CLASSIC 
outb(HPWAITSTATE_0, chip->io + HP_WAIT); outb(HPBITMODE_16, chip->io + HP_BITM); reset_proteus(chip); #endif err = snd_msnd_init_sma(chip); if (err < 0) { printk(KERN_WARNING LOGNAME ": Cannot initialize SMA\n"); return err; } err = snd_msnd_reset_dsp(chip->io, NULL); if (err < 0) return err; err = upload_dsp_code(card); if (err < 0) { printk(KERN_WARNING LOGNAME ": Cannot upload DSP code\n"); return err; } timeout = 200; while (readw(chip->mappedbase)) { msleep(1); if (!timeout--) { snd_printd(KERN_ERR LOGNAME ": DSP reset timeout\n"); return -EIO; } } snd_msndmix_setup(chip); return 0; } static int snd_msnd_dsp_full_reset(struct snd_card *card) { struct snd_msnd *chip = card->private_data; int rv; if (test_bit(F_RESETTING, &chip->flags) || ++chip->nresets > 10) return 0; set_bit(F_RESETTING, &chip->flags); snd_msnd_dsp_halt(chip, NULL); /* Unconditionally halt */ rv = snd_msnd_initialize(card); if (rv) printk(KERN_WARNING LOGNAME ": DSP reset failed\n"); snd_msndmix_force_recsrc(chip, 0); clear_bit(F_RESETTING, &chip->flags); return rv; } static int snd_msnd_dev_free(struct snd_device *device) { snd_printdd("snd_msnd_chip_free()\n"); return 0; } static int snd_msnd_send_dsp_cmd_chk(struct snd_msnd *chip, u8 cmd) { if (snd_msnd_send_dsp_cmd(chip, cmd) == 0) return 0; snd_msnd_dsp_full_reset(chip->card); return snd_msnd_send_dsp_cmd(chip, cmd); } static int __devinit snd_msnd_calibrate_adc(struct snd_msnd *chip, u16 srate) { snd_printdd("snd_msnd_calibrate_adc(%i)\n", srate); writew(srate, chip->SMA + SMA_wCalFreqAtoD); if (chip->calibrate_signal == 0) writew(readw(chip->SMA + SMA_wCurrHostStatusFlags) | 0x0001, chip->SMA + SMA_wCurrHostStatusFlags); else writew(readw(chip->SMA + SMA_wCurrHostStatusFlags) & ~0x0001, chip->SMA + SMA_wCurrHostStatusFlags); if (snd_msnd_send_word(chip, 0, 0, HDEXAR_CAL_A_TO_D) == 0 && snd_msnd_send_dsp_cmd_chk(chip, HDEX_AUX_REQ) == 0) { schedule_timeout_interruptible(msecs_to_jiffies(333)); return 0; } printk(KERN_WARNING LOGNAME ": ADC calibration failed\n"); return -EIO; } /* * ALSA callback function, called when attempting to open the MIDI device. 
*/ static int snd_msnd_mpu401_open(struct snd_mpu401 *mpu) { snd_msnd_enable_irq(mpu->private_data); snd_msnd_send_dsp_cmd(mpu->private_data, HDEX_MIDI_IN_START); return 0; } static void snd_msnd_mpu401_close(struct snd_mpu401 *mpu) { snd_msnd_send_dsp_cmd(mpu->private_data, HDEX_MIDI_IN_STOP); snd_msnd_disable_irq(mpu->private_data); } static long mpu_io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static int __devinit snd_msnd_attach(struct snd_card *card) { struct snd_msnd *chip = card->private_data; int err; static struct snd_device_ops ops = { .dev_free = snd_msnd_dev_free, }; err = request_irq(chip->irq, snd_msnd_interrupt, 0, card->shortname, chip); if (err < 0) { printk(KERN_ERR LOGNAME ": Couldn't grab IRQ %d\n", chip->irq); return err; } if (request_region(chip->io, DSP_NUMIO, card->shortname) == NULL) { free_irq(chip->irq, chip); return -EBUSY; } if (!request_mem_region(chip->base, BUFFSIZE, card->shortname)) { printk(KERN_ERR LOGNAME ": unable to grab memory region 0x%lx-0x%lx\n", chip->base, chip->base + BUFFSIZE - 1); release_region(chip->io, DSP_NUMIO); free_irq(chip->irq, chip); return -EBUSY; } chip->mappedbase = ioremap_nocache(chip->base, 0x8000); if (!chip->mappedbase) { printk(KERN_ERR LOGNAME ": unable to map memory region 0x%lx-0x%lx\n", chip->base, chip->base + BUFFSIZE - 1); err = -EIO; goto err_release_region; } err = snd_msnd_dsp_full_reset(card); if (err < 0) goto err_release_region; /* Register device */ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) goto err_release_region; err = snd_msnd_pcm(card, 0, NULL); if (err < 0) { printk(KERN_ERR LOGNAME ": error creating new PCM device\n"); goto err_release_region; } err = snd_msndmix_new(card); if (err < 0) { printk(KERN_ERR LOGNAME ": error creating new Mixer device\n"); goto err_release_region; } if (mpu_io[0] != SNDRV_AUTO_PORT) { struct snd_mpu401 *mpu; err = snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, mpu_io[0], MPU401_MODE_INPUT | MPU401_MODE_OUTPUT, mpu_irq[0], &chip->rmidi); if (err < 0) { printk(KERN_ERR LOGNAME ": error creating new Midi device\n"); goto err_release_region; } mpu = chip->rmidi->private_data; mpu->open_input = snd_msnd_mpu401_open; mpu->close_input = snd_msnd_mpu401_close; mpu->private_data = chip; } disable_irq(chip->irq); snd_msnd_calibrate_adc(chip, chip->play_sample_rate); snd_msndmix_force_recsrc(chip, 0); err = snd_card_register(card); if (err < 0) goto err_release_region; return 0; err_release_region: if (chip->mappedbase) iounmap(chip->mappedbase); release_mem_region(chip->base, BUFFSIZE); release_region(chip->io, DSP_NUMIO); free_irq(chip->irq, chip); return err; } static void __devexit snd_msnd_unload(struct snd_card *card) { struct snd_msnd *chip = card->private_data; iounmap(chip->mappedbase); release_mem_region(chip->base, BUFFSIZE); release_region(chip->io, DSP_NUMIO); free_irq(chip->irq, chip); snd_card_free(card); } #ifndef MSND_CLASSIC /* Pinnacle/Fiji Logical Device Configuration */ static int __devinit snd_msnd_write_cfg(int cfg, int reg, int value) { outb(reg, cfg); outb(value, cfg + 1); if (value != inb(cfg + 1)) { printk(KERN_ERR LOGNAME ": snd_msnd_write_cfg: I/O error\n"); return -EIO; } return 0; } static int __devinit snd_msnd_write_cfg_io0(int cfg, int num, u16 io) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IO0_BASEHI, HIBYTE(io))) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IO0_BASELO, LOBYTE(io))) return -EIO; return 0; } static int 
__devinit snd_msnd_write_cfg_io1(int cfg, int num, u16 io) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IO1_BASEHI, HIBYTE(io))) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IO1_BASELO, LOBYTE(io))) return -EIO; return 0; } static int __devinit snd_msnd_write_cfg_irq(int cfg, int num, u16 irq) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IRQ_NUMBER, LOBYTE(irq))) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_IRQ_TYPE, IRQTYPE_EDGE)) return -EIO; return 0; } static int __devinit snd_msnd_write_cfg_mem(int cfg, int num, int mem) { u16 wmem; mem >>= 8; wmem = (u16)(mem & 0xfff); if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_MEMBASEHI, HIBYTE(wmem))) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_MEMBASELO, LOBYTE(wmem))) return -EIO; if (wmem && snd_msnd_write_cfg(cfg, IREG_MEMCONTROL, MEMTYPE_HIADDR | MEMTYPE_16BIT)) return -EIO; return 0; } static int __devinit snd_msnd_activate_logical(int cfg, int num) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg(cfg, IREG_ACTIVATE, LD_ACTIVATE)) return -EIO; return 0; } static int __devinit snd_msnd_write_cfg_logical(int cfg, int num, u16 io0, u16 io1, u16 irq, int mem) { if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num)) return -EIO; if (snd_msnd_write_cfg_io0(cfg, num, io0)) return -EIO; if (snd_msnd_write_cfg_io1(cfg, num, io1)) return -EIO; if (snd_msnd_write_cfg_irq(cfg, num, irq)) return -EIO; if (snd_msnd_write_cfg_mem(cfg, num, mem)) return -EIO; if (snd_msnd_activate_logical(cfg, num)) return -EIO; return 0; } static int __devinit snd_msnd_pinnacle_cfg_reset(int cfg) { int i; /* Reset devices if told to */ printk(KERN_INFO LOGNAME ": Resetting all devices\n"); for (i = 0; i < 4; ++i) if (snd_msnd_write_cfg_logical(cfg, i, 0, 0, 0, 0)) return -EIO; return 0; } #endif static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ module_param_array(index, int, NULL, S_IRUGO); MODULE_PARM_DESC(index, "Index value for msnd_pinnacle soundcard."); module_param_array(id, charp, NULL, S_IRUGO); MODULE_PARM_DESC(id, "ID string for msnd_pinnacle soundcard."); static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; #ifndef MSND_CLASSIC static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* Extra Peripheral Configuration (Default: Disable) */ static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int ide_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static long joystick_io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* If we have the digital daugherboard... */ static int digital[SNDRV_CARDS]; /* Extra Peripheral Configuration */ static int reset[SNDRV_CARDS]; #endif static int write_ndelay[SNDRV_CARDS] = { [0 ... 
(SNDRV_CARDS-1)] = 1 }; static int calibrate_signal; #ifdef CONFIG_PNP static bool isapnp[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(isapnp, bool, NULL, 0444); MODULE_PARM_DESC(isapnp, "ISA PnP detection for specified soundcard."); #define has_isapnp(x) isapnp[x] #else #define has_isapnp(x) 0 #endif MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>"); MODULE_DESCRIPTION("Turtle Beach " LONGNAME " Linux Driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(INITCODEFILE); MODULE_FIRMWARE(PERMCODEFILE); module_param_array(io, long, NULL, S_IRUGO); MODULE_PARM_DESC(io, "IO port #"); module_param_array(irq, int, NULL, S_IRUGO); module_param_array(mem, long, NULL, S_IRUGO); module_param_array(write_ndelay, int, NULL, S_IRUGO); module_param(calibrate_signal, int, S_IRUGO); #ifndef MSND_CLASSIC module_param_array(digital, int, NULL, S_IRUGO); module_param_array(cfg, long, NULL, S_IRUGO); module_param_array(reset, int, 0, S_IRUGO); module_param_array(mpu_io, long, NULL, S_IRUGO); module_param_array(mpu_irq, int, NULL, S_IRUGO); module_param_array(ide_io0, long, NULL, S_IRUGO); module_param_array(ide_io1, long, NULL, S_IRUGO); module_param_array(ide_irq, int, NULL, S_IRUGO); module_param_array(joystick_io, long, NULL, S_IRUGO); #endif static int __devinit snd_msnd_isa_match(struct device *pdev, unsigned int i) { if (io[i] == SNDRV_AUTO_PORT) return 0; if (irq[i] == SNDRV_AUTO_PORT || mem[i] == SNDRV_AUTO_PORT) { printk(KERN_WARNING LOGNAME ": io, irq and mem must be set\n"); return 0; } #ifdef MSND_CLASSIC if (!(io[i] == 0x290 || io[i] == 0x260 || io[i] == 0x250 || io[i] == 0x240 || io[i] == 0x230 || io[i] == 0x220 || io[i] == 0x210 || io[i] == 0x3e0)) { printk(KERN_ERR LOGNAME ": \"io\" - DSP I/O base must be set " " to 0x210, 0x220, 0x230, 0x240, 0x250, 0x260, 0x290, " "or 0x3E0\n"); return 0; } #else if (io[i] < 0x100 || io[i] > 0x3e0 || (io[i] % 0x10) != 0) { printk(KERN_ERR LOGNAME ": \"io\" - DSP I/O base must within the range 0x100 " "to 0x3E0 and must be evenly divisible by 0x10\n"); return 0; } #endif /* MSND_CLASSIC */ if (!(irq[i] == 5 || irq[i] == 7 || irq[i] == 9 || irq[i] == 10 || irq[i] == 11 || irq[i] == 12)) { printk(KERN_ERR LOGNAME ": \"irq\" - must be set to 5, 7, 9, 10, 11 or 12\n"); return 0; } if (!(mem[i] == 0xb0000 || mem[i] == 0xc8000 || mem[i] == 0xd0000 || mem[i] == 0xd8000 || mem[i] == 0xe0000 || mem[i] == 0xe8000)) { printk(KERN_ERR LOGNAME ": \"mem\" - must be set to " "0xb0000, 0xc8000, 0xd0000, 0xd8000, 0xe0000 or " "0xe8000\n"); return 0; } #ifndef MSND_CLASSIC if (cfg[i] == SNDRV_AUTO_PORT) { printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); } else if (cfg[i] != 0x250 && cfg[i] != 0x260 && cfg[i] != 0x270) { printk(KERN_INFO LOGNAME ": Config port must be 0x250, 0x260 or 0x270 " "(or unspecified for PnP mode)\n"); return 0; } #endif /* MSND_CLASSIC */ return 1; } static int __devinit snd_msnd_isa_probe(struct device *pdev, unsigned int idx) { int err; struct snd_card *card; struct snd_msnd *chip; if (has_isapnp(idx) #ifndef MSND_CLASSIC || cfg[idx] == SNDRV_AUTO_PORT #endif ) { printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); return -ENODEV; } err = snd_card_create(index[idx], id[idx], THIS_MODULE, sizeof(struct snd_msnd), &card); if (err < 0) return err; snd_card_set_dev(card, pdev); chip = card->private_data; chip->card = card; #ifdef MSND_CLASSIC switch (irq[idx]) { case 5: chip->irqid = HPIRQ_5; break; case 7: chip->irqid = HPIRQ_7; break; case 9: chip->irqid = HPIRQ_9; break; case 10: chip->irqid = HPIRQ_10; break; case 11: chip->irqid = 
HPIRQ_11; break; case 12: chip->irqid = HPIRQ_12; break; } switch (mem[idx]) { case 0xb0000: chip->memid = HPMEM_B000; break; case 0xc8000: chip->memid = HPMEM_C800; break; case 0xd0000: chip->memid = HPMEM_D000; break; case 0xd8000: chip->memid = HPMEM_D800; break; case 0xe0000: chip->memid = HPMEM_E000; break; case 0xe8000: chip->memid = HPMEM_E800; break; } #else printk(KERN_INFO LOGNAME ": Non-PnP mode: configuring at port 0x%lx\n", cfg[idx]); if (!request_region(cfg[idx], 2, "Pinnacle/Fiji Config")) { printk(KERN_ERR LOGNAME ": Config port 0x%lx conflict\n", cfg[idx]); snd_card_free(card); return -EIO; } if (reset[idx]) if (snd_msnd_pinnacle_cfg_reset(cfg[idx])) { err = -EIO; goto cfg_error; } /* DSP */ err = snd_msnd_write_cfg_logical(cfg[idx], 0, io[idx], 0, irq[idx], mem[idx]); if (err) goto cfg_error; /* The following are Pinnacle specific */ /* MPU */ if (mpu_io[idx] != SNDRV_AUTO_PORT && mpu_irq[idx] != SNDRV_AUTO_IRQ) { printk(KERN_INFO LOGNAME ": Configuring MPU to I/O 0x%lx IRQ %d\n", mpu_io[idx], mpu_irq[idx]); err = snd_msnd_write_cfg_logical(cfg[idx], 1, mpu_io[idx], 0, mpu_irq[idx], 0); if (err) goto cfg_error; } /* IDE */ if (ide_io0[idx] != SNDRV_AUTO_PORT && ide_io1[idx] != SNDRV_AUTO_PORT && ide_irq[idx] != SNDRV_AUTO_IRQ) { printk(KERN_INFO LOGNAME ": Configuring IDE to I/O 0x%lx, 0x%lx IRQ %d\n", ide_io0[idx], ide_io1[idx], ide_irq[idx]); err = snd_msnd_write_cfg_logical(cfg[idx], 2, ide_io0[idx], ide_io1[idx], ide_irq[idx], 0); if (err) goto cfg_error; } /* Joystick */ if (joystick_io[idx] != SNDRV_AUTO_PORT) { printk(KERN_INFO LOGNAME ": Configuring joystick to I/O 0x%lx\n", joystick_io[idx]); err = snd_msnd_write_cfg_logical(cfg[idx], 3, joystick_io[idx], 0, 0, 0); if (err) goto cfg_error; } release_region(cfg[idx], 2); #endif /* MSND_CLASSIC */ set_default_audio_parameters(chip); #ifdef MSND_CLASSIC chip->type = msndClassic; #else chip->type = msndPinnacle; #endif chip->io = io[idx]; chip->irq = irq[idx]; chip->base = mem[idx]; chip->calibrate_signal = calibrate_signal ? 
1 : 0; chip->recsrc = 0; chip->dspq_data_buff = DSPQ_DATA_BUFF; chip->dspq_buff_size = DSPQ_BUFF_SIZE; if (write_ndelay[idx]) clear_bit(F_DISABLE_WRITE_NDELAY, &chip->flags); else set_bit(F_DISABLE_WRITE_NDELAY, &chip->flags); #ifndef MSND_CLASSIC if (digital[idx]) set_bit(F_HAVEDIGITAL, &chip->flags); #endif spin_lock_init(&chip->lock); err = snd_msnd_probe(card); if (err < 0) { printk(KERN_ERR LOGNAME ": Probe failed\n"); snd_card_free(card); return err; } err = snd_msnd_attach(card); if (err < 0) { printk(KERN_ERR LOGNAME ": Attach failed\n"); snd_card_free(card); return err; } dev_set_drvdata(pdev, card); return 0; #ifndef MSND_CLASSIC cfg_error: release_region(cfg[idx], 2); snd_card_free(card); return err; #endif } static int __devexit snd_msnd_isa_remove(struct device *pdev, unsigned int dev) { snd_msnd_unload(dev_get_drvdata(pdev)); dev_set_drvdata(pdev, NULL); return 0; } #define DEV_NAME "msnd-pinnacle" static struct isa_driver snd_msnd_driver = { .match = snd_msnd_isa_match, .probe = snd_msnd_isa_probe, .remove = __devexit_p(snd_msnd_isa_remove), /* FIXME: suspend, resume */ .driver = { .name = DEV_NAME }, }; #ifdef CONFIG_PNP static int __devinit snd_msnd_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { static int idx; struct pnp_dev *pnp_dev; struct pnp_dev *mpu_dev; struct snd_card *card; struct snd_msnd *chip; int ret; for ( ; idx < SNDRV_CARDS; idx++) { if (has_isapnp(idx)) break; } if (idx >= SNDRV_CARDS) return -ENODEV; /* * Check that we still have room for another sound card ... */ pnp_dev = pnp_request_card_device(pcard, pid->devs[0].id, NULL); if (!pnp_dev) return -ENODEV; mpu_dev = pnp_request_card_device(pcard, pid->devs[1].id, NULL); if (!mpu_dev) return -ENODEV; if (!pnp_is_active(pnp_dev) && pnp_activate_dev(pnp_dev) < 0) { printk(KERN_INFO "msnd_pinnacle: device is inactive\n"); return -EBUSY; } if (!pnp_is_active(mpu_dev) && pnp_activate_dev(mpu_dev) < 0) { printk(KERN_INFO "msnd_pinnacle: MPU device is inactive\n"); return -EBUSY; } /* * Create a new ALSA sound card entry, in anticipation * of detecting our hardware ... */ ret = snd_card_create(index[idx], id[idx], THIS_MODULE, sizeof(struct snd_msnd), &card); if (ret < 0) return ret; chip = card->private_data; chip->card = card; snd_card_set_dev(card, &pcard->card->dev); /* * Read the correct parameters off the ISA PnP bus ... */ io[idx] = pnp_port_start(pnp_dev, 0); irq[idx] = pnp_irq(pnp_dev, 0); mem[idx] = pnp_mem_start(pnp_dev, 0); mpu_io[idx] = pnp_port_start(mpu_dev, 0); mpu_irq[idx] = pnp_irq(mpu_dev, 0); set_default_audio_parameters(chip); #ifdef MSND_CLASSIC chip->type = msndClassic; #else chip->type = msndPinnacle; #endif chip->io = io[idx]; chip->irq = irq[idx]; chip->base = mem[idx]; chip->calibrate_signal = calibrate_signal ? 
1 : 0; chip->recsrc = 0; chip->dspq_data_buff = DSPQ_DATA_BUFF; chip->dspq_buff_size = DSPQ_BUFF_SIZE; if (write_ndelay[idx]) clear_bit(F_DISABLE_WRITE_NDELAY, &chip->flags); else set_bit(F_DISABLE_WRITE_NDELAY, &chip->flags); #ifndef MSND_CLASSIC if (digital[idx]) set_bit(F_HAVEDIGITAL, &chip->flags); #endif spin_lock_init(&chip->lock); ret = snd_msnd_probe(card); if (ret < 0) { printk(KERN_ERR LOGNAME ": Probe failed\n"); goto _release_card; } ret = snd_msnd_attach(card); if (ret < 0) { printk(KERN_ERR LOGNAME ": Attach failed\n"); goto _release_card; } pnp_set_card_drvdata(pcard, card); ++idx; return 0; _release_card: snd_card_free(card); return ret; } static void __devexit snd_msnd_pnp_remove(struct pnp_card_link *pcard) { snd_msnd_unload(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } static int isa_registered; static int pnp_registered; static struct pnp_card_device_id msnd_pnpids[] = { /* Pinnacle PnP */ { .id = "BVJ0440", .devs = { { "TBS0000" }, { "TBS0001" } } }, { .id = "" } /* end */ }; MODULE_DEVICE_TABLE(pnp_card, msnd_pnpids); static struct pnp_card_driver msnd_pnpc_driver = { .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, .name = "msnd_pinnacle", .id_table = msnd_pnpids, .probe = snd_msnd_pnp_detect, .remove = __devexit_p(snd_msnd_pnp_remove), }; #endif /* CONFIG_PNP */ static int __init snd_msnd_init(void) { int err; err = isa_register_driver(&snd_msnd_driver, SNDRV_CARDS); #ifdef CONFIG_PNP if (!err) isa_registered = 1; err = pnp_register_card_driver(&msnd_pnpc_driver); if (!err) pnp_registered = 1; if (isa_registered) err = 0; #endif return err; } static void __exit snd_msnd_exit(void) { #ifdef CONFIG_PNP if (pnp_registered) pnp_unregister_card_driver(&msnd_pnpc_driver); if (isa_registered) #endif isa_unregister_driver(&snd_msnd_driver); } module_init(snd_msnd_init); module_exit(snd_msnd_exit);
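The snd_msnd_init()/snd_msnd_exit() pair above uses a common pattern for hardware that may appear either as plain ISA or as ISA PnP: register on both buses, remember which registrations succeeded, and treat the module load as successful if at least one bus came up. A minimal sketch of that pattern, assuming placeholder driver structures my_isa_driver and my_pnpc_driver:

static int isa_ok, pnp_ok;

static int __init my_init(void)
{
	int err;

	/* try plain ISA first; remember whether it worked */
	err = isa_register_driver(&my_isa_driver, SNDRV_CARDS);
	if (!err)
		isa_ok = 1;
#ifdef CONFIG_PNP
	/* then try ISA PnP; either path is enough to keep the module loaded */
	err = pnp_register_card_driver(&my_pnpc_driver);
	if (!err)
		pnp_ok = 1;
#endif
	return (isa_ok || pnp_ok) ? 0 : err;
}

static void __exit my_exit(void)
{
#ifdef CONFIG_PNP
	if (pnp_ok)
		pnp_unregister_card_driver(&my_pnpc_driver);
#endif
	if (isa_ok)
		isa_unregister_driver(&my_isa_driver);
}

Each teardown call is guarded by its flag, so a partially failed init is still unwound safely.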
gpl-2.0
gpkulkarni/linux-arm64
arch/arm/mach-imx/mx31lite-db.c
4362
5126
/* * LogicPD i.MX31 SOM-LV development board support * * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> * * based on code for other MX31 boards, * * Copyright 2005-2007 Freescale Semiconductor * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com> * Copyright (C) 2009 Valentin Longchamp, EPFL Mobots group * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "board-mx31lite.h" #include "common.h" #include "devices-imx31.h" #include "hardware.h" #include "iomux-mx3.h" /* * This file contains board-specific initialization routines for the * LogicPD i.MX31 SOM-LV development board, aka 'LiteKit'. * If you design your own baseboard for the module, use this file as a base * for support code. */ static unsigned int litekit_db_board_pins[] __initdata = { /* UART1 */ MX31_PIN_CTS1__CTS1, MX31_PIN_RTS1__RTS1, MX31_PIN_TXD1__TXD1, MX31_PIN_RXD1__RXD1, /* SPI 0 */ MX31_PIN_CSPI1_SCLK__SCLK, MX31_PIN_CSPI1_MOSI__MOSI, MX31_PIN_CSPI1_MISO__MISO, MX31_PIN_CSPI1_SPI_RDY__SPI_RDY, MX31_PIN_CSPI1_SS0__SS0, MX31_PIN_CSPI1_SS1__SS1, MX31_PIN_CSPI1_SS2__SS2, /* SDHC1 */ MX31_PIN_SD1_DATA0__SD1_DATA0, MX31_PIN_SD1_DATA1__SD1_DATA1, MX31_PIN_SD1_DATA2__SD1_DATA2, MX31_PIN_SD1_DATA3__SD1_DATA3, MX31_PIN_SD1_CLK__SD1_CLK, MX31_PIN_SD1_CMD__SD1_CMD, }; /* UART */ static const struct imxuart_platform_data uart_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; /* MMC */ static int gpio_det, gpio_wp; #define MMC_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \ PAD_CTL_ODE_CMOS) static int mxc_mmc1_get_ro(struct device *dev) { return gpio_get_value(IOMUX_TO_GPIO(MX31_PIN_GPIO1_6)); } static int mxc_mmc1_init(struct device *dev, irq_handler_t detect_irq, void *data) { int ret; gpio_det = IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1); gpio_wp = IOMUX_TO_GPIO(MX31_PIN_GPIO1_6); mxc_iomux_set_pad(MX31_PIN_SD1_DATA0, MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_SD1_DATA1, MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_SD1_DATA2, MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_SD1_DATA3, MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_SD1_CMD, MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_SD1_CLK, MMC_PAD_CFG); ret = gpio_request(gpio_det, "MMC detect"); if (ret) return ret; ret = gpio_request(gpio_wp, "MMC w/p"); if (ret) goto exit_free_det; gpio_direction_input(gpio_det); gpio_direction_input(gpio_wp); ret = request_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1)), detect_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "MMC detect", data); if (ret) goto exit_free_wp; return 0; exit_free_wp: gpio_free(gpio_wp); exit_free_det: gpio_free(gpio_det); return ret; } static void mxc_mmc1_exit(struct device *dev, void *data) { gpio_free(gpio_det); gpio_free(gpio_wp); 
free_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1)), data); } static const struct imxmmc_platform_data mmc_pdata __initconst = { .get_ro = mxc_mmc1_get_ro, .init = mxc_mmc1_init, .exit = mxc_mmc1_exit, }; /* SPI */ static int spi_internal_chipselect[] = { MXC_SPI_CS(0), MXC_SPI_CS(1), MXC_SPI_CS(2), }; static const struct spi_imx_master spi0_pdata __initconst = { .chipselect = spi_internal_chipselect, .num_chipselect = ARRAY_SIZE(spi_internal_chipselect), }; /* GPIO LEDs */ static const struct gpio_led litekit_leds[] __initconst = { { .name = "GPIO0", .gpio = IOMUX_TO_GPIO(MX31_PIN_COMPARE), .active_low = 1, .default_state = LEDS_GPIO_DEFSTATE_OFF, }, { .name = "GPIO1", .gpio = IOMUX_TO_GPIO(MX31_PIN_CAPTURE), .active_low = 1, .default_state = LEDS_GPIO_DEFSTATE_OFF, } }; static const struct gpio_led_platform_data litekit_led_platform_data __initconst = { .leds = litekit_leds, .num_leds = ARRAY_SIZE(litekit_leds), }; void __init mx31lite_db_init(void) { mxc_iomux_setup_multiple_pins(litekit_db_board_pins, ARRAY_SIZE(litekit_db_board_pins), "development board pins"); imx31_add_imx_uart0(&uart_pdata); imx31_add_mxc_mmc(0, &mmc_pdata); imx31_add_spi_imx0(&spi0_pdata); gpio_led_register_device(-1, &litekit_led_platform_data); imx31_add_imx2_wdt(); imx31_add_mxc_rtc(); }
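mxc_mmc1_init() above is a textbook example of the kernel's acquire-in-order, release-in-reverse error unwinding: each acquired resource gets an error label, and a failure jumps to the label that releases everything acquired so far. A condensed sketch of the shape (GPIO_A, GPIO_B, IRQ_N and my_handler are illustrative placeholders, not names from this board file):

static int example_init(void)
{
	int ret;

	ret = gpio_request(GPIO_A, "first");
	if (ret)
		return ret;		/* nothing acquired yet, plain return */

	ret = gpio_request(GPIO_B, "second");
	if (ret)
		goto err_free_a;	/* undo step 1 only */

	ret = request_irq(IRQ_N, my_handler, 0, "example", NULL);
	if (ret)
		goto err_free_b;	/* undo steps 2 and 1 */

	return 0;

err_free_b:
	gpio_free(GPIO_B);
err_free_a:
	gpio_free(GPIO_A);
	return ret;
}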
gpl-2.0
cile381/android_kernel_m7
arch/powerpc/oprofile/op_model_rs64.c
6922
4504
/* * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/oprofile.h> #include <linux/init.h> #include <linux/smp.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/oprofile_impl.h> #define dbg(args...) static void ctrl_write(unsigned int i, unsigned int val) { unsigned int tmp = 0; unsigned long shift = 0, mask = 0; dbg("ctrl_write %d %x\n", i, val); switch(i) { case 0: tmp = mfspr(SPRN_MMCR0); shift = 6; mask = 0x7F; break; case 1: tmp = mfspr(SPRN_MMCR0); shift = 0; mask = 0x3F; break; case 2: tmp = mfspr(SPRN_MMCR1); shift = 31 - 4; mask = 0x1F; break; case 3: tmp = mfspr(SPRN_MMCR1); shift = 31 - 9; mask = 0x1F; break; case 4: tmp = mfspr(SPRN_MMCR1); shift = 31 - 14; mask = 0x1F; break; case 5: tmp = mfspr(SPRN_MMCR1); shift = 31 - 19; mask = 0x1F; break; case 6: tmp = mfspr(SPRN_MMCR1); shift = 31 - 24; mask = 0x1F; break; case 7: tmp = mfspr(SPRN_MMCR1); shift = 31 - 28; mask = 0xF; break; } tmp = tmp & ~(mask << shift); tmp |= val << shift; switch(i) { case 0: case 1: mtspr(SPRN_MMCR0, tmp); break; default: mtspr(SPRN_MMCR1, tmp); } dbg("ctrl_write mmcr0 %lx mmcr1 %lx\n", mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1)); } static unsigned long reset_value[OP_MAX_COUNTER]; static int num_counters; static int rs64_reg_setup(struct op_counter_config *ctr, struct op_system_config *sys, int num_ctrs) { int i; num_counters = num_ctrs; for (i = 0; i < num_counters; ++i) reset_value[i] = 0x80000000UL - ctr[i].count; /* XXX setup user and kernel profiling */ return 0; } static int rs64_cpu_setup(struct op_counter_config *ctr) { unsigned int mmcr0; /* reset MMCR0 and set the freeze bit */ mmcr0 = MMCR0_FC; mtspr(SPRN_MMCR0, mmcr0); /* reset MMCR1, MMCRA */ mtspr(SPRN_MMCR1, 0); if (cpu_has_feature(CPU_FTR_MMCRA)) mtspr(SPRN_MMCRA, 0); mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE; /* Only applies to POWER3, but should be safe on RS64 */ mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE; mtspr(SPRN_MMCR0, mmcr0); dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(), mfspr(SPRN_MMCR0)); dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(), mfspr(SPRN_MMCR1)); return 0; } static int rs64_start(struct op_counter_config *ctr) { int i; unsigned int mmcr0; /* set the PMM bit (see comment below) */ mtmsrd(mfmsr() | MSR_PMM); for (i = 0; i < num_counters; ++i) { if (ctr[i].enabled) { classic_ctr_write(i, reset_value[i]); ctrl_write(i, ctr[i].event); } else { classic_ctr_write(i, 0); } } mmcr0 = mfspr(SPRN_MMCR0); /* * now clear the freeze bit, counting will not start until we * rfid from this exception, because only at that point will * the PMM bit be cleared */ mmcr0 &= ~MMCR0_FC; mtspr(SPRN_MMCR0, mmcr0); dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0); return 0; } static void rs64_stop(void) { unsigned int mmcr0; /* freeze counters */ mmcr0 = mfspr(SPRN_MMCR0); mmcr0 |= MMCR0_FC; mtspr(SPRN_MMCR0, mmcr0); dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0); mb(); } static void rs64_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) { unsigned int mmcr0; int is_kernel; int val; int i; unsigned long pc = mfspr(SPRN_SIAR); is_kernel = is_kernel_addr(pc); /* set the PMM bit (see comment below) */ mtmsrd(mfmsr() | MSR_PMM); for (i = 0; i < num_counters; ++i) { val = 
classic_ctr_read(i); if (val < 0) { if (ctr[i].enabled) { oprofile_add_ext_sample(pc, regs, i, is_kernel); classic_ctr_write(i, reset_value[i]); } else { classic_ctr_write(i, 0); } } } mmcr0 = mfspr(SPRN_MMCR0); /* reset the perfmon trigger */ mmcr0 |= MMCR0_PMXE; /* * now clear the freeze bit, counting will not start until we * rfid from this exception, because only at that point will * the PMM bit be cleared */ mmcr0 &= ~MMCR0_FC; mtspr(SPRN_MMCR0, mmcr0); } struct op_powerpc_model op_model_rs64 = { .reg_setup = rs64_reg_setup, .cpu_setup = rs64_cpu_setup, .start = rs64_start, .stop = rs64_stop, .handle_interrupt = rs64_handle_interrupt, };
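ctrl_write() above boils down to a read-modify-write of one bit field inside MMCR0 or MMCR1: clear the field with an inverted mask, then OR the new value in at the same shift. The core update, written as a stand-alone helper (a sketch; set_spr_field is not part of the driver, and unlike ctrl_write it also masks val defensively):

static inline unsigned long set_spr_field(unsigned long reg,
					  unsigned long shift,
					  unsigned long mask,
					  unsigned long val)
{
	reg &= ~(mask << shift);	/* clear the old field contents */
	reg |= (val & mask) << shift;	/* insert the new value */
	return reg;
}

/*
 * Counter 2 in ctrl_write() uses shift = 31 - 4 and mask = 0x1F, so the
 * equivalent call would be:
 *	mtspr(SPRN_MMCR1, set_spr_field(mfspr(SPRN_MMCR1), 31 - 4, 0x1F, val));
 */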
gpl-2.0
mitwo-dev/android_kernel_xiaomi_msm8960
arch/mips/pmc-sierra/yosemite/ht.c
8458
12179
/* * Copyright 2003 PMC-Sierra * Author: Manish Lachwani (lachwani@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <asm/pci.h> #include <asm/io.h> #include <linux/init.h> #include <asm/titan_dep.h> #ifdef CONFIG_HYPERTRANSPORT /* * This function checks if the Hypertransport Link Initialization completed. If * it did, then proceed further with scanning bus #2 */ static __inline__ int check_titan_htlink(void) { u32 val; val = *(volatile uint32_t *)(RM9000x2_HTLINK_REG); if (val & 0x00000020) /* HT Link Initialization completed */ return 1; else return 0; } static int titan_ht_config_read_dword(struct pci_dev *device, int offset, u32* val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; RM9K_WRITE(address_reg, address); RM9K_READ(data_reg, val); return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_read_word(struct pci_dev *device, int offset, u16* val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; if ((offset & 0x3) == 0) offset = 0x2; else offset = 0x0; RM9K_WRITE(address_reg, address); RM9K_READ_16(data_reg + offset, val); return PCIBIOS_SUCCESSFUL; } u32 longswap(unsigned long l) { unsigned char b1, b2, b3, b4; b1 = l&255; b2 = (l>>8)&255; b3 = (l>>16)&255; b4 = (l>>24)&255; return ((b1<<24) + (b2<<16) + (b3<<8) + b4); } static int titan_ht_config_read_byte(struct pci_dev *device, int offset, u8* val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; int offset1; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus 
<< 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; RM9K_WRITE(address_reg, address); if ((offset & 0x3) == 0) { offset1 = 0x3; } if ((offset & 0x3) == 1) { offset1 = 0x2; } if ((offset & 0x3) == 2) { offset1 = 0x1; } if ((offset & 0x3) == 3) { offset1 = 0x0; } RM9K_READ_8(data_reg + offset1, val); return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_write_dword(struct pci_dev *device, int offset, u32 val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; RM9K_WRITE(address_reg, address); RM9K_WRITE(data_reg, val); return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_write_word(struct pci_dev *device, int offset, u16 val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; if ((offset & 0x3) == 0) offset = 0x2; else offset = 0x0; RM9K_WRITE(address_reg, address); RM9K_WRITE_16(data_reg + offset, val); return PCIBIOS_SUCCESSFUL; } static int titan_ht_config_write_byte(struct pci_dev *device, int offset, u8 val) { int dev, bus, func; uint32_t address_reg, data_reg; uint32_t address; int offset1; bus = device->bus->number; dev = PCI_SLOT(device->devfn); func = PCI_FUNC(device->devfn); /* XXX Need to change the Bus # */ if (bus > 2) address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000 | 0x1; else address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000; address_reg = RM9000x2_OCD_HTCFGA; data_reg = RM9000x2_OCD_HTCFGD; RM9K_WRITE(address_reg, address); if ((offset & 0x3) == 0) { offset1 = 0x3; } if ((offset & 0x3) == 1) { offset1 = 0x2; } if ((offset & 0x3) == 2) { offset1 = 0x1; } if ((offset & 0x3) == 3) { offset1 = 0x0; } RM9K_WRITE_8(data_reg + offset1, val); return PCIBIOS_SUCCESSFUL; } static void titan_pcibios_set_master(struct pci_dev *dev) { u16 cmd = 0; int bus = dev->bus->number; if (check_titan_htlink()) titan_ht_config_read_word(dev, PCI_COMMAND, &cmd); cmd |= PCI_COMMAND_MASTER; if (check_titan_htlink()) titan_ht_config_write_word(dev, PCI_COMMAND, cmd); } int pcibios_enable_resources(struct pci_dev *dev) { u16 cmd = 0, old_cmd; u8 tmp1; int idx; struct resource *r; int bus = dev->bus->number; if (check_titan_htlink()) titan_ht_config_read_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; for (idx = 0; idx < 6; idx++) { r = &dev->resource[idx]; if (!r->start && r->end) { printk(KERN_ERR "PCI: Device %s not available because of " "resource collisions\n", pci_name(dev)); return -EINVAL; } if (r->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (r->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } if (cmd != old_cmd) { if (check_titan_htlink()) titan_ht_config_write_word(dev, PCI_COMMAND, cmd); } if (check_titan_htlink()) 
titan_ht_config_read_byte(dev, PCI_CACHE_LINE_SIZE, &tmp1); if (tmp1 != 8) { printk(KERN_WARNING "PCI setting cache line size to 8 from " "%d\n", tmp1); } if (check_titan_htlink()) titan_ht_config_write_byte(dev, PCI_CACHE_LINE_SIZE, 8); if (check_titan_htlink()) titan_ht_config_read_byte(dev, PCI_LATENCY_TIMER, &tmp1); if (tmp1 < 32 || tmp1 == 0xff) { printk(KERN_WARNING "PCI setting latency timer to 32 from %d\n", tmp1); } if (check_titan_htlink()) titan_ht_config_write_byte(dev, PCI_LATENCY_TIMER, 32); return 0; } int pcibios_enable_device(struct pci_dev *dev, int mask) { return pcibios_enable_resources(dev); } resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pci_dev *dev = data; resource_size_t start = res->start; if (res->flags & IORESOURCE_IO) { /* We need to avoid collisions with `mirrored' VGA ports and other strange ISA hardware, so we always want the addresses kilobyte aligned. */ if (size > 0x100) { printk(KERN_ERR "PCI: I/O Region %s/%d too large" " (%ld bytes)\n", pci_name(dev), res - dev->resource, size); } start = (start + 1024 - 1) & ~(1024 - 1); } return start; } struct pci_ops titan_pci_ops = { titan_ht_config_read_byte, titan_ht_config_read_word, titan_ht_config_read_dword, titan_ht_config_write_byte, titan_ht_config_write_word, titan_ht_config_write_dword }; void __init pcibios_fixup_bus(struct pci_bus *c) { titan_ht_pcibios_fixup_bus(c); } void __init pcibios_init(void) { /* Reset PCI I/O and PCI MEM values */ /* XXX Need to add the proper values here */ ioport_resource.start = 0xe0000000; ioport_resource.end = 0xe0000000 + 0x20000000 - 1; iomem_resource.start = 0xc0000000; iomem_resource.end = 0xc0000000 + 0x20000000 - 1; /* XXX Need to add bus values */ pci_scan_bus(2, &titan_pci_ops, NULL); pci_scan_bus(3, &titan_pci_ops, NULL); } /* * for parsing "pci=" kernel boot arguments. */ char *pcibios_setup(char *str) { printk(KERN_INFO "rr: pcibios_setup\n"); /* Nothing to do for now. */ return str; } unsigned int __init pcibios_assign_all_busses(void) { /* We want to use the PCI bus detection done by PMON */ return 0; } #endif /* CONFIG_HYPERTRANSPORT */
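Every titan_ht_config_* accessor above rebuilds the same configuration-cycle address by hand: the slot number lands at bits 15:11, the function at bits 10:8, the dword-aligned register offset at bits 7:2, the bus number (only for busses behind the bridge) at bits 23:16; bit 31 enables the cycle and bit 0 selects a type 1 cycle. Factored into one helper for readability (a sketch; titan_cfg_address does not exist in the original file):

static uint32_t titan_cfg_address(struct pci_dev *device, int offset)
{
	int bus = device->bus->number;
	int dev = PCI_SLOT(device->devfn);
	int func = PCI_FUNC(device->devfn);

	if (bus > 2)	/* behind the bridge: encode bus #, type 1 cycle */
		return (bus << 16) | (dev << 11) | (func << 8) |
		       (offset & 0xfc) | 0x80000000 | 0x1;
	/* local bus: type 0 cycle, no bus number */
	return (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000;
}

The sub-dword accessors then pick a lane within the 32-bit data register (offset1 = 3 - (offset & 3) for bytes), which accounts for the big-endian lane swizzling done after the address is programmed.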
gpl-2.0
muftiarfan/Sony_xperia_m_ktt
sound/soc/msm/qdsp6/q6asm.c
11
107359
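The q6asm.c content that follows drives the ADSP stream manager over APR, and nearly every command in it uses the same synchronous handshake: mark the command pending in an atomic, send the packet, then sleep on a wait queue until the APR callback clears the atomic, treating a zero return from wait_event_timeout() as a timeout. A generic sketch of that handshake (send_cmd_sync is an invented name; apr_send_pkt, the cmd_state/cmd_wait pair and the 5*HZ timeout are taken from the file itself):

static int send_cmd_sync(struct audio_client *ac, void *cmd)
{
	int rc;

	atomic_set(&ac->cmd_state, 1);		/* command now pending */
	rc = apr_send_pkt(ac->apr, (uint32_t *)cmd);
	if (rc < 0)
		return -EINVAL;
	/* the APR callback sets cmd_state back to 0 and wakes cmd_wait */
	rc = wait_event_timeout(ac->cmd_wait,
				atomic_read(&ac->cmd_state) == 0, 5*HZ);
	if (!rc)				/* 0 means the wait timed out */
		return -ETIMEDOUT;
	return 0;
}

In the real file the atomic is set inside q6asm_add_hdr() when cmd_flg is TRUE; the sketch inlines it to keep the control flow visible.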
/* * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * Copyright (c) 2013 Foxconn International Holdings, Ltd. All rights reserved. * * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/fs.h> #include <linux/mutex.h> #include <linux/wait.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <linux/dma-mapping.h> #include <linux/miscdevice.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/msm_audio.h> #include <linux/android_pmem.h> #include <linux/memory_alloc.h> #include <linux/debugfs.h> #include <linux/time.h> #include <linux/atomic.h> #include <asm/ioctls.h> #include <mach/memory.h> #include <mach/debug_mm.h> #include <mach/peripheral-loader.h> #include <mach/qdsp6v2/audio_acdb.h> #include <mach/qdsp6v2/rtac.h> #include <sound/apr_audio.h> #include <sound/q6asm.h> #define TRUE 0x01 #define FALSE 0x00 #define READDONE_IDX_STATUS 0 #define READDONE_IDX_BUFFER 1 #define READDONE_IDX_SIZE 2 #define READDONE_IDX_OFFSET 3 #define READDONE_IDX_MSW_TS 4 #define READDONE_IDX_LSW_TS 5 #define READDONE_IDX_FLAGS 6 #define READDONE_IDX_NUMFRAMES 7 #define READDONE_IDX_ID 8 #ifdef CONFIG_DEBUG_FS #define OUT_BUFFER_SIZE 56 #define IN_BUFFER_SIZE 24 #endif static DEFINE_MUTEX(session_lock); /* session id: 0 reserved */ static struct audio_client *session[SESSION_MAX+1]; static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv); static int32_t q6asm_callback(struct apr_client_data *data, void *priv); static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr, uint32_t pkt_size, uint32_t cmd_flg); static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr, uint32_t pkt_size, uint32_t cmd_flg); static int q6asm_memory_map_regions(struct audio_client *ac, int dir, uint32_t bufsz, uint32_t bufcnt); static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir, uint32_t bufsz, uint32_t bufcnt); static void q6asm_reset_buf_state(struct audio_client *ac); #ifdef CONFIG_DEBUG_FS static struct timeval out_cold_tv; static struct timeval out_warm_tv; static struct timeval out_cont_tv; static struct timeval in_cont_tv; static long out_enable_flag; static long in_enable_flag; static struct dentry *out_dentry; static struct dentry *in_dentry; static int in_cont_index; /*This var is used to keep track of first write done for cold output latency */ static int out_cold_index; static char *out_buffer; static char *in_buffer; static int audio_output_latency_dbgfs_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t audio_output_latency_dbgfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { snprintf(out_buffer, OUT_BUFFER_SIZE, "%ld,%ld,%ld,%ld,%ld,%ld,",\ out_cold_tv.tv_sec, out_cold_tv.tv_usec, out_warm_tv.tv_sec,\ out_warm_tv.tv_usec, out_cont_tv.tv_sec, out_cont_tv.tv_usec); return simple_read_from_buffer(buf, OUT_BUFFER_SIZE, ppos, out_buffer, OUT_BUFFER_SIZE); } static ssize_t audio_output_latency_dbgfs_write(struct 
file *file, const char __user *buf, size_t count, loff_t *ppos) { char *temp; if (count > 2*sizeof(char)) return -EINVAL; else temp = kmalloc(2*sizeof(char), GFP_KERNEL); out_cold_index = 0; if (temp) { if (copy_from_user(temp, buf, 2*sizeof(char))) { kfree(temp); return -EFAULT; } if (!strict_strtol(temp, 10, &out_enable_flag)) { kfree(temp); return count; } kfree(temp); } return -EINVAL; } static const struct file_operations audio_output_latency_debug_fops = { .open = audio_output_latency_dbgfs_open, .read = audio_output_latency_dbgfs_read, .write = audio_output_latency_dbgfs_write }; static int audio_input_latency_dbgfs_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t audio_input_latency_dbgfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { snprintf(in_buffer, IN_BUFFER_SIZE, "%ld,%ld,",\ in_cont_tv.tv_sec, in_cont_tv.tv_usec); return simple_read_from_buffer(buf, IN_BUFFER_SIZE, ppos, in_buffer, IN_BUFFER_SIZE); } static ssize_t audio_input_latency_dbgfs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char *temp; if (count > 2*sizeof(char)) return -EINVAL; else temp = kmalloc(2*sizeof(char), GFP_KERNEL); if (temp) { if (copy_from_user(temp, buf, 2*sizeof(char))) { kfree(temp); return -EFAULT; } if (!strict_strtol(temp, 10, &in_enable_flag)) { kfree(temp); return count; } kfree(temp); } return -EINVAL; } static const struct file_operations audio_input_latency_debug_fops = { .open = audio_input_latency_dbgfs_open, .read = audio_input_latency_dbgfs_read, .write = audio_input_latency_dbgfs_write }; #endif struct asm_mmap { atomic_t ref_cnt; atomic_t cmd_state; wait_queue_head_t cmd_wait; void *apr; }; static struct asm_mmap this_mmap; static int q6asm_session_alloc(struct audio_client *ac) { int n; mutex_lock(&session_lock); for (n = 1; n <= SESSION_MAX; n++) { if (!session[n]) { session[n] = ac; mutex_unlock(&session_lock); return n; } } mutex_unlock(&session_lock); return -ENOMEM; } static void q6asm_session_free(struct audio_client *ac) { pr_debug("%s: sessionid[%d]\n", __func__, ac->session); rtac_remove_popp_from_adm_devices(ac->session); mutex_lock(&session_lock); session[ac->session] = 0; mutex_unlock(&session_lock); ac->session = 0; ac->perf_mode = false; return; } int q6asm_audio_client_buf_free(unsigned int dir, struct audio_client *ac) { struct audio_port_data *port; int cnt = 0; int rc = 0; pr_debug("%s: Session id %d\n", __func__, ac->session); mutex_lock(&ac->cmd_lock); if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[dir]; if (!port->buf) { mutex_unlock(&ac->cmd_lock); return 0; } cnt = port->max_buf_cnt - 1; if (cnt >= 0) { rc = q6asm_memory_unmap_regions(ac, dir, port->buf[0].size, port->max_buf_cnt); if (rc < 0) pr_err("%s CMD Memory_unmap_regions failed\n", __func__); } while (cnt >= 0) { if (port->buf[cnt].data) { #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION ion_unmap_kernel(port->buf[cnt].client, port->buf[cnt].handle); ion_free(port->buf[cnt].client, port->buf[cnt].handle); ion_client_destroy(port->buf[cnt].client); #else pr_debug("%s:data[%p]phys[%p][%p] cnt[%d] mem_buffer[%p]\n", __func__, (void *)port->buf[cnt].data, (void *)port->buf[cnt].phys, (void *)&port->buf[cnt].phys, cnt, (void *)port->buf[cnt].mem_buffer); if (IS_ERR((void *)port->buf[cnt].mem_buffer)) pr_err("%s:mem buffer invalid, error = %ld\n", __func__, PTR_ERR((void *)port->buf[cnt].mem_buffer)); else { if (iounmap( port->buf[cnt].mem_buffer) < 0) pr_err("%s: unmap buffer failed\n", 
__func__); } free_contiguous_memory_by_paddr( port->buf[cnt].phys); #endif port->buf[cnt].data = NULL; port->buf[cnt].phys = 0; --(port->max_buf_cnt); } --cnt; } kfree(port->buf); port->buf = NULL; } mutex_unlock(&ac->cmd_lock); return 0; } int q6asm_audio_client_buf_free_contiguous(unsigned int dir, struct audio_client *ac) { struct audio_port_data *port; int cnt = 0; int rc = 0; pr_debug("%s: Session id %d\n", __func__, ac->session); mutex_lock(&ac->cmd_lock); port = &ac->port[dir]; if (!port->buf) { mutex_unlock(&ac->cmd_lock); return 0; } cnt = port->max_buf_cnt - 1; if (cnt >= 0) { rc = q6asm_memory_unmap(ac, port->buf[0].phys, dir); if (rc < 0) pr_err("%s CMD Memory_unmap_regions failed\n", __func__); } if (port->buf[0].data) { #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION ion_unmap_kernel(port->buf[0].client, port->buf[0].handle); ion_free(port->buf[0].client, port->buf[0].handle); ion_client_destroy(port->buf[0].client); pr_debug("%s:data[%p]phys[%p][%p], client[%p] handle[%p]\n", __func__, (void *)port->buf[0].data, (void *)port->buf[0].phys, (void *)&port->buf[0].phys, (void *)port->buf[0].client, (void *)port->buf[0].handle); #else pr_debug("%s:data[%p]phys[%p][%p] mem_buffer[%p]\n", __func__, (void *)port->buf[0].data, (void *)port->buf[0].phys, (void *)&port->buf[0].phys, (void *)port->buf[0].mem_buffer); if (IS_ERR((void *)port->buf[0].mem_buffer)) pr_err("%s:mem buffer invalid, error = %ld\n", __func__, PTR_ERR((void *)port->buf[0].mem_buffer)); else { if (iounmap( port->buf[0].mem_buffer) < 0) pr_err("%s: unmap buffer failed\n", __func__); } free_contiguous_memory_by_paddr(port->buf[0].phys); #endif } while (cnt >= 0) { port->buf[cnt].data = NULL; port->buf[cnt].phys = 0; cnt--; } port->max_buf_cnt = 0; kfree(port->buf); port->buf = NULL; mutex_unlock(&ac->cmd_lock); return 0; } void q6asm_audio_client_free(struct audio_client *ac) { int loopcnt; struct audio_port_data *port; if (!ac || !ac->session) return; pr_debug("%s: Session id %d\n", __func__, ac->session); if (ac->io_mode == SYNC_IO_MODE) { for (loopcnt = 0; loopcnt <= OUT; loopcnt++) { port = &ac->port[loopcnt]; if (!port->buf) continue; pr_debug("%s:loopcnt = %d\n", __func__, loopcnt); q6asm_audio_client_buf_free(loopcnt, ac); } } apr_deregister(ac->apr); q6asm_session_free(ac); pr_debug("%s: APR De-Register\n", __func__); if (atomic_read(&this_mmap.ref_cnt) <= 0) { pr_err("%s: APR Common Port Already Closed\n", __func__); goto done; } atomic_dec(&this_mmap.ref_cnt); if (atomic_read(&this_mmap.ref_cnt) == 0) { apr_deregister(this_mmap.apr); pr_debug("%s:APR De-Register common port\n", __func__); } done: kfree(ac); return; } int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode) { if (ac == NULL) { pr_err("%s APR handle NULL\n", __func__); return -EINVAL; } if ((mode == ASYNC_IO_MODE) || (mode == SYNC_IO_MODE)) { ac->io_mode = mode; pr_debug("%s:Set Mode to %d\n", __func__, ac->io_mode); return 0; } else { pr_err("%s:Not a valid IO Mode:%d\n", __func__, mode); return -EINVAL; } } struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv) { struct audio_client *ac; int n; int lcnt = 0; ac = kzalloc(sizeof(struct audio_client), GFP_KERNEL); if (!ac) return NULL; n = q6asm_session_alloc(ac); if (n <= 0) goto fail_session; ac->session = n; ac->cb = cb; ac->priv = priv; ac->io_mode = SYNC_IO_MODE; ac->perf_mode = false; ac->apr = apr_register("ADSP", "ASM", \ (apr_fn)q6asm_callback,\ ((ac->session) << 8 | 0x0001),\ ac); if (ac->apr == NULL) { pr_err("%s Registration with APR failed\n", __func__); goto 
fail; } rtac_set_asm_handle(n, ac->apr); pr_debug("%s Registering the common port with APR\n", __func__); if (atomic_read(&this_mmap.ref_cnt) == 0) { this_mmap.apr = apr_register("ADSP", "ASM", \ (apr_fn)q6asm_mmapcallback,\ 0x0FFFFFFFF, &this_mmap); if (this_mmap.apr == NULL) { pr_debug("%s Unable to register APR ASM common port\n", __func__); goto fail; } } atomic_inc(&this_mmap.ref_cnt); init_waitqueue_head(&ac->cmd_wait); init_waitqueue_head(&ac->time_wait); atomic_set(&ac->time_flag, 1); mutex_init(&ac->cmd_lock); for (lcnt = 0; lcnt <= OUT; lcnt++) { mutex_init(&ac->port[lcnt].lock); spin_lock_init(&ac->port[lcnt].dsp_lock); } atomic_set(&ac->cmd_state, 0); atomic_set(&ac->cmd_response, 0); pr_debug("%s: session[%d]\n", __func__, ac->session); return ac; fail: q6asm_audio_client_free(ac); return NULL; fail_session: kfree(ac); return NULL; } struct audio_client *q6asm_get_audio_client(int session_id) { if ((session_id <= 0) || (session_id > SESSION_MAX)) { pr_err("%s: invalid session: %d\n", __func__, session_id); goto err; } if (!session[session_id]) { pr_err("%s: session not active: %d\n", __func__, session_id); goto err; } return session[session_id]; err: return NULL; } int q6asm_audio_client_buf_alloc(unsigned int dir, struct audio_client *ac, unsigned int bufsz, unsigned int bufcnt) { int cnt = 0; int rc = 0; struct audio_buffer *buf; #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION int len; #endif if (!(ac) || ((dir != IN) && (dir != OUT))) return -EINVAL; pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session, bufsz, bufcnt); if (ac->session <= 0 || ac->session > 8) goto fail; if (ac->io_mode == SYNC_IO_MODE) { if (ac->port[dir].buf) { pr_debug("%s: buffer already allocated\n", __func__); return 0; } mutex_lock(&ac->cmd_lock); buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt), GFP_KERNEL); if (!buf) { mutex_unlock(&ac->cmd_lock); goto fail; } ac->port[dir].buf = buf; while (cnt < bufcnt) { if (bufsz > 0) { if (!buf[cnt].data) { #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION buf[cnt].client = msm_ion_client_create (UINT_MAX, "audio_client"); if (IS_ERR_OR_NULL((void *) buf[cnt].client)) { pr_err("%s: ION create client for AUDIO failed\n", __func__); mutex_unlock(&ac->cmd_lock); goto fail; } buf[cnt].handle = ion_alloc (buf[cnt].client, bufsz, SZ_4K, (0x1 << ION_AUDIO_HEAP_ID)); if (IS_ERR_OR_NULL((void *) buf[cnt].handle)) { pr_err("%s: ION memory allocation for AUDIO failed\n", __func__); mutex_unlock(&ac->cmd_lock); goto fail; } rc = ion_phys(buf[cnt].client, buf[cnt].handle, (ion_phys_addr_t *) &buf[cnt].phys, (size_t *)&len); if (rc) { pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n", __func__, rc); mutex_unlock(&ac->cmd_lock); goto fail; } buf[cnt].data = ion_map_kernel (buf[cnt].client, buf[cnt].handle, 0); if (IS_ERR_OR_NULL((void *) buf[cnt].data)) { pr_err("%s: ION memory mapping for AUDIO failed\n", __func__); mutex_unlock(&ac->cmd_lock); goto fail; } memset((void *)buf[cnt].data, 0, bufsz); #else unsigned int flags = 0; buf[cnt].phys = allocate_contiguous_ebi_nomap(bufsz, SZ_4K); if (!buf[cnt].phys) { pr_err("%s:Buf alloc failed size=%d\n", __func__, bufsz); mutex_unlock(&ac->cmd_lock); goto fail; } buf[cnt].mem_buffer = ioremap(buf[cnt].phys, bufsz); if (IS_ERR( (void *)buf[cnt].mem_buffer)) { pr_err("%s:map_buffer failed, error = %ld\n", __func__, PTR_ERR((void *)buf[cnt].mem_buffer)); mutex_unlock(&ac->cmd_lock); goto fail; } buf[cnt].data = buf[cnt].mem_buffer; if (!buf[cnt].data) { pr_err("%s:invalid vaddr, iomap failed\n", __func__); 
mutex_unlock(&ac->cmd_lock); goto fail; } #endif buf[cnt].used = 1; buf[cnt].size = bufsz; buf[cnt].actual_size = bufsz; pr_debug("%s data[%p]phys[%p][%p]\n", __func__, (void *)buf[cnt].data, (void *)buf[cnt].phys, (void *)&buf[cnt].phys); cnt++; } } } ac->port[dir].max_buf_cnt = cnt; mutex_unlock(&ac->cmd_lock); rc = q6asm_memory_map_regions(ac, dir, bufsz, cnt); if (rc < 0) { pr_err("%s:CMD Memory_map_regions failed\n", __func__); goto fail; } } return 0; fail: q6asm_audio_client_buf_free(dir, ac); return -EINVAL; } int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir, struct audio_client *ac, unsigned int bufsz, unsigned int bufcnt) { int cnt = 0; int rc = 0; struct audio_buffer *buf; #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION int len; #else int flags = 0; #endif if (!(ac) || ((dir != IN) && (dir != OUT))) return -EINVAL; pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session, bufsz, bufcnt); if (ac->session <= 0 || ac->session > 8) goto fail; if (ac->port[dir].buf) { pr_debug("%s: buffer already allocated\n", __func__); return 0; } mutex_lock(&ac->cmd_lock); buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt), GFP_KERNEL); if (!buf) { mutex_unlock(&ac->cmd_lock); goto fail; } ac->port[dir].buf = buf; #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION buf[0].client = msm_ion_client_create(UINT_MAX, "audio_client"); if (IS_ERR_OR_NULL((void *)buf[0].client)) { pr_err("%s: ION create client for AUDIO failed\n", __func__); mutex_unlock(&ac->cmd_lock); goto fail; } buf[0].handle = ion_alloc(buf[0].client, bufsz * bufcnt, SZ_4K, (0x1 << ION_AUDIO_HEAP_ID)); if (IS_ERR_OR_NULL((void *) buf[0].handle)) { pr_err("%s: ION memory allocation for AUDIO failed\n", __func__); mutex_unlock(&ac->cmd_lock); goto fail; } rc = ion_phys(buf[0].client, buf[0].handle, (ion_phys_addr_t *)&buf[0].phys, (size_t *)&len); if (rc) { pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n", __func__, rc); mutex_unlock(&ac->cmd_lock); goto fail; } buf[0].data = ion_map_kernel(buf[0].client, buf[0].handle, 0); if (IS_ERR_OR_NULL((void *) buf[0].data)) { pr_err("%s: ION memory mapping for AUDIO failed\n", __func__); mutex_unlock(&ac->cmd_lock); goto fail; } memset((void *)buf[0].data, 0, (bufsz * bufcnt)); #else buf[0].phys = allocate_contiguous_ebi_nomap(bufsz * bufcnt, SZ_4K); if (!buf[0].phys) { pr_err("%s:Buf alloc failed size=%d, bufcnt=%d\n", __func__, bufsz, bufcnt); mutex_unlock(&ac->cmd_lock); goto fail; } buf[0].mem_buffer = ioremap(buf[0].phys, bufsz * bufcnt); if (IS_ERR((void *)buf[cnt].mem_buffer)) { pr_err("%s:map_buffer failed, error = %ld\n", __func__, PTR_ERR((void *)buf[0].mem_buffer)); mutex_unlock(&ac->cmd_lock); goto fail; } buf[0].data = buf[0].mem_buffer; #endif if (!buf[0].data) { pr_err("%s:invalid vaddr, iomap failed\n", __func__); mutex_unlock(&ac->cmd_lock); goto fail; } buf[0].used = dir ^ 1; buf[0].size = bufsz; buf[0].actual_size = bufsz; cnt = 1; while (cnt < bufcnt) { if (bufsz > 0) { buf[cnt].data = buf[0].data + (cnt * bufsz); buf[cnt].phys = buf[0].phys + (cnt * bufsz); if (!buf[cnt].data) { pr_err("%s Buf alloc failed\n", __func__); mutex_unlock(&ac->cmd_lock); goto fail; } buf[cnt].used = dir ^ 1; buf[cnt].size = bufsz; buf[cnt].actual_size = bufsz; pr_debug("%s data[%p]phys[%p][%p]\n", __func__, (void *)buf[cnt].data, (void *)buf[cnt].phys, (void *)&buf[cnt].phys); } cnt++; } ac->port[dir].max_buf_cnt = cnt; pr_debug("%s ac->port[%d].max_buf_cnt[%d]\n", __func__, dir, ac->port[dir].max_buf_cnt); mutex_unlock(&ac->cmd_lock); rc = q6asm_memory_map(ac, buf[0].phys, dir, 
bufsz, cnt); if (rc < 0) { pr_err("%s:CMD Memory_map_regions failed\n", __func__); goto fail; } return 0; fail: q6asm_audio_client_buf_free_contiguous(dir, ac); return -EINVAL; } static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv) { uint32_t token; uint32_t *payload = data->payload; if (data->opcode == RESET_EVENTS) { pr_debug("%s: Reset event is received: %d %d apr[%p]\n", __func__, data->reset_event, data->reset_proc, this_mmap.apr); apr_reset(this_mmap.apr); this_mmap.apr = NULL; atomic_set(&this_mmap.cmd_state, 0); return 0; } pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__, payload[0], payload[1], data->opcode, data->token, data->payload_size, data->src_port, data->dest_port); if (data->opcode == APR_BASIC_RSP_RESULT) { token = data->token; switch (payload[0]) { case ASM_SESSION_CMD_MEMORY_MAP: case ASM_SESSION_CMD_MEMORY_UNMAP: case ASM_SESSION_CMD_MEMORY_MAP_REGIONS: case ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS: pr_debug("%s:command[0x%x]success [0x%x]\n", __func__, payload[0], payload[1]); if (atomic_read(&this_mmap.cmd_state)) { atomic_set(&this_mmap.cmd_state, 0); wake_up(&this_mmap.cmd_wait); } break; default: pr_debug("%s:command[0x%x] not expecting rsp\n", __func__, payload[0]); break; } } return 0; } static int32_t q6asm_callback(struct apr_client_data *data, void *priv) { int i = 0; struct audio_client *ac = (struct audio_client *)priv; uint32_t token; unsigned long dsp_flags; uint32_t *payload; uint32_t wakeup_flag = 1; if ((ac == NULL) || (data == NULL)) { pr_err("ac or priv NULL\n"); return -EINVAL; } if (ac->session <= 0 || ac->session > 8) { pr_err("%s:Session ID is invalid, session = %d\n", __func__, ac->session); return -EINVAL; } if (atomic_read(&ac->nowait_cmd_cnt) > 0) { pr_debug("%s: nowait_cmd_cnt %d\n", __func__, atomic_read(&ac->nowait_cmd_cnt)); atomic_dec(&ac->nowait_cmd_cnt); wakeup_flag = 0; } payload = data->payload; if (data->opcode == RESET_EVENTS) { pr_debug("q6asm_callback: Reset event is received: %d %d apr[%p]\n", data->reset_event, data->reset_proc, ac->apr); if (ac->cb) ac->cb(data->opcode, data->token, (uint32_t *)data->payload, ac->priv); apr_reset(ac->apr); return 0; } pr_debug("%s: session[%d]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__, ac->session, data->opcode, data->token, data->payload_size, data->src_port, data->dest_port); if (data->opcode == APR_BASIC_RSP_RESULT) { token = data->token; pr_debug("%s payload[0]:%x", __func__, payload[0]); switch (payload[0]) { case ASM_STREAM_CMD_SET_PP_PARAMS: if (rtac_make_asm_callback(ac->session, payload, data->payload_size)) break; case ASM_SESSION_CMD_PAUSE: case ASM_DATA_CMD_EOS: case ASM_STREAM_CMD_CLOSE: case ASM_STREAM_CMD_FLUSH: case ASM_SESSION_CMD_RUN: case ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS: case ASM_STREAM_CMD_FLUSH_READBUFS: pr_debug("%s:Payload = [0x%x]\n", __func__, payload[0]); if (token != ac->session) { pr_err("%s:Invalid session[%d] rxed expected[%d]", __func__, token, ac->session); return -EINVAL; } case ASM_STREAM_CMD_OPEN_READ: case ASM_STREAM_CMD_OPEN_READ_V2_1: case ASM_STREAM_CMD_OPEN_WRITE: case ASM_STREAM_CMD_OPEN_WRITE_V2_1: case ASM_STREAM_CMD_OPEN_READWRITE: case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE: case ASM_STREAM_CMD_SET_ENCDEC_PARAM: case ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED: case ASM_STREAM_CMD_OPEN_READ_COMPRESSED: if (atomic_read(&ac->cmd_state) && wakeup_flag) { atomic_set(&ac->cmd_state, 0); if (payload[1] == ADSP_EUNSUPPORTED) { pr_debug("paload[1]:%d 
unsupported", payload[1]); atomic_set(&ac->cmd_response, 1); } else atomic_set(&ac->cmd_response, 0); wake_up(&ac->cmd_wait); } if (ac->cb) ac->cb(data->opcode, data->token, (uint32_t *)data->payload, ac->priv); break; default: pr_debug("%s:command[0x%x] not expecting rsp\n", __func__, payload[0]); break; } return 0; } switch (data->opcode) { case ASM_DATA_EVENT_WRITE_DONE:{ struct audio_port_data *port = &ac->port[IN]; pr_debug("%s: Rxed opcode[0x%x] status[0x%x] token[%d]", __func__, payload[0], payload[1], data->token); if (ac->io_mode == SYNC_IO_MODE) { if (port->buf == NULL) { pr_err("%s: Unexpected Write Done\n", __func__); return -EINVAL; } spin_lock_irqsave(&port->dsp_lock, dsp_flags); if (port->buf[data->token].phys != payload[0]) { pr_err("Buf expected[%p]rxed[%p]\n",\ (void *)port->buf[data->token].phys,\ (void *)payload[0]); spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); return -EINVAL; } token = data->token; port->buf[token].used = 1; spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); #ifdef CONFIG_DEBUG_FS if (out_enable_flag) { /* For first Write done log the time and reset out_cold_index*/ if (out_cold_index != 1) { do_gettimeofday(&out_cold_tv); pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n", out_cold_tv.tv_sec, out_cold_tv.tv_usec); out_cold_index = 1; } pr_debug("out_enable_flag %ld",\ out_enable_flag); } #endif for (i = 0; i < port->max_buf_cnt; i++) pr_debug("%d ", port->buf[i].used); } break; } case ASM_STREAM_CMDRSP_GET_PP_PARAMS: rtac_make_asm_callback(ac->session, payload, data->payload_size); break; case ASM_DATA_EVENT_READ_DONE:{ struct audio_port_data *port = &ac->port[OUT]; #ifdef CONFIG_DEBUG_FS if (in_enable_flag) { /* when in_cont_index == 7, DSP would be * writing into the 8th 512 byte buffer and this * timestamp is tapped here.Once done it then writes * to 9th 512 byte buffer.These two buffers(8th, 9th) * reach the test application in 5th iteration and that * timestamp is tapped at user level. The difference * of these two timestamps gives us the time between * the time at which dsp started filling the sample * required and when it reached the test application. 
* Hence continuous input latency */ if (in_cont_index == 7) { do_gettimeofday(&in_cont_tv); pr_err("In_CONT:previous read buffer done at %ld sec %ld microsec\n", in_cont_tv.tv_sec, in_cont_tv.tv_usec); } } #endif pr_debug("%s:R-D: status=%d buff_add=%x act_size=%d offset=%d\n", __func__, payload[READDONE_IDX_STATUS], payload[READDONE_IDX_BUFFER], payload[READDONE_IDX_SIZE], payload[READDONE_IDX_OFFSET]); pr_debug("%s:R-D:msw_ts=%d lsw_ts=%d flags=%d id=%d num=%d\n", __func__, payload[READDONE_IDX_MSW_TS], payload[READDONE_IDX_LSW_TS], payload[READDONE_IDX_FLAGS], payload[READDONE_IDX_ID], payload[READDONE_IDX_NUMFRAMES]); #ifdef CONFIG_DEBUG_FS if (in_enable_flag) in_cont_index++; #endif if (ac->io_mode == SYNC_IO_MODE) { if (port->buf == NULL) { pr_err("%s: Unexpected Write Done\n", __func__); return -EINVAL; } spin_lock_irqsave(&port->dsp_lock, dsp_flags); token = data->token; port->buf[token].used = 0; if (port->buf[token].phys != payload[READDONE_IDX_BUFFER]) { pr_err("Buf expected[%p]rxed[%p]\n",\ (void *)port->buf[token].phys,\ (void *)payload[READDONE_IDX_BUFFER]); spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); break; } port->buf[token].actual_size = payload[READDONE_IDX_SIZE]; spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); } break; } case ASM_DATA_EVENT_EOS: case ASM_DATA_CMDRSP_EOS: pr_debug("%s:EOS ACK received: rxed opcode[0x%x]\n", __func__, data->opcode); break; case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM: break; case ASM_SESSION_EVENT_TX_OVERFLOW: pr_err("ASM_SESSION_EVENT_TX_OVERFLOW\n"); break; case ASM_SESSION_CMDRSP_GET_SESSION_TIME: pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSION_TIME, payload[0] = %d, payload[1] = %d, payload[2] = %d\n", __func__, payload[0], payload[1], payload[2]); ac->time_stamp = (uint64_t)(((uint64_t)payload[1] << 32) | payload[2]); if (atomic_read(&ac->time_flag)) { atomic_set(&ac->time_flag, 0); wake_up(&ac->time_wait); } break; case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY: case ASM_DATA_EVENT_ENC_SR_CM_NOTIFY: pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0] = %d, payload[1] = %d, payload[2] = %d, payload[3] = %d\n", __func__, payload[0], payload[1], payload[2], payload[3]); break; } if (ac->cb) ac->cb(data->opcode, data->token, data->payload, ac->priv); return 0; } void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, uint32_t *size, uint32_t *index) { void *data; unsigned char idx; struct audio_port_data *port; if (!ac || ((dir != IN) && (dir != OUT))) return NULL; if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[dir]; mutex_lock(&port->lock); idx = port->cpu_buf; if (port->buf == NULL) { pr_debug("%s:Buffer pointer null\n", __func__); mutex_unlock(&port->lock); return NULL; } /* dir 0: used = 0 means buf in use dir 1: used = 1 means buf in use */ if (port->buf[idx].used == dir) { /* To make it more robust, we could loop and get the next avail buf, its risky though */ pr_debug("%s:Next buf idx[0x%x] not available,dir[%d]\n", __func__, idx, dir); mutex_unlock(&port->lock); return NULL; } *size = port->buf[idx].actual_size; *index = port->cpu_buf; data = port->buf[idx].data; pr_debug("%s:session[%d]index[%d] data[%p]size[%d]\n", __func__, ac->session, port->cpu_buf, data, *size); /* By default increase the cpu_buf cnt user accesses this function,increase cpu buf(to avoid another api)*/ port->buf[idx].used = dir; port->cpu_buf = ((port->cpu_buf + 1) & (port->max_buf_cnt - 1)); mutex_unlock(&port->lock); return data; } return NULL; } void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac, uint32_t *size, 
uint32_t *index) { void *data; unsigned char idx; struct audio_port_data *port; if (!ac || ((dir != IN) && (dir != OUT))) return NULL; port = &ac->port[dir]; idx = port->cpu_buf; if (port->buf == NULL) { pr_debug("%s:Buffer pointer null\n", __func__); return NULL; } /* * dir 0: used = 0 means buf in use * dir 1: used = 1 means buf in use */ if (port->buf[idx].used == dir) { /* * To make it more robust, we could loop and get the * next avail buf, its risky though */ pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n", __func__, idx, dir); return NULL; } *size = port->buf[idx].actual_size; *index = port->cpu_buf; data = port->buf[idx].data; pr_debug("%s:session[%d]index[%d] data[%p]size[%d]\n", __func__, ac->session, port->cpu_buf, data, *size); /* * By default increase the cpu_buf cnt * user accesses this function,increase cpu * buf(to avoid another api) */ port->buf[idx].used = dir; port->cpu_buf = ((port->cpu_buf + 1) & (port->max_buf_cnt - 1)); return data; } int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac) { int ret = -1; struct audio_port_data *port; uint32_t idx; if (!ac || (dir != OUT)) return ret; if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[dir]; mutex_lock(&port->lock); idx = port->dsp_buf; if (port->buf[idx].used == (dir ^ 1)) { /* To make it more robust, we could loop and get the next avail buf, its risky though */ pr_err("Next buf idx[0x%x] not available, dir[%d]\n", idx, dir); mutex_unlock(&port->lock); return ret; } pr_debug("%s: session[%d]dsp_buf=%d cpu_buf=%d\n", __func__, ac->session, port->dsp_buf, port->cpu_buf); ret = ((port->dsp_buf != port->cpu_buf) ? 0 : -1); mutex_unlock(&port->lock); } return ret; } static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr, uint32_t pkt_size, uint32_t cmd_flg) { pr_debug("%s:session=%d pkt size=%d cmd_flg=%d\n", __func__, pkt_size, cmd_flg, ac->session); mutex_lock(&ac->cmd_lock); hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ APR_HDR_LEN(sizeof(struct apr_hdr)),\ APR_PKT_VER); hdr->src_svc = ((struct apr_svc *)ac->apr)->id; hdr->src_domain = APR_DOMAIN_APPS; hdr->dest_svc = APR_SVC_ASM; hdr->dest_domain = APR_DOMAIN_ADSP; hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01; hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01; if (cmd_flg) { hdr->token = ac->session; atomic_set(&ac->cmd_state, 1); } hdr->pkt_size = pkt_size; mutex_unlock(&ac->cmd_lock); return; } static void q6asm_add_mmaphdr(struct apr_hdr *hdr, uint32_t pkt_size, uint32_t cmd_flg) { pr_debug("%s:pkt size=%d cmd_flg=%d\n", __func__, pkt_size, cmd_flg); hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); hdr->src_port = 0; hdr->dest_port = 0; if (cmd_flg) { hdr->token = 0; atomic_set(&this_mmap.cmd_state, 1); } hdr->pkt_size = pkt_size; return; } int q6asm_open_read(struct audio_client *ac, uint32_t format) { int rc = 0x00; struct asm_stream_cmd_open_read open; #ifdef CONFIG_DEBUG_FS in_cont_index = 0; #endif if ((ac == NULL) || (ac->apr == NULL)) { pr_err("%s: APR handle NULL\n", __func__); return -EINVAL; } pr_debug("%s:session[%d]", __func__, ac->session); q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ; /* Stream prio : High, provide meta info with encoded frames */ open.src_endpoint = ASM_END_POINT_DEVICE_MATRIX; open.pre_proc_top = get_asm_topology(); if (open.pre_proc_top == 0) open.pre_proc_top = DEFAULT_POPP_TOPOLOGY; switch (format) { case FORMAT_LINEAR_PCM: open.uMode = STREAM_PRIORITY_HIGH; open.format = LINEAR_PCM; 
break; case FORMAT_MULTI_CHANNEL_LINEAR_PCM: open.uMode = STREAM_PRIORITY_HIGH; open.format = MULTI_CHANNEL_PCM; break; case FORMAT_MPEG4_AAC: open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; open.format = MPEG4_AAC; break; case FORMAT_V13K: open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; open.format = V13K_FS; break; case FORMAT_EVRC: open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; open.format = EVRC_FS; break; case FORMAT_AMRNB: open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; open.format = AMRNB_FS; break; case FORMAT_AMRWB: open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; open.format = AMRWB_FS; break; default: pr_err("Invalid format[%d]\n", format); goto fail_cmd; } rc = apr_send_pkt(ac->apr, (uint32_t *) &open); if (rc < 0) { pr_err("open failed op[0x%x]rc[%d]\n", \ open.hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout. waited for OPEN_WRITE rc[%d]\n", __func__, rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_open_read_v2_1(struct audio_client *ac, uint32_t format) { int rc = 0x00; struct asm_stream_cmd_open_read_v2_1 open; #ifdef CONFIG_DEBUG_FS in_cont_index = 0; #endif if ((ac == NULL) || (ac->apr == NULL)) { pr_err("%s: APR handle NULL\n", __func__); return -EINVAL; } pr_debug("%s:session[%d]", __func__, ac->session); q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V2_1; open.src_endpoint = ASM_END_POINT_DEVICE_MATRIX; open.pre_proc_top = get_asm_topology(); if (open.pre_proc_top == 0) open.pre_proc_top = DEFAULT_POPP_TOPOLOGY; switch (format) { case FORMAT_LINEAR_PCM: open.uMode = STREAM_PRIORITY_HIGH; open.format = LINEAR_PCM; break; case FORMAT_MULTI_CHANNEL_LINEAR_PCM: open.uMode = STREAM_PRIORITY_HIGH; open.format = MULTI_CHANNEL_PCM; break; case FORMAT_MPEG4_AAC: open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; open.format = MPEG4_AAC; break; case FORMAT_V13K: open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; open.format = V13K_FS; break; case FORMAT_EVRC: open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; open.format = EVRC_FS; break; case FORMAT_AMRNB: open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; open.format = AMRNB_FS; break; case FORMAT_AMRWB: open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; open.format = AMRWB_FS; break; default: pr_err("Invalid format[%d]\n", format); goto fail_cmd; } open.uMode = ASM_OPEN_READ_PERF_MODE_BIT; open.bits_per_sample = PCM_BITS_PER_SAMPLE; open.reserved = 0; rc = apr_send_pkt(ac->apr, (uint32_t *) &open); if (rc < 0) { pr_err("open failed op[0x%x]rc[%d]\n", \ open.hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout. 
waited for OPEN_WRITE rc[%d]\n", __func__, rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_open_read_compressed(struct audio_client *ac, uint32_t frames_per_buffer, uint32_t meta_data_mode) { int rc = 0x00; struct asm_stream_cmd_open_read_compressed open; #ifdef CONFIG_DEBUG_FS in_cont_index = 0; #endif if ((ac == NULL) || (ac->apr == NULL)) { pr_err("%s: APR handle NULL\n", __func__); return -EINVAL; } pr_debug("%s:session[%d]", __func__, ac->session); q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_COMPRESSED; /* frame count and meta-data mode are supplied by the caller */ open.frame_per_buf = frames_per_buffer; open.uMode = meta_data_mode; rc = apr_send_pkt(ac->apr, (uint32_t *) &open); if (rc < 0) { pr_err("open failed op[0x%x]rc[%d]\n", open.hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout. waited for OPEN_READ_COMPRESSED rc[%d]\n", __func__, rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format) { int rc = 0x00; struct asm_stream_cmd_open_write_compressed open; if ((ac == NULL) || (ac->apr == NULL)) { pr_err("%s: APR handle NULL\n", __func__); return -EINVAL; } pr_debug("%s: session[%d] wr_format[0x%x]", __func__, ac->session, format); q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED; switch (format) { case FORMAT_AC3: open.format = AC3_DECODER; break; case FORMAT_EAC3: open.format = EAC3_DECODER; break; case FORMAT_MP3: open.format = MP3; break; case FORMAT_DTS: open.format = DTS; break; case FORMAT_DTS_LBR: open.format = DTS_LBR; break; case FORMAT_AAC: open.format = MPEG4_AAC; break; case FORMAT_ATRAC: open.format = ATRAC; break; case FORMAT_MAT: open.format = MAT; break; default: pr_err("%s: Invalid format[%d]\n", __func__, format); goto fail_cmd; } /* Below flag indicates to the DSP that the compressed audio input stream is not IEC 61937 or IEC 60958 packetized */ open.flags = 0x00000000; rc = apr_send_pkt(ac->apr, (uint32_t *) &open); if (rc < 0) { pr_err("%s: open failed op[0x%x]rc[%d]\n", \ __func__, open.hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout. 
waited for OPEN_WRITE rc[%d]\n", __func__, rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_open_write(struct audio_client *ac, uint32_t format) { int rc = 0x00; struct asm_stream_cmd_open_write open; if ((ac == NULL) || (ac->apr == NULL)) { pr_err("%s: APR handle NULL\n", __func__); return -EINVAL; } pr_debug("%s: session[%d] wr_format[0x%x]", __func__, ac->session, format); q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); if (ac->perf_mode) { pr_debug("%s In Performance/lowlatency mode", __func__); open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_V2_1; open.uMode = ASM_OPEN_WRITE_PERF_MODE_BIT; /* source endpoint : matrix */ open.sink_endpoint = ASM_END_POINT_DEVICE_MATRIX; open.stream_handle = PCM_BITS_PER_SAMPLE; } else { open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE; open.uMode = STREAM_PRIORITY_HIGH; /* source endpoint : matrix */ open.sink_endpoint = ASM_END_POINT_DEVICE_MATRIX; open.stream_handle = 0x00; } open.post_proc_top = get_asm_topology(); if (open.post_proc_top == 0) open.post_proc_top = DEFAULT_POPP_TOPOLOGY; switch (format) { case FORMAT_LINEAR_PCM: open.format = LINEAR_PCM; break; case FORMAT_MULTI_CHANNEL_LINEAR_PCM: open.format = MULTI_CHANNEL_PCM; break; case FORMAT_MPEG4_AAC: open.format = MPEG4_AAC; break; case FORMAT_MPEG4_MULTI_AAC: open.format = MPEG4_MULTI_AAC; break; case FORMAT_WMA_V9: open.format = WMA_V9; break; case FORMAT_WMA_V10PRO: open.format = WMA_V10PRO; break; case FORMAT_MP3: open.format = MP3; break; case FORMAT_DTS: open.format = DTS; break; case FORMAT_DTS_LBR: open.format = DTS_LBR; break; case FORMAT_AMRWB: open.format = AMRWB_FS; pr_debug("q6asm_open_write FORMAT_AMRWB"); break; case FORMAT_AMR_WB_PLUS: open.format = AMR_WB_PLUS; pr_debug("q6asm_open_write FORMAT_AMR_WB_PLUS"); break; default: pr_err("%s: Invalid format[%d]\n", __func__, format); goto fail_cmd; } rc = apr_send_pkt(ac->apr, (uint32_t *) &open); if (rc < 0) { pr_err("%s: open failed op[0x%x]rc[%d]\n", \ __func__, open.hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout. 
waited for OPEN_WRITE rc[%d]\n", __func__, rc); goto fail_cmd; } if (atomic_read(&ac->cmd_response)) { pr_err("%s: format = %x not supported\n", __func__, format); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_open_read_write(struct audio_client *ac, uint32_t rd_format, uint32_t wr_format) { int rc = 0x00; struct asm_stream_cmd_open_read_write open; if ((ac == NULL) || (ac->apr == NULL)) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: session[%d]", __func__, ac->session); pr_debug("wr_format[0x%x]rd_format[0x%x]", wr_format, rd_format); q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE; open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_NORMAL; /* source endpoint : matrix */ open.post_proc_top = get_asm_topology(); if (open.post_proc_top == 0) open.post_proc_top = DEFAULT_POPP_TOPOLOGY; switch (wr_format) { case FORMAT_LINEAR_PCM: open.write_format = LINEAR_PCM; break; case FORMAT_MPEG4_AAC: open.write_format = MPEG4_AAC; break; case FORMAT_MPEG4_MULTI_AAC: open.write_format = MPEG4_MULTI_AAC; break; case FORMAT_WMA_V9: open.write_format = WMA_V9; break; case FORMAT_WMA_V10PRO: open.write_format = WMA_V10PRO; break; case FORMAT_AMRNB: open.write_format = AMRNB_FS; break; case FORMAT_AMRWB: open.write_format = AMRWB_FS; break; case FORMAT_V13K: open.write_format = V13K_FS; break; case FORMAT_EVRC: open.write_format = EVRC_FS; break; case FORMAT_EVRCB: open.write_format = EVRCB_FS; break; case FORMAT_EVRCWB: open.write_format = EVRCWB_FS; break; case FORMAT_MP3: open.write_format = MP3; break; default: pr_err("Invalid format[%d]\n", wr_format); goto fail_cmd; } switch (rd_format) { case FORMAT_LINEAR_PCM: open.read_format = LINEAR_PCM; break; case FORMAT_MPEG4_AAC: open.read_format = MPEG4_AAC; break; case FORMAT_V13K: open.read_format = V13K_FS; break; case FORMAT_EVRC: open.read_format = EVRC_FS; break; case FORMAT_AMRNB: open.read_format = AMRNB_FS; break; case FORMAT_AMRWB: open.read_format = AMRWB_FS; break; default: pr_err("Invalid format[%d]\n", rd_format); goto fail_cmd; } pr_debug("%s:rdformat[0x%x]wrformat[0x%x]\n", __func__, open.read_format, open.write_format); rc = apr_send_pkt(ac->apr, (uint32_t *) &open); if (rc < 0) { pr_err("open failed op[0x%x]rc[%d]\n", \ open.hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. waited for OPEN_READWRITE rc[%d]\n", rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_run(struct audio_client *ac, uint32_t flags, uint32_t msw_ts, uint32_t lsw_ts) { struct asm_stream_cmd_run run; int rc; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s session[%d]", __func__, ac->session); q6asm_add_hdr(ac, &run.hdr, sizeof(run), TRUE); run.hdr.opcode = ASM_SESSION_CMD_RUN; run.flags = flags; run.msw_ts = msw_ts; run.lsw_ts = lsw_ts; #ifdef CONFIG_DEBUG_FS if (out_enable_flag) { do_gettimeofday(&out_cold_tv); pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",\ out_cold_tv.tv_sec, out_cold_tv.tv_usec); } #endif rc = apr_send_pkt(ac->apr, (uint32_t *) &run); if (rc < 0) { pr_err("Command run failed[%d]", rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for run success rc[%d]", rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_run_nowait(struct audio_client *ac, uint32_t flags, uint32_t msw_ts, uint32_t lsw_ts) { struct asm_stream_cmd_run run; int rc; if (!ac || ac->apr == NULL) { pr_err("%s:APR handle NULL\n", __func__); return -EINVAL; } pr_debug("session[%d]", ac->session); q6asm_add_hdr_async(ac, &run.hdr, sizeof(run), TRUE); run.hdr.opcode = ASM_SESSION_CMD_RUN; run.flags = flags; run.msw_ts = msw_ts; run.lsw_ts = lsw_ts; rc = apr_send_pkt(ac->apr, (uint32_t *) &run); if (rc < 0) { pr_err("%s:Command run failed[%d]", __func__, rc); return -EINVAL; } atomic_inc(&ac->nowait_cmd_cnt); return 0; } int q6asm_enc_cfg_blk_aac(struct audio_client *ac, uint32_t frames_per_buf, uint32_t sample_rate, uint32_t channels, uint32_t bit_rate, uint32_t mode, uint32_t format) { struct asm_stream_cmd_encdec_cfg_blk enc_cfg; int rc = 0; pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d] format[%d]", __func__, ac->session, frames_per_buf, sample_rate, channels, bit_rate, mode, format); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); enc_cfg.enc_blk.frames_per_buf = frames_per_buf; enc_cfg.enc_blk.format_id = MPEG4_AAC; enc_cfg.enc_blk.cfg_size = sizeof(struct asm_aac_read_cfg); enc_cfg.enc_blk.cfg.aac.bitrate = bit_rate; enc_cfg.enc_blk.cfg.aac.enc_mode = mode; enc_cfg.enc_blk.cfg.aac.format = format; enc_cfg.enc_blk.cfg.aac.ch_cfg = channels; enc_cfg.enc_blk.cfg.aac.sample_rate = sample_rate; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Command %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for FORMAT_UPDATE\n"); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_enc_cfg_blk_pcm(struct audio_client *ac, uint32_t rate, uint32_t channels) { struct asm_stream_cmd_encdec_cfg_blk enc_cfg; int rc = 0; pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__, ac->session, rate, channels); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); enc_cfg.enc_blk.frames_per_buf = 1; enc_cfg.enc_blk.format_id = LINEAR_PCM; enc_cfg.enc_blk.cfg_size = sizeof(struct asm_pcm_cfg); enc_cfg.enc_blk.cfg.pcm.ch_cfg = channels; enc_cfg.enc_blk.cfg.pcm.bits_per_sample = 16; enc_cfg.enc_blk.cfg.pcm.sample_rate = rate; enc_cfg.enc_blk.cfg.pcm.is_signed = 1; enc_cfg.enc_blk.cfg.pcm.interleaved = 1; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd open failed\n"); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac, uint32_t rate, uint32_t channels) { struct asm_stream_cmd_encdec_cfg_blk enc_cfg; int rc = 0; pr_debug("%s: Session %d, rate = %d, channels = %d, setting the rate and channels to 0 for native\n", __func__, ac->session, rate, channels); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); enc_cfg.enc_blk.frames_per_buf = 1; enc_cfg.enc_blk.format_id = LINEAR_PCM; enc_cfg.enc_blk.cfg_size = sizeof(struct asm_pcm_cfg); enc_cfg.enc_blk.cfg.pcm.ch_cfg = 0;/*channels;*/ enc_cfg.enc_blk.cfg.pcm.bits_per_sample = 16; enc_cfg.enc_blk.cfg.pcm.sample_rate = 0;/*rate;*/ enc_cfg.enc_blk.cfg.pcm.is_signed = 1; enc_cfg.enc_blk.cfg.pcm.interleaved = 1; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd open failed\n"); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_enc_cfg_blk_multi_ch_pcm(struct audio_client *ac, uint32_t rate, uint32_t channels) { struct asm_stream_cmd_encdec_cfg_blk enc_cfg; int rc = 0; pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__, ac->session, rate, channels); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); enc_cfg.enc_blk.frames_per_buf = 1; enc_cfg.enc_blk.format_id = MULTI_CHANNEL_PCM; enc_cfg.enc_blk.cfg_size = sizeof(struct asm_multi_channel_pcm_fmt_blk); enc_cfg.enc_blk.cfg.mpcm.num_channels = channels; enc_cfg.enc_blk.cfg.mpcm.bits_per_sample = 16; enc_cfg.enc_blk.cfg.mpcm.sample_rate = rate; enc_cfg.enc_blk.cfg.mpcm.is_signed = 1; enc_cfg.enc_blk.cfg.mpcm.is_interleaved = 1; if (channels == 1) { enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = 0; 
enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0; } else if (channels == 2) { enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0; } else if (channels == 4) { enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_RB; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_LB; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0; } else if (channels == 6) { enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_LFE; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_FC; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = PCM_CHANNEL_LB; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = PCM_CHANNEL_RB; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0; } else if (channels == 8) { enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_LFE; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_FC; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = PCM_CHANNEL_LB; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = PCM_CHANNEL_RB; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = PCM_CHANNEL_FLC; enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = PCM_CHANNEL_FRC; } rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd open failed\n"); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_enable_sbrps(struct audio_client *ac, uint32_t sbr_ps_enable) { struct asm_stream_cmd_encdec_sbr sbrps; int rc = 0; pr_debug("%s: Session %d\n", __func__, ac->session); q6asm_add_hdr(ac, &sbrps.hdr, sizeof(sbrps), TRUE); sbrps.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; sbrps.param_id = ASM_ENABLE_SBR_PS; sbrps.param_size = sizeof(struct asm_sbr_ps); sbrps.sbr_ps.enable = sbr_ps_enable; rc = apr_send_pkt(ac->apr, (uint32_t *) &sbrps); if (rc < 0) { pr_err("Command opcode[0x%x]paramid[0x%x] failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM, ASM_ENABLE_SBR_PS); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout opcode[0x%x] ", sbrps.hdr.opcode); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_cfg_dual_mono_aac(struct audio_client *ac, uint16_t sce_left, uint16_t sce_right) { struct asm_stream_cmd_encdec_dualmono dual_mono; int rc = 0; pr_debug("%s: Session %d, sce_left = %d, sce_right = %d\n", __func__, ac->session, sce_left, sce_right); q6asm_add_hdr(ac, &dual_mono.hdr, sizeof(dual_mono), TRUE); 
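/* Descriptive note: AAC dual-mono streams carry two independent single channel elements (SCEs); sce_left and sce_right select which SCE is routed to the left and right outputs. */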
dual_mono.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; dual_mono.param_id = ASM_CONFIGURE_DUAL_MONO; dual_mono.param_size = sizeof(struct asm_dual_mono); dual_mono.channel_map.sce_left = sce_left; dual_mono.channel_map.sce_right = sce_right; rc = apr_send_pkt(ac->apr, (uint32_t *) &dual_mono); if (rc < 0) { pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n", __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM, ASM_CONFIGURE_DUAL_MONO); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout opcode[0x%x]\n", __func__, dual_mono.hdr.opcode); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_set_encdec_chan_map(struct audio_client *ac, uint32_t num_channels) { struct asm_stream_cmd_encdec_channelmap chan_map; u8 *channel_mapping; int rc = 0; pr_debug("%s: Session %d, num_channels = %d\n", __func__, ac->session, num_channels); q6asm_add_hdr(ac, &chan_map.hdr, sizeof(chan_map), TRUE); chan_map.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; chan_map.param_id = ASM_ENCDEC_DEC_CHAN_MAP; chan_map.param_size = sizeof(struct asm_dec_chan_map); chan_map.chan_map.num_channels = num_channels; channel_mapping = chan_map.chan_map.channel_mapping; memset(channel_mapping, PCM_CHANNEL_NULL, MAX_CHAN_MAP_CHANNELS); if (num_channels == 1) { channel_mapping[0] = PCM_CHANNEL_FL; } else if (num_channels == 2) { channel_mapping[0] = PCM_CHANNEL_FL; channel_mapping[1] = PCM_CHANNEL_FR; } else if (num_channels == 4) { channel_mapping[0] = PCM_CHANNEL_FL; channel_mapping[1] = PCM_CHANNEL_FR; channel_mapping[2] = PCM_CHANNEL_LB; channel_mapping[3] = PCM_CHANNEL_RB; } else if (num_channels == 6) { channel_mapping[0] = PCM_CHANNEL_FC; channel_mapping[1] = PCM_CHANNEL_FL; channel_mapping[2] = PCM_CHANNEL_FR; channel_mapping[3] = PCM_CHANNEL_LB; channel_mapping[4] = PCM_CHANNEL_RB; channel_mapping[5] = PCM_CHANNEL_LFE; } else if (num_channels == 8) { channel_mapping[0] = PCM_CHANNEL_FC; channel_mapping[1] = PCM_CHANNEL_FL; channel_mapping[2] = PCM_CHANNEL_FR; channel_mapping[3] = PCM_CHANNEL_LB; channel_mapping[4] = PCM_CHANNEL_RB; channel_mapping[5] = PCM_CHANNEL_LFE; channel_mapping[6] = PCM_CHANNEL_FLC; channel_mapping[7] = PCM_CHANNEL_FRC; } else { pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__, num_channels); rc = -EINVAL; goto fail_cmd; } rc = apr_send_pkt(ac->apr, (uint32_t *) &chan_map); if (rc < 0) { pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n", __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM, ASM_ENCDEC_DEC_CHAN_MAP); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout opcode[0x%x]\n", __func__, chan_map.hdr.opcode); rc = -ETIMEDOUT; goto fail_cmd; } return 0; fail_cmd: return rc; } int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf, uint16_t min_rate, uint16_t max_rate, uint16_t reduced_rate_level, uint16_t rate_modulation_cmd) { struct asm_stream_cmd_encdec_cfg_blk enc_cfg; int rc = 0; pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]", __func__, ac->session, frames_per_buf, min_rate, max_rate, reduced_rate_level, rate_modulation_cmd); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); enc_cfg.enc_blk.frames_per_buf = frames_per_buf; enc_cfg.enc_blk.format_id = V13K_FS; 
enc_cfg.enc_blk.cfg_size = sizeof(struct asm_qcelp13_read_cfg); enc_cfg.enc_blk.cfg.qcelp13.min_rate = min_rate; enc_cfg.enc_blk.cfg.qcelp13.max_rate = max_rate; enc_cfg.enc_blk.cfg.qcelp13.reduced_rate_level = reduced_rate_level; enc_cfg.enc_blk.cfg.qcelp13.rate_modulation_cmd = rate_modulation_cmd; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. waited for FORMAT_UPDATE\n"); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf, uint16_t min_rate, uint16_t max_rate, uint16_t rate_modulation_cmd) { struct asm_stream_cmd_encdec_cfg_blk enc_cfg; int rc = 0; pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] rate_modulation_cmd[0x%4x]", __func__, ac->session, frames_per_buf, min_rate, max_rate, rate_modulation_cmd); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); enc_cfg.enc_blk.frames_per_buf = frames_per_buf; enc_cfg.enc_blk.format_id = EVRC_FS; enc_cfg.enc_blk.cfg_size = sizeof(struct asm_evrc_read_cfg); enc_cfg.enc_blk.cfg.evrc.min_rate = min_rate; enc_cfg.enc_blk.cfg.evrc.max_rate = max_rate; enc_cfg.enc_blk.cfg.evrc.rate_modulation_cmd = rate_modulation_cmd; enc_cfg.enc_blk.cfg.evrc.reserved = 0; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. waited for FORMAT_UPDATE\n"); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf, uint16_t band_mode, uint16_t dtx_enable) { struct asm_stream_cmd_encdec_cfg_blk enc_cfg; int rc = 0; pr_debug("%s:session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]", __func__, ac->session, frames_per_buf, band_mode, dtx_enable); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); enc_cfg.enc_blk.frames_per_buf = frames_per_buf; enc_cfg.enc_blk.format_id = AMRNB_FS; enc_cfg.enc_blk.cfg_size = sizeof(struct asm_amrnb_read_cfg); enc_cfg.enc_blk.cfg.amrnb.mode = band_mode; enc_cfg.enc_blk.cfg.amrnb.dtx_mode = dtx_enable; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for FORMAT_UPDATE\n"); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_enc_cfg_blk_amrwb(struct audio_client *ac, uint32_t frames_per_buf, uint16_t band_mode, uint16_t dtx_enable) { struct asm_stream_cmd_encdec_cfg_blk enc_cfg; int rc = 0; pr_debug("%s:session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]", __func__, ac->session, frames_per_buf, band_mode, dtx_enable); q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); enc_cfg.enc_blk.frames_per_buf = frames_per_buf; enc_cfg.enc_blk.format_id = AMRWB_FS; enc_cfg.enc_blk.cfg_size = sizeof(struct asm_amrwb_read_cfg); enc_cfg.enc_blk.cfg.amrwb.mode = band_mode; enc_cfg.enc_blk.cfg.amrwb.dtx_mode = dtx_enable; rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); if (rc < 0) { pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. waited for FORMAT_UPDATE\n"); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_media_format_block_pcm(struct audio_client *ac, uint32_t rate, uint32_t channels) { struct asm_stream_media_format_update fmt; int rc = 0; pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, rate, channels); q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; fmt.format = LINEAR_PCM; fmt.cfg_size = sizeof(struct asm_pcm_cfg); fmt.write_cfg.pcm_cfg.ch_cfg = channels; fmt.write_cfg.pcm_cfg.bits_per_sample = 16; fmt.write_cfg.pcm_cfg.sample_rate = rate; fmt.write_cfg.pcm_cfg.is_signed = 1; fmt.write_cfg.pcm_cfg.interleaved = 1; rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); if (rc < 0) { pr_err("%s:Comamnd open failed\n", __func__); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout. 
waited for FORMAT_UPDATE\n", __func__); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac, uint32_t rate, uint32_t channels) { struct asm_stream_media_format_update fmt; u8 *channel_mapping; int rc = 0; pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, rate, channels); q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; fmt.format = MULTI_CHANNEL_PCM; fmt.cfg_size = sizeof(struct asm_multi_channel_pcm_fmt_blk); fmt.write_cfg.multi_ch_pcm_cfg.num_channels = channels; fmt.write_cfg.multi_ch_pcm_cfg.bits_per_sample = 16; fmt.write_cfg.multi_ch_pcm_cfg.sample_rate = rate; fmt.write_cfg.multi_ch_pcm_cfg.is_signed = 1; fmt.write_cfg.multi_ch_pcm_cfg.is_interleaved = 1; channel_mapping = fmt.write_cfg.multi_ch_pcm_cfg.channel_mapping; memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL); if (channels == 1) { channel_mapping[0] = PCM_CHANNEL_FL; } else if (channels == 2) { channel_mapping[0] = PCM_CHANNEL_FL; channel_mapping[1] = PCM_CHANNEL_FR; } else if (channels == 4) { channel_mapping[0] = PCM_CHANNEL_FL; channel_mapping[1] = PCM_CHANNEL_FR; channel_mapping[2] = PCM_CHANNEL_LB; channel_mapping[3] = PCM_CHANNEL_RB; } else if (channels == 6) { channel_mapping[0] = PCM_CHANNEL_FC; channel_mapping[1] = PCM_CHANNEL_FL; channel_mapping[2] = PCM_CHANNEL_FR; channel_mapping[3] = PCM_CHANNEL_LB; channel_mapping[4] = PCM_CHANNEL_RB; channel_mapping[5] = PCM_CHANNEL_LFE; } else if (channels == 8) { channel_mapping[0] = PCM_CHANNEL_FC; channel_mapping[1] = PCM_CHANNEL_FL; channel_mapping[2] = PCM_CHANNEL_FR; channel_mapping[3] = PCM_CHANNEL_LB; channel_mapping[4] = PCM_CHANNEL_RB; channel_mapping[5] = PCM_CHANNEL_LFE; channel_mapping[6] = PCM_CHANNEL_FLC; channel_mapping[7] = PCM_CHANNEL_FRC; } else { pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__, channels); return -EINVAL; } rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); if (rc < 0) { pr_err("%s:Command open failed\n", __func__); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout. 
waited for FORMAT_UPDATE\n", __func__); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_media_format_block_aac(struct audio_client *ac, struct asm_aac_cfg *cfg) { struct asm_stream_media_format_update fmt; int rc = 0; pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, cfg->sample_rate, cfg->ch_cfg); q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; fmt.format = MPEG4_AAC; fmt.cfg_size = sizeof(struct asm_aac_cfg); fmt.write_cfg.aac_cfg.format = cfg->format; fmt.write_cfg.aac_cfg.aot = cfg->aot; fmt.write_cfg.aac_cfg.ep_config = cfg->ep_config; fmt.write_cfg.aac_cfg.section_data_resilience = cfg->section_data_resilience; fmt.write_cfg.aac_cfg.scalefactor_data_resilience = cfg->scalefactor_data_resilience; fmt.write_cfg.aac_cfg.spectral_data_resilience = cfg->spectral_data_resilience; fmt.write_cfg.aac_cfg.ch_cfg = cfg->ch_cfg; fmt.write_cfg.aac_cfg.sample_rate = cfg->sample_rate; pr_info("%s:format=%x cfg_size=%d aac-cfg=%x aot=%d ch=%d sr=%d\n", __func__, fmt.format, fmt.cfg_size, fmt.write_cfg.aac_cfg.format, fmt.write_cfg.aac_cfg.aot, fmt.write_cfg.aac_cfg.ch_cfg, fmt.write_cfg.aac_cfg.sample_rate); rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); if (rc < 0) { pr_err("%s:Comamnd open failed\n", __func__); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_media_format_block_amrwbplus(struct audio_client *ac, struct asm_amrwbplus_cfg *cfg) { struct asm_stream_media_format_update fmt; int rc = 0; pr_debug("q6asm_media_format_block_amrwbplus"); pr_debug("%s:session[%d]band-mode[%d]frame-fmt[%d]ch[%d]\n", __func__, ac->session, cfg->amr_band_mode, cfg->amr_frame_fmt, cfg->num_channels); q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; fmt.format = AMR_WB_PLUS; fmt.cfg_size = cfg->size_bytes; fmt.write_cfg.amrwbplus_cfg.size_bytes = cfg->size_bytes; fmt.write_cfg.amrwbplus_cfg.version = cfg->version; fmt.write_cfg.amrwbplus_cfg.num_channels = cfg->num_channels; fmt.write_cfg.amrwbplus_cfg.amr_band_mode = cfg->amr_band_mode; fmt.write_cfg.amrwbplus_cfg.amr_dtx_mode = cfg->amr_dtx_mode; fmt.write_cfg.amrwbplus_cfg.amr_frame_fmt = cfg->amr_frame_fmt; fmt.write_cfg.amrwbplus_cfg.amr_lsf_idx = cfg->amr_lsf_idx; pr_debug("%s: num_channels=%x amr_band_mode=%d amr_frame_fmt=%d\n", __func__, cfg->num_channels, cfg->amr_band_mode, cfg->amr_frame_fmt); rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); if (rc < 0) { pr_err("%s:Comamnd media format update failed..\n", __func__); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout. 
waited for FORMAT_UPDATE\n", __func__); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_media_format_block_multi_aac(struct audio_client *ac, struct asm_aac_cfg *cfg) { struct asm_stream_media_format_update fmt; int rc = 0; pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, cfg->sample_rate, cfg->ch_cfg); q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; fmt.format = MPEG4_MULTI_AAC; fmt.cfg_size = sizeof(struct asm_aac_cfg); fmt.write_cfg.aac_cfg.format = cfg->format; fmt.write_cfg.aac_cfg.aot = cfg->aot; fmt.write_cfg.aac_cfg.ep_config = cfg->ep_config; fmt.write_cfg.aac_cfg.section_data_resilience = cfg->section_data_resilience; fmt.write_cfg.aac_cfg.scalefactor_data_resilience = cfg->scalefactor_data_resilience; fmt.write_cfg.aac_cfg.spectral_data_resilience = cfg->spectral_data_resilience; fmt.write_cfg.aac_cfg.ch_cfg = cfg->ch_cfg; fmt.write_cfg.aac_cfg.sample_rate = cfg->sample_rate; pr_info("%s:format=%x cfg_size=%d aac-cfg=%x aot=%d ch=%d sr=%d\n", __func__, fmt.format, fmt.cfg_size, fmt.write_cfg.aac_cfg.format, fmt.write_cfg.aac_cfg.aot, fmt.write_cfg.aac_cfg.ch_cfg, fmt.write_cfg.aac_cfg.sample_rate); rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); if (rc < 0) { pr_err("%s:Comamnd open failed\n", __func__); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_media_format_block(struct audio_client *ac, uint32_t format) { struct asm_stream_media_format_update fmt; int rc = 0; pr_debug("%s:session[%d] format[0x%x]\n", __func__, ac->session, format); q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; switch (format) { case FORMAT_V13K: fmt.format = V13K_FS; break; case FORMAT_EVRC: fmt.format = EVRC_FS; break; case FORMAT_AMRWB: fmt.format = AMRWB_FS; break; case FORMAT_AMR_WB_PLUS: fmt.format = AMR_WB_PLUS; break; case FORMAT_AMRNB: fmt.format = AMRNB_FS; break; case FORMAT_MP3: fmt.format = MP3; break; case FORMAT_DTS: fmt.format = DTS; break; case FORMAT_DTS_LBR: fmt.format = DTS_LBR; break; default: pr_err("Invalid format[%d]\n", format); goto fail_cmd; } fmt.cfg_size = 0; rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); if (rc < 0) { pr_err("%s:Comamnd open failed\n", __func__); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout. 
waited for FORMAT_UPDATE\n", __func__); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_media_format_block_wma(struct audio_client *ac, void *cfg) { struct asm_stream_media_format_update fmt; struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg; int rc = 0; pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n", ac->session, wma_cfg->format_tag, wma_cfg->sample_rate, wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec, wma_cfg->block_align, wma_cfg->valid_bits_per_sample, wma_cfg->ch_mask, wma_cfg->encode_opt); q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; fmt.format = WMA_V9; fmt.cfg_size = sizeof(struct asm_wma_cfg); fmt.write_cfg.wma_cfg.format_tag = wma_cfg->format_tag; fmt.write_cfg.wma_cfg.ch_cfg = wma_cfg->ch_cfg; fmt.write_cfg.wma_cfg.sample_rate = wma_cfg->sample_rate; fmt.write_cfg.wma_cfg.avg_bytes_per_sec = wma_cfg->avg_bytes_per_sec; fmt.write_cfg.wma_cfg.block_align = wma_cfg->block_align; fmt.write_cfg.wma_cfg.valid_bits_per_sample = wma_cfg->valid_bits_per_sample; fmt.write_cfg.wma_cfg.ch_mask = wma_cfg->ch_mask; fmt.write_cfg.wma_cfg.encode_opt = wma_cfg->encode_opt; fmt.write_cfg.wma_cfg.adv_encode_opt = 0; fmt.write_cfg.wma_cfg.adv_encode_opt2 = 0; fmt.write_cfg.wma_cfg.drc_peak_ref = 0; fmt.write_cfg.wma_cfg.drc_peak_target = 0; fmt.write_cfg.wma_cfg.drc_ave_ref = 0; fmt.write_cfg.wma_cfg.drc_ave_target = 0; rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); if (rc < 0) { pr_err("%s:Comamnd open failed\n", __func__); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_media_format_block_wmapro(struct audio_client *ac, void *cfg) { struct asm_stream_media_format_update fmt; struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg; int rc = 0; pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x], adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n", ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate, wmapro_cfg->ch_cfg, wmapro_cfg->avg_bytes_per_sec, wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample, wmapro_cfg->ch_mask, wmapro_cfg->encode_opt, wmapro_cfg->adv_encode_opt, wmapro_cfg->adv_encode_opt2); q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; fmt.format = WMA_V10PRO; fmt.cfg_size = sizeof(struct asm_wmapro_cfg); fmt.write_cfg.wmapro_cfg.format_tag = wmapro_cfg->format_tag; fmt.write_cfg.wmapro_cfg.ch_cfg = wmapro_cfg->ch_cfg; fmt.write_cfg.wmapro_cfg.sample_rate = wmapro_cfg->sample_rate; fmt.write_cfg.wmapro_cfg.avg_bytes_per_sec = wmapro_cfg->avg_bytes_per_sec; fmt.write_cfg.wmapro_cfg.block_align = wmapro_cfg->block_align; fmt.write_cfg.wmapro_cfg.valid_bits_per_sample = wmapro_cfg->valid_bits_per_sample; fmt.write_cfg.wmapro_cfg.ch_mask = wmapro_cfg->ch_mask; fmt.write_cfg.wmapro_cfg.encode_opt = wmapro_cfg->encode_opt; fmt.write_cfg.wmapro_cfg.adv_encode_opt = wmapro_cfg->adv_encode_opt; fmt.write_cfg.wmapro_cfg.adv_encode_opt2 = wmapro_cfg->adv_encode_opt2; fmt.write_cfg.wmapro_cfg.drc_peak_ref = 0; fmt.write_cfg.wmapro_cfg.drc_peak_target = 0; fmt.write_cfg.wmapro_cfg.drc_ave_ref = 0; fmt.write_cfg.wmapro_cfg.drc_ave_target = 0; rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); if (rc < 0) { 
pr_err("%s:Comamnd open failed\n", __func__); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_memory_map(struct audio_client *ac, uint32_t buf_add, int dir, uint32_t bufsz, uint32_t bufcnt) { struct asm_stream_cmd_memory_map mem_map; int rc = 0; if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: Session[%d]\n", __func__, ac->session); mem_map.hdr.opcode = ASM_SESSION_CMD_MEMORY_MAP; mem_map.buf_add = buf_add; mem_map.buf_size = bufsz * bufcnt; mem_map.mempool_id = 0; /* EBI */ mem_map.reserved = 0; q6asm_add_mmaphdr(&mem_map.hdr, sizeof(struct asm_stream_cmd_memory_map), TRUE); pr_debug("buf add[%x] buf_add_parameter[%x]\n", mem_map.buf_add, buf_add); rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_map); if (rc < 0) { pr_err("mem_map op[0x%x]rc[%d]\n", mem_map.hdr.opcode, rc); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(this_mmap.cmd_wait, (atomic_read(&this_mmap.cmd_state) == 0), 5 * HZ); if (!rc) { pr_err("timeout. waited for memory_map\n"); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: return rc; } int q6asm_memory_unmap(struct audio_client *ac, uint32_t buf_add, int dir) { struct asm_stream_cmd_memory_unmap mem_unmap; int rc = 0; if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: Session[%d]\n", __func__, ac->session); q6asm_add_mmaphdr(&mem_unmap.hdr, sizeof(struct asm_stream_cmd_memory_unmap), TRUE); mem_unmap.hdr.opcode = ASM_SESSION_CMD_MEMORY_UNMAP; mem_unmap.buf_add = buf_add; rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap); if (rc < 0) { pr_err("mem_unmap op[0x%x]rc[%d]\n", mem_unmap.hdr.opcode, rc); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(this_mmap.cmd_wait, (atomic_read(&this_mmap.cmd_state) == 0), 5 * HZ); if (!rc) { pr_err("timeout. 
waited for memory_unmap\n"); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: return rc; } int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain) { void *vol_cmd = NULL; void *payload = NULL; struct asm_pp_params_command *cmd = NULL; struct asm_lrchannel_gain_params *lrgain = NULL; int sz = 0; int rc = 0; sz = sizeof(struct asm_pp_params_command) + sizeof(struct asm_lrchannel_gain_params); vol_cmd = kzalloc(sz, GFP_KERNEL); if (vol_cmd == NULL) { pr_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); rc = -EINVAL; return rc; } cmd = (struct asm_pp_params_command *)vol_cmd; q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; cmd->payload = NULL; cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + sizeof(struct asm_lrchannel_gain_params); cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; cmd->params.param_id = L_R_CHANNEL_GAIN_PARAM_ID; cmd->params.param_size = sizeof(struct asm_lrchannel_gain_params); cmd->params.reserved = 0; payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); lrgain = (struct asm_lrchannel_gain_params *)payload; lrgain->left_gain = left_gain; lrgain->right_gain = right_gain; rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); if (rc < 0) { pr_err("%s: Volume Command failed\n", __func__); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout in sending volume command to apr\n", __func__); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: kfree(vol_cmd); return rc; } static int q6asm_memory_map_regions(struct audio_client *ac, int dir, uint32_t bufsz, uint32_t bufcnt) { struct asm_stream_cmd_memory_map_regions *mmap_regions = NULL; struct asm_memory_map_regions *mregions = NULL; struct audio_port_data *port = NULL; struct audio_buffer *ab = NULL; void *mmap_region_cmd = NULL; void *payload = NULL; int rc = 0; int i = 0; int cmd_size = 0; if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: Session[%d]\n", __func__, ac->session); cmd_size = sizeof(struct asm_stream_cmd_memory_map_regions) + sizeof(struct asm_memory_map_regions) * bufcnt; mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); if (mmap_region_cmd == NULL) { pr_err("%s: Mem alloc failed\n", __func__); rc = -EINVAL; return rc; } mmap_regions = (struct asm_stream_cmd_memory_map_regions *) mmap_region_cmd; q6asm_add_mmaphdr(&mmap_regions->hdr, cmd_size, TRUE); mmap_regions->hdr.opcode = ASM_SESSION_CMD_MEMORY_MAP_REGIONS; mmap_regions->mempool_id = 0; mmap_regions->nregions = bufcnt & 0x00ff; pr_debug("map_regions->nregions = %d\n", mmap_regions->nregions); payload = ((u8 *) mmap_region_cmd + sizeof(struct asm_stream_cmd_memory_map_regions)); mregions = (struct asm_memory_map_regions *)payload; port = &ac->port[dir]; for (i = 0; i < bufcnt; i++) { ab = &port->buf[i]; mregions->phys = ab->phys; mregions->buf_size = ab->size; ++mregions; } rc = apr_send_pkt(this_mmap.apr, (uint32_t *) mmap_region_cmd); if (rc < 0) { pr_err("mmap_regions op[0x%x]rc[%d]\n", mmap_regions->hdr.opcode, rc); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(this_mmap.cmd_wait, (atomic_read(&this_mmap.cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for memory_map\n"); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: kfree(mmap_region_cmd); return rc; } static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir, uint32_t bufsz, uint32_t bufcnt) { struct asm_stream_cmd_memory_unmap_regions *unmap_regions = NULL; struct asm_memory_unmap_regions *mregions = NULL; struct audio_port_data *port = NULL; struct audio_buffer *ab = NULL; void *unmap_region_cmd = NULL; void *payload = NULL; int rc = 0; int i = 0; int cmd_size = 0; if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: Session[%d]\n", __func__, ac->session); cmd_size = sizeof(struct asm_stream_cmd_memory_unmap_regions) + sizeof(struct asm_memory_unmap_regions) * bufcnt; unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); if (unmap_region_cmd == NULL) { pr_err("%s: Mem alloc failed\n", __func__); rc = -EINVAL; return rc; } unmap_regions = (struct asm_stream_cmd_memory_unmap_regions *) unmap_region_cmd; q6asm_add_mmaphdr(&unmap_regions->hdr, cmd_size, TRUE); unmap_regions->hdr.opcode = ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS; unmap_regions->nregions = bufcnt & 0x00ff; pr_debug("unmap_regions->nregions = %d\n", unmap_regions->nregions); payload = ((u8 *) unmap_region_cmd + sizeof(struct asm_stream_cmd_memory_unmap_regions)); mregions = (struct asm_memory_unmap_regions *)payload; port = &ac->port[dir]; for (i = 0; i < bufcnt; i++) { ab = &port->buf[i]; mregions->phys = ab->phys; ++mregions; } rc = apr_send_pkt(this_mmap.apr, (uint32_t *) unmap_region_cmd); if (rc < 0) { pr_err("mmap_regions op[0x%x]rc[%d]\n", unmap_regions->hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(this_mmap.cmd_wait, (atomic_read(&this_mmap.cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for memory_unmap\n"); goto fail_cmd; } rc = 0; fail_cmd: kfree(unmap_region_cmd); return rc; } int q6asm_set_mute(struct audio_client *ac, int muteflag) { void *vol_cmd = NULL; void *payload = NULL; struct asm_pp_params_command *cmd = NULL; struct asm_mute_params *mute = NULL; int sz = 0; int rc = 0; sz = sizeof(struct asm_pp_params_command) + + sizeof(struct asm_mute_params); vol_cmd = kzalloc(sz, GFP_KERNEL); if (vol_cmd == NULL) { pr_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); rc = -EINVAL; return rc; } cmd = (struct asm_pp_params_command *)vol_cmd; q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; cmd->payload = NULL; cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + sizeof(struct asm_mute_params); cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; cmd->params.param_id = MUTE_CONFIG_PARAM_ID; cmd->params.param_size = sizeof(struct asm_mute_params); cmd->params.reserved = 0; payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); mute = (struct asm_mute_params *)payload; mute->muteflag = muteflag; rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); if (rc < 0) { pr_err("%s: Mute Command failed\n", __func__); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout in sending mute command to apr\n", __func__); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: kfree(vol_cmd); return rc; } int q6asm_set_volume(struct audio_client *ac, int volume) { void *vol_cmd = NULL; void *payload = NULL; struct asm_pp_params_command *cmd = NULL; struct asm_master_gain_params *mgain = NULL; int sz = 0; int rc = 0; sz = sizeof(struct asm_pp_params_command) + + sizeof(struct asm_master_gain_params); vol_cmd = kzalloc(sz, GFP_KERNEL); if (vol_cmd == NULL) { pr_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); rc = -EINVAL; return rc; } cmd = (struct asm_pp_params_command *)vol_cmd; q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; cmd->payload = NULL; cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + sizeof(struct asm_master_gain_params); cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; cmd->params.param_id = MASTER_GAIN_PARAM_ID; cmd->params.param_size = sizeof(struct asm_master_gain_params); cmd->params.reserved = 0; payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); mgain = (struct asm_master_gain_params *)payload; mgain->master_gain = volume; mgain->padding = 0x00; rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); if (rc < 0) { pr_err("%s: Volume Command failed\n", __func__); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout in sending volume command to apr\n", __func__); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: kfree(vol_cmd); return rc; } int q6asm_set_softpause(struct audio_client *ac, struct asm_softpause_params *pause_param) { void *vol_cmd = NULL; void *payload = NULL; struct asm_pp_params_command *cmd = NULL; struct asm_softpause_params *params = NULL; int sz = 0; int rc = 0; sz = sizeof(struct asm_pp_params_command) + + sizeof(struct asm_softpause_params); vol_cmd = kzalloc(sz, GFP_KERNEL); if (vol_cmd == NULL) { pr_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); rc = -EINVAL; return rc; } cmd = (struct asm_pp_params_command *)vol_cmd; q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; cmd->payload = 
NULL; cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + sizeof(struct asm_softpause_params); cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; cmd->params.param_id = SOFT_PAUSE_PARAM_ID; cmd->params.param_size = sizeof(struct asm_softpause_params); cmd->params.reserved = 0; payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); params = (struct asm_softpause_params *)payload; params->enable = pause_param->enable; params->period = pause_param->period; params->step = pause_param->step; params->rampingcurve = pause_param->rampingcurve; pr_debug("%s: soft Pause Command: enable = %d, period = %d, step = %d, curve = %d\n", __func__, params->enable, params->period, params->step, params->rampingcurve); rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); if (rc < 0) { pr_err("%s: Volume Command(soft_pause) failed\n", __func__); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout in sending volume command(soft_pause) to apr\n", __func__); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: kfree(vol_cmd); return rc; } int q6asm_set_softvolume(struct audio_client *ac, struct asm_softvolume_params *softvol_param) { void *vol_cmd = NULL; void *payload = NULL; struct asm_pp_params_command *cmd = NULL; struct asm_softvolume_params *params = NULL; int sz = 0; int rc = 0; sz = sizeof(struct asm_pp_params_command) + + sizeof(struct asm_softvolume_params); vol_cmd = kzalloc(sz, GFP_KERNEL); if (vol_cmd == NULL) { pr_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); rc = -EINVAL; return rc; } cmd = (struct asm_pp_params_command *)vol_cmd; q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; cmd->payload = NULL; cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + sizeof(struct asm_softvolume_params); cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; cmd->params.param_id = SOFT_VOLUME_PARAM_ID; cmd->params.param_size = sizeof(struct asm_softvolume_params); cmd->params.reserved = 0; payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); params = (struct asm_softvolume_params *)payload; params->period = softvol_param->period; params->step = softvol_param->step; params->rampingcurve = softvol_param->rampingcurve; pr_debug("%s: soft Volume:opcode = %d,payload_sz =%d,module_id =%d, param_id = %d, param_sz = %d\n", __func__, cmd->hdr.opcode, cmd->payload_size, cmd->params.module_id, cmd->params.param_id, cmd->params.param_size); pr_debug("%s: soft Volume Command: period = %d, step = %d, curve = %d\n", __func__, params->period, params->step, params->rampingcurve); rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); if (rc < 0) { pr_err("%s: Volume Command(soft_volume) failed\n", __func__); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout in sending volume command(soft_volume) to apr\n", __func__); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: kfree(vol_cmd); return rc; } int q6asm_equalizer(struct audio_client *ac, void *eq) { void *eq_cmd = NULL; void *payload = NULL; struct asm_pp_params_command *cmd = NULL; struct asm_equalizer_params *equalizer = NULL; struct msm_audio_eq_stream_config *eq_params = NULL; int i = 0; int sz = 0; int rc = 0; sz = sizeof(struct asm_pp_params_command) + + sizeof(struct asm_equalizer_params); eq_cmd = kzalloc(sz, GFP_KERNEL); if (eq_cmd == NULL) { pr_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); rc = -EINVAL; 
goto fail_cmd; } eq_params = (struct msm_audio_eq_stream_config *) eq; cmd = (struct asm_pp_params_command *)eq_cmd; q6asm_add_hdr(ac, &cmd->hdr, sz, TRUE); cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; cmd->payload = NULL; cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + sizeof(struct asm_equalizer_params); cmd->params.module_id = EQUALIZER_MODULE_ID; cmd->params.param_id = EQUALIZER_PARAM_ID; cmd->params.param_size = sizeof(struct asm_equalizer_params); cmd->params.reserved = 0; payload = (u8 *)(eq_cmd + sizeof(struct asm_pp_params_command)); equalizer = (struct asm_equalizer_params *)payload; equalizer->enable = eq_params->enable; equalizer->num_bands = eq_params->num_bands; pr_debug("%s: enable:%d numbands:%d\n", __func__, eq_params->enable, eq_params->num_bands); for (i = 0; i < eq_params->num_bands; i++) { equalizer->eq_bands[i].band_idx = eq_params->eq_bands[i].band_idx; equalizer->eq_bands[i].filter_type = eq_params->eq_bands[i].filter_type; equalizer->eq_bands[i].center_freq_hz = eq_params->eq_bands[i].center_freq_hz; equalizer->eq_bands[i].filter_gain = eq_params->eq_bands[i].filter_gain; equalizer->eq_bands[i].q_factor = eq_params->eq_bands[i].q_factor; pr_debug("%s: filter_type:%u bandnum:%d\n", __func__, eq_params->eq_bands[i].filter_type, i); pr_debug("%s: center_freq_hz:%u bandnum:%d\n", __func__, eq_params->eq_bands[i].center_freq_hz, i); pr_debug("%s: filter_gain:%d bandnum:%d\n", __func__, eq_params->eq_bands[i].filter_gain, i); pr_debug("%s: q_factor:%d bandnum:%d\n", __func__, eq_params->eq_bands[i].q_factor, i); } rc = apr_send_pkt(ac->apr, (uint32_t *) eq_cmd); if (rc < 0) { pr_err("%s: Equalizer Command failed\n", __func__); rc = -EINVAL; goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout in sending equalizer command to apr\n", __func__); rc = -EINVAL; goto fail_cmd; } rc = 0; fail_cmd: kfree(eq_cmd); return rc; } int q6asm_read(struct audio_client *ac) { struct asm_stream_cmd_read read; struct audio_buffer *ab; int dsp_buf; struct audio_port_data *port; int rc; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[OUT]; q6asm_add_hdr(ac, &read.hdr, sizeof(read), FALSE); mutex_lock(&port->lock); dsp_buf = port->dsp_buf; ab = &port->buf[dsp_buf]; pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n", __func__, ac->session, dsp_buf, (void *)port->buf[dsp_buf].data, port->cpu_buf, (void *)port->buf[port->cpu_buf].phys); read.hdr.opcode = ASM_DATA_CMD_READ; read.buf_add = ab->phys; read.buf_size = ab->size; read.uid = port->dsp_buf; read.hdr.token = port->dsp_buf; port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); mutex_unlock(&port->lock); pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__, read.buf_add, read.hdr.token, read.uid); rc = apr_send_pkt(ac->apr, (uint32_t *) &read); if (rc < 0) { pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc); goto fail_cmd; } return 0; } fail_cmd: return -EINVAL; } int q6asm_read_nolock(struct audio_client *ac) { struct asm_stream_cmd_read read; struct audio_buffer *ab; int dsp_buf; struct audio_port_data *port; int rc; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[OUT]; q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE); dsp_buf = port->dsp_buf; ab = &port->buf[dsp_buf]; pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n", __func__, ac->session, dsp_buf, 
(void *)port->buf[dsp_buf].data, port->cpu_buf, (void *)port->buf[port->cpu_buf].phys); read.hdr.opcode = ASM_DATA_CMD_READ; read.buf_add = ab->phys; read.buf_size = ab->size; read.uid = port->dsp_buf; read.hdr.token = port->dsp_buf; port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__, read.buf_add, read.hdr.token, read.uid); rc = apr_send_pkt(ac->apr, (uint32_t *) &read); if (rc < 0) { pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc); goto fail_cmd; } return 0; } fail_cmd: return -EINVAL; } static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr, uint32_t pkt_size, uint32_t cmd_flg) { pr_debug("session=%d pkt size=%d cmd_flg=%d\n", ac->session, pkt_size, cmd_flg); hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ APR_HDR_LEN(sizeof(struct apr_hdr)),\ APR_PKT_VER); hdr->src_svc = ((struct apr_svc *)ac->apr)->id; hdr->src_domain = APR_DOMAIN_APPS; hdr->dest_svc = APR_SVC_ASM; hdr->dest_domain = APR_DOMAIN_ADSP; hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01; hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01; if (cmd_flg) { hdr->token = ac->session; atomic_set(&ac->cmd_state, 1); } hdr->pkt_size = pkt_size; return; } int q6asm_async_write(struct audio_client *ac, struct audio_aio_write_param *param) { int rc = 0; struct asm_stream_cmd_write write; if (!ac || ac->apr == NULL) { pr_err("%s: APR handle NULL\n", __func__); return -EINVAL; } q6asm_add_hdr_async(ac, &write.hdr, sizeof(write), FALSE); /* Pass buffer ID as token for AIO scheme */ write.hdr.token = param->uid; write.hdr.opcode = ASM_DATA_CMD_WRITE; write.buf_add = param->paddr; write.avail_bytes = param->len; write.uid = param->uid; write.msw_ts = param->msw_ts; write.lsw_ts = param->lsw_ts; /* Use 0xFF00 for disabling timestamps */ if (param->flags == 0xFF00) write.uflags = (0x00000000 | (param->flags & 0x800000FF)); else write.uflags = (0x80000000 | param->flags); pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session, write.buf_add, write.avail_bytes); rc = apr_send_pkt(ac->apr, (uint32_t *) &write); if (rc < 0) { pr_debug("[%s] write op[0x%x]rc[%d]\n", __func__, write.hdr.opcode, rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_async_read(struct audio_client *ac, struct audio_aio_read_param *param) { int rc = 0; struct asm_stream_cmd_read read; if (!ac || ac->apr == NULL) { pr_err("%s: APR handle NULL\n", __func__); return -EINVAL; } q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE); /* Pass physical address as token for AIO scheme */ read.hdr.token = param->paddr; read.hdr.opcode = ASM_DATA_CMD_READ; read.buf_add = param->paddr; read.buf_size = param->len; read.uid = param->uid; pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session, read.buf_add, read.buf_size); rc = apr_send_pkt(ac->apr, (uint32_t *) &read); if (rc < 0) { pr_debug("[%s] read op[0x%x]rc[%d]\n", __func__, read.hdr.opcode, rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_async_read_compressed(struct audio_client *ac, struct audio_aio_read_param *param) { int rc = 0; struct asm_stream_cmd_read read; if (!ac || ac->apr == NULL) { pr_err("%s: APR handle NULL\n", __func__); return -EINVAL; } q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE); /* Pass physical address as token for AIO scheme */ read.hdr.token = param->paddr; read.hdr.opcode = ASM_DATA_CMD_READ_COMPRESSED; read.buf_add = param->paddr; read.buf_size = param->len; read.uid = param->uid; pr_debug("%s: 
session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session, read.buf_add, read.buf_size); rc = apr_send_pkt(ac->apr, (uint32_t *) &read); if (rc < 0) { pr_debug("[%s] read op[0x%x]rc[%d]\n", __func__, read.hdr.opcode, rc); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts, uint32_t lsw_ts, uint32_t flags) { int rc = 0; struct asm_stream_cmd_write write; struct audio_port_data *port; struct audio_buffer *ab; int dsp_buf = 0; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: session[%d] len=%d", __func__, ac->session, len); if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[IN]; q6asm_add_hdr(ac, &write.hdr, sizeof(write), FALSE); mutex_lock(&port->lock); dsp_buf = port->dsp_buf; ab = &port->buf[dsp_buf]; write.hdr.token = port->dsp_buf; write.hdr.opcode = ASM_DATA_CMD_WRITE; write.buf_add = ab->phys; write.avail_bytes = len; write.uid = port->dsp_buf; write.msw_ts = msw_ts; write.lsw_ts = lsw_ts; /* Use 0xFF00 for disabling timestamps */ if (flags == 0xFF00) write.uflags = (0x00000000 | (flags & 0x800000FF)); else write.uflags = (0x80000000 | flags); port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x]buf_id[0x%x]" , __func__, ab->phys, write.buf_add, write.hdr.token, write.uid); mutex_unlock(&port->lock); #ifdef CONFIG_DEBUG_FS if (out_enable_flag) { char zero_pattern[2] = {0x00, 0x00}; /* If the first two bytes are non-zero and the last two bytes are zero, it is the warm output pattern */ if ((strncmp(((char *)ab->data), zero_pattern, 2)) && (!strncmp(((char *)ab->data + 2), zero_pattern, 2))) { do_gettimeofday(&out_warm_tv); pr_debug("WARM:apr_send_pkt at %ld sec %ld microsec\n", out_warm_tv.tv_sec,\ out_warm_tv.tv_usec); pr_debug("Warm Pattern Matched"); } /* If the first two bytes are zero and the last two bytes are non-zero, it is the cont output pattern */ else if ((!strncmp(((char *)ab->data), zero_pattern, 2)) && (strncmp(((char *)ab->data + 2), zero_pattern, 2))) { do_gettimeofday(&out_cont_tv); pr_debug("CONT:apr_send_pkt at %ld sec %ld microsec\n", out_cont_tv.tv_sec,\ out_cont_tv.tv_usec); pr_debug("Cont Pattern Matched"); } } #endif rc = apr_send_pkt(ac->apr, (uint32_t *) &write); if (rc < 0) { pr_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc); goto fail_cmd; } pr_debug("%s: WRITE SUCCESS\n", __func__); return 0; } fail_cmd: return -EINVAL; } int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts, uint32_t lsw_ts, uint32_t flags) { int rc = 0; struct asm_stream_cmd_write write; struct audio_port_data *port; struct audio_buffer *ab; int dsp_buf = 0; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s: session[%d] len=%d", __func__, ac->session, len); if (ac->io_mode == SYNC_IO_MODE) { port = &ac->port[IN]; q6asm_add_hdr_async(ac, &write.hdr, sizeof(write), FALSE); dsp_buf = port->dsp_buf; ab = &port->buf[dsp_buf]; write.hdr.token = port->dsp_buf; write.hdr.opcode = ASM_DATA_CMD_WRITE; write.buf_add = ab->phys; write.avail_bytes = len; write.uid = port->dsp_buf; write.msw_ts = msw_ts; write.lsw_ts = lsw_ts; /* Use 0xFF00 for disabling timestamps */ if (flags == 0xFF00) write.uflags = (0x00000000 | (flags & 0x800000FF)); else write.uflags = (0x80000000 | flags); port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x]buf_id[0x%x]" , __func__, ab->phys, write.buf_add, write.hdr.token, write.uid); rc = 
apr_send_pkt(ac->apr, (uint32_t *) &write); if (rc < 0) { pr_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc); goto fail_cmd; } pr_debug("%s: WRITE SUCCESS\n", __func__); return 0; } fail_cmd: return -EINVAL; } uint64_t q6asm_get_session_time(struct audio_client *ac) { struct apr_hdr hdr; int rc; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } q6asm_add_hdr(ac, &hdr, sizeof(hdr), FALSE); hdr.opcode = ASM_SESSION_CMD_GET_SESSION_TIME; atomic_set(&ac->time_flag, 1); pr_debug("%s: session[%d]opcode[0x%x]\n", __func__, ac->session, hdr.opcode); rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr); if (rc < 0) { pr_err("Command 0x%x failed\n", hdr.opcode); goto fail_cmd; } rc = wait_event_timeout(ac->time_wait, (atomic_read(&ac->time_flag) == 0), 5*HZ); if (!rc) { pr_err("%s: timeout in getting session time from DSP\n", __func__); goto fail_cmd; } return ac->time_stamp; fail_cmd: return -EINVAL; } int q6asm_cmd(struct audio_client *ac, int cmd) { struct apr_hdr hdr; int rc; atomic_t *state; int cnt = 0; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE); switch (cmd) { case CMD_PAUSE: pr_debug("%s:CMD_PAUSE\n", __func__); hdr.opcode = ASM_SESSION_CMD_PAUSE; state = &ac->cmd_state; break; case CMD_FLUSH: pr_debug("%s:CMD_FLUSH\n", __func__); hdr.opcode = ASM_STREAM_CMD_FLUSH; state = &ac->cmd_state; break; case CMD_OUT_FLUSH: pr_debug("%s:CMD_OUT_FLUSH\n", __func__); hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS; state = &ac->cmd_state; break; case CMD_EOS: pr_debug("%s:CMD_EOS\n", __func__); hdr.opcode = ASM_DATA_CMD_EOS; atomic_set(&ac->cmd_state, 0); state = &ac->cmd_state; break; case CMD_CLOSE: pr_debug("%s:CMD_CLOSE\n", __func__); hdr.opcode = ASM_STREAM_CMD_CLOSE; state = &ac->cmd_state; break; default: pr_err("Invalid cmd[%d]\n", cmd); goto fail_cmd; } pr_debug("%s:session[%d]opcode[0x%x] ", __func__, ac->session, hdr.opcode); rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr); if (rc < 0) { pr_err("Command 0x%x failed\n", hdr.opcode); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for response opcode[0x%x]\n", hdr.opcode); goto fail_cmd; } if (cmd == CMD_FLUSH) q6asm_reset_buf_state(ac); if (cmd == CMD_CLOSE) { /* check if DSP return all buffers */ if (ac->port[IN].buf) { for (cnt = 0; cnt < ac->port[IN].max_buf_cnt; cnt++) { if (ac->port[IN].buf[cnt].used == IN) { pr_debug("Write Buf[%d] not returned\n", cnt); } } } if (ac->port[OUT].buf) { for (cnt = 0; cnt < ac->port[OUT].max_buf_cnt; cnt++) { if (ac->port[OUT].buf[cnt].used == OUT) { pr_debug("Read Buf[%d] not returned\n", cnt); } } } } return 0; fail_cmd: return -EINVAL; } int q6asm_cmd_nowait(struct audio_client *ac, int cmd) { struct apr_hdr hdr; int rc; if (!ac || ac->apr == NULL) { pr_err("%s:APR handle NULL\n", __func__); return -EINVAL; } q6asm_add_hdr_async(ac, &hdr, sizeof(hdr), TRUE); switch (cmd) { case CMD_PAUSE: pr_debug("%s:CMD_PAUSE\n", __func__); hdr.opcode = ASM_SESSION_CMD_PAUSE; break; case CMD_EOS: pr_debug("%s:CMD_EOS\n", __func__); hdr.opcode = ASM_DATA_CMD_EOS; break; default: pr_err("%s:Invalid format[%d]\n", __func__, cmd); goto fail_cmd; } pr_debug("%s:session[%d]opcode[0x%x] ", __func__, ac->session, hdr.opcode); rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr); if (rc < 0) { pr_err("%s:Commmand 0x%x failed\n", __func__, hdr.opcode); goto fail_cmd; } atomic_inc(&ac->nowait_cmd_cnt); return 0; fail_cmd: return -EINVAL; } static void q6asm_reset_buf_state(struct audio_client *ac) { int cnt = 0; int loopcnt = 0; struct audio_port_data *port = NULL; if (ac->io_mode == SYNC_IO_MODE) { mutex_lock(&ac->cmd_lock); for (loopcnt = 0; loopcnt <= OUT; loopcnt++) { port = &ac->port[loopcnt]; cnt = port->max_buf_cnt - 1; port->dsp_buf = 0; port->cpu_buf = 0; while (cnt >= 0) { if (!port->buf) continue; port->buf[cnt].used = 1; cnt--; } } mutex_unlock(&ac->cmd_lock); } } int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable) { struct asm_stream_cmd_reg_tx_overflow_event tx_overflow; int rc; if (!ac || ac->apr == NULL) { pr_err("APR handle NULL\n"); return -EINVAL; } pr_debug("%s:session[%d]enable[%d]\n", __func__, ac->session, enable); q6asm_add_hdr(ac, &tx_overflow.hdr, sizeof(tx_overflow), TRUE); tx_overflow.hdr.opcode = \ ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS; /* tx overflow event: enable */ tx_overflow.enable = enable; rc = apr_send_pkt(ac->apr, (uint32_t *) &tx_overflow); if (rc < 0) { pr_err("tx overflow op[0x%x]rc[%d]\n", \ tx_overflow.hdr.opcode, rc); goto fail_cmd; } rc = wait_event_timeout(ac->cmd_wait, (atomic_read(&ac->cmd_state) == 0), 5*HZ); if (!rc) { pr_err("timeout. 
waited for tx overflow\n"); goto fail_cmd; } return 0; fail_cmd: return -EINVAL; } int q6asm_get_apr_service_id(int session_id) { pr_debug("%s\n", __func__); if (session_id < 0 || session_id > SESSION_MAX) { pr_err("%s: invalid session_id = %d\n", __func__, session_id); return -EINVAL; } return ((struct apr_svc *)session[session_id]->apr)->id; } static int __init q6asm_init(void) { pr_debug("%s\n", __func__); init_waitqueue_head(&this_mmap.cmd_wait); memset(session, 0, sizeof(session)); #ifdef CONFIG_DEBUG_FS out_buffer = kmalloc(OUT_BUFFER_SIZE, GFP_KERNEL); /* MM-NC-FILE_PERMISSION-00-[+ */ out_dentry = debugfs_create_file("audio_out_latency_measurement_node",\ S_IFREG | S_IRUGO | S_IWUSR,\ NULL, NULL, &audio_output_latency_debug_fops); /* MM-NC-FILE_PERMISSION-00-]- */ if (IS_ERR(out_dentry)) pr_err("debugfs_create_file failed\n"); in_buffer = kmalloc(IN_BUFFER_SIZE, GFP_KERNEL); /* MM-NC-FILE_PERMISSION-00-[+ */ in_dentry = debugfs_create_file("audio_in_latency_measurement_node",\ S_IFREG | S_IRUGO | S_IWUSR,\ NULL, NULL, &audio_input_latency_debug_fops); /* MM-NC-FILE_PERMISSION-00-]- */ if (IS_ERR(in_dentry)) pr_err("debugfs_create_file failed\n"); #endif return 0; } device_initcall(q6asm_init);
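A note on the write path above: q6asm_write() and q6asm_write_nolock() advance port->dsp_buf with a bitmask, (dsp_buf + 1) & (max_buf_cnt - 1), which is equivalent to a modulo only when the buffer count is a power of two. A minimal standalone sketch of that wrap under the power-of-two assumption (ring_advance and RING_SLOTS are illustrative names, not driver symbols):

#include <assert.h>
#include <stdint.h>

#define RING_SLOTS 8u	/* stand-in for port->max_buf_cnt; must be a power of two */

/* Same pattern as: port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); */
static uint32_t ring_advance(uint32_t idx)
{
	return (idx + 1) & (RING_SLOTS - 1);
}

int main(void)
{
	uint32_t idx = 0;
	int i;

	for (i = 0; i < 8; i++)
		idx = ring_advance(idx);
	assert(idx == 0);	/* eight advances wrap back to slot 0 */
	return 0;
}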
gpl-2.0
degasus/dolphin
Source/Core/VideoBackends/Software/SWOGLWindow.cpp
11
3899
// Copyright 2015 Dolphin Emulator Project // Licensed under GPLv2+ // Refer to the license.txt file included. #include <memory> #include "Common/GL/GLInterfaceBase.h" #include "Common/GL/GLUtil.h" #include "Common/Logging/Log.h" #include "VideoBackends/Software/SWOGLWindow.h" #include "VideoBackends/Software/SWTexture.h" std::unique_ptr<SWOGLWindow> SWOGLWindow::s_instance; void SWOGLWindow::Init(void* window_handle) { GLUtil::InitInterface(); GLInterface->SetMode(GLInterfaceMode::MODE_DETECT); if (!GLInterface->Create(window_handle)) { ERROR_LOG(VIDEO, "GLInterface::Create failed."); } s_instance.reset(new SWOGLWindow()); } void SWOGLWindow::Shutdown() { GLInterface->Shutdown(); GLInterface.reset(); s_instance.reset(); } void SWOGLWindow::Prepare() { if (m_init) return; m_init = true; // Init extension support. if (!GLExtensions::Init()) { ERROR_LOG(VIDEO, "GLExtensions::Init failed! Does your video card support OpenGL 2.0?"); return; } else if (GLExtensions::Version() < 310) { ERROR_LOG(VIDEO, "OpenGL Version %d detected, but at least 3.1 is required.", GLExtensions::Version()); return; } std::string frag_shader = "in vec2 TexCoord;\n" "out vec4 ColorOut;\n" "uniform sampler2D samp;\n" "void main() {\n" " ColorOut = texture(samp, TexCoord);\n" "}\n"; std::string vertex_shader = "out vec2 TexCoord;\n" "void main() {\n" " vec2 rawpos = vec2(gl_VertexID & 1, (gl_VertexID & 2) >> 1);\n" " gl_Position = vec4(rawpos * 2.0 - 1.0, 0.0, 1.0);\n" " TexCoord = vec2(rawpos.x, -rawpos.y);\n" "}\n"; std::string header = GLInterface->GetMode() == GLInterfaceMode::MODE_OPENGL ? "#version 140\n" : "#version 300 es\n" "precision highp float;\n"; m_image_program = GLUtil::CompileProgram(header + vertex_shader, header + frag_shader); glUseProgram(m_image_program); glUniform1i(glGetUniformLocation(m_image_program, "samp"), 0); glGenTextures(1, &m_image_texture); glBindTexture(GL_TEXTURE_2D, m_image_texture); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glGenVertexArrays(1, &m_image_vao); } void SWOGLWindow::PrintText(const std::string& text, int x, int y, u32 color) { m_text.push_back({text, x, y, color}); } void SWOGLWindow::ShowImage(AbstractTexture* image, const EFBRectangle& xfb_region) { SW::SWTexture* sw_image = static_cast<SW::SWTexture*>(image); GLInterface->Update(); // just updates the render window position and the backbuffer size GLsizei glWidth = (GLsizei)GLInterface->GetBackBufferWidth(); GLsizei glHeight = (GLsizei)GLInterface->GetBackBufferHeight(); glViewport(0, 0, glWidth, glHeight); glActiveTexture(GL_TEXTURE9); glBindTexture(GL_TEXTURE_2D, m_image_texture); // TODO: Apply xfb_region glPixelStorei(GL_UNPACK_ALIGNMENT, 4); // 4-byte pixel alignment glPixelStorei(GL_UNPACK_ROW_LENGTH, sw_image->GetConfig().width); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, static_cast<GLsizei>(sw_image->GetConfig().width), static_cast<GLsizei>(sw_image->GetConfig().height), 0, GL_RGBA, GL_UNSIGNED_BYTE, sw_image->GetData()); glUseProgram(m_image_program); glBindVertexArray(m_image_vao); glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); // TODO: implement OSD // for (TextData& text : m_text) // { // } m_text.clear(); GLInterface->Swap(); } int SWOGLWindow::PeekMessages() { return GLInterface->PeekMessages(); }
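A note on the bufferless present above: ShowImage() calls glDrawArrays(GL_TRIANGLE_STRIP, 0, 4) with no vertex attributes at all, and the vertex shader rebuilds the corner positions from gl_VertexID alone. This small standalone C program (illustrative only, not part of Dolphin) evaluates the same expressions on the CPU to show how vertex IDs 0..3 map to the four clip-space corners of a fullscreen strip:

#include <stdio.h>

int main(void)
{
	int id;

	for (id = 0; id < 4; id++) {
		/* Mirrors: vec2 rawpos = vec2(gl_VertexID & 1, (gl_VertexID & 2) >> 1); */
		float raw_x = (float)(id & 1);
		float raw_y = (float)((id & 2) >> 1);
		/* Mirrors: gl_Position = vec4(rawpos * 2.0 - 1.0, 0.0, 1.0); */
		printf("id %d -> clip (%+.1f, %+.1f)\n",
		       id, raw_x * 2.0f - 1.0f, raw_y * 2.0f - 1.0f);
	}
	return 0;	/* prints (-1,-1), (+1,-1), (-1,+1), (+1,+1): a strip covering the screen */
}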
gpl-2.0
caiiiycuk/em-dosbox
src/hardware/sblaster.cpp
11
48005
/* * Copyright (C) 2002-2015 The DOSBox Team * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <iomanip> #include <sstream> #include <string.h> #include <math.h> #include "dosbox.h" #include "inout.h" #include "mixer.h" #include "dma.h" #include "pic.h" #include "hardware.h" #include "setup.h" #include "support.h" #include "shell.h" using namespace std; void MIDI_RawOutByte(Bit8u data); bool MIDI_Available(void); #define SB_PIC_EVENTS 0 #define DSP_MAJOR 3 #define DSP_MINOR 1 #define MIXER_INDEX 0x04 #define MIXER_DATA 0x05 #define DSP_RESET 0x06 #define DSP_READ_DATA 0x0A #define DSP_WRITE_DATA 0x0C #define DSP_WRITE_STATUS 0x0C #define DSP_READ_STATUS 0x0E #define DSP_ACK_16BIT 0x0f #define DSP_NO_COMMAND 0 #define DMA_BUFSIZE 1024 #define DSP_BUFSIZE 64 #define DSP_DACSIZE 512 //Should be enough for sound generated in millisecond blocks #define SB_BUF_SIZE 8096 #define SB_SH 14 #define SB_SH_MASK ((1 << SB_SH)-1) enum {DSP_S_RESET,DSP_S_RESET_WAIT,DSP_S_NORMAL,DSP_S_HIGHSPEED}; enum SB_TYPES {SBT_NONE=0,SBT_1=1,SBT_PRO1=2,SBT_2=3,SBT_PRO2=4,SBT_16=6,SBT_GB=7}; enum SB_IRQS {SB_IRQ_8,SB_IRQ_16,SB_IRQ_MPU}; enum DSP_MODES { MODE_NONE, MODE_DAC, MODE_DMA, MODE_DMA_PAUSE, MODE_DMA_MASKED }; enum DMA_MODES { DSP_DMA_NONE, DSP_DMA_2,DSP_DMA_3,DSP_DMA_4,DSP_DMA_8, DSP_DMA_16,DSP_DMA_16_ALIASED }; enum { PLAY_MONO,PLAY_STEREO }; struct SB_INFO { Bitu freq; struct { bool stereo,sign,autoinit; DMA_MODES mode; Bitu rate,mul; Bitu total,left,min; Bit64u start; union { Bit8u b8[DMA_BUFSIZE]; Bit16s b16[DMA_BUFSIZE]; } buf; Bitu bits; DmaChannel * chan; Bitu remain_size; } dma; bool speaker; bool midi; Bit8u time_constant; DSP_MODES mode; SB_TYPES type; struct { bool pending_8bit; bool pending_16bit; } irq; struct { Bit8u state; Bit8u cmd; Bit8u cmd_len; Bit8u cmd_in_pos; Bit8u cmd_in[DSP_BUFSIZE]; struct { Bit8u lastval; Bit8u data[DSP_BUFSIZE]; Bitu pos,used; } in,out; Bit8u test_register; Bitu write_busy; } dsp; struct { Bit16s data[DSP_DACSIZE+1]; Bitu used; Bit16s last; } dac; struct { Bit8u index; Bit8u dac[2],fm[2],cda[2],master[2],lin[2]; Bit8u mic; bool stereo; bool enabled; bool filtered; Bit8u unhandled[0x48]; } mixer; struct { Bit8u reference; Bits stepsize; bool haveref; } adpcm; struct { Bitu base; Bitu irq; Bit8u dma8,dma16; } hw; struct { Bits value; Bitu count; } e2; MixerChannel * chan; }; static SB_INFO sb; static char const * const copyright_string="COPYRIGHT (C) CREATIVE TECHNOLOGY LTD, 1992."; // number of bytes in input for commands (sb/sbpro) static Bit8u DSP_cmd_len_sb[256] = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0x00 // 1,0,0,0, 2,0,2,2, 0,0,0,0, 0,0,0,0, // 0x10 1,0,0,0, 2,2,2,2, 0,0,0,0, 0,0,0,0, // 0x10 Wari hack 0,0,0,0, 2,0,0,0, 0,0,0,0, 0,0,0,0, // 0x20 0,0,0,0, 0,0,0,0, 1,0,0,0, 0,0,0,0, // 0x30 1,2,2,0, 0,0,0,0, 2,0,0,0, 0,0,0,0, // 0x40 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0x50 0,0,0,0, 0,0,0,0, 0,0,0,0, 
0,0,0,0, // 0x60 0,0,0,0, 2,2,2,2, 0,0,0,0, 0,0,0,0, // 0x70 2,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0x80 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0x90 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0xa0 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0xb0 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0xc0 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0xd0 1,0,1,0, 1,0,0,0, 0,0,0,0, 0,0,0,0, // 0xe0 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 // 0xf0 }; // number of bytes in input for commands (sb16) static Bit8u DSP_cmd_len_sb16[256] = { 0,0,0,0, 1,2,0,0, 1,0,0,0, 0,0,2,1, // 0x00 // 1,0,0,0, 2,0,2,2, 0,0,0,0, 0,0,0,0, // 0x10 1,0,0,0, 2,2,2,2, 0,0,0,0, 0,0,0,0, // 0x10 Wari hack 0,0,0,0, 2,0,0,0, 0,0,0,0, 0,0,0,0, // 0x20 0,0,0,0, 0,0,0,0, 1,0,0,0, 0,0,0,0, // 0x30 1,2,2,0, 0,0,0,0, 2,0,0,0, 0,0,0,0, // 0x40 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0x50 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0x60 0,0,0,0, 2,2,2,2, 0,0,0,0, 0,0,0,0, // 0x70 2,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0x80 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0x90 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0xa0 3,3,3,3, 3,3,3,3, 3,3,3,3, 3,3,3,3, // 0xb0 3,3,3,3, 3,3,3,3, 3,3,3,3, 3,3,3,3, // 0xc0 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, // 0xd0 1,0,1,0, 1,0,0,0, 0,0,0,0, 0,0,0,0, // 0xe0 0,0,0,0, 0,0,0,0, 0,1,0,0, 0,0,0,0 // 0xf0 }; static Bit8u ASP_regs[256]; static bool ASP_init_in_progress = false; static int E2_incr_table[4][9] = { { 0x01, -0x02, -0x04, 0x08, -0x10, 0x20, 0x40, -0x80, -106 }, { -0x01, 0x02, -0x04, 0x08, 0x10, -0x20, 0x40, -0x80, 165 }, { -0x01, 0x02, 0x04, -0x08, 0x10, -0x20, -0x40, 0x80, -151 }, { 0x01, -0x02, 0x04, -0x08, -0x10, 0x20, -0x40, 0x80, 90 } }; #ifndef max #define max(a,b) ((a)>(b)?(a):(b)) #endif #ifndef min #define min(a,b) ((a)<(b)?(a):(b)) #endif static void DSP_ChangeMode(DSP_MODES mode); static void CheckDMAEnd(); static void END_DMA_Event(Bitu); static void DMA_Silent_Event(Bitu val); static void GenerateDMASound(Bitu size); static void DSP_SetSpeaker(bool how) { if (sb.speaker==how) return; sb.speaker=how; if (sb.type==SBT_16) return; sb.chan->Enable(how); if (sb.speaker) { PIC_RemoveEvents(DMA_Silent_Event); CheckDMAEnd(); } else { } } static INLINE void SB_RaiseIRQ(SB_IRQS type) { LOG(LOG_SB,LOG_NORMAL)("Raising IRQ"); switch (type) { case SB_IRQ_8: if (sb.irq.pending_8bit) { // LOG_MSG("SB: 8bit irq pending"); return; } sb.irq.pending_8bit=true; PIC_ActivateIRQ(sb.hw.irq); break; case SB_IRQ_16: if (sb.irq.pending_16bit) { // LOG_MSG("SB: 16bit irq pending"); return; } sb.irq.pending_16bit=true; PIC_ActivateIRQ(sb.hw.irq); break; default: break; } } static INLINE void DSP_FlushData(void) { sb.dsp.out.used=0; sb.dsp.out.pos=0; } static void DSP_DMA_CallBack(DmaChannel * chan, DMAEvent event) { if (chan!=sb.dma.chan || event==DMA_REACHED_TC) return; else if (event==DMA_MASKED) { if (sb.mode==MODE_DMA) { GenerateDMASound(sb.dma.min); sb.mode=MODE_DMA_MASKED; // DSP_ChangeMode(MODE_DMA_MASKED); LOG(LOG_SB,LOG_NORMAL)("DMA masked,stopping output, left %d",chan->currcnt); } } else if (event==DMA_UNMASKED) { if (sb.mode==MODE_DMA_MASKED && sb.dma.mode!=DSP_DMA_NONE) { DSP_ChangeMode(MODE_DMA); // sb.mode=MODE_DMA; CheckDMAEnd(); LOG(LOG_SB,LOG_NORMAL)("DMA unmasked,starting output, auto %d block %d",chan->autoinit,chan->basecnt); } } } #define MIN_ADAPTIVE_STEP_SIZE 0 #define MAX_ADAPTIVE_STEP_SIZE 32767 #define DC_OFFSET_FADE 254 static INLINE Bit8u decode_ADPCM_4_sample(Bit8u sample,Bit8u & reference,Bits& scale) { static const Bit8s scaleMap[64] = { 0, 1, 2, 3, 4, 5, 6, 7, 0, -1, -2, -3, -4, -5, -6, -7, 1, 3, 5, 7, 9, 11, 13, 15, -1, -3, 
-5, -7, -9, -11, -13, -15, 2, 6, 10, 14, 18, 22, 26, 30, -2, -6, -10, -14, -18, -22, -26, -30, 4, 12, 20, 28, 36, 44, 52, 60, -4, -12, -20, -28, -36, -44, -52, -60 }; static const Bit8u adjustMap[64] = { 0, 0, 0, 0, 0, 16, 16, 16, 0, 0, 0, 0, 0, 16, 16, 16, 240, 0, 0, 0, 0, 16, 16, 16, 240, 0, 0, 0, 0, 16, 16, 16, 240, 0, 0, 0, 0, 16, 16, 16, 240, 0, 0, 0, 0, 16, 16, 16, 240, 0, 0, 0, 0, 0, 0, 0, 240, 0, 0, 0, 0, 0, 0, 0 }; Bits samp = sample + scale; if ((samp < 0) || (samp > 63)) { LOG(LOG_SB,LOG_ERROR)("Bad ADPCM-4 sample"); if(samp < 0 ) samp = 0; if(samp > 63) samp = 63; } Bits ref = reference + scaleMap[samp]; if (ref > 0xff) reference = 0xff; else if (ref < 0x00) reference = 0x00; else reference = (Bit8u)(ref&0xff); scale = (scale + adjustMap[samp]) & 0xff; return reference; } static INLINE Bit8u decode_ADPCM_2_sample(Bit8u sample,Bit8u & reference,Bits& scale) { static const Bit8s scaleMap[24] = { 0, 1, 0, -1, 1, 3, -1, -3, 2, 6, -2, -6, 4, 12, -4, -12, 8, 24, -8, -24, 6, 48, -16, -48 }; static const Bit8u adjustMap[24] = { 0, 4, 0, 4, 252, 4, 252, 4, 252, 4, 252, 4, 252, 4, 252, 4, 252, 4, 252, 4, 252, 0, 252, 0 }; Bits samp = sample + scale; if ((samp < 0) || (samp > 23)) { LOG(LOG_SB,LOG_ERROR)("Bad ADPCM-2 sample"); if(samp < 0 ) samp = 0; if(samp > 23) samp = 23; } Bits ref = reference + scaleMap[samp]; if (ref > 0xff) reference = 0xff; else if (ref < 0x00) reference = 0x00; else reference = (Bit8u)(ref&0xff); scale = (scale + adjustMap[samp]) & 0xff; return reference; } INLINE Bit8u decode_ADPCM_3_sample(Bit8u sample,Bit8u & reference,Bits& scale) { static const Bit8s scaleMap[40] = { 0, 1, 2, 3, 0, -1, -2, -3, 1, 3, 5, 7, -1, -3, -5, -7, 2, 6, 10, 14, -2, -6, -10, -14, 4, 12, 20, 28, -4, -12, -20, -28, 5, 15, 25, 35, -5, -15, -25, -35 }; static const Bit8u adjustMap[40] = { 0, 0, 0, 8, 0, 0, 0, 8, 248, 0, 0, 8, 248, 0, 0, 8, 248, 0, 0, 8, 248, 0, 0, 8, 248, 0, 0, 8, 248, 0, 0, 8, 248, 0, 0, 0, 248, 0, 0, 0 }; Bits samp = sample + scale; if ((samp < 0) || (samp > 39)) { LOG(LOG_SB,LOG_ERROR)("Bad ADPCM-3 sample"); if(samp < 0 ) samp = 0; if(samp > 39) samp = 39; } Bits ref = reference + scaleMap[samp]; if (ref > 0xff) reference = 0xff; else if (ref < 0x00) reference = 0x00; else reference = (Bit8u)(ref&0xff); scale = (scale + adjustMap[samp]) & 0xff; return reference; } static void GenerateDMASound(Bitu size) { Bitu read=0;Bitu done=0;Bitu i=0; if(sb.dma.autoinit) { if (sb.dma.left <= size) size = sb.dma.left; } else if (sb.dma.left <= sb.dma.min) size = sb.dma.left; switch (sb.dma.mode) { case DSP_DMA_2: read=sb.dma.chan->Read(size,sb.dma.buf.b8); if (read && sb.adpcm.haveref) { sb.adpcm.haveref=false; sb.adpcm.reference=sb.dma.buf.b8[0]; sb.adpcm.stepsize=MIN_ADAPTIVE_STEP_SIZE; i++; } for (;i<read;i++) { MixTemp[done++]=decode_ADPCM_2_sample((sb.dma.buf.b8[i] >> 6) & 0x3,sb.adpcm.reference,sb.adpcm.stepsize); MixTemp[done++]=decode_ADPCM_2_sample((sb.dma.buf.b8[i] >> 4) & 0x3,sb.adpcm.reference,sb.adpcm.stepsize); MixTemp[done++]=decode_ADPCM_2_sample((sb.dma.buf.b8[i] >> 2) & 0x3,sb.adpcm.reference,sb.adpcm.stepsize); MixTemp[done++]=decode_ADPCM_2_sample((sb.dma.buf.b8[i] >> 0) & 0x3,sb.adpcm.reference,sb.adpcm.stepsize); } sb.chan->AddSamples_m8(done,MixTemp); break; case DSP_DMA_3: read=sb.dma.chan->Read(size,sb.dma.buf.b8); if (read && sb.adpcm.haveref) { sb.adpcm.haveref=false; sb.adpcm.reference=sb.dma.buf.b8[0]; sb.adpcm.stepsize=MIN_ADAPTIVE_STEP_SIZE; i++; } for (;i<read;i++) { MixTemp[done++]=decode_ADPCM_3_sample((sb.dma.buf.b8[i] >> 5) & 
0x7,sb.adpcm.reference,sb.adpcm.stepsize); MixTemp[done++]=decode_ADPCM_3_sample((sb.dma.buf.b8[i] >> 2) & 0x7,sb.adpcm.reference,sb.adpcm.stepsize); MixTemp[done++]=decode_ADPCM_3_sample((sb.dma.buf.b8[i] & 0x3) << 1,sb.adpcm.reference,sb.adpcm.stepsize); } sb.chan->AddSamples_m8(done,MixTemp); break; case DSP_DMA_4: read=sb.dma.chan->Read(size,sb.dma.buf.b8); if (read && sb.adpcm.haveref) { sb.adpcm.haveref=false; sb.adpcm.reference=sb.dma.buf.b8[0]; sb.adpcm.stepsize=MIN_ADAPTIVE_STEP_SIZE; i++; } for (;i<read;i++) { MixTemp[done++]=decode_ADPCM_4_sample(sb.dma.buf.b8[i] >> 4,sb.adpcm.reference,sb.adpcm.stepsize); MixTemp[done++]=decode_ADPCM_4_sample(sb.dma.buf.b8[i]& 0xf,sb.adpcm.reference,sb.adpcm.stepsize); } sb.chan->AddSamples_m8(done,MixTemp); break; case DSP_DMA_8: if (sb.dma.stereo) { read=sb.dma.chan->Read(size,&sb.dma.buf.b8[sb.dma.remain_size]); Bitu total=read+sb.dma.remain_size; if (!sb.dma.sign) sb.chan->AddSamples_s8(total>>1,sb.dma.buf.b8); else sb.chan->AddSamples_s8s(total>>1,(Bit8s*)sb.dma.buf.b8); if (total&1) { sb.dma.remain_size=1; sb.dma.buf.b8[0]=sb.dma.buf.b8[total-1]; } else sb.dma.remain_size=0; } else { read=sb.dma.chan->Read(size,sb.dma.buf.b8); if (!sb.dma.sign) sb.chan->AddSamples_m8(read,sb.dma.buf.b8); else sb.chan->AddSamples_m8s(read,(Bit8s *)sb.dma.buf.b8); } break; case DSP_DMA_16: case DSP_DMA_16_ALIASED: if (sb.dma.stereo) { /* In DSP_DMA_16_ALIASED mode temporarily divide by 2 to get number of 16-bit samples, because 8-bit DMA Read returns byte size, while in DSP_DMA_16 mode 16-bit DMA Read returns word size */ read=sb.dma.chan->Read(size,(Bit8u *)&sb.dma.buf.b16[sb.dma.remain_size]) >> (sb.dma.mode==DSP_DMA_16_ALIASED ? 1:0); Bitu total=read+sb.dma.remain_size; #if defined(WORDS_BIGENDIAN) if (sb.dma.sign) sb.chan->AddSamples_s16_nonnative(total>>1,sb.dma.buf.b16); else sb.chan->AddSamples_s16u_nonnative(total>>1,(Bit16u *)sb.dma.buf.b16); #else if (sb.dma.sign) sb.chan->AddSamples_s16(total>>1,sb.dma.buf.b16); else sb.chan->AddSamples_s16u(total>>1,(Bit16u *)sb.dma.buf.b16); #endif if (total&1) { sb.dma.remain_size=1; sb.dma.buf.b16[0]=sb.dma.buf.b16[total-1]; } else sb.dma.remain_size=0; } else { read=sb.dma.chan->Read(size,(Bit8u *)sb.dma.buf.b16) >> (sb.dma.mode==DSP_DMA_16_ALIASED ? 1:0); #if defined(WORDS_BIGENDIAN) if (sb.dma.sign) sb.chan->AddSamples_m16_nonnative(read,sb.dma.buf.b16); else sb.chan->AddSamples_m16u_nonnative(read,(Bit16u *)sb.dma.buf.b16); #else if (sb.dma.sign) sb.chan->AddSamples_m16(read,sb.dma.buf.b16); else sb.chan->AddSamples_m16u(read,(Bit16u *)sb.dma.buf.b16); #endif } //restore buffer length value to byte size in aliased mode if (sb.dma.mode==DSP_DMA_16_ALIASED) read=read<<1; break; default: LOG_MSG("Unhandled dma mode %d",sb.dma.mode); sb.mode=MODE_NONE; return; } sb.dma.left-=read; if (!sb.dma.left) { PIC_RemoveEvents(END_DMA_Event); if (sb.dma.mode >= DSP_DMA_16) SB_RaiseIRQ(SB_IRQ_16); else SB_RaiseIRQ(SB_IRQ_8); if (!sb.dma.autoinit) { LOG(LOG_SB,LOG_NORMAL)("Single cycle transfer ended"); sb.mode=MODE_NONE; sb.dma.mode=DSP_DMA_NONE; } else { sb.dma.left=sb.dma.total; if (!sb.dma.left) { LOG(LOG_SB,LOG_NORMAL)("Auto-init transfer with 0 size"); sb.mode=MODE_NONE; } } } } /* old version... 
static void GenerateDACSound(Bitu len) { if (!sb.dac.used) { sb.mode=MODE_NONE; return; } Bitu dac_add=(sb.dac.used<<16)/len; Bitu dac_pos=0; Bit16s * out=(Bit16s *)MixTemp; for (Bitu i=len;i;i--) { *out++=sb.dac.data[0+(dac_pos>>16)]; dac_pos+=dac_add; } sb.dac.used=0; sb.chan->AddSamples_m16(len,(Bit16s *)MixTemp); } */ static void DMA_Silent_Event(Bitu val) { if (sb.dma.left<val) val=sb.dma.left; Bitu read=sb.dma.chan->Read(val,sb.dma.buf.b8); sb.dma.left-=read; if (!sb.dma.left) { if (sb.dma.mode >= DSP_DMA_16) SB_RaiseIRQ(SB_IRQ_16); else SB_RaiseIRQ(SB_IRQ_8); if (sb.dma.autoinit) sb.dma.left=sb.dma.total; else { sb.mode=MODE_NONE; sb.dma.mode=DSP_DMA_NONE; } } if (sb.dma.left) { Bitu bigger=(sb.dma.left > sb.dma.min) ? sb.dma.min : sb.dma.left; float delay=(bigger*1000.0f)/sb.dma.rate; PIC_AddEvent(DMA_Silent_Event,delay,bigger); } } static void END_DMA_Event(Bitu val) { GenerateDMASound(val); } static void CheckDMAEnd(void) { if (!sb.dma.left) return; if (!sb.speaker && sb.type!=SBT_16) { Bitu bigger=(sb.dma.left > sb.dma.min) ? sb.dma.min : sb.dma.left; float delay=(bigger*1000.0f)/sb.dma.rate; PIC_AddEvent(DMA_Silent_Event,delay,bigger); LOG(LOG_SB,LOG_NORMAL)("Silent DMA Transfer scheduling IRQ in %.3f milliseconds",delay); } else if (sb.dma.left<sb.dma.min) { float delay=(sb.dma.left*1000.0f)/sb.dma.rate; LOG(LOG_SB,LOG_NORMAL)("Short transfer scheduling IRQ in %.3f milliseconds",delay); PIC_AddEvent(END_DMA_Event,delay,sb.dma.left); } } static void DSP_ChangeMode(DSP_MODES mode) { if (sb.mode==mode) return; else sb.chan->FillUp(); sb.mode=mode; } static void DSP_RaiseIRQEvent(Bitu /*val*/) { SB_RaiseIRQ(SB_IRQ_8); } static void DSP_DoDMATransfer(DMA_MODES mode,Bitu freq,bool stereo) { char const * type; sb.mode=MODE_DMA_MASKED; sb.chan->FillUp(); sb.dma.left=sb.dma.total; sb.dma.mode=mode; sb.dma.stereo=stereo; sb.irq.pending_8bit=false; sb.irq.pending_16bit=false; switch (mode) { case DSP_DMA_2: type="2-bits ADPCM"; sb.dma.mul=(1 << SB_SH)/4; break; case DSP_DMA_3: type="3-bits ADPCM"; sb.dma.mul=(1 << SB_SH)/3; break; case DSP_DMA_4: type="4-bits ADPCM"; sb.dma.mul=(1 << SB_SH)/2; break; case DSP_DMA_8: type="8-bits PCM"; sb.dma.mul=(1 << SB_SH); break; case DSP_DMA_16_ALIASED: type="16-bits(aliased) PCM"; sb.dma.mul=(1 << SB_SH)*2; break; case DSP_DMA_16: type="16-bits PCM"; sb.dma.mul=(1 << SB_SH); break; default: LOG(LOG_SB,LOG_ERROR)("DSP:Illegal transfer mode %d",mode); return; } if (sb.dma.stereo) sb.dma.mul*=2; sb.dma.rate=(sb.freq*sb.dma.mul) >> SB_SH; sb.dma.min=(sb.dma.rate*3)/1000; sb.chan->SetFreq(freq); sb.dma.mode=mode; PIC_RemoveEvents(END_DMA_Event); sb.dma.chan->Register_Callback(DSP_DMA_CallBack); #if (C_DEBUG) LOG(LOG_SB,LOG_NORMAL)("DMA Transfer:%s %s %s freq %d rate %d size %d", type, sb.dma.stereo ? "Stereo" : "Mono", sb.dma.autoinit ? "Auto-Init" : "Single-Cycle", freq,sb.dma.rate,sb.dma.total ); #endif } static void DSP_PrepareDMA_Old(DMA_MODES mode,bool autoinit,bool sign) { sb.dma.autoinit=autoinit; sb.dma.sign=sign; if (!autoinit) sb.dma.total=1+sb.dsp.in.data[0]+(sb.dsp.in.data[1] << 8); sb.dma.chan=GetDMAChannel(sb.hw.dma8); DSP_DoDMATransfer(mode,sb.freq / (sb.mixer.stereo ? 
2 : 1),sb.mixer.stereo); } static void DSP_PrepareDMA_New(DMA_MODES mode,Bitu length,bool autoinit,bool stereo) { Bitu freq=sb.freq; //equal length if data format and dma channel are both 16-bit or 8-bit sb.dma.total=length; sb.dma.autoinit=autoinit; if (mode==DSP_DMA_16) { if (sb.hw.dma16!=0xff) { sb.dma.chan=GetDMAChannel(sb.hw.dma16); if (sb.dma.chan==NULL) { sb.dma.chan=GetDMAChannel(sb.hw.dma8); mode=DSP_DMA_16_ALIASED; sb.dma.total<<=1; } } else { sb.dma.chan=GetDMAChannel(sb.hw.dma8); mode=DSP_DMA_16_ALIASED; //UNDOCUMENTED: //In aliased mode sample length is written to DSP as number of //16-bit samples so we need double 8-bit DMA buffer length sb.dma.total<<=1; } } else sb.dma.chan=GetDMAChannel(sb.hw.dma8); DSP_DoDMATransfer(mode,freq,stereo); } static void DSP_AddData(Bit8u val) { if (sb.dsp.out.used<DSP_BUFSIZE) { Bitu start=sb.dsp.out.used+sb.dsp.out.pos; if (start>=DSP_BUFSIZE) start-=DSP_BUFSIZE; sb.dsp.out.data[start]=val; sb.dsp.out.used++; } else { LOG(LOG_SB,LOG_ERROR)("DSP:Data Output buffer full"); } } static void DSP_FinishReset(Bitu /*val*/) { DSP_FlushData(); DSP_AddData(0xaa); sb.dsp.state=DSP_S_NORMAL; } static void DSP_Reset(void) { LOG(LOG_SB,LOG_ERROR)("DSP:Reset"); PIC_DeActivateIRQ(sb.hw.irq); DSP_ChangeMode(MODE_NONE); DSP_FlushData(); sb.dsp.cmd=DSP_NO_COMMAND; sb.dsp.cmd_len=0; sb.dsp.in.pos=0; sb.dsp.write_busy=0; PIC_RemoveEvents(DSP_FinishReset); sb.dma.left=0; sb.dma.total=0; sb.dma.stereo=false; sb.dma.sign=false; sb.dma.autoinit=false; sb.dma.mode=DSP_DMA_NONE; sb.dma.remain_size=0; if (sb.dma.chan) sb.dma.chan->Clear_Request(); sb.freq=22050; sb.time_constant=45; sb.dac.used=0; sb.dac.last=0; sb.e2.value=0xaa; sb.e2.count=0; sb.irq.pending_8bit=false; sb.irq.pending_16bit=false; sb.chan->SetFreq(22050); // DSP_SetSpeaker(false); PIC_RemoveEvents(END_DMA_Event); } static void DSP_DoReset(Bit8u val) { if (((val&1)!=0) && (sb.dsp.state!=DSP_S_RESET)) { //TODO Get out of highspeed mode DSP_Reset(); sb.dsp.state=DSP_S_RESET; } else if (((val&1)==0) && (sb.dsp.state==DSP_S_RESET)) { // reset off sb.dsp.state=DSP_S_RESET_WAIT; PIC_RemoveEvents(DSP_FinishReset); PIC_AddEvent(DSP_FinishReset,20.0f/1000.0f,0); // 20 microseconds } } static void DSP_E2_DMA_CallBack(DmaChannel * /*chan*/, DMAEvent event) { if (event==DMA_UNMASKED) { Bit8u val=(Bit8u)(sb.e2.value&0xff); DmaChannel * chan=GetDMAChannel(sb.hw.dma8); chan->Register_Callback(0); chan->Write(1,&val); } } static void DSP_ADC_CallBack(DmaChannel * /*chan*/, DMAEvent event) { if (event!=DMA_UNMASKED) return; Bit8u val=128; DmaChannel * ch=GetDMAChannel(sb.hw.dma8); while (sb.dma.left--) { ch->Write(1,&val); } SB_RaiseIRQ(SB_IRQ_8); ch->Register_Callback(0); } static void DSP_ChangeRate(Bitu freq) { if (sb.freq!=freq && sb.dma.mode!=DSP_DMA_NONE) { sb.chan->FillUp(); sb.chan->SetFreq(freq / (sb.mixer.stereo ? 
2 : 1)); sb.dma.rate=(freq*sb.dma.mul) >> SB_SH; sb.dma.min=(sb.dma.rate*3)/1000; } sb.freq=freq; } Bitu DEBUG_EnableDebugger(void); #define DSP_SB16_ONLY if (sb.type != SBT_16) { LOG(LOG_SB,LOG_ERROR)("DSP:Command %2X requires SB16",sb.dsp.cmd); break; } #define DSP_SB2_ABOVE if (sb.type <= SBT_1) { LOG(LOG_SB,LOG_ERROR)("DSP:Command %2X requires SB2 or above",sb.dsp.cmd); break; } static void DSP_DoCommand(void) { // LOG_MSG("DSP Command %X",sb.dsp.cmd); switch (sb.dsp.cmd) { case 0x04: if (sb.type == SBT_16) { /* SB16 ASP set mode register */ if ((sb.dsp.in.data[0]&0xf1)==0xf1) ASP_init_in_progress=true; else ASP_init_in_progress=false; LOG(LOG_SB,LOG_NORMAL)("DSP Unhandled SB16ASP command %X (set mode register to %X)",sb.dsp.cmd,sb.dsp.in.data[0]); } else { /* DSP Status SB 2.0/pro version. NOT SB16. */ DSP_FlushData(); if (sb.type == SBT_2) DSP_AddData(0x88); else if ((sb.type == SBT_PRO1) || (sb.type == SBT_PRO2)) DSP_AddData(0x7b); else DSP_AddData(0xff); //Everything enabled } break; case 0x05: /* SB16 ASP set codec parameter */ LOG(LOG_SB,LOG_NORMAL)("DSP Unhandled SB16ASP command %X (set codec parameter)",sb.dsp.cmd); break; case 0x08: /* SB16 ASP get version */ LOG(LOG_SB,LOG_NORMAL)("DSP Unhandled SB16ASP command %X sub %X",sb.dsp.cmd,sb.dsp.in.data[0]); if (sb.type == SBT_16) { switch (sb.dsp.in.data[0]) { case 0x03: DSP_AddData(0x18); // version ID (??) break; default: LOG(LOG_SB,LOG_NORMAL)("DSP Unhandled SB16ASP command %X sub %X",sb.dsp.cmd,sb.dsp.in.data[0]); break; } } else { LOG(LOG_SB,LOG_NORMAL)("DSP Unhandled SB16ASP command %X sub %X",sb.dsp.cmd,sb.dsp.in.data[0]); } break; case 0x0e: /* SB16 ASP set register */ if (sb.type == SBT_16) { // LOG(LOG_SB,LOG_NORMAL)("SB16 ASP set register %X := %X",sb.dsp.in.data[0],sb.dsp.in.data[1]); ASP_regs[sb.dsp.in.data[0]] = sb.dsp.in.data[1]; } else { LOG(LOG_SB,LOG_NORMAL)("DSP Unhandled SB16ASP command %X (set register)",sb.dsp.cmd); } break; case 0x0f: /* SB16 ASP get register */ if (sb.type == SBT_16) { if ((ASP_init_in_progress) && (sb.dsp.in.data[0]==0x83)) { ASP_regs[0x83] = ~ASP_regs[0x83]; } // LOG(LOG_SB,LOG_NORMAL)("SB16 ASP get register %X == %X",sb.dsp.in.data[0],ASP_regs[sb.dsp.in.data[0]]); DSP_AddData(ASP_regs[sb.dsp.in.data[0]]); } else { LOG(LOG_SB,LOG_NORMAL)("DSP Unhandled SB16ASP command %X (get register)",sb.dsp.cmd); } break; case 0x10: /* Direct DAC */ DSP_ChangeMode(MODE_DAC); if (sb.dac.used<DSP_DACSIZE) { sb.dac.data[sb.dac.used++]=(Bit8s(sb.dsp.in.data[0] ^ 0x80)) << 8; sb.dac.data[sb.dac.used++]=(Bit8s(sb.dsp.in.data[0] ^ 0x80)) << 8; } break; case 0x24: /* Singe Cycle 8-Bit DMA ADC */ sb.dma.left=sb.dma.total=1+sb.dsp.in.data[0]+(sb.dsp.in.data[1] << 8); sb.dma.sign=false; LOG(LOG_SB,LOG_ERROR)("DSP:Faked ADC for %d bytes",sb.dma.total); GetDMAChannel(sb.hw.dma8)->Register_Callback(DSP_ADC_CallBack); break; case 0x14: /* Singe Cycle 8-Bit DMA DAC */ case 0x15: /* Wari hack. 
Waru uses this one instead of 0x14, but some weird stuff going on there anyway */ case 0x91: /* Singe Cycle 8-Bit DMA High speed DAC */ /* Note: 0x91 is documented only for DSP ver.2.x and 3.x, not 4.x */ DSP_PrepareDMA_Old(DSP_DMA_8,false,false); break; case 0x90: /* Auto Init 8-bit DMA High Speed */ case 0x1c: /* Auto Init 8-bit DMA */ DSP_SB2_ABOVE; /* Note: 0x90 is documented only for DSP ver.2.x and 3.x, not 4.x */ DSP_PrepareDMA_Old(DSP_DMA_8,true,false); break; case 0x38: /* Write to SB MIDI Output */ if (sb.midi == true) MIDI_RawOutByte(sb.dsp.in.data[0]); break; case 0x40: /* Set Timeconstant */ DSP_ChangeRate(1000000 / (256 - sb.dsp.in.data[0])); break; case 0x41: /* Set Output Samplerate */ case 0x42: /* Set Input Samplerate */ /* Note: 0x42 is handled like 0x41, needed by Fasttracker II */ DSP_SB16_ONLY; DSP_ChangeRate((sb.dsp.in.data[0] << 8) | sb.dsp.in.data[1]); break; case 0x48: /* Set DMA Block Size */ DSP_SB2_ABOVE; //TODO Maybe check limit for new irq? sb.dma.total=1+sb.dsp.in.data[0]+(sb.dsp.in.data[1] << 8); break; case 0x75: /* 075h : Single Cycle 4-bit ADPCM Reference */ sb.adpcm.haveref=true; case 0x74: /* 074h : Single Cycle 4-bit ADPCM */ DSP_PrepareDMA_Old(DSP_DMA_4,false,false); break; case 0x77: /* 077h : Single Cycle 3-bit(2.6bit) ADPCM Reference*/ sb.adpcm.haveref=true; case 0x76: /* 074h : Single Cycle 3-bit(2.6bit) ADPCM */ DSP_PrepareDMA_Old(DSP_DMA_3,false,false); break; case 0x7d: /* Auto Init 4-bit ADPCM Reference */ DSP_SB2_ABOVE; sb.adpcm.haveref=true; DSP_PrepareDMA_Old(DSP_DMA_4,true,false); break; case 0x17: /* 017h : Single Cycle 2-bit ADPCM Reference*/ sb.adpcm.haveref=true; case 0x16: /* 074h : Single Cycle 2-bit ADPCM */ DSP_PrepareDMA_Old(DSP_DMA_2,false,false); break; case 0x80: /* Silence DAC */ PIC_AddEvent(&DSP_RaiseIRQEvent, (1000.0f*(1+sb.dsp.in.data[0]+(sb.dsp.in.data[1] << 8))/sb.freq)); break; case 0xb0: case 0xb1: case 0xb2: case 0xb3: case 0xb4: case 0xb5: case 0xb6: case 0xb7: case 0xb8: case 0xb9: case 0xba: case 0xbb: case 0xbc: case 0xbd: case 0xbe: case 0xbf: case 0xc0: case 0xc1: case 0xc2: case 0xc3: case 0xc4: case 0xc5: case 0xc6: case 0xc7: case 0xc8: case 0xc9: case 0xca: case 0xcb: case 0xcc: case 0xcd: case 0xce: case 0xcf: DSP_SB16_ONLY; /* Generic 8/16 bit DMA */ // DSP_SetSpeaker(true); //SB16 always has speaker enabled sb.dma.sign=(sb.dsp.in.data[0] & 0x10) > 0; DSP_PrepareDMA_New((sb.dsp.cmd & 0x10) ? 
DSP_DMA_16 : DSP_DMA_8, 1+sb.dsp.in.data[1]+(sb.dsp.in.data[2] << 8), (sb.dsp.cmd & 0x4)>0, (sb.dsp.in.data[0] & 0x20) > 0 ); break; case 0xd5: /* Halt 16-bit DMA */ DSP_SB16_ONLY; case 0xd0: /* Halt 8-bit DMA */ // DSP_ChangeMode(MODE_NONE); // Games sometimes already program a new dma before stopping, gives noise if (sb.mode==MODE_NONE) { // possibly different code here that does not switch to MODE_DMA_PAUSE } sb.mode=MODE_DMA_PAUSE; PIC_RemoveEvents(END_DMA_Event); break; case 0xd1: /* Enable Speaker */ DSP_SetSpeaker(true); break; case 0xd3: /* Disable Speaker */ DSP_SetSpeaker(false); break; case 0xd8: /* Speaker status */ DSP_SB2_ABOVE; DSP_FlushData(); if (sb.speaker) DSP_AddData(0xff); else DSP_AddData(0x00); break; case 0xd6: /* Continue DMA 16-bit */ DSP_SB16_ONLY; case 0xd4: /* Continue DMA 8-bit*/ if (sb.mode==MODE_DMA_PAUSE) { sb.mode=MODE_DMA_MASKED; if (sb.dma.chan!=NULL) sb.dma.chan->Register_Callback(DSP_DMA_CallBack); } break; case 0xd9: /* Exit Autoinitialize 16-bit */ DSP_SB16_ONLY; case 0xda: /* Exit Autoinitialize 8-bit */ DSP_SB2_ABOVE; /* Set mode to single transfer so it ends with current block */ sb.dma.autoinit=false; //Should stop itself break; case 0xe0: /* DSP Identification - SB2.0+ */ DSP_FlushData(); DSP_AddData(~sb.dsp.in.data[0]); break; case 0xe1: /* Get DSP Version */ DSP_FlushData(); switch (sb.type) { case SBT_1: DSP_AddData(0x1);DSP_AddData(0x05);break; case SBT_2: DSP_AddData(0x2);DSP_AddData(0x1);break; case SBT_PRO1: DSP_AddData(0x3);DSP_AddData(0x0);break; case SBT_PRO2: DSP_AddData(0x3);DSP_AddData(0x2);break; case SBT_16: DSP_AddData(0x4);DSP_AddData(0x5);break; default: break; } break; case 0xe2: /* Weird DMA identification write routine */ { LOG(LOG_SB,LOG_NORMAL)("DSP Function 0xe2"); for (Bitu i = 0; i < 8; i++) if ((sb.dsp.in.data[0] >> i) & 0x01) sb.e2.value += E2_incr_table[sb.e2.count % 4][i]; sb.e2.value += E2_incr_table[sb.e2.count % 4][8]; sb.e2.count++; GetDMAChannel(sb.hw.dma8)->Register_Callback(DSP_E2_DMA_CallBack); } break; case 0xe3: /* DSP Copyright */ { DSP_FlushData(); for (size_t i=0;i<=strlen(copyright_string);i++) { DSP_AddData(copyright_string[i]); } } break; case 0xe4: /* Write Test Register */ sb.dsp.test_register=sb.dsp.in.data[0]; break; case 0xe8: /* Read Test Register */ DSP_FlushData(); DSP_AddData(sb.dsp.test_register);; break; case 0xf2: /* Trigger 8bit IRQ */ //Small delay in order to emulate the slowness of the DSP, fixes Llamatron 2012 and Lemmings 3D PIC_AddEvent(&DSP_RaiseIRQEvent,0.01f); break; case 0xf3: /* Trigger 16bit IRQ */ DSP_SB16_ONLY; SB_RaiseIRQ(SB_IRQ_16); break; case 0xf8: /* Undocumented, pre-SB16 only */ DSP_FlushData(); DSP_AddData(0); break; case 0x30: case 0x31: LOG(LOG_SB,LOG_ERROR)("DSP:Unimplemented MIDI I/O command %2X",sb.dsp.cmd); break; case 0x34: case 0x35: case 0x36: case 0x37: DSP_SB2_ABOVE; LOG(LOG_SB,LOG_ERROR)("DSP:Unimplemented MIDI UART command %2X",sb.dsp.cmd); break; case 0x7f: case 0x1f: DSP_SB2_ABOVE; LOG(LOG_SB,LOG_ERROR)("DSP:Unimplemented auto-init DMA ADPCM command %2X",sb.dsp.cmd); break; case 0x20: DSP_AddData(0x7f); // fake silent input for Creative parrot break; case 0x2c: case 0x98: case 0x99: /* Documented only for DSP 2.x and 3.x */ case 0xa0: case 0xa8: /* Documented only for DSP 3.x */ LOG(LOG_SB,LOG_ERROR)("DSP:Unimplemented input command %2X",sb.dsp.cmd); break; case 0xf9: /* SB16 ASP ??? 
*/ if (sb.type == SBT_16) { LOG(LOG_SB,LOG_NORMAL)("SB16 ASP unknown function %x",sb.dsp.in.data[0]); // just feed it what it expects switch (sb.dsp.in.data[0]) { case 0x0b: DSP_AddData(0x00); break; case 0x0e: DSP_AddData(0xff); break; case 0x0f: DSP_AddData(0x07); break; case 0x23: DSP_AddData(0x00); break; case 0x24: DSP_AddData(0x00); break; case 0x2b: DSP_AddData(0x00); break; case 0x2c: DSP_AddData(0x00); break; case 0x2d: DSP_AddData(0x00); break; case 0x37: DSP_AddData(0x38); break; default: DSP_AddData(0x00); break; } } else { LOG(LOG_SB,LOG_NORMAL)("SB16 ASP unknown function %X",sb.dsp.cmd); } break; default: LOG(LOG_SB,LOG_ERROR)("DSP:Unhandled (undocumented) command %2X",sb.dsp.cmd); break; } sb.dsp.cmd=DSP_NO_COMMAND; sb.dsp.cmd_len=0; sb.dsp.in.pos=0; } static void DSP_DoWrite(Bit8u val) { switch (sb.dsp.cmd) { case DSP_NO_COMMAND: sb.dsp.cmd=val; if (sb.type == SBT_16) sb.dsp.cmd_len=DSP_cmd_len_sb16[val]; else sb.dsp.cmd_len=DSP_cmd_len_sb[val]; sb.dsp.in.pos=0; if (!sb.dsp.cmd_len) DSP_DoCommand(); break; default: sb.dsp.in.data[sb.dsp.in.pos]=val; sb.dsp.in.pos++; if (sb.dsp.in.pos>=sb.dsp.cmd_len) DSP_DoCommand(); } } static Bit8u DSP_ReadData(void) { /* Static so it repeats the last value on succesive reads (JANGLE DEMO) */ if (sb.dsp.out.used) { sb.dsp.out.lastval=sb.dsp.out.data[sb.dsp.out.pos]; sb.dsp.out.pos++; if (sb.dsp.out.pos>=DSP_BUFSIZE) sb.dsp.out.pos-=DSP_BUFSIZE; sb.dsp.out.used--; } return sb.dsp.out.lastval; } //The soundblaster manual says 2.0 Db steps but we'll go for a bit less #define CALCVOL(_VAL) (float)pow(10.0f,((float)(31-_VAL)*-1.3f)/20) static void CTMIXER_UpdateVolumes(void) { if (!sb.mixer.enabled) return; MixerChannel * chan; //adjust to get linear master volume slider in trackers chan=MIXER_FindChannel("SB"); if (chan) chan->SetVolume(float(sb.mixer.master[0])/31.0f*CALCVOL(sb.mixer.dac[0]), float(sb.mixer.master[1])/31.0f*CALCVOL(sb.mixer.dac[1])); chan=MIXER_FindChannel("FM"); if (chan) chan->SetVolume(float(sb.mixer.master[0])/31.0f*CALCVOL(sb.mixer.fm[0]), float(sb.mixer.master[1])/31.0f*CALCVOL(sb.mixer.fm[1])); chan=MIXER_FindChannel("CDAUDIO"); if (chan) chan->SetVolume(float(sb.mixer.master[0])/31.0f*CALCVOL(sb.mixer.cda[0]), float(sb.mixer.master[1])/31.0f*CALCVOL(sb.mixer.cda[1])); } static void CTMIXER_Reset(void) { sb.mixer.fm[0]= sb.mixer.fm[1]= sb.mixer.cda[0]= sb.mixer.cda[1]= sb.mixer.dac[0]= sb.mixer.dac[1]=31; sb.mixer.master[0]= sb.mixer.master[1]=31; CTMIXER_UpdateVolumes(); } #define SETPROVOL(_WHICH_,_VAL_) \ _WHICH_[0]= ((((_VAL_) & 0xf0) >> 3)|(sb.type==SBT_16 ? 1:3)); \ _WHICH_[1]= ((((_VAL_) & 0x0f) << 1)|(sb.type==SBT_16 ? 1:3)); \ #define MAKEPROVOL(_WHICH_) \ ((((_WHICH_[0] & 0x1e) << 3) | ((_WHICH_[1] & 0x1e) >> 1)) | \ ((sb.type==SBT_PRO1 || sb.type==SBT_PRO2) ? 
0x11:0)) static void DSP_ChangeStereo(bool stereo) { if (!sb.dma.stereo && stereo) { sb.chan->SetFreq(sb.freq/2); sb.dma.mul*=2; sb.dma.rate=(sb.freq*sb.dma.mul) >> SB_SH; sb.dma.min=(sb.dma.rate*3)/1000; } else if (sb.dma.stereo && !stereo) { sb.chan->SetFreq(sb.freq); sb.dma.mul/=2; sb.dma.rate=(sb.freq*sb.dma.mul) >> SB_SH; sb.dma.min=(sb.dma.rate*3)/1000; } sb.dma.stereo=stereo; } static void CTMIXER_Write(Bit8u val) { switch (sb.mixer.index) { case 0x00: /* Reset */ CTMIXER_Reset(); LOG(LOG_SB,LOG_WARN)("Mixer reset value %x",val); break; case 0x02: /* Master Volume (SB2 Only) */ SETPROVOL(sb.mixer.master,(val&0xf)|(val<<4)); CTMIXER_UpdateVolumes(); break; case 0x04: /* DAC Volume (SBPRO) */ SETPROVOL(sb.mixer.dac,val); CTMIXER_UpdateVolumes(); break; case 0x06: /* FM output selection, Somewhat obsolete with dual OPL SBpro + FM volume (SB2 Only) */ //volume controls both channels SETPROVOL(sb.mixer.fm,(val&0xf)|(val<<4)); CTMIXER_UpdateVolumes(); if(val&0x60) LOG(LOG_SB,LOG_WARN)("Turned FM one channel off. not implemented %X",val); //TODO Change FM Mode if only 1 fm channel is selected break; case 0x08: /* CDA Volume (SB2 Only) */ SETPROVOL(sb.mixer.cda,(val&0xf)|(val<<4)); CTMIXER_UpdateVolumes(); break; case 0x0a: /* Mic Level (SBPRO) or DAC Volume (SB2): 2-bit, 3-bit on SB16 */ if (sb.type==SBT_2) { sb.mixer.dac[0]=sb.mixer.dac[1]=((val & 0x6) << 2)|3; CTMIXER_UpdateVolumes(); } else { sb.mixer.mic=((val & 0x7) << 2)|(sb.type==SBT_16?1:3); } break; case 0x0e: /* Output/Stereo Select */ sb.mixer.stereo=(val & 0x2) > 0; sb.mixer.filtered=(val & 0x20) > 0; DSP_ChangeStereo(sb.mixer.stereo); LOG(LOG_SB,LOG_WARN)("Mixer set to %s",sb.dma.stereo ? "STEREO" : "MONO"); break; case 0x22: /* Master Volume (SBPRO) */ SETPROVOL(sb.mixer.master,val); CTMIXER_UpdateVolumes(); break; case 0x26: /* FM Volume (SBPRO) */ SETPROVOL(sb.mixer.fm,val); CTMIXER_UpdateVolumes(); break; case 0x28: /* CD Audio Volume (SBPRO) */ SETPROVOL(sb.mixer.cda,val); CTMIXER_UpdateVolumes(); break; case 0x2e: /* Line-in Volume (SBPRO) */ SETPROVOL(sb.mixer.lin,val); break; //case 0x20: /* Master Volume Left (SBPRO) ? */ case 0x30: /* Master Volume Left (SB16) */ if (sb.type==SBT_16) { sb.mixer.master[0]=val>>3; CTMIXER_UpdateVolumes(); } break; //case 0x21: /* Master Volume Right (SBPRO) ? 
*/ case 0x31: /* Master Volume Right (SB16) */ if (sb.type==SBT_16) { sb.mixer.master[1]=val>>3; CTMIXER_UpdateVolumes(); } break; case 0x32: /* DAC Volume Left (SB16) */ if (sb.type==SBT_16) { sb.mixer.dac[0]=val>>3; CTMIXER_UpdateVolumes(); } break; case 0x33: /* DAC Volume Right (SB16) */ if (sb.type==SBT_16) { sb.mixer.dac[1]=val>>3; CTMIXER_UpdateVolumes(); } break; case 0x34: /* FM Volume Left (SB16) */ if (sb.type==SBT_16) { sb.mixer.fm[0]=val>>3; CTMIXER_UpdateVolumes(); } break; case 0x35: /* FM Volume Right (SB16) */ if (sb.type==SBT_16) { sb.mixer.fm[1]=val>>3; CTMIXER_UpdateVolumes(); } break; case 0x36: /* CD Volume Left (SB16) */ if (sb.type==SBT_16) { sb.mixer.cda[0]=val>>3; CTMIXER_UpdateVolumes(); } break; case 0x37: /* CD Volume Right (SB16) */ if (sb.type==SBT_16) { sb.mixer.cda[1]=val>>3; CTMIXER_UpdateVolumes(); } break; case 0x38: /* Line-in Volume Left (SB16) */ if (sb.type==SBT_16) sb.mixer.lin[0]=val>>3; break; case 0x39: /* Line-in Volume Right (SB16) */ if (sb.type==SBT_16) sb.mixer.lin[1]=val>>3; break; case 0x3a: if (sb.type==SBT_16) sb.mixer.mic=val>>3; break; case 0x80: /* IRQ Select */ sb.hw.irq=0xff; if (val & 0x1) sb.hw.irq=2; else if (val & 0x2) sb.hw.irq=5; else if (val & 0x4) sb.hw.irq=7; else if (val & 0x8) sb.hw.irq=10; break; case 0x81: /* DMA Select */ sb.hw.dma8=0xff; sb.hw.dma16=0xff; if (val & 0x1) sb.hw.dma8=0; else if (val & 0x2) sb.hw.dma8=1; else if (val & 0x8) sb.hw.dma8=3; if (val & 0x20) sb.hw.dma16=5; else if (val & 0x40) sb.hw.dma16=6; else if (val & 0x80) sb.hw.dma16=7; LOG(LOG_SB,LOG_NORMAL)("Mixer select dma8:%x dma16:%x",sb.hw.dma8,sb.hw.dma16); break; default: if( ((sb.type == SBT_PRO1 || sb.type == SBT_PRO2) && sb.mixer.index==0x0c) || /* Input control on SBPro */ (sb.type == SBT_16 && sb.mixer.index >= 0x3b && sb.mixer.index <= 0x47)) /* New SB16 registers */ sb.mixer.unhandled[sb.mixer.index] = val; LOG(LOG_SB,LOG_WARN)("MIXER:Write %X to unhandled index %X",val,sb.mixer.index); } } static Bit8u CTMIXER_Read(void) { Bit8u ret; // if ( sb.mixer.index< 0x80) LOG_MSG("Read mixer %x",sb.mixer.index); switch (sb.mixer.index) { case 0x00: /* RESET */ return 0x00; case 0x02: /* Master Volume (SB2 Only) */ return ((sb.mixer.master[1]>>1) & 0xe); case 0x22: /* Master Volume (SBPRO) */ return MAKEPROVOL(sb.mixer.master); case 0x04: /* DAC Volume (SBPRO) */ return MAKEPROVOL(sb.mixer.dac); case 0x06: /* FM Volume (SB2 Only) + FM output selection */ return ((sb.mixer.fm[1]>>1) & 0xe); case 0x08: /* CD Volume (SB2 Only) */ return ((sb.mixer.cda[1]>>1) & 0xe); case 0x0a: /* Mic Level (SBPRO) or Voice (SB2 Only) */ if (sb.type==SBT_2) return (sb.mixer.dac[0]>>2); else return ((sb.mixer.mic >> 2) & (sb.type==SBT_16 ? 7:6)); case 0x0e: /* Output/Stereo Select */ return 0x11|(sb.mixer.stereo ? 0x02 : 0x00)|(sb.mixer.filtered ? 
0x20 : 0x00); case 0x26: /* FM Volume (SBPRO) */ return MAKEPROVOL(sb.mixer.fm); case 0x28: /* CD Audio Volume (SBPRO) */ return MAKEPROVOL(sb.mixer.cda); case 0x2e: /* Line-IN Volume (SBPRO) */ return MAKEPROVOL(sb.mixer.lin); case 0x30: /* Master Volume Left (SB16) */ if (sb.type==SBT_16) return sb.mixer.master[0]<<3; ret=0xa; break; case 0x31: /* Master Volume Right (S16) */ if (sb.type==SBT_16) return sb.mixer.master[1]<<3; ret=0xa; break; case 0x32: /* DAC Volume Left (SB16) */ if (sb.type==SBT_16) return sb.mixer.dac[0]<<3; ret=0xa; break; case 0x33: /* DAC Volume Right (SB16) */ if (sb.type==SBT_16) return sb.mixer.dac[1]<<3; ret=0xa; break; case 0x34: /* FM Volume Left (SB16) */ if (sb.type==SBT_16) return sb.mixer.fm[0]<<3; ret=0xa; break; case 0x35: /* FM Volume Right (SB16) */ if (sb.type==SBT_16) return sb.mixer.fm[1]<<3; ret=0xa; break; case 0x36: /* CD Volume Left (SB16) */ if (sb.type==SBT_16) return sb.mixer.cda[0]<<3; ret=0xa; break; case 0x37: /* CD Volume Right (SB16) */ if (sb.type==SBT_16) return sb.mixer.cda[1]<<3; ret=0xa; break; case 0x38: /* Line-in Volume Left (SB16) */ if (sb.type==SBT_16) return sb.mixer.lin[0]<<3; ret=0xa; break; case 0x39: /* Line-in Volume Right (SB16) */ if (sb.type==SBT_16) return sb.mixer.lin[1]<<3; ret=0xa; break; case 0x3a: /* Mic Volume (SB16) */ if (sb.type==SBT_16) return sb.mixer.mic<<3; ret=0xa; break; case 0x80: /* IRQ Select */ switch (sb.hw.irq) { case 2: return 0x1; case 5: return 0x2; case 7: return 0x4; case 10: return 0x8; } case 0x81: /* DMA Select */ ret=0; switch (sb.hw.dma8) { case 0:ret|=0x1;break; case 1:ret|=0x2;break; case 3:ret|=0x8;break; } switch (sb.hw.dma16) { case 5:ret|=0x20;break; case 6:ret|=0x40;break; case 7:ret|=0x80;break; } return ret; case 0x82: /* IRQ Status */ return (sb.irq.pending_8bit ? 0x1 : 0) | (sb.irq.pending_16bit ? 0x2 : 0) | ((sb.type == SBT_16) ? 
0x20 : 0); default: if ( ((sb.type == SBT_PRO1 || sb.type == SBT_PRO2) && sb.mixer.index==0x0c) || /* Input control on SBPro */ (sb.type == SBT_16 && sb.mixer.index >= 0x3b && sb.mixer.index <= 0x47)) /* New SB16 registers */ ret = sb.mixer.unhandled[sb.mixer.index]; else ret=0xa; LOG(LOG_SB,LOG_WARN)("MIXER:Read from unhandled index %X",sb.mixer.index); } return ret; } static Bitu read_sb(Bitu port,Bitu /*iolen*/) { switch (port-sb.hw.base) { case MIXER_INDEX: return sb.mixer.index; case MIXER_DATA: return CTMIXER_Read(); case DSP_READ_DATA: return DSP_ReadData(); case DSP_READ_STATUS: //TODO See for high speed dma :) if (sb.irq.pending_8bit) { sb.irq.pending_8bit=false; PIC_DeActivateIRQ(sb.hw.irq); } if (sb.dsp.out.used) return 0xff; else return 0x7f; case DSP_ACK_16BIT: sb.irq.pending_16bit=false; break; case DSP_WRITE_STATUS: switch (sb.dsp.state) { case DSP_S_NORMAL: sb.dsp.write_busy++; if (sb.dsp.write_busy & 8) return 0xff; return 0x7f; case DSP_S_RESET: case DSP_S_RESET_WAIT: return 0xff; } return 0xff; case DSP_RESET: return 0xff; default: LOG(LOG_SB,LOG_NORMAL)("Unhandled read from SB Port %4X",port); break; } return 0xff; } static void write_sb(Bitu port,Bitu val,Bitu /*iolen*/) { Bit8u val8=(Bit8u)(val&0xff); switch (port-sb.hw.base) { case DSP_RESET: DSP_DoReset(val8); break; case DSP_WRITE_DATA: DSP_DoWrite(val8); break; case MIXER_INDEX: sb.mixer.index=val8; break; case MIXER_DATA: CTMIXER_Write(val8); break; default: LOG(LOG_SB,LOG_NORMAL)("Unhandled write to SB Port %4X",port); break; } } static void adlib_gusforward(Bitu /*port*/,Bitu val,Bitu /*iolen*/) { adlib_commandreg=(Bit8u)(val&0xff); } bool SB_Get_Address(Bitu& sbaddr, Bitu& sbirq, Bitu& sbdma) { sbaddr=0; sbirq =0; sbdma =0; if (sb.type == SBT_NONE) return false; else { sbaddr=sb.hw.base; sbirq =sb.hw.irq; sbdma = sb.hw.dma8; return true; } } static void SBLASTER_CallBack(Bitu len) { switch (sb.mode) { case MODE_NONE: case MODE_DMA_PAUSE: case MODE_DMA_MASKED: sb.chan->AddSilence(); break; case MODE_DAC: // GenerateDACSound(len); // break; if (!sb.dac.used) { sb.mode=MODE_NONE; return; } sb.chan->AddStretched(sb.dac.used,sb.dac.data); sb.dac.used=0; break; case MODE_DMA: len*=sb.dma.mul; if (len&SB_SH_MASK) len+=1 << SB_SH; len>>=SB_SH; if (len>sb.dma.left) len=sb.dma.left; GenerateDMASound(len); break; } } class SBLASTER: public Module_base { private: /* Data */ IO_ReadHandleObject ReadHandler[0x10]; IO_WriteHandleObject WriteHandler[0x10]; AutoexecObject autoexecline; MixerObject MixerChan; OPL_Mode oplmode; /* Support Functions */ void Find_Type_And_Opl(Section_prop* config,SB_TYPES& type, OPL_Mode& opl_mode){ const char * sbtype=config->Get_string("sbtype"); if (!strcasecmp(sbtype,"sb1")) type=SBT_1; else if (!strcasecmp(sbtype,"sb2")) type=SBT_2; else if (!strcasecmp(sbtype,"sbpro1")) type=SBT_PRO1; else if (!strcasecmp(sbtype,"sbpro2")) type=SBT_PRO2; else if (!strcasecmp(sbtype,"sb16")) type=SBT_16; else if (!strcasecmp(sbtype,"gb")) type=SBT_GB; else if (!strcasecmp(sbtype,"none")) type=SBT_NONE; else type=SBT_16; if (type==SBT_16) { if ((!IS_EGAVGA_ARCH) || !SecondDMAControllerAvailable()) type=SBT_PRO2; } /* OPL/CMS Init */ const char * omode=config->Get_string("oplmode"); if (!strcasecmp(omode,"none")) opl_mode=OPL_none; else if (!strcasecmp(omode,"cms")) opl_mode=OPL_cms; else if (!strcasecmp(omode,"opl2")) opl_mode=OPL_opl2; else if (!strcasecmp(omode,"dualopl2")) opl_mode=OPL_dualopl2; else if (!strcasecmp(omode,"opl3")) opl_mode=OPL_opl3; else if (!strcasecmp(omode,"opl3gold")) 
opl_mode=OPL_opl3gold; /* Else assume auto */ else { switch (type) { case SBT_NONE: opl_mode=OPL_none; break; case SBT_GB: opl_mode=OPL_cms; break; case SBT_1: case SBT_2: opl_mode=OPL_opl2; break; case SBT_PRO1: opl_mode=OPL_dualopl2; break; case SBT_PRO2: case SBT_16: opl_mode=OPL_opl3; break; } } } public: SBLASTER(Section* configuration):Module_base(configuration) { Bitu i; Section_prop * section=static_cast<Section_prop *>(configuration); sb.hw.base=section->Get_hex("sbbase"); sb.hw.irq=section->Get_int("irq"); Bitu dma8bit=section->Get_int("dma"); if (dma8bit>0xff) dma8bit=0xff; sb.hw.dma8=(Bit8u)(dma8bit&0xff); Bitu dma16bit=section->Get_int("hdma"); if (dma16bit>0xff) dma16bit=0xff; sb.hw.dma16=(Bit8u)(dma16bit&0xff); sb.mixer.enabled=section->Get_bool("sbmixer"); sb.mixer.stereo=false; Find_Type_And_Opl(section,sb.type,oplmode); switch (oplmode) { case OPL_none: WriteHandler[0].Install(0x388,adlib_gusforward,IO_MB); break; case OPL_cms: WriteHandler[0].Install(0x388,adlib_gusforward,IO_MB); CMS_Init(section); break; case OPL_opl2: CMS_Init(section); // fall-through case OPL_dualopl2: case OPL_opl3: case OPL_opl3gold: OPL_Init(section,oplmode); break; } if (sb.type==SBT_NONE || sb.type==SBT_GB) return; sb.chan=MixerChan.Install(&SBLASTER_CallBack,22050,"SB"); sb.dsp.state=DSP_S_NORMAL; sb.dsp.out.lastval=0xaa; sb.dma.chan=NULL; for (i=4;i<=0xf;i++) { if (i==8 || i==9) continue; //Disable mixer ports for lower soundblaster if ((sb.type==SBT_1 || sb.type==SBT_2) && (i==4 || i==5)) continue; ReadHandler[i].Install(sb.hw.base+i,read_sb,IO_MB); WriteHandler[i].Install(sb.hw.base+i,write_sb,IO_MB); } for (i=0;i<256;i++) ASP_regs[i] = 0; ASP_regs[5] = 0x01; ASP_regs[9] = 0xf8; DSP_Reset(); CTMIXER_Reset(); // The documentation does not specify if SB gets initialized with the speaker enabled // or disabled. Real SBPro2 has it disabled. sb.speaker=false; // On SB16 the speaker flag does not affect actual speaker state. if (sb.type == SBT_16) sb.chan->Enable(true); else sb.chan->Enable(false); // Create set blaster line ostringstream temp; temp << "SET BLASTER=A" << setw(3)<< hex << sb.hw.base << " I" << dec << (Bitu)sb.hw.irq << " D" << (Bitu)sb.hw.dma8; if (sb.type==SBT_16) temp << " H" << (Bitu)sb.hw.dma16; temp << " T" << static_cast<unsigned int>(sb.type) << ends; autoexecline.Install(temp.str()); /* Soundblaster midi interface */ if (!MIDI_Available()) sb.midi = false; else sb.midi = true; } ~SBLASTER() { switch (oplmode) { case OPL_none: break; case OPL_cms: CMS_ShutDown(m_configuration); break; case OPL_opl2: CMS_ShutDown(m_configuration); // fall-through case OPL_dualopl2: case OPL_opl3: case OPL_opl3gold: OPL_ShutDown(m_configuration); break; } if (sb.type==SBT_NONE || sb.type==SBT_GB) return; DSP_Reset(); // Stop everything } }; //End of SBLASTER class static SBLASTER* test; void SBLASTER_ShutDown(Section* /*sec*/) { delete test; } void SBLASTER_Init(Section* sec) { test = new SBLASTER(sec); sec->AddDestroyFunction(&SBLASTER_ShutDown,true); }
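Two mechanisms in the file above may be easier to see in isolation. First, DSP_DoWrite() is a small state machine: the first byte written to the DSP port selects a command, the DSP_cmd_len table gives the number of parameter bytes that follow, and DSP_DoCommand() fires once they have all arrived (immediately for zero-parameter commands). A sketch of that dispatch in plain C (cmd_len, dsp_write and the other names are illustrative stand-ins, not the emulator's symbols):

#include <stdio.h>

static unsigned char cmd_len[256];	/* stand-in for DSP_cmd_len_sb / DSP_cmd_len_sb16 */
static unsigned char cur_cmd, in_pos;
static unsigned char in_data[8];
static int have_cmd;

static void do_command(void)
{
	printf("run command %02X with %d parameter byte(s)\n", cur_cmd, (int)in_pos);
	have_cmd = 0;	/* back to the DSP_NO_COMMAND state */
}

static void dsp_write(unsigned char val)
{
	if (!have_cmd) {	/* first byte selects the command */
		cur_cmd = val;
		in_pos = 0;
		have_cmd = 1;
		if (cmd_len[cur_cmd] == 0)
			do_command();
	} else {		/* collect parameter bytes until the table is satisfied */
		in_data[in_pos++] = val;
		if (in_pos >= cmd_len[cur_cmd])
			do_command();
	}
}

int main(void)
{
	cmd_len[0x40] = 1;	/* 0x40 "set time constant" takes one byte */
	dsp_write(0x40);
	dsp_write(211);		/* time constant for roughly 22050 Hz */
	dsp_write(0xd1);	/* 0xd1 "enable speaker" takes no parameters */
	return 0;
}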
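Second, DSP command 0x40 above recovers the sample rate from the classic Sound Blaster time constant via DSP_ChangeRate(1000000 / (256 - tc)); DOS programs computed the constant as 256 - 1000000/rate. A minimal round trip of that arithmetic (helper names are illustrative):

#include <stdio.h>

static unsigned tc_from_rate(unsigned rate)	/* what a DOS program sends */
{
	return 256 - 1000000 / rate;
}

static unsigned rate_from_tc(unsigned tc)	/* what DSP_ChangeRate() recovers */
{
	return 1000000 / (256 - tc);
}

int main(void)
{
	unsigned tc = tc_from_rate(22050);	/* 256 - 45 = 211 */
	/* Integer truncation makes the round trip inexact: 22050 comes back as ~22222 Hz */
	printf("tc=%u -> rate=%u\n", tc, rate_from_tc(tc));
	return 0;
}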
gpl-2.0
antmicro/ecos-openrisc
packages/devs/flash/mips/atlas/current/src/atlas_flash.c
11
4743
//========================================================================== // // atlas_flash.c // // Flash programming // //========================================================================== // ####ECOSGPLCOPYRIGHTBEGIN#### // ------------------------------------------- // This file is part of eCos, the Embedded Configurable Operating System. // Copyright (C) 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc. // // eCos is free software; you can redistribute it and/or modify it under // the terms of the GNU General Public License as published by the Free // Software Foundation; either version 2 or (at your option) any later // version. // // eCos is distributed in the hope that it will be useful, but WITHOUT // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License // for more details. // // You should have received a copy of the GNU General Public License // along with eCos; if not, write to the Free Software Foundation, Inc., // 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. // // As a special exception, if other files instantiate templates or use // macros or inline functions from this file, or you compile this file // and link it with other works to produce a work based on this file, // this file does not by itself cause the resulting work to be covered by // the GNU General Public License. However the source code for this file // must still be made available in accordance with section (3) of the GNU // General Public License v2. // // This exception does not invalidate any other reasons why a work based // on this file might be covered by the GNU General Public License. // ------------------------------------------- // ####ECOSGPLCOPYRIGHTEND#### //========================================================================== //#####DESCRIPTIONBEGIN#### // // Author(s): gthomas // Contributors: gthomas, msalter // Date: 2000-12-06 // Purpose: // Description: // //####DESCRIPTIONEND#### // //========================================================================== #include <pkgconf/hal.h> #include <cyg/hal/hal_arch.h> #include <cyg/infra/diag.h> #include <cyg/io/flash.h> #include <cyg/io/flash_dev.h> #include "flash.h" #include <string.h> #define _si(p) ((p[1]<<8)|p[0]) int flash_hwr_init(void) { struct FLASH_query data, *qp; int num_regions, region_size; flash_dev_query(&data); qp = &data; if (/*(qp->manuf_code == FLASH_Intel_code) && */ (strncmp(qp->id, "QRY", 3) == 0)) { num_regions = _si(qp->num_regions)+1; region_size = _si(qp->region_size)*256; flash_info.block_size = region_size*2; // Pairs of chips in parallel flash_info.blocks = num_regions*2; // and pairs of chips in serial flash_info.start = (void *)0x9c000000; flash_info.end = (void *)0x9e000000; return FLASH_ERR_OK; } else { (*flash_info.pf)("Can't identify FLASH, sorry\n"); diag_dump_buf((void*)&data, sizeof(data)); return FLASH_ERR_HWR; } } // Map a hardware status to a package error int flash_hwr_map_error(int err) { if (err & 0x007E007E) { (*flash_info.pf)("Err = %x\n", err); if (err & 0x00100010) { return FLASH_ERR_PROGRAM; } else if (err & 0x00200020) { return FLASH_ERR_ERASE; } else return FLASH_ERR_HWR; // FIXME } else { return FLASH_ERR_OK; } } // See if a range of FLASH addresses overlaps currently running code bool flash_code_overlaps(void *start, void *end) { extern char _stext[], _etext[]; unsigned long p_stext, pstart, p_etext, pend; p_stext = CYGARC_PHYSICAL_ADDRESS((unsigned long)&_stext); 
p_etext = CYGARC_PHYSICAL_ADDRESS((unsigned long)&_etext); // if _stext/_etext in boot shadow region, convert to // system flash address if ((p_stext >= 0x1fc00000) && (p_etext <= 0x20000000)) { p_stext -= 0x02000000; p_etext -= 0x02000000; } pstart = CYGARC_PHYSICAL_ADDRESS((unsigned long)start); pend = CYGARC_PHYSICAL_ADDRESS((unsigned long)end); return (((p_stext >= pstart) && (p_stext < pend)) || ((p_etext >= pstart) && (p_etext < pend))); }
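A short note on the query decoding above: the _si(p) macro assembles a little-endian 16-bit value from two adjacent bytes, and flash_hwr_init() then applies the CFI conventions that the erase-region count is stored minus one and the region size in units of 256 bytes. A standalone sketch of that decoding (the sample bytes below are made up for illustration, not real query data):

#include <stdio.h>

#define _si(p) ((p[1]<<8)|p[0])

int main(void)
{
	/* Hypothetical query fields, least significant byte first */
	unsigned char num_regions_raw[2] = { 0x00, 0x00 };	/* stored 0 -> 1 erase region */
	unsigned char region_size_raw[2] = { 0x00, 0x01 };	/* 0x0100 * 256 = 64 KiB */

	int num_regions = _si(num_regions_raw) + 1;
	int region_size = _si(region_size_raw) * 256;

	printf("%d erase region(s), %d bytes each\n", num_regions, region_size);
	return 0;
}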
gpl-2.0
Paolo-Maffei/rt-thread-stm32f4discovery
components/dfs/filesystems/jffs2/src/read.c
11
7161
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright (C) 2001-2003 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * * $Id: read.c,v 1.41 2005/07/22 10:32:08 dedekind Exp $ * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/crc32.h> #include <linux/pagemap.h> #include <linux/mtd/mtd.h> #include <linux/compiler.h> #include "nodelist.h" #include "compr.h" int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fd, unsigned char *buf, int ofs, int len) { struct jffs2_raw_inode *ri; size_t readlen; uint32_t crc; unsigned char *decomprbuf = NULL; unsigned char *readbuf = NULL; int ret = 0,i=0; ri = jffs2_alloc_raw_inode(); if (!ri) return -ENOMEM; ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (unsigned char *)ri); if (ret) { jffs2_free_raw_inode(ri); printk(KERN_WARNING "Error reading node from 0x%08x: %d\n", ref_offset(fd->raw), ret); return ret; } if (readlen != sizeof(*ri)) { jffs2_free_raw_inode(ri); printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", ref_offset(fd->raw), sizeof(*ri), readlen); return -EIO; } crc = crc32(0, ri, sizeof(*ri)-8); D1(printk(KERN_DEBUG "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n", ref_offset(fd->raw), je32_to_cpu(ri->node_crc), crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize), je32_to_cpu(ri->offset), buf)); if (crc != je32_to_cpu(ri->node_crc)) { printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n", je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw)); ret = -EIO; goto out_ri; } /* There was a bug where we wrote hole nodes out with csize/dsize swapped. Deal with it */ if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) && je32_to_cpu(ri->csize)) { ri->dsize = ri->csize; ri->csize = cpu_to_je32(0); } D1(if(ofs + len > je32_to_cpu(ri->dsize)) { printk(KERN_WARNING "jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n", len, ofs, je32_to_cpu(ri->dsize)); ret = -EINVAL; goto out_ri; }); if (ri->compr == JFFS2_COMPR_ZERO) { memset(buf, 0, len); goto out_ri; } /* Cases: Reading whole node and it's uncompressed - read directly to buffer provided, check CRC. 
Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy */ if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) { readbuf = buf; } else { readbuf = kmalloc(je32_to_cpu(ri->csize), GFP_KERNEL); if (!readbuf) { ret = -ENOMEM; goto out_ri; } } if (ri->compr != JFFS2_COMPR_NONE) { if (len < je32_to_cpu(ri->dsize)) { decomprbuf = kmalloc(je32_to_cpu(ri->dsize), GFP_KERNEL); if (!decomprbuf) { ret = -ENOMEM; goto out_readbuf; } } else { decomprbuf = buf; } } else { decomprbuf = readbuf; } D2(printk(KERN_DEBUG "Read %d bytes to %p\n", je32_to_cpu(ri->csize), readbuf)); ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri), je32_to_cpu(ri->csize), &readlen, readbuf); if (!ret && readlen != je32_to_cpu(ri->csize)) ret = -EIO; if (ret) goto out_decomprbuf; crc = crc32(0, readbuf, je32_to_cpu(ri->csize)); if (crc != je32_to_cpu(ri->data_crc)) { printk(KERN_WARNING "Data CRC %08x != calculated CRC %08x for node at %08x\n", je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw)); ret = -EIO; goto out_decomprbuf; } D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc)); if (ri->compr != JFFS2_COMPR_NONE) { D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n", je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf)); //add for debug // for (i=0; i<je32_to_cpu(ri->csize); i++) // { // printf("%02x ", readbuf[i]); // if( (i+1) % 16 == 0) // printf("\n"); // } //end debug ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); if (ret) { printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret); goto out_decomprbuf; } } if (len < je32_to_cpu(ri->dsize)) { memcpy(buf, decomprbuf+ofs, len); } out_decomprbuf: if(decomprbuf != buf && decomprbuf != readbuf) kfree(decomprbuf); out_readbuf: if(readbuf != buf) kfree(readbuf); out_ri: jffs2_free_raw_inode(ri); return ret; } int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, unsigned char *buf, uint32_t offset, uint32_t len) { uint32_t end = offset + len; struct jffs2_node_frag *frag; int ret; D1(printk(KERN_DEBUG "jffs2_read_inode_range: ino #%u, range 0x%08x-0x%08x\n", f->inocache->ino, offset, offset+len)); frag = jffs2_lookup_node_frag(&f->fragtree, offset); /* XXX FIXME: Where a single physical node actually shows up in two frags, we read it twice. Don't do that. */ /* Now we're pointing at the first frag which overlaps our page */ while(offset < end) { D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end)); if (unlikely(!frag || frag->ofs > offset)) { uint32_t holesize = end - offset; if (frag) { D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. 
frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); holesize = min(holesize, frag->ofs - offset); } D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize)); memset(buf, 0, holesize); buf += holesize; offset += holesize; continue; } else if (unlikely(!frag->node)) { uint32_t holeend = min(end, frag->ofs + frag->size); D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size)); memset(buf, 0, holeend - offset); buf += holeend - offset; offset = holeend; frag = frag_next(frag); continue; } else { uint32_t readlen; uint32_t fragofs; /* offset within the frag to start reading */ fragofs = offset - frag->ofs; readlen = min(frag->size - fragofs, end - offset); D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n", frag->ofs+fragofs, frag->ofs+fragofs+readlen, ref_offset(frag->node->raw), ref_flags(frag->node->raw))); ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen); D2(printk(KERN_DEBUG "node read done\n")); if (ret) { D1(printk(KERN_DEBUG"jffs2_read_inode_range error %d\n",ret)); memset(buf, 0, readlen); return ret; } buf += readlen; offset += readlen; frag = frag_next(frag); D2(printk(KERN_DEBUG "node read was OK. Looping\n")); } } return 0; }
gpl-2.0
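The four-case comment in jffs2_read_dnode() is the heart of the routine. Below is a minimal, self-contained sketch -- independent of the JFFS2 sources, with invented names -- of the buffer-selection rule it describes: read directly into the caller's buffer only when no decompression and no partial copy is needed, otherwise stage through temporary buffers.

/* Sketch of the read-plan decision; names are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { READ_DIRECT, READ_STAGE_COPY, READ_STAGE_DECOMPRESS,
               READ_STAGE_DECOMPRESS_COPY } read_plan_t;

static read_plan_t plan_read(bool compressed, unsigned len, unsigned dsize)
{
    if (!compressed)
        return (len == dsize) ? READ_DIRECT : READ_STAGE_COPY;
    /* compressed: csize bytes must always land in a staging buffer first */
    return (len == dsize) ? READ_STAGE_DECOMPRESS : READ_STAGE_DECOMPRESS_COPY;
}

int main(void)
{
    printf("%d %d %d %d\n",
           plan_read(false, 4096, 4096),  /* whole, uncompressed -> direct   */
           plan_read(false, 1024, 4096),  /* partial, uncompressed -> copy   */
           plan_read(true, 4096, 4096),   /* whole, compressed -> decompress */
           plan_read(true, 1024, 4096));  /* partial, compressed -> both     */
    return 0;
}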
juj/emscripten-scummvm
backends/platform/wince/CEException.cpp
11
5086
/* ScummVM - Graphic Adventure Engine * * ScummVM is the legal property of its developers, whose names * are too numerous to list here. Please refer to the COPYRIGHT * file distributed with this source distribution. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * */ #include "CEException.h" void CEException::writeString(HANDLE file, char *data) { DWORD dummy; WriteFile(file, data, strlen(data), &dummy, NULL); WriteFile(file, "\r\n", 2, &dummy, NULL); } void CEException::writeBreak(HANDLE file) { char tempo[100]; int i; memset(tempo, 0, sizeof(tempo)); for (i = 0; i < 40; i++) tempo[i] = '-'; writeString(file, tempo); } void CEException::dumpContext(HANDLE file, HANDLE hProcess, CONTEXT *context) { char tempo[200]; unsigned char memoryDump[100]; DWORD size; unsigned int i; #ifdef ARM writeBreak(file); writeString(file, "Context dump"); sprintf(tempo, "R0=%.8x R1=%.8x R2=%.8x R3=%.8x R4=%.8x", context->R0, context->R1, context->R2, context->R3, context->R4); writeString(file, tempo); sprintf(tempo, "R5=%.8x R6=%.8x R7=%.8x R8=%.8x R9=%.8x", context->R5, context->R6, context->R7, context->R8, context->R9); writeString(file, tempo); sprintf(tempo, "R10=%.8x R11=%.8x R12=%.8x", context->R10, context->R11, context->R12); writeString(file, tempo); sprintf(tempo, "Sp=%.8x Lr=%.8x Pc=%.8x Psr=%.8x", context->Sp, context->Lr, context->Pc, context->Psr); writeString(file, tempo); writeBreak(file); sprintf(tempo, "Memory dump at %.8x", context->Pc - (sizeof(memoryDump) / 2)); writeString(file, tempo); if (ReadProcessMemory(hProcess, (LPCVOID)(context->Pc - (sizeof(memoryDump) / 2)), memoryDump, sizeof(memoryDump), &size)) { for (i = 0; i < size; i += 8) { int j; char digit[4]; int max; max = size - i; if (max > 8) max = 8; tempo[0] = '\0'; for (j = 0; j < max; j++) { sprintf(digit, "%.2x ", memoryDump[i + j]); strcat(tempo, digit); } writeString(file, tempo); } } #else writeBreak(file); writeString(file, "Context dump only available on ARM devices"); #endif } void CEException::dumpException(HANDLE file, EXCEPTION_RECORD *exceptionRecord) { char tempo[200]; char exceptionName[50]; unsigned int i; #if (_WIN32_WCE >= 300) writeBreak(file); switch (exceptionRecord->ExceptionCode) { case EXCEPTION_ACCESS_VIOLATION : strcpy(exceptionName, "Access Violation"); break; case EXCEPTION_ARRAY_BOUNDS_EXCEEDED : strcpy(exceptionName, "Array Bounds Exceeded"); break; case EXCEPTION_DATATYPE_MISALIGNMENT : strcpy(exceptionName, "Datatype Misalignment"); break; case EXCEPTION_IN_PAGE_ERROR : strcpy(exceptionName, "In Page Error"); break; case EXCEPTION_INT_DIVIDE_BY_ZERO : strcpy(exceptionName, "Int Divide By Zero"); break; case EXCEPTION_INT_OVERFLOW : strcpy(exceptionName, "Int Overflow"); break; case EXCEPTION_STACK_OVERFLOW : strcpy(exceptionName, "Stack Overflow"); break; default: sprintf(exceptionName, "%.8x", exceptionRecord->ExceptionCode); break; } sprintf(tempo, 
"Exception %s Flags %.8x Address %.8x", exceptionName, exceptionRecord->ExceptionFlags, exceptionRecord->ExceptionAddress); writeString(file, tempo); if (exceptionRecord->NumberParameters) { for (i = 0; i < exceptionRecord->NumberParameters; i++) { sprintf(tempo, "Parameter %d %.8x", i, exceptionRecord->ExceptionInformation[i]); writeString(file, tempo); } } if (exceptionRecord->ExceptionRecord) dumpException(file, exceptionRecord->ExceptionRecord); #else writeBreak(file); writeString(file, "Cannot get exception information on this CE version"); #endif } bool CEException::writeException(TCHAR *path, EXCEPTION_POINTERS *exceptionPointers) { HANDLE dumpFile; TCHAR dumpFileName[MAX_PATH]; SYSTEMTIME systemTime; GetSystemTime(&systemTime); wsprintf(dumpFileName, TEXT("%s_%.2d_%.2d_%.4d_%.2d_%.2d_%.2d.txt"), path, systemTime.wDay, systemTime.wMonth, systemTime.wYear, systemTime.wHour, systemTime.wMinute, systemTime.wSecond); dumpFile = CreateFile(dumpFileName, GENERIC_READ | GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if (dumpFile == INVALID_HANDLE_VALUE) return false; dumpException(dumpFile, exceptionPointers->ExceptionRecord); dumpContext(dumpFile, GetCurrentProcess(), exceptionPointers->ContextRecord); CloseHandle(dumpFile); return true; }
gpl-2.0
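dumpContext() above formats its memory dump eight bytes per line by sprintf-ing hex digits into a scratch buffer. A free-standing sketch of that formatting loop follows; the buffer sizes and sample data are illustrative, not taken from the ScummVM sources.

/* Sketch of an 8-bytes-per-line hex dump built with sprintf/strcat. */
#include <stdio.h>
#include <string.h>

static void hex_dump(const unsigned char *data, unsigned size)
{
    char line[64];   /* 8 bytes x "xx " = 24 chars, comfortably inside 64 */
    char digit[4];
    unsigned i, j, max;

    for (i = 0; i < size; i += 8) {
        max = size - i;
        if (max > 8)
            max = 8;
        line[0] = '\0';
        for (j = 0; j < max; j++) {
            sprintf(digit, "%.2x ", data[i + j]);
            strcat(line, digit);
        }
        puts(line);  /* one line of up to 8 hex bytes */
    }
}

int main(void)
{
    unsigned char sample[13] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    hex_dump(sample, sizeof(sample));
    return 0;
}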
Pingmin/linux
drivers/firmware/qcom_scm.c
11
17561
// SPDX-License-Identifier: GPL-2.0-only /* * Qualcomm SCM driver * * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/cpumask.h> #include <linux/export.h> #include <linux/dma-direct.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/types.h> #include <linux/qcom_scm.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/clk.h> #include <linux/reset-controller.h> #include "qcom_scm.h" static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT); module_param(download_mode, bool, 0); #define SCM_HAS_CORE_CLK BIT(0) #define SCM_HAS_IFACE_CLK BIT(1) #define SCM_HAS_BUS_CLK BIT(2) struct qcom_scm { struct device *dev; struct clk *core_clk; struct clk *iface_clk; struct clk *bus_clk; struct reset_controller_dev reset; u64 dload_mode_addr; }; struct qcom_scm_current_perm_info { __le32 vmid; __le32 perm; __le64 ctx; __le32 ctx_size; __le32 unused; }; struct qcom_scm_mem_map_info { __le64 mem_addr; __le64 mem_size; }; static struct qcom_scm *__scm; static int qcom_scm_clk_enable(void) { int ret; ret = clk_prepare_enable(__scm->core_clk); if (ret) goto bail; ret = clk_prepare_enable(__scm->iface_clk); if (ret) goto disable_core; ret = clk_prepare_enable(__scm->bus_clk); if (ret) goto disable_iface; return 0; disable_iface: clk_disable_unprepare(__scm->iface_clk); disable_core: clk_disable_unprepare(__scm->core_clk); bail: return ret; } static void qcom_scm_clk_disable(void) { clk_disable_unprepare(__scm->core_clk); clk_disable_unprepare(__scm->iface_clk); clk_disable_unprepare(__scm->bus_clk); } /** * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus * @entry: Entry point function for the cpus * @cpus: The cpumask of cpus that will use the entry point * * Set the cold boot address of the cpus. Any cpu outside the supported * range would be removed from the cpu present mask. */ int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) { return __qcom_scm_set_cold_boot_addr(entry, cpus); } EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); /** * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus * @entry: Entry point function for the cpus * @cpus: The cpumask of cpus that will use the entry point * * Set the Linux entry point for the SCM to transfer control to when coming * out of a power down. CPU power down may be executed on cpuidle or hotplug. */ int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) { return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus); } EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); /** * qcom_scm_cpu_power_down() - Power down the cpu * @flags - Flags to flush cache * * This is an end point to power down cpu. If there was a pending interrupt, * the control would return from this function, otherwise, the cpu jumps to the * warm boot entry point set for this cpu upon reset. */ void qcom_scm_cpu_power_down(u32 flags) { __qcom_scm_cpu_power_down(flags); } EXPORT_SYMBOL(qcom_scm_cpu_power_down); /** * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. * * Return true if HDCP is supported, false if not. */ bool qcom_scm_hdcp_available(void) { int ret = qcom_scm_clk_enable(); if (ret) return ret; ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP); qcom_scm_clk_disable(); return ret > 0 ? 
true : false; } EXPORT_SYMBOL(qcom_scm_hdcp_available); /** * qcom_scm_hdcp_req() - Send HDCP request. * @req: HDCP request array * @req_cnt: HDCP request array count * @resp: response buffer passed to SCM * * Write HDCP register(s) through SCM. */ int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) { int ret = qcom_scm_clk_enable(); if (ret) return ret; ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp); qcom_scm_clk_disable(); return ret; } EXPORT_SYMBOL(qcom_scm_hdcp_req); /** * qcom_scm_pas_supported() - Check if the peripheral authentication service is * available for the given peripherial * @peripheral: peripheral id * * Returns true if PAS is supported for this peripheral, otherwise false. */ bool qcom_scm_pas_supported(u32 peripheral) { int ret; ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_IS_SUPPORTED_CMD); if (ret <= 0) return false; return __qcom_scm_pas_supported(__scm->dev, peripheral); } EXPORT_SYMBOL(qcom_scm_pas_supported); /** * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available */ bool qcom_scm_ocmem_lock_available(void) { return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_LOCK_CMD); } EXPORT_SYMBOL(qcom_scm_ocmem_lock_available); /** * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM * region to the specified initiator * * @id: tz initiator id * @offset: OCMEM offset * @size: OCMEM size * @mode: access mode (WIDE/NARROW) */ int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, u32 mode) { return __qcom_scm_ocmem_lock(__scm->dev, id, offset, size, mode); } EXPORT_SYMBOL(qcom_scm_ocmem_lock); /** * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM * region from the specified initiator * * @id: tz initiator id * @offset: OCMEM offset * @size: OCMEM size */ int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) { return __qcom_scm_ocmem_unlock(__scm->dev, id, offset, size); } EXPORT_SYMBOL(qcom_scm_ocmem_unlock); /** * qcom_scm_pas_init_image() - Initialize peripheral authentication service * state machine for a given peripheral, using the * metadata * @peripheral: peripheral id * @metadata: pointer to memory containing ELF header, program header table * and optional blob of data used for authenticating the metadata * and the rest of the firmware * @size: size of the metadata * * Returns 0 on success. */ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) { dma_addr_t mdata_phys; void *mdata_buf; int ret; /* * During the scm call memory protection will be enabled for the meta * data blob, so make sure it's physically contiguous, 4K aligned and * non-cachable to avoid XPU violations. */ mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, GFP_KERNEL); if (!mdata_buf) { dev_err(__scm->dev, "Allocation of metadata buffer failed.\n"); return -ENOMEM; } memcpy(mdata_buf, metadata, size); ret = qcom_scm_clk_enable(); if (ret) goto free_metadata; ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys); qcom_scm_clk_disable(); free_metadata: dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); return ret; } EXPORT_SYMBOL(qcom_scm_pas_init_image); /** * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral * for firmware loading * @peripheral: peripheral id * @addr: start address of memory area to prepare * @size: size of the memory area to prepare * * Returns 0 on success. 
*/ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) { int ret; ret = qcom_scm_clk_enable(); if (ret) return ret; ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size); qcom_scm_clk_disable(); return ret; } EXPORT_SYMBOL(qcom_scm_pas_mem_setup); /** * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware * and reset the remote processor * @peripheral: peripheral id * * Return 0 on success. */ int qcom_scm_pas_auth_and_reset(u32 peripheral) { int ret; ret = qcom_scm_clk_enable(); if (ret) return ret; ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral); qcom_scm_clk_disable(); return ret; } EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset); /** * qcom_scm_pas_shutdown() - Shut down the remote processor * @peripheral: peripheral id * * Returns 0 on success. */ int qcom_scm_pas_shutdown(u32 peripheral) { int ret; ret = qcom_scm_clk_enable(); if (ret) return ret; ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral); qcom_scm_clk_disable(); return ret; } EXPORT_SYMBOL(qcom_scm_pas_shutdown); static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev, unsigned long idx) { if (idx != 0) return -EINVAL; return __qcom_scm_pas_mss_reset(__scm->dev, 1); } static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev, unsigned long idx) { if (idx != 0) return -EINVAL; return __qcom_scm_pas_mss_reset(__scm->dev, 0); } static const struct reset_control_ops qcom_scm_pas_reset_ops = { .assert = qcom_scm_pas_reset_assert, .deassert = qcom_scm_pas_reset_deassert, }; /** * qcom_scm_restore_sec_cfg_available() - Check if secure environment * supports restore security config interface. * * Return true if restore-cfg interface is supported, false if not. */ bool qcom_scm_restore_sec_cfg_available(void) { return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG); } EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available); int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare); } EXPORT_SYMBOL(qcom_scm_restore_sec_cfg); int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size); } EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size); int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare); } EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en); } EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle); int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return __qcom_scm_io_readl(__scm->dev, addr, val); } EXPORT_SYMBOL(qcom_scm_io_readl); int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return __qcom_scm_io_writel(__scm->dev, addr, val); } EXPORT_SYMBOL(qcom_scm_io_writel); static void qcom_scm_set_download_mode(bool enable) { bool avail; int ret = 0; avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE); if (avail) { ret = __qcom_scm_set_dload_mode(__scm->dev, enable); } else if (__scm->dload_mode_addr) { ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr, enable ? 
QCOM_SCM_SET_DLOAD_MODE : 0); } else { dev_err(__scm->dev, "No available mechanism for setting download mode\n"); } if (ret) dev_err(__scm->dev, "failed to set download mode: %d\n", ret); } static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) { struct device_node *tcsr; struct device_node *np = dev->of_node; struct resource res; u32 offset; int ret; tcsr = of_parse_phandle(np, "qcom,dload-mode", 0); if (!tcsr) return 0; ret = of_address_to_resource(tcsr, 0, &res); of_node_put(tcsr); if (ret) return ret; ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset); if (ret < 0) return ret; *addr = res.start + offset; return 0; } /** * qcom_scm_is_available() - Checks if SCM is available */ bool qcom_scm_is_available(void) { return !!__scm; } EXPORT_SYMBOL(qcom_scm_is_available); int qcom_scm_set_remote_state(u32 state, u32 id) { return __qcom_scm_set_remote_state(__scm->dev, state, id); } EXPORT_SYMBOL(qcom_scm_set_remote_state); /** * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership * @mem_addr: mem region whose ownership need to be reassigned * @mem_sz: size of the region. * @srcvm: vmid for current set of owners, each set bit in * flag indicate a unique owner * @newvm: array having new owners and corresponding permission * flags * @dest_cnt: number of owners in next set. * * Return negative errno on failure or 0 on success with @srcvm updated. */ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, unsigned int *srcvm, const struct qcom_scm_vmperm *newvm, unsigned int dest_cnt) { struct qcom_scm_current_perm_info *destvm; struct qcom_scm_mem_map_info *mem_to_map; phys_addr_t mem_to_map_phys; phys_addr_t dest_phys; phys_addr_t ptr_phys; dma_addr_t ptr_dma; size_t mem_to_map_sz; size_t dest_sz; size_t src_sz; size_t ptr_sz; int next_vm; __le32 *src; void *ptr; int ret, i, b; unsigned long srcvm_bits = *srcvm; src_sz = hweight_long(srcvm_bits) * sizeof(*src); mem_to_map_sz = sizeof(*mem_to_map); dest_sz = dest_cnt * sizeof(*destvm); ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(dest_sz, SZ_64); ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL); if (!ptr) return -ENOMEM; ptr_phys = dma_to_phys(__scm->dev, ptr_dma); /* Fill source vmid detail */ src = ptr; i = 0; for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG) src[i++] = cpu_to_le32(b); /* Fill details of mem buff to map */ mem_to_map = ptr + ALIGN(src_sz, SZ_64); mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64); mem_to_map->mem_addr = cpu_to_le64(mem_addr); mem_to_map->mem_size = cpu_to_le64(mem_sz); next_vm = 0; /* Fill details of next vmid detail */ destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); for (i = 0; i < dest_cnt; i++, destvm++, newvm++) { destvm->vmid = cpu_to_le32(newvm->vmid); destvm->perm = cpu_to_le32(newvm->perm); destvm->ctx = 0; destvm->ctx_size = 0; next_vm |= BIT(newvm->vmid); } ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, ptr_phys, src_sz, dest_phys, dest_sz); dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma); if (ret) { dev_err(__scm->dev, "Assign memory protection call failed %d\n", ret); return -EINVAL; } *srcvm = next_vm; return 0; } EXPORT_SYMBOL(qcom_scm_assign_mem); static int qcom_scm_probe(struct platform_device *pdev) { struct qcom_scm *scm; unsigned long clks; int ret; scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL); if (!scm) return -ENOMEM; ret = qcom_scm_find_dload_address(&pdev->dev, 
&scm->dload_mode_addr); if (ret < 0) return ret; clks = (unsigned long)of_device_get_match_data(&pdev->dev); scm->core_clk = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(scm->core_clk)) { if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER) return PTR_ERR(scm->core_clk); if (clks & SCM_HAS_CORE_CLK) { dev_err(&pdev->dev, "failed to acquire core clk\n"); return PTR_ERR(scm->core_clk); } scm->core_clk = NULL; } scm->iface_clk = devm_clk_get(&pdev->dev, "iface"); if (IS_ERR(scm->iface_clk)) { if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER) return PTR_ERR(scm->iface_clk); if (clks & SCM_HAS_IFACE_CLK) { dev_err(&pdev->dev, "failed to acquire iface clk\n"); return PTR_ERR(scm->iface_clk); } scm->iface_clk = NULL; } scm->bus_clk = devm_clk_get(&pdev->dev, "bus"); if (IS_ERR(scm->bus_clk)) { if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER) return PTR_ERR(scm->bus_clk); if (clks & SCM_HAS_BUS_CLK) { dev_err(&pdev->dev, "failed to acquire bus clk\n"); return PTR_ERR(scm->bus_clk); } scm->bus_clk = NULL; } scm->reset.ops = &qcom_scm_pas_reset_ops; scm->reset.nr_resets = 1; scm->reset.of_node = pdev->dev.of_node; ret = devm_reset_controller_register(&pdev->dev, &scm->reset); if (ret) return ret; /* vote for max clk rate for highest performance */ ret = clk_set_rate(scm->core_clk, INT_MAX); if (ret) return ret; __scm = scm; __scm->dev = &pdev->dev; __qcom_scm_init(); /* * If requested enable "download mode", from this point on warmboot * will cause the the boot stages to enter download mode, unless * disabled below by a clean shutdown/reboot. */ if (download_mode) qcom_scm_set_download_mode(true); return 0; } static void qcom_scm_shutdown(struct platform_device *pdev) { /* Clean shutdown, disable download mode to allow normal restart */ if (download_mode) qcom_scm_set_download_mode(false); } static const struct of_device_id qcom_scm_dt_match[] = { { .compatible = "qcom,scm-apq8064", /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */ }, { .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK | SCM_HAS_IFACE_CLK | SCM_HAS_BUS_CLK) }, { .compatible = "qcom,scm-ipq4019" }, { .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK }, { .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK }, { .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK | SCM_HAS_IFACE_CLK | SCM_HAS_BUS_CLK) }, { .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK | SCM_HAS_IFACE_CLK | SCM_HAS_BUS_CLK) }, { .compatible = "qcom,scm-msm8996" }, { .compatible = "qcom,scm" }, {} }; static struct platform_driver qcom_scm_driver = { .driver = { .name = "qcom_scm", .of_match_table = qcom_scm_dt_match, }, .probe = qcom_scm_probe, .shutdown = qcom_scm_shutdown, }; static int __init qcom_scm_init(void) { return platform_driver_register(&qcom_scm_driver); } subsys_initcall(qcom_scm_init);
gpl-2.0
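qcom_scm_clk_enable() above is a textbook instance of the kernel's goto-unwind idiom: each acquired resource gets its own rollback label, and a failure jumps to the label that releases everything taken so far, in reverse order. A minimal user-space sketch of the same shape follows; the take/drop functions are stand-ins, not real clk APIs.

/* Sketch of the goto-based error-unwind pattern. */
#include <stdio.h>

static int take(const char *name)  { printf("take %s\n", name); return 0; }
static void drop(const char *name) { printf("drop %s\n", name); }

int enable_all(void)
{
    int ret;

    ret = take("core");
    if (ret)
        goto bail;
    ret = take("iface");
    if (ret)
        goto disable_core;
    ret = take("bus");
    if (ret)
        goto disable_iface;
    return 0;

disable_iface:
    drop("iface");
disable_core:
    drop("core");
bail:
    return ret;
}

int main(void) { return enable_all(); }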
Alonso1398/android_kernel_samsung_coriplus
arch/arm/mach-rhea/board-bcm59055.c
11
32772
/***************************************************************************** * Copyright 2001 - 2011 Broadcom Corporation. All rights reserved. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available at * http://www.gnu.org/licenses/old-license/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a * license other than the GPL, without Broadcom's express prior written * consent. * *****************************************************************************/ #include <linux/version.h> #include <linux/init.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/sysdev.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/mach/arch.h> #include <asm/mach-types.h> #include <linux/gpio.h> #include <mach/hardware.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/i2c.h> #include <linux/mfd/bcmpmu.h> #ifdef CONFIG_KONA_AVS #include <plat/kona_avs.h> #endif #include "pm_params.h" #define PMU_DEVICE_I2C_ADDR 0x08 #define PMU_DEVICE_I2C_ADDR1 0x0C #define PMU_DEVICE_INT_GPIO 29 #define PMU_DEVICE_I2C_BUSNO 2 static struct bcmpmu_rw_data register_init_data[] = { {.map = 0, .addr = 0x0c, .val = 0x1b, .mask = 0xFF}, {.map = 0, .addr = 0x2e, .val = 0x00, .mask = 0xF3}, {.map = 0, .addr = 0x40, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x41, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x42, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x43, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x44, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x45, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x46, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x47, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x48, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x49, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x4a, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x4b, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x4c, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x4d, .val = 0xFF, .mask = 0xFF}, {.map = 0, .addr = 0x50, .val = 0x6B, .mask = 0xFF}, {.map = 0, .addr = 0x51, .val = 0x03, .mask = 0xFF}, {.map = 0, .addr = 0x52, .val = 0x08, .mask = 0xFF}, {.map = 0, .addr = 0x53, .val = 0x00, .mask = 0xFF}, {.map = 0, .addr = 0x54, .val = 0x03, .mask = 0xFF}, {.map = 0, .addr = 0x55, .val = 0x08, .mask = 0xFF}, {.map = 0, .addr = 0x56, .val = 0x06, .mask = 0xFF}, {.map = 0, .addr = 0x57, .val = 0x07, .mask = 0xFF}, {.map = 0, .addr = 0x58, .val = 0x01, .mask = 0xFF}, {.map = 0, .addr = 0x59, .val = 0x00, .mask = 0xFF}, {.map = 0, .addr = 0x5a, .val = 0x07, .mask = 0xFF}, {.map = 0, .addr = 0x69, .val = 0x10, .mask = 0xFF}, /* * OTG registers */ {.map = 0, .addr = 0x71, .val = 0x09, .mask = 0xFF}, {.map = 0, .addr = 0x77, .val = 0xD4, .mask = 0xFF}, {.map = 0, .addr = 0x78, .val = 0x98, .mask = 0xFF}, {.map = 0, .addr = 0x79, .val = 0xF0, .mask = 0xFF}, {.map = 0, .addr = 0x7A, .val = 0x60, .mask = 0xFF}, {.map = 0, .addr = 0x7B, .val = 0xC3, .mask = 0xFF}, {.map = 0, .addr = 0x7C, .val = 0xA7, .mask = 0xFF}, {.map = 0, .addr = 0x7D, .val = 0x08, .mask = 0xFF}, /*Init SDSR NM, NM2 and LPM voltages to 1.2V */ {.map = 0, .addr = 0xD0, .val = 0x13, .mask = 0xFF}, {.map = 0, .addr = 0xD1, .val = 0x13, .mask = 0xFF}, {.map = 0, .addr = 0xD2, .val = 0x13, .mask = 0xFF}, /*Init 
CSR LPM to 0.88 V CSR NM2 to 1.22V */ {.map = 0, .addr = 0xC1, .val = 0x03, .mask = 0xFF}, {.map = 0, .addr = 0xC2, .val = 0x14, .mask = 0xFF}, /*Set IOSR LMP voltage to 1.8V*/ {.map = 0, .addr = 0xC9, .val = 0x1B, .mask = 0xFF}, /*PLLCTRL, Clear Bit 0 to disable PLL when PC2:PC1 = 0b00*/ {.map = 0, .addr = 0x0A, .val = 0x0E, .mask = 0x0F}, /*CMPCTRL13, Set bits 4, 1 for BSI Sync. Mode */ {.map = 0, .addr = 0x1C, .val = 0x13, .mask = 0xFF}, /*CMPCTRL12, Set bits 4, 1 for NTC Sync. Mode*/ {.map = 0, .addr = 0x1B, .val = 0x13, .mask = 0xFF}, /*Init IOSR NM2 and LPM voltages to 1.8V */ {.map = 0, .addr = 0xC9, .val = 0x1B, .mask = 0xFF}, {.map = 0, .addr = 0xCA, .val = 0x1B, .mask = 0xFF}, }; static struct bcmpmu_temp_map batt_temp_map[] = { /* * This table is hardware dependent and need to get from platform team */ /* * adc temp */ {932, 233}, /* -40 C */ {900, 238}, /* -35 C */ {860, 243}, /* -30 C */ {816, 248}, /* -25 C */ {760, 253}, /* -20 C */ {704, 258}, /* -15 C */ {636, 263}, /* -10 C */ {568, 268}, /* -5 C */ {500, 273}, /* 0 C */ {440, 278}, /* 5 C */ {376, 283}, /* 10 C */ {324, 288}, /* 15 C */ {272, 293}, /* 20 C */ {228, 298}, /* 25 C */ {192, 303}, /* 30 C */ {160, 308}, /* 35 C */ {132, 313}, /* 40 C */ {112, 318}, /* 45 C */ {92, 323}, /* 50 C */ {76, 328}, /* 55 C */ {64, 333}, /* 60 C */ {52, 338}, /* 65 C */ {44, 343}, /* 70 C */ {36, 348}, /* 75 C */ {32, 353}, /* 80 C */ {28, 358}, /* 85 C */ {24, 363}, /* 90 C */ {20, 368}, /* 95 C */ {16, 373}, /* 100 C */ }; static struct bcmpmu_temp_map batt_temp_volt_map[] = { /* * This table is hardware dependent and need to get from platform team */ /* * adc temp */ {1091, 233}, /* -40 C */ {1056, 238}, /* -35 C */ {1011, 243}, /* -30 C */ {956, 248}, /* -25 C */ {893, 253}, /* -20 C */ {823, 258}, /* -15 C */ {748, 263}, /* -10 C */ {669, 268}, /* -5 C */ {591, 273}, /* 0 C */ {515, 278}, /* 5 C */ {443, 283}, /* 10 C */ {378, 288}, /* 15 C */ {320, 293}, /* 20 C */ {270, 298}, /* 25 C */ {226, 303}, /* 30 C */ {189, 308}, /* 35 C */ {158, 313}, /* 40 C */ {132, 318}, /* 45 C */ {111, 323}, /* 50 C */ {93, 328}, /* 55 C */ {78, 333}, /* 60 C */ {65, 338}, /* 65 C */ {55, 343}, /* 70 C */ {47, 348}, /* 75 C */ {40, 353}, /* 80 C */ {34, 358}, /* 85 C */ {29, 363}, /* 90 C */ {25, 368}, /* 95 C */ {21, 373}, /* 100 C */ {18, 378}, /* 105 C */ {16, 383}, /* 110 C */ {14, 388}, /* 115 C */ }; __weak struct regulator_consumer_supply rf_supply[] = { {.supply = "rfldo_uc"}, }; static struct regulator_init_data bcm59055_rfldo_data = { .constraints = { .name = "rfldo", .min_uV = 1300000, .max_uV = 3300000, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(rf_supply), .consumer_supplies = rf_supply, }; __weak struct regulator_consumer_supply cam_supply[] = { {.supply = "camldo_uc"}, }; static struct regulator_init_data bcm59055_camldo_data = { .constraints = { .name = "camldo", .min_uV = 1300000, .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_VOLTAGE, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(cam_supply), .consumer_supplies = cam_supply, }; __weak struct regulator_consumer_supply hv1_supply[] = { {.supply = "hv1ldo_uc"}, {.supply = "2v9_aud"}, }; static struct regulator_init_data bcm59055_hv1ldo_data = { .constraints = { .name = "hv1ldo", .min_uV = 1300000, .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_VOLTAGE, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(hv1_supply), .consumer_supplies = hv1_supply, }; __weak struct 
regulator_consumer_supply hv2_supply[] = { {.supply = "hv2ldo_uc"}, }; static struct regulator_init_data bcm59055_hv2ldo_data = { .constraints = { .name = "hv2ldo", .min_uV = 1300000, .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_VOLTAGE, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(hv2_supply), .consumer_supplies = hv2_supply, }; __weak struct regulator_consumer_supply hv3_supply[] = { {.supply = "hv3ldo_uc"}, {.supply = "2v9_vibra"}, }; static struct regulator_init_data bcm59055_hv3ldo_data = { .constraints = { .name = "hv3ldo", .min_uV = 1300000, .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_VOLTAGE, .always_on = 0, }, .num_consumer_supplies = ARRAY_SIZE(hv3_supply), .consumer_supplies = hv3_supply, }; __weak struct regulator_consumer_supply hv4_supply[] = { {.supply = "hv4ldo_uc"}, {.supply = "vdd_sdio"}, }; static struct regulator_init_data bcm59055_hv4ldo_data = { .constraints = { .name = "hv4ldo", .min_uV = 1300000, .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_VOLTAGE, .always_on = 0, }, .num_consumer_supplies = ARRAY_SIZE(hv4_supply), .consumer_supplies = hv4_supply, }; __weak struct regulator_consumer_supply hv5_supply[] = { {.supply = "hv5ldo_uc"}, }; static struct regulator_init_data bcm59055_hv5ldo_data = { .constraints = { .name = "hv5ldo", .min_uV = 1300000, .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_VOLTAGE, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(hv5_supply), .consumer_supplies = hv5_supply, }; __weak struct regulator_consumer_supply hv6_supply[] = { {.supply = "hv6ldo_uc"}, {.supply = "vdd_sdxc"}, }; static struct regulator_init_data bcm59055_hv6ldo_data = { .constraints = { .name = "hv6ldo", .min_uV = 1300000, .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_VOLTAGE, .always_on = 0, }, .num_consumer_supplies = ARRAY_SIZE(hv6_supply), .consumer_supplies = hv6_supply, }; __weak struct regulator_consumer_supply hv7_supply[] = { {.supply = "hv7"}, }; static struct regulator_init_data bcm59055_hv7ldo_data = { .constraints = { .name = "hv7ldo", .min_uV = 1300000, .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_VOLTAGE, .always_on = 0, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY | REGULATOR_MODE_IDLE}, .num_consumer_supplies = ARRAY_SIZE(hv7_supply), .consumer_supplies = hv7_supply, }; __weak struct regulator_consumer_supply sim_supply[] = { {.supply = "simldo_uc"}, {.supply = "sim_vcc"}, }; static struct regulator_init_data bcm59055_simldo_data = { .constraints = { .name = "simldo", .min_uV = 1300000, .max_uV = 3300000, .valid_ops_mask = REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_VOLTAGE, /*TODO: We observed that, on Rhearay HW, interrupt from GPIO expander is not detected by baseband if SIMLDO is disabled. As a temp. 
workaround we keep SIMLDO ON by default for Rhearay till the issue is root casued*/ #ifdef CONFIG_MACH_RHEA_RAY_EDN2X .always_on = 1, #endif }, .num_consumer_supplies = ARRAY_SIZE(sim_supply), .consumer_supplies = sim_supply, }; __weak struct regulator_consumer_supply csr_nm_supply[] = { {.supply = "csr_nm_uc"}, }; static struct regulator_init_data bcm59055_csr_nm_data = { .constraints = { .name = "csr_nm", .min_uV = 700000, .max_uV = 1800000, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(csr_nm_supply), .consumer_supplies = csr_nm_supply, }; __weak struct regulator_consumer_supply csr_nm2_supply[] = { {.supply = "csr_nm2_uc"}, }; static struct regulator_init_data bcm59055_csr_nm2_data = { .constraints = { .name = "csr_nm2", .min_uV = 700000, .max_uV = 1800000, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(csr_nm2_supply), .consumer_supplies = csr_nm2_supply, }; __weak struct regulator_consumer_supply csr_lpm_supply[] = { {.supply = "csr_lpm_uc"}, }; static struct regulator_init_data bcm59055_csr_lpm_data = { .constraints = { .name = "csr_lpm", .min_uV = 700000, .max_uV = 1800000, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(csr_lpm_supply), .consumer_supplies = csr_lpm_supply, }; __weak struct regulator_consumer_supply iosr_nm_supply[] = { {.supply = "iosr_nm_uc"}, }; static struct regulator_init_data bcm59055_iosr_nm_data = { .constraints = { .name = "iosr_nm", .min_uV = 700000, .max_uV = 1800000, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_VOLTAGE, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(iosr_nm_supply), .consumer_supplies = iosr_nm_supply, }; __weak struct regulator_consumer_supply iosr_nm2_supply[] = { {.supply = "iosr_nm2_uc"}, }; static struct regulator_init_data bcm59055_iosr_nm2_data = { .constraints = { .name = "iosr_nm2", .min_uV = 700000, .max_uV = 1800000, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(iosr_nm2_supply), .consumer_supplies = iosr_nm2_supply, }; __weak struct regulator_consumer_supply iosr_lpm_supply[] = { {.supply = "iosr_lmp_uc"}, }; static struct regulator_init_data bcm59055_iosr_lpm_data = { .constraints = { .name = "iosr_lmp", .min_uV = 700000, .max_uV = 1800000, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(iosr_lpm_supply), .consumer_supplies = iosr_lpm_supply, }; __weak struct regulator_consumer_supply sdsr_nm_supply[] = { {.supply = "sdsr_nm_uc"}, }; static struct regulator_init_data bcm59055_sdsr_nm_data = { .constraints = { .name = "sdsr_nm", .min_uV = 700000, .max_uV = 1800000, .valid_ops_mask = REGULATOR_CHANGE_MODE , .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(sdsr_nm_supply), .consumer_supplies = sdsr_nm_supply, }; __weak struct regulator_consumer_supply sdsr_nm2_supply[] = { {.supply = "sdsr_nm2_uc"}, }; static struct regulator_init_data bcm59055_sdsr_nm2_data = { .constraints = { .name = "sdsr_nm2", .min_uV = 700000, .max_uV = 1800000, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_VOLTAGE, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(sdsr_nm2_supply), .consumer_supplies = sdsr_nm2_supply, }; __weak struct regulator_consumer_supply sdsr_lpm_supply[] = { {.supply = "sdsr_lpm_uc"}, }; static struct regulator_init_data bcm59055_sdsr_lpm_data = { .constraints = { .name = "sdsr_lpm", .min_uV = 700000, .max_uV = 1800000, 
.valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_VOLTAGE, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(sdsr_lpm_supply), .consumer_supplies = sdsr_lpm_supply, }; struct bcmpmu_regulator_init_data bcm59055_regulators[BCMPMU_REGULATOR_MAX] = { [BCMPMU_REGULATOR_RFLDO] = { BCMPMU_REGULATOR_RFLDO, &bcm59055_rfldo_data, 0x01, 0 }, [BCMPMU_REGULATOR_CAMLDO] = { BCMPMU_REGULATOR_CAMLDO, &bcm59055_camldo_data, 0x11, 0 }, [BCMPMU_REGULATOR_HV1LDO] = { BCMPMU_REGULATOR_HV1LDO, &bcm59055_hv1ldo_data, 0x11, 0 }, [BCMPMU_REGULATOR_HV2LDO] = { BCMPMU_REGULATOR_HV2LDO, &bcm59055_hv2ldo_data, 0x11, 0 }, [BCMPMU_REGULATOR_HV3LDO] = { BCMPMU_REGULATOR_HV3LDO, &bcm59055_hv3ldo_data, 0xAA, 0 }, [BCMPMU_REGULATOR_HV4LDO] = { BCMPMU_REGULATOR_HV4LDO, &bcm59055_hv4ldo_data, 0xAA, BCMPMU_REGL_LPM_IN_DSM }, [BCMPMU_REGULATOR_HV5LDO] = { BCMPMU_REGULATOR_HV5LDO, &bcm59055_hv5ldo_data, 0x11, 0 }, [BCMPMU_REGULATOR_HV6LDO] = { BCMPMU_REGULATOR_HV6LDO, &bcm59055_hv6ldo_data, 0xAA, BCMPMU_REGL_LPM_IN_DSM }, [BCMPMU_REGULATOR_HV7LDO] = { BCMPMU_REGULATOR_HV7LDO, &bcm59055_hv7ldo_data, 0xAA, 0 }, /*TODO: We observed that, on Rhearay HW, interrupt from GPIO expander is not detected by baseband if SIMLDO is disabled. As a temp. workaround we keep SIMLDO ON by default for Rhearay till the issue is root casued*/ #ifdef CONFIG_MACH_RHEA_RAY_EDN2X [BCMPMU_REGULATOR_SIMLDO] = { BCMPMU_REGULATOR_SIMLDO, &bcm59055_simldo_data, 0x00, BCMPMU_REGL_LPM_IN_DSM }, #else [BCMPMU_REGULATOR_SIMLDO] = { BCMPMU_REGULATOR_SIMLDO, &bcm59055_simldo_data, 0xAA, BCMPMU_REGL_LPM_IN_DSM }, #endif [BCMPMU_REGULATOR_CSR_NM] = { BCMPMU_REGULATOR_CSR_NM, &bcm59055_csr_nm_data, 0x11, 0 }, [BCMPMU_REGULATOR_CSR_NM2] = { BCMPMU_REGULATOR_CSR_NM2, &bcm59055_csr_nm2_data, 0xFF, 0 }, [BCMPMU_REGULATOR_CSR_LPM] = { BCMPMU_REGULATOR_CSR_LPM, &bcm59055_csr_lpm_data, 0xFF, 0 }, [BCMPMU_REGULATOR_IOSR_NM] = { BCMPMU_REGULATOR_IOSR_NM, &bcm59055_iosr_nm_data, 0x01, 0 }, [BCMPMU_REGULATOR_IOSR_NM2] = { BCMPMU_REGULATOR_IOSR_NM2, &bcm59055_iosr_nm2_data, 0xFF, 0 }, [BCMPMU_REGULATOR_IOSR_LPM] = { BCMPMU_REGULATOR_IOSR_LPM, &bcm59055_iosr_lpm_data, 0xFF, 0 }, [BCMPMU_REGULATOR_SDSR_NM] = { BCMPMU_REGULATOR_SDSR_NM, &bcm59055_sdsr_nm_data, 0x11, 0 }, [BCMPMU_REGULATOR_SDSR_NM2] = { BCMPMU_REGULATOR_SDSR_NM2, &bcm59055_sdsr_nm2_data, 0xFF, 0 }, [BCMPMU_REGULATOR_SDSR_LPM] = { BCMPMU_REGULATOR_SDSR_LPM, &bcm59055_sdsr_lpm_data, 0xFF, 0 }, }; static struct bcmpmu_wd_setting bcm59055_wd_setting = { .flags = WATCHDOG_OTP_ENABLED, .watchdog_timeout = 32, }; static struct platform_device bcmpmu_audio_device = { .name = "bcmpmu_audio", .id = -1, .dev.platform_data = NULL, }; static struct platform_device bcmpmu_em_device = { .name = "bcmpmu_em", .id = -1, .dev.platform_data = NULL, }; #ifdef CONFIG_BCMPMU_CSAPI_ADC static struct platform_device bcmpmu_adc_chipset_api = { .name = "bcmpmu_adc_chipset_api", .id = -1, .dev.platform_data = NULL, }; #endif static struct platform_device bcmpmu_otg_xceiv_device = { .name = "bcmpmu_otg_xceiv", .id = -1, .dev.platform_data = NULL, }; #ifdef CONFIG_BCMPMU_SELFTEST static struct platform_device bcmpmu_selftest_device = { .name = "bcmpmu_selftest", .id = -1, .dev.platform_data = NULL, }; #endif #ifdef CONFIG_BCMPMU_RPC static struct platform_device bcmpmu_rpc = { .name = "bcmpmu_rpc", .id = -1, .dev.platform_data = NULL, }; #endif static struct platform_device *bcmpmu_client_devices[] = { &bcmpmu_audio_device, &bcmpmu_em_device, #ifdef CONFIG_BCMPMU_CSAPI_ADC &bcmpmu_adc_chipset_api, #endif &bcmpmu_otg_xceiv_device, 
#ifdef CONFIG_BCMPMU_SELFTEST &bcmpmu_selftest_device, #endif #ifdef CONFIG_BCMPMU_RPC &bcmpmu_rpc, #endif }; static int bcmpmu_init_platform_hw(struct bcmpmu *bcmpmu) { int i; printk(KERN_INFO "%s: called.\n", __func__); for (i = 0; i < ARRAY_SIZE(bcmpmu_client_devices); i++) bcmpmu_client_devices[i]->dev.platform_data = bcmpmu; platform_add_devices(bcmpmu_client_devices, ARRAY_SIZE(bcmpmu_client_devices)); #ifdef CONFIG_59055_SIM_EM_SHDWN /* In 59055 PMU STAT1 and SIMOFFb pin are muxed. * need to make it for SIMOFFb so that SIM emergency * shutdown can happen. This feature is not present * in 59039 or 59042 */ bcmpmu->write_dev(bcmpmu, PMU_REG_STATMUX, bcmpmu->regmap[PMU_REG_STATMUX].mask, bcmpmu->regmap[PMU_REG_STATMUX].mask); bcmpmu->write_dev(bcmpmu, PMU_REG_SIMOFF_EN, bcmpmu->regmap[PMU_REG_SIMOFF_EN].mask, bcmpmu->regmap[PMU_REG_SIMOFF_EN].mask); #endif return 0; } static int bcmpmu_exit_platform_hw(struct bcmpmu *bcmpmu) { printk(KERN_INFO"REG: pmu_init_platform_hw called\n"); return 0; } static struct i2c_board_info pmu_info_map1 = { I2C_BOARD_INFO("bcmpmu_map1", PMU_DEVICE_I2C_ADDR1), }; static struct bcmpmu_adc_setting adc_setting = { .tx_rx_sel_addr = 0, .tx_delay = 2, .rx_delay = 2, .sw_timeout = 100, /* revisit */ .txrx_timeout = 2000, /* revisit */ .compensation_samples = 8, /* from experiments */ .compensation_volt_lo = 72, /* 6% channel (of 1200 mV) */ .compensation_volt_hi = 1128, /* 94% channel (of 1200 mV) */ .compensation_interval = 900, }; static struct bcmpmu_charge_zone chrg_zone[] = { {.tl = 253, .th = 333, .v = 3000, .fc = 10, .qc = 100}, /* Zone QC */ {.tl = 253, .th = 272, .v = 3900, .fc = 50, .qc = 0}, /* Zone LL */ {.tl = 273, .th = 282, .v = 4100, .fc = 50, .qc = 0}, /* Zone L */ {.tl = 283, .th = 318, .v = 4200, .fc = 100, .qc = 0}, /* Zone N */ {.tl = 319, .th = 323, .v = 4100, .fc = 50, .qc = 0}, /* Zone H */ {.tl = 324, .th = 333, .v = 4100, .fc = 50, .qc = 0}, /* Zone HH */ {.tl = 253, .th = 333, .v = 0, .fc = 0, .qc = 0}, /* Zone OUT */ }; /* * Initialization: batt_temp, pa_temp and x32_temp could use different NTCs, * but that is not the case so far */ static struct bcmpmu_platform_data bcmpmu_plat_data = { .init = bcmpmu_init_platform_hw, .exit = bcmpmu_exit_platform_hw, .i2c_board_info_map1 = &pmu_info_map1, .i2c_adapter_id = PMU_DEVICE_I2C_BUSNO, .i2c_pagesize = 256, .init_data = &register_init_data[0], .init_max = ARRAY_SIZE(register_init_data), .batt_temp_voltmap = &batt_temp_volt_map[0], .batt_temp_voltmap_len = ARRAY_SIZE(batt_temp_volt_map), .pa_temp_voltmap = &batt_temp_volt_map[0], .pa_temp_voltmap_len = ARRAY_SIZE(batt_temp_volt_map), .x32_temp_voltmap = &batt_temp_volt_map[0], .x32_temp_voltmap_len = ARRAY_SIZE(batt_temp_volt_map), .batt_temp_map = &batt_temp_map[0], .batt_temp_map_len = ARRAY_SIZE(batt_temp_map), .adc_setting = &adc_setting, .num_of_regl = ARRAY_SIZE(bcm59055_regulators), .regulator_init_data = bcm59055_regulators, .fg_smpl_rate = 2083, .fg_slp_rate = 32000, .fg_slp_curr_ua = 1000, .fg_factor = 976, /* 59055 specific */ .chrg_1c_rate = 1000, .chrg_eoc = 65, .chrg_zone_map = &chrg_zone[0], .fg_capacity_full = 1500 * 3600, .support_fg = 1, .support_chrg_maint = 1, .bc = BCMPMU_BC_BB_BC12, .wd_setting = &bcm59055_wd_setting, .chrg_resume_lvl = 4000, .fg_fbat_lvl = 4150, .batt_model = "Unknown", .cutoff_volt = 3200, .cutoff_count_max = 3, }; static struct i2c_board_info __initdata pmu_info[] = { { I2C_BOARD_INFO("bcmpmu", PMU_DEVICE_I2C_ADDR), .platform_data = &bcmpmu_plat_data, }, }; /*700 Mhz CSR voltage definitions....*/ 
#define CSR_REG_VAL_RETN_SS_700M 0x4 #define CSR_REG_VAL_RETN_TT_700M 0x4 #define CSR_REG_VAL_RETN_FF_700M 0x4 #define CSR_REG_VAL_ECO_SS_700M 0x7 #define CSR_REG_VAL_ECO_TT_700M 0x7 #define CSR_REG_VAL_ECO_FF_700M 0x7 #define CSR_REG_VAL_NRML_SS_700M 0xF #define CSR_REG_VAL_NRML_TT_700M 0xC #define CSR_REG_VAL_NRML_FF_700M 0xA #define CSR_REG_VAL_TURBO_SS_700M 0x14 #define CSR_REG_VAL_TURBO_TT_700M 0x11 #define CSR_REG_VAL_TURBO_FF_700M 0xE #define PMU_SCR_VLT_TBL_SS_700M ARRAY_LIST(\ CSR_REG_VAL_RETN_SS_700M,\ CSR_REG_VAL_RETN_SS_700M,\ CSR_REG_VAL_RETN_SS_700M,\ CSR_REG_VAL_RETN_SS_700M,\ CSR_REG_VAL_RETN_SS_700M,\ CSR_REG_VAL_RETN_SS_700M,\ CSR_REG_VAL_RETN_SS_700M,\ CSR_REG_VAL_RETN_SS_700M,\ CSR_REG_VAL_ECO_SS_700M,\ CSR_REG_VAL_ECO_SS_700M,\ CSR_REG_VAL_ECO_SS_700M,\ CSR_REG_VAL_NRML_SS_700M,\ CSR_REG_VAL_NRML_SS_700M,\ CSR_REG_VAL_NRML_SS_700M,\ CSR_REG_VAL_TURBO_SS_700M,\ CSR_REG_VAL_TURBO_SS_700M) #define PMU_SCR_VLT_TBL_TT_700M ARRAY_LIST(\ CSR_REG_VAL_RETN_TT_700M,\ CSR_REG_VAL_RETN_TT_700M,\ CSR_REG_VAL_RETN_TT_700M,\ CSR_REG_VAL_RETN_TT_700M,\ CSR_REG_VAL_RETN_TT_700M,\ CSR_REG_VAL_RETN_TT_700M,\ CSR_REG_VAL_RETN_TT_700M,\ CSR_REG_VAL_RETN_TT_700M,\ CSR_REG_VAL_ECO_TT_700M,\ CSR_REG_VAL_ECO_TT_700M,\ CSR_REG_VAL_ECO_TT_700M,\ CSR_REG_VAL_NRML_TT_700M,\ CSR_REG_VAL_NRML_TT_700M,\ CSR_REG_VAL_NRML_TT_700M,\ CSR_REG_VAL_TURBO_TT_700M,\ CSR_REG_VAL_TURBO_TT_700M) #define PMU_SCR_VLT_TBL_FF_700M ARRAY_LIST(\ CSR_REG_VAL_RETN_FF_700M,\ CSR_REG_VAL_RETN_FF_700M,\ CSR_REG_VAL_RETN_FF_700M,\ CSR_REG_VAL_RETN_FF_700M,\ CSR_REG_VAL_RETN_FF_700M,\ CSR_REG_VAL_RETN_FF_700M,\ CSR_REG_VAL_RETN_FF_700M,\ CSR_REG_VAL_RETN_FF_700M,\ CSR_REG_VAL_ECO_FF_700M,\ CSR_REG_VAL_ECO_FF_700M,\ CSR_REG_VAL_ECO_FF_700M,\ CSR_REG_VAL_NRML_FF_700M,\ CSR_REG_VAL_NRML_FF_700M,\ CSR_REG_VAL_NRML_FF_700M,\ CSR_REG_VAL_TURBO_FF_700M,\ CSR_REG_VAL_TURBO_FF_700M) /*800 Mhz CSR voltage definitions....*/ #define CSR_REG_VAL_RETN_SS_800M 0x3 /*0.88V*/ #define CSR_REG_VAL_RETN_TT_800M 0x3 /*0.88V*/ #define CSR_REG_VAL_RETN_FF_800M 0x3 /*0.88V*/ #define CSR_REG_VAL_ECO_SS_800M 0xd /*1.08V*/ #define CSR_REG_VAL_ECO_TT_800M 0x8 /*0.98V*/ #define CSR_REG_VAL_ECO_FF_800M 0x8 /*0.98V*/ #define CSR_REG_VAL_NRML_SS_800M 0x11 /*1.16V*/ #define CSR_REG_VAL_NRML_TT_800M 0x0b /*1.04V*/ #define CSR_REG_VAL_NRML_FF_800M 0x8 /*0.98V*/ #define CSR_REG_VAL_TURBO_SS_800M 0x1A /*1.34V*/ #define CSR_REG_VAL_TURBO_TT_800M 0x14 /*1.22V*/ #define CSR_REG_VAL_TURBO_FF_800M 0x0F /*1.12V*/ #define PMU_SCR_VLT_TBL_SS_800M ARRAY_LIST(\ CSR_REG_VAL_RETN_SS_800M,\ CSR_REG_VAL_RETN_SS_800M,\ CSR_REG_VAL_RETN_SS_800M,\ CSR_REG_VAL_RETN_SS_800M,\ CSR_REG_VAL_RETN_SS_800M,\ CSR_REG_VAL_RETN_SS_800M,\ CSR_REG_VAL_RETN_SS_800M,\ CSR_REG_VAL_RETN_SS_800M,\ CSR_REG_VAL_ECO_SS_800M,\ CSR_REG_VAL_ECO_SS_800M,\ CSR_REG_VAL_ECO_SS_800M,\ CSR_REG_VAL_NRML_SS_800M,\ CSR_REG_VAL_NRML_SS_800M,\ CSR_REG_VAL_NRML_SS_800M,\ CSR_REG_VAL_TURBO_SS_800M,\ CSR_REG_VAL_TURBO_SS_800M) #define PMU_SCR_VLT_TBL_TT_800M ARRAY_LIST(\ CSR_REG_VAL_RETN_TT_800M,\ CSR_REG_VAL_RETN_TT_800M,\ CSR_REG_VAL_RETN_TT_800M,\ CSR_REG_VAL_RETN_TT_800M,\ CSR_REG_VAL_RETN_TT_800M,\ CSR_REG_VAL_RETN_TT_800M,\ CSR_REG_VAL_RETN_TT_800M,\ CSR_REG_VAL_RETN_TT_800M,\ CSR_REG_VAL_ECO_TT_800M,\ CSR_REG_VAL_ECO_TT_800M,\ CSR_REG_VAL_ECO_TT_800M,\ CSR_REG_VAL_NRML_TT_800M,\ CSR_REG_VAL_NRML_TT_800M,\ CSR_REG_VAL_NRML_TT_800M,\ CSR_REG_VAL_TURBO_TT_800M,\ CSR_REG_VAL_TURBO_TT_800M) #define PMU_SCR_VLT_TBL_FF_800M ARRAY_LIST(\ CSR_REG_VAL_RETN_FF_800M,\ CSR_REG_VAL_RETN_FF_800M,\ 
CSR_REG_VAL_RETN_FF_800M,\ CSR_REG_VAL_RETN_FF_800M,\ CSR_REG_VAL_RETN_FF_800M,\ CSR_REG_VAL_RETN_FF_800M,\ CSR_REG_VAL_RETN_FF_800M,\ CSR_REG_VAL_RETN_FF_800M,\ CSR_REG_VAL_ECO_FF_800M,\ CSR_REG_VAL_ECO_FF_800M,\ CSR_REG_VAL_ECO_FF_800M,\ CSR_REG_VAL_NRML_FF_800M,\ CSR_REG_VAL_NRML_FF_800M,\ CSR_REG_VAL_NRML_FF_800M,\ CSR_REG_VAL_TURBO_FF_800M,\ CSR_REG_VAL_TURBO_FF_800M) /*850 Mhz CSR voltage definitions....*/ #define CSR_REG_VAL_RETN_SS_850M 0x3 /*0.88V*/ #define CSR_REG_VAL_RETN_TT_850M 0x3 /*0.88V*/ #define CSR_REG_VAL_RETN_FF_850M 0x3 /*0.88V*/ #define CSR_REG_VAL_ECO_SS_850M 0xd /*1.08V*/ #define CSR_REG_VAL_ECO_TT_850M 0x8 /*0.98V*/ #define CSR_REG_VAL_ECO_FF_850M 0x8 /*0.98V*/ #define CSR_REG_VAL_NRML_SS_850M 0x11 /*1.16V*/ #define CSR_REG_VAL_NRML_TT_850M 0x0b /*1.04V*/ #define CSR_REG_VAL_NRML_FF_850M 0x8 /*0.98V*/ #define CSR_REG_VAL_TURBO_SS_850M 0x1A /*1.34V*/ #define CSR_REG_VAL_TURBO_TT_850M 0x15 /*1.24V*/ #define CSR_REG_VAL_TURBO_FF_850M 0x10 /*1.14V*/ #define PMU_SCR_VLT_TBL_SS_850M ARRAY_LIST(\ CSR_REG_VAL_RETN_SS_850M,\ CSR_REG_VAL_RETN_SS_850M,\ CSR_REG_VAL_RETN_SS_850M,\ CSR_REG_VAL_RETN_SS_850M,\ CSR_REG_VAL_RETN_SS_850M,\ CSR_REG_VAL_RETN_SS_850M,\ CSR_REG_VAL_RETN_SS_850M,\ CSR_REG_VAL_RETN_SS_850M,\ CSR_REG_VAL_ECO_SS_850M,\ CSR_REG_VAL_ECO_SS_850M,\ CSR_REG_VAL_ECO_SS_850M,\ CSR_REG_VAL_NRML_SS_850M,\ CSR_REG_VAL_NRML_SS_850M,\ CSR_REG_VAL_NRML_SS_850M,\ CSR_REG_VAL_TURBO_SS_850M,\ CSR_REG_VAL_TURBO_SS_850M) #define PMU_SCR_VLT_TBL_TT_850M ARRAY_LIST(\ CSR_REG_VAL_RETN_TT_850M,\ CSR_REG_VAL_RETN_TT_850M,\ CSR_REG_VAL_RETN_TT_850M,\ CSR_REG_VAL_RETN_TT_850M,\ CSR_REG_VAL_RETN_TT_850M,\ CSR_REG_VAL_RETN_TT_850M,\ CSR_REG_VAL_RETN_TT_850M,\ CSR_REG_VAL_RETN_TT_850M,\ CSR_REG_VAL_ECO_TT_850M,\ CSR_REG_VAL_ECO_TT_850M,\ CSR_REG_VAL_ECO_TT_850M,\ CSR_REG_VAL_NRML_TT_850M,\ CSR_REG_VAL_NRML_TT_850M,\ CSR_REG_VAL_NRML_TT_850M,\ CSR_REG_VAL_TURBO_TT_850M,\ CSR_REG_VAL_TURBO_TT_850M) #define PMU_SCR_VLT_TBL_FF_850M ARRAY_LIST(\ CSR_REG_VAL_RETN_FF_850M,\ CSR_REG_VAL_RETN_FF_850M,\ CSR_REG_VAL_RETN_FF_850M,\ CSR_REG_VAL_RETN_FF_850M,\ CSR_REG_VAL_RETN_FF_850M,\ CSR_REG_VAL_RETN_FF_850M,\ CSR_REG_VAL_RETN_FF_850M,\ CSR_REG_VAL_RETN_FF_850M,\ CSR_REG_VAL_ECO_FF_850M,\ CSR_REG_VAL_ECO_FF_850M,\ CSR_REG_VAL_ECO_FF_850M,\ CSR_REG_VAL_NRML_FF_850M,\ CSR_REG_VAL_NRML_FF_850M,\ CSR_REG_VAL_NRML_FF_850M,\ CSR_REG_VAL_TURBO_FF_850M,\ CSR_REG_VAL_TURBO_FF_850M) /*1 Ghz CSR voltage definitions....*/ #define CSR_REG_VAL_RETN_SS_1G 0x3 /*0.88V*/ #define CSR_REG_VAL_RETN_TT_1G 0x3 /*0.88V*/ #define CSR_REG_VAL_RETN_FF_1G 0x3 /*0.88V*/ #define CSR_REG_VAL_ECO_SS_1G 0xd /*1.08V*/ #define CSR_REG_VAL_ECO_TT_1G 0x8 /*0.98V*/ #define CSR_REG_VAL_ECO_FF_1G 0x8 /*0.98V*/ #define CSR_REG_VAL_NRML_SS_1G 0x11 /*1.16V*/ #define CSR_REG_VAL_NRML_TT_1G 0x0b /*1.04V*/ #define CSR_REG_VAL_NRML_FF_1G 0x8 /*0.98V*/ #define CSR_REG_VAL_TURBO_SS_1G 0x1A /*1.34V*/ #define CSR_REG_VAL_TURBO_TT_1G 0x1A /*1.34V*/ #define CSR_REG_VAL_TURBO_FF_1G 0x15 /*1.24V*/ #define PMU_SCR_VLT_TBL_SS_1G ARRAY_LIST(\ CSR_REG_VAL_RETN_SS_1G,\ CSR_REG_VAL_RETN_SS_1G,\ CSR_REG_VAL_RETN_SS_1G,\ CSR_REG_VAL_RETN_SS_1G,\ CSR_REG_VAL_RETN_SS_1G,\ CSR_REG_VAL_RETN_SS_1G,\ CSR_REG_VAL_RETN_SS_1G,\ CSR_REG_VAL_RETN_SS_1G,\ CSR_REG_VAL_ECO_SS_1G,\ CSR_REG_VAL_ECO_SS_1G,\ CSR_REG_VAL_ECO_SS_1G,\ CSR_REG_VAL_NRML_SS_1G,\ CSR_REG_VAL_NRML_SS_1G,\ CSR_REG_VAL_NRML_SS_1G,\ CSR_REG_VAL_TURBO_SS_1G,\ CSR_REG_VAL_TURBO_SS_1G) #define PMU_SCR_VLT_TBL_TT_1G ARRAY_LIST(\ CSR_REG_VAL_RETN_TT_1G,\ CSR_REG_VAL_RETN_TT_1G,\ CSR_REG_VAL_RETN_TT_1G,\ 
CSR_REG_VAL_RETN_TT_1G,\ CSR_REG_VAL_RETN_TT_1G,\ CSR_REG_VAL_RETN_TT_1G,\ CSR_REG_VAL_RETN_TT_1G,\ CSR_REG_VAL_RETN_TT_1G,\ CSR_REG_VAL_ECO_TT_1G,\ CSR_REG_VAL_ECO_TT_1G,\ CSR_REG_VAL_ECO_TT_1G,\ CSR_REG_VAL_NRML_TT_1G,\ CSR_REG_VAL_NRML_TT_1G,\ CSR_REG_VAL_NRML_TT_1G,\ CSR_REG_VAL_TURBO_TT_1G,\ CSR_REG_VAL_TURBO_TT_1G) #define PMU_SCR_VLT_TBL_FF_1G ARRAY_LIST(\ CSR_REG_VAL_RETN_FF_1G,\ CSR_REG_VAL_RETN_FF_1G,\ CSR_REG_VAL_RETN_FF_1G,\ CSR_REG_VAL_RETN_FF_1G,\ CSR_REG_VAL_RETN_FF_1G,\ CSR_REG_VAL_RETN_FF_1G,\ CSR_REG_VAL_RETN_FF_1G,\ CSR_REG_VAL_RETN_FF_1G,\ CSR_REG_VAL_ECO_FF_1G,\ CSR_REG_VAL_ECO_FF_1G,\ CSR_REG_VAL_ECO_FF_1G,\ CSR_REG_VAL_NRML_FF_1G,\ CSR_REG_VAL_NRML_FF_1G,\ CSR_REG_VAL_NRML_FF_1G,\ CSR_REG_VAL_TURBO_FF_1G,\ CSR_REG_VAL_TURBO_FF_1G) const u8 csr_vlt_table_ss[A9_FREQ_MAX][SR_VLT_LUT_SIZE] = { [A9_FREQ_700_MHZ] = PMU_SCR_VLT_TBL_SS_700M, [A9_FREQ_800_MHZ] = PMU_SCR_VLT_TBL_SS_800M, [A9_FREQ_850_MHZ] = PMU_SCR_VLT_TBL_SS_850M, [A9_FREQ_1_GHZ] = PMU_SCR_VLT_TBL_SS_1G, }; const u8 csr_vlt_table_tt[A9_FREQ_MAX][SR_VLT_LUT_SIZE] = { [A9_FREQ_700_MHZ] = PMU_SCR_VLT_TBL_TT_700M, [A9_FREQ_800_MHZ] = PMU_SCR_VLT_TBL_TT_800M, [A9_FREQ_850_MHZ] = PMU_SCR_VLT_TBL_TT_850M, [A9_FREQ_1_GHZ] = PMU_SCR_VLT_TBL_TT_1G, }; const u8 csr_vlt_table_ff[A9_FREQ_MAX][SR_VLT_LUT_SIZE] = { [A9_FREQ_700_MHZ] = PMU_SCR_VLT_TBL_FF_700M, [A9_FREQ_800_MHZ] = PMU_SCR_VLT_TBL_FF_800M, [A9_FREQ_850_MHZ] = PMU_SCR_VLT_TBL_FF_850M, [A9_FREQ_1_GHZ] = PMU_SCR_VLT_TBL_FF_1G, }; const u8 *bcmpmu_get_sr_vlt_table(int sr, u32 freq_inx, u32 silicon_type) { pr_info("%s:sr = %i, freq_inx = %d, " "silicon_type = %d\n", __func__, sr, freq_inx, silicon_type); BUG_ON(freq_inx > A9_FREQ_1_GHZ); #ifdef CONFIG_KONA_AVS switch (silicon_type) { case SILICON_TYPE_SLOW: return csr_vlt_table_ss[freq_inx]; case SILICON_TYPE_TYPICAL: return csr_vlt_table_tt[freq_inx]; case SILICON_TYPE_FAST: return csr_vlt_table_ff[freq_inx]; default: BUG(); } #else return csr_vlt_table_ss[freq_inx]; #endif } __init int board_pmu_init(void) { int ret; int irq; ret = gpio_request(PMU_DEVICE_INT_GPIO, "bcmpmu-irq"); if (ret < 0) { printk(KERN_ERR "%s failed at gpio_request.\n", __func__); goto exit; } ret = gpio_direction_input(PMU_DEVICE_INT_GPIO); if (ret < 0) { printk(KERN_ERR "%s failed at gpio_direction_input.\n", __func__); goto exit; } irq = gpio_to_irq(PMU_DEVICE_INT_GPIO); bcmpmu_plat_data.irq = irq; i2c_register_board_info(PMU_DEVICE_I2C_BUSNO, pmu_info, ARRAY_SIZE(pmu_info)); exit: return ret; } arch_initcall(board_pmu_init);
gpl-2.0
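The (adc, temperature) tables in the board file above are monotonically descending in the ADC column. A sketch of the usual lookup over such a table follows: find the bracketing rows and interpolate linearly between them. This is not the bcmpmu driver's actual lookup code; only three sample rows are copied from batt_temp_map[] above, with temperatures in kelvin.

/* Sketch of descending-table lookup with linear interpolation. */
#include <stdio.h>

struct temp_map { int adc; int temp; };

static const struct temp_map map[] = {
    { 500, 273 },  /*  0 C */
    { 228, 298 },  /* 25 C */
    {  92, 323 },  /* 50 C */
};

static int adc_to_temp(int adc)
{
    unsigned i, n = sizeof(map) / sizeof(map[0]);

    if (adc >= map[0].adc)
        return map[0].temp;            /* clamp below the coldest row */
    for (i = 1; i < n; i++) {
        if (adc >= map[i].adc) {
            /* linear interpolation between rows i-1 and i */
            int da = map[i - 1].adc - map[i].adc;
            int dt = map[i].temp - map[i - 1].temp;
            return map[i - 1].temp + dt * (map[i - 1].adc - adc) / da;
        }
    }
    return map[n - 1].temp;            /* clamp above the hottest row */
}

int main(void)
{
    printf("adc 364 -> %d K\n", adc_to_temp(364)); /* halfway: ~285 K (~12 C) */
    return 0;
}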
minorua/QGIS
src/gui/qgspanelwidget.cpp
11
3458
/*************************************************************************** qgspanelwidget.cpp --------------------- begin : June 2016 copyright : (C) 2016 by Nathan Woodrow email : *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include <QDialogButtonBox> #include <QPushButton> #include <QDialog> #include <QVBoxLayout> #include "qgssettings.h" #include "qgspanelwidget.h" #include "qgslogger.h" QgsPanelWidget::QgsPanelWidget( QWidget *parent ) : QWidget( parent ) { } void QgsPanelWidget::connectChildPanels( const QList<QgsPanelWidget *> &panels ) { const auto constPanels = panels; for ( QgsPanelWidget *widget : constPanels ) { connectChildPanel( widget ); } } void QgsPanelWidget::connectChildPanel( QgsPanelWidget *panel ) { connect( panel, &QgsPanelWidget::showPanel, this, &QgsPanelWidget::openPanel ); connect( panel, &QgsPanelWidget::widgetChanged, this, &QgsPanelWidget::widgetChanged ); } void QgsPanelWidget::setDockMode( bool dockMode ) { mDockMode = dockMode; } QgsPanelWidget *QgsPanelWidget::findParentPanel( QWidget *widget ) { QWidget *p = widget; while ( p ) { if ( QgsPanelWidget *panel = qobject_cast< QgsPanelWidget * >( p ) ) return panel; if ( p->window() == p ) { // break on encountering a window - e.g., a dialog opened from a panel should not inline // widgets inside the parent panel return nullptr; } p = p->parentWidget(); } return nullptr; } void QgsPanelWidget::openPanel( QgsPanelWidget *panel ) { //panel dock mode inherits from this panel panel->setDockMode( dockMode() ); if ( mDockMode ) { emit showPanel( panel ); } else { // Show the dialog version if no one is connected QDialog *dlg = new QDialog(); QString key = QStringLiteral( "/UI/paneldialog/%1" ).arg( panel->panelTitle() ); QgsSettings settings; dlg->restoreGeometry( settings.value( key ).toByteArray() ); dlg->setWindowTitle( panel->panelTitle() ); dlg->setLayout( new QVBoxLayout() ); dlg->layout()->addWidget( panel ); QDialogButtonBox *buttonBox = new QDialogButtonBox( QDialogButtonBox::Ok ); connect( buttonBox, &QDialogButtonBox::accepted, dlg, &QDialog::accept ); dlg->layout()->addWidget( buttonBox ); dlg->exec(); settings.setValue( key, dlg->saveGeometry() ); panel->acceptPanel(); } } void QgsPanelWidget::acceptPanel() { emit panelAccepted( this ); } void QgsPanelWidget::keyPressEvent( QKeyEvent *event ) { if ( event->key() == Qt::Key_Escape ) { acceptPanel(); } } QgsPanelWidgetWrapper::QgsPanelWidgetWrapper( QWidget *widget, QWidget *parent ) : QgsPanelWidget( parent ) , mWidget( widget ) { this->setLayout( new QVBoxLayout() ); this->layout()->setContentsMargins( 0, 0, 0, 0 ); this->layout()->addWidget( widget ); }
gpl-2.0
HossainKhademian/XBMC
lib/libUPnP/Neptune/Source/Data/TLS/Base/NptTlsTrustAnchor_Base_0071.cpp
267
12398
/***************************************************************** | | Neptune - Trust Anchors | | This file is automatically generated by a script, do not edit! | | Copyright (c) 2002-2010, Axiomatic Systems, LLC. | All rights reserved. | | Redistribution and use in source and binary forms, with or without | modification, are permitted provided that the following conditions are met: | * Redistributions of source code must retain the above copyright | notice, this list of conditions and the following disclaimer. | * Redistributions in binary form must reproduce the above copyright | notice, this list of conditions and the following disclaimer in the | documentation and/or other materials provided with the distribution. | * Neither the name of Axiomatic Systems nor the | names of its contributors may be used to endorse or promote products | derived from this software without specific prior written permission. | | THIS SOFTWARE IS PROVIDED BY AXIOMATIC SYSTEMS ''AS IS'' AND ANY | EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | DISCLAIMED. IN NO EVENT SHALL AXIOMATIC SYSTEMS BE LIABLE FOR ANY | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ****************************************************************/ /* IPS CLASEA1 root */ const unsigned char NptTlsTrustAnchor_Base_0071_Data[2043] = { 0x30,0x82,0x07,0xf7,0x30,0x82,0x07,0x60 ,0xa0,0x03,0x02,0x01,0x02,0x02,0x01,0x00 ,0x30,0x0d,0x06,0x09,0x2a,0x86,0x48,0x86 ,0xf7,0x0d,0x01,0x01,0x05,0x05,0x00,0x30 ,0x82,0x01,0x14,0x31,0x0b,0x30,0x09,0x06 ,0x03,0x55,0x04,0x06,0x13,0x02,0x45,0x53 ,0x31,0x12,0x30,0x10,0x06,0x03,0x55,0x04 ,0x08,0x13,0x09,0x42,0x61,0x72,0x63,0x65 ,0x6c,0x6f,0x6e,0x61,0x31,0x12,0x30,0x10 ,0x06,0x03,0x55,0x04,0x07,0x13,0x09,0x42 ,0x61,0x72,0x63,0x65,0x6c,0x6f,0x6e,0x61 ,0x31,0x2e,0x30,0x2c,0x06,0x03,0x55,0x04 ,0x0a,0x13,0x25,0x49,0x50,0x53,0x20,0x49 ,0x6e,0x74,0x65,0x72,0x6e,0x65,0x74,0x20 ,0x70,0x75,0x62,0x6c,0x69,0x73,0x68,0x69 ,0x6e,0x67,0x20,0x53,0x65,0x72,0x76,0x69 ,0x63,0x65,0x73,0x20,0x73,0x2e,0x6c,0x2e ,0x31,0x2b,0x30,0x29,0x06,0x03,0x55,0x04 ,0x0a,0x14,0x22,0x69,0x70,0x73,0x40,0x6d ,0x61,0x69,0x6c,0x2e,0x69,0x70,0x73,0x2e ,0x65,0x73,0x20,0x43,0x2e,0x49,0x2e,0x46 ,0x2e,0x20,0x20,0x42,0x2d,0x36,0x30,0x39 ,0x32,0x39,0x34,0x35,0x32,0x31,0x2f,0x30 ,0x2d,0x06,0x03,0x55,0x04,0x0b,0x13,0x26 ,0x49,0x50,0x53,0x20,0x43,0x41,0x20,0x43 ,0x4c,0x41,0x53,0x45,0x41,0x31,0x20,0x43 ,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61 ,0x74,0x69,0x6f,0x6e,0x20,0x41,0x75,0x74 ,0x68,0x6f,0x72,0x69,0x74,0x79,0x31,0x2f ,0x30,0x2d,0x06,0x03,0x55,0x04,0x03,0x13 ,0x26,0x49,0x50,0x53,0x20,0x43,0x41,0x20 ,0x43,0x4c,0x41,0x53,0x45,0x41,0x31,0x20 ,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63 ,0x61,0x74,0x69,0x6f,0x6e,0x20,0x41,0x75 ,0x74,0x68,0x6f,0x72,0x69,0x74,0x79,0x31 ,0x1e,0x30,0x1c,0x06,0x09,0x2a,0x86,0x48 ,0x86,0xf7,0x0d,0x01,0x09,0x01,0x16,0x0f ,0x69,0x70,0x73,0x40,0x6d,0x61,0x69,0x6c ,0x2e,0x69,0x70,0x73,0x2e,0x65,0x73,0x30 ,0x1e,0x17,0x0d,0x30,0x31,0x31,0x32,0x32 ,0x39,0x30,0x31,0x30,0x35,0x33,0x32,0x5a ,0x17,0x0d,0x32,0x35,0x31,0x32,0x32,0x37 
,0x30,0x31,0x30,0x35,0x33,0x32,0x5a,0x30 ,0x82,0x01,0x14,0x31,0x0b,0x30,0x09,0x06 ,0x03,0x55,0x04,0x06,0x13,0x02,0x45,0x53 ,0x31,0x12,0x30,0x10,0x06,0x03,0x55,0x04 ,0x08,0x13,0x09,0x42,0x61,0x72,0x63,0x65 ,0x6c,0x6f,0x6e,0x61,0x31,0x12,0x30,0x10 ,0x06,0x03,0x55,0x04,0x07,0x13,0x09,0x42 ,0x61,0x72,0x63,0x65,0x6c,0x6f,0x6e,0x61 ,0x31,0x2e,0x30,0x2c,0x06,0x03,0x55,0x04 ,0x0a,0x13,0x25,0x49,0x50,0x53,0x20,0x49 ,0x6e,0x74,0x65,0x72,0x6e,0x65,0x74,0x20 ,0x70,0x75,0x62,0x6c,0x69,0x73,0x68,0x69 ,0x6e,0x67,0x20,0x53,0x65,0x72,0x76,0x69 ,0x63,0x65,0x73,0x20,0x73,0x2e,0x6c,0x2e ,0x31,0x2b,0x30,0x29,0x06,0x03,0x55,0x04 ,0x0a,0x14,0x22,0x69,0x70,0x73,0x40,0x6d ,0x61,0x69,0x6c,0x2e,0x69,0x70,0x73,0x2e ,0x65,0x73,0x20,0x43,0x2e,0x49,0x2e,0x46 ,0x2e,0x20,0x20,0x42,0x2d,0x36,0x30,0x39 ,0x32,0x39,0x34,0x35,0x32,0x31,0x2f,0x30 ,0x2d,0x06,0x03,0x55,0x04,0x0b,0x13,0x26 ,0x49,0x50,0x53,0x20,0x43,0x41,0x20,0x43 ,0x4c,0x41,0x53,0x45,0x41,0x31,0x20,0x43 ,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61 ,0x74,0x69,0x6f,0x6e,0x20,0x41,0x75,0x74 ,0x68,0x6f,0x72,0x69,0x74,0x79,0x31,0x2f ,0x30,0x2d,0x06,0x03,0x55,0x04,0x03,0x13 ,0x26,0x49,0x50,0x53,0x20,0x43,0x41,0x20 ,0x43,0x4c,0x41,0x53,0x45,0x41,0x31,0x20 ,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63 ,0x61,0x74,0x69,0x6f,0x6e,0x20,0x41,0x75 ,0x74,0x68,0x6f,0x72,0x69,0x74,0x79,0x31 ,0x1e,0x30,0x1c,0x06,0x09,0x2a,0x86,0x48 ,0x86,0xf7,0x0d,0x01,0x09,0x01,0x16,0x0f ,0x69,0x70,0x73,0x40,0x6d,0x61,0x69,0x6c ,0x2e,0x69,0x70,0x73,0x2e,0x65,0x73,0x30 ,0x81,0x9f,0x30,0x0d,0x06,0x09,0x2a,0x86 ,0x48,0x86,0xf7,0x0d,0x01,0x01,0x01,0x05 ,0x00,0x03,0x81,0x8d,0x00,0x30,0x81,0x89 ,0x02,0x81,0x81,0x00,0xbb,0x30,0xd7,0xdc ,0xd0,0x54,0xbd,0x35,0x4e,0x9f,0xc5,0x4c ,0x82,0xea,0xd1,0x50,0x3c,0x47,0x98,0xfc ,0x9b,0x69,0x9d,0x77,0xcd,0x6e,0xe0,0x3f ,0xee,0xeb,0x32,0x5f,0x5f,0x9f,0xd2,0xd0 ,0x79,0xe5,0x95,0x73,0x44,0x21,0x32,0xe0 ,0x0a,0xdb,0x9d,0xd7,0xce,0x8d,0xab,0x52 ,0x8b,0x2b,0x78,0xe0,0x9b,0x5b,0x7d,0xf4 ,0xfd,0x6d,0x09,0xe5,0xae,0xe1,0x6c,0x1d ,0x07,0x23,0xa0,0x17,0xd1,0xf9,0x7d,0xa8 ,0x46,0x46,0x91,0x22,0xa8,0xb2,0x69,0xc6 ,0xad,0xf7,0xf5,0xf5,0x94,0xa1,0x30,0x94 ,0xbd,0x00,0xcc,0x44,0x7f,0xee,0xc4,0x9e ,0xc9,0xc1,0xe6,0x8f,0x0a,0x36,0xc1,0xfd ,0x24,0x3d,0x01,0xa0,0xf5,0x7b,0xe2,0x7c ,0x78,0x66,0x43,0x8b,0x4f,0x59,0xf2,0x9b ,0xd9,0xfa,0x49,0xb3,0x02,0x03,0x01,0x00 ,0x01,0xa3,0x82,0x04,0x53,0x30,0x82,0x04 ,0x4f,0x30,0x1d,0x06,0x03,0x55,0x1d,0x0e ,0x04,0x16,0x04,0x14,0x67,0x26,0x96,0xe7 ,0xa1,0xbf,0xd8,0xb5,0x03,0x9d,0xfe,0x3b ,0xdc,0xfe,0xf2,0x8a,0xe6,0x15,0xdd,0x30 ,0x30,0x82,0x01,0x46,0x06,0x03,0x55,0x1d ,0x23,0x04,0x82,0x01,0x3d,0x30,0x82,0x01 ,0x39,0x80,0x14,0x67,0x26,0x96,0xe7,0xa1 ,0xbf,0xd8,0xb5,0x03,0x9d,0xfe,0x3b,0xdc ,0xfe,0xf2,0x8a,0xe6,0x15,0xdd,0x30,0xa1 ,0x82,0x01,0x1c,0xa4,0x82,0x01,0x18,0x30 ,0x82,0x01,0x14,0x31,0x0b,0x30,0x09,0x06 ,0x03,0x55,0x04,0x06,0x13,0x02,0x45,0x53 ,0x31,0x12,0x30,0x10,0x06,0x03,0x55,0x04 ,0x08,0x13,0x09,0x42,0x61,0x72,0x63,0x65 ,0x6c,0x6f,0x6e,0x61,0x31,0x12,0x30,0x10 ,0x06,0x03,0x55,0x04,0x07,0x13,0x09,0x42 ,0x61,0x72,0x63,0x65,0x6c,0x6f,0x6e,0x61 ,0x31,0x2e,0x30,0x2c,0x06,0x03,0x55,0x04 ,0x0a,0x13,0x25,0x49,0x50,0x53,0x20,0x49 ,0x6e,0x74,0x65,0x72,0x6e,0x65,0x74,0x20 ,0x70,0x75,0x62,0x6c,0x69,0x73,0x68,0x69 ,0x6e,0x67,0x20,0x53,0x65,0x72,0x76,0x69 ,0x63,0x65,0x73,0x20,0x73,0x2e,0x6c,0x2e ,0x31,0x2b,0x30,0x29,0x06,0x03,0x55,0x04 ,0x0a,0x14,0x22,0x69,0x70,0x73,0x40,0x6d ,0x61,0x69,0x6c,0x2e,0x69,0x70,0x73,0x2e ,0x65,0x73,0x20,0x43,0x2e,0x49,0x2e,0x46 ,0x2e,0x20,0x20,0x42,0x2d,0x36,0x30,0x39 ,0x32,0x39,0x34,0x35,0x32,0x31,0x2f,0x30 
,0x2d,0x06,0x03,0x55,0x04,0x0b,0x13,0x26 ,0x49,0x50,0x53,0x20,0x43,0x41,0x20,0x43 ,0x4c,0x41,0x53,0x45,0x41,0x31,0x20,0x43 ,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61 ,0x74,0x69,0x6f,0x6e,0x20,0x41,0x75,0x74 ,0x68,0x6f,0x72,0x69,0x74,0x79,0x31,0x2f ,0x30,0x2d,0x06,0x03,0x55,0x04,0x03,0x13 ,0x26,0x49,0x50,0x53,0x20,0x43,0x41,0x20 ,0x43,0x4c,0x41,0x53,0x45,0x41,0x31,0x20 ,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63 ,0x61,0x74,0x69,0x6f,0x6e,0x20,0x41,0x75 ,0x74,0x68,0x6f,0x72,0x69,0x74,0x79,0x31 ,0x1e,0x30,0x1c,0x06,0x09,0x2a,0x86,0x48 ,0x86,0xf7,0x0d,0x01,0x09,0x01,0x16,0x0f ,0x69,0x70,0x73,0x40,0x6d,0x61,0x69,0x6c ,0x2e,0x69,0x70,0x73,0x2e,0x65,0x73,0x82 ,0x01,0x00,0x30,0x0c,0x06,0x03,0x55,0x1d ,0x13,0x04,0x05,0x30,0x03,0x01,0x01,0xff ,0x30,0x0c,0x06,0x03,0x55,0x1d,0x0f,0x04 ,0x05,0x03,0x03,0x07,0xff,0x80,0x30,0x6b ,0x06,0x03,0x55,0x1d,0x25,0x04,0x64,0x30 ,0x62,0x06,0x08,0x2b,0x06,0x01,0x05,0x05 ,0x07,0x03,0x01,0x06,0x08,0x2b,0x06,0x01 ,0x05,0x05,0x07,0x03,0x02,0x06,0x08,0x2b ,0x06,0x01,0x05,0x05,0x07,0x03,0x03,0x06 ,0x08,0x2b,0x06,0x01,0x05,0x05,0x07,0x03 ,0x04,0x06,0x08,0x2b,0x06,0x01,0x05,0x05 ,0x07,0x03,0x08,0x06,0x0a,0x2b,0x06,0x01 ,0x04,0x01,0x82,0x37,0x02,0x01,0x15,0x06 ,0x0a,0x2b,0x06,0x01,0x04,0x01,0x82,0x37 ,0x02,0x01,0x16,0x06,0x0a,0x2b,0x06,0x01 ,0x04,0x01,0x82,0x37,0x0a,0x03,0x01,0x06 ,0x0a,0x2b,0x06,0x01,0x04,0x01,0x82,0x37 ,0x0a,0x03,0x04,0x30,0x11,0x06,0x09,0x60 ,0x86,0x48,0x01,0x86,0xf8,0x42,0x01,0x01 ,0x04,0x04,0x03,0x02,0x00,0x07,0x30,0x1a ,0x06,0x03,0x55,0x1d,0x11,0x04,0x13,0x30 ,0x11,0x81,0x0f,0x69,0x70,0x73,0x40,0x6d ,0x61,0x69,0x6c,0x2e,0x69,0x70,0x73,0x2e ,0x65,0x73,0x30,0x1a,0x06,0x03,0x55,0x1d ,0x12,0x04,0x13,0x30,0x11,0x81,0x0f,0x69 ,0x70,0x73,0x40,0x6d,0x61,0x69,0x6c,0x2e ,0x69,0x70,0x73,0x2e,0x65,0x73,0x30,0x42 ,0x06,0x09,0x60,0x86,0x48,0x01,0x86,0xf8 ,0x42,0x01,0x0d,0x04,0x35,0x16,0x33,0x43 ,0x4c,0x41,0x53,0x45,0x41,0x31,0x20,0x43 ,0x41,0x20,0x43,0x65,0x72,0x74,0x69,0x66 ,0x69,0x63,0x61,0x74,0x65,0x20,0x69,0x73 ,0x73,0x75,0x65,0x64,0x20,0x62,0x79,0x20 ,0x68,0x74,0x74,0x70,0x3a,0x2f,0x2f,0x77 ,0x77,0x77,0x2e,0x69,0x70,0x73,0x2e,0x65 ,0x73,0x2f,0x30,0x29,0x06,0x09,0x60,0x86 ,0x48,0x01,0x86,0xf8,0x42,0x01,0x02,0x04 ,0x1c,0x16,0x1a,0x68,0x74,0x74,0x70,0x3a ,0x2f,0x2f,0x77,0x77,0x77,0x2e,0x69,0x70 ,0x73,0x2e,0x65,0x73,0x2f,0x69,0x70,0x73 ,0x32,0x30,0x30,0x32,0x2f,0x30,0x3b,0x06 ,0x09,0x60,0x86,0x48,0x01,0x86,0xf8,0x42 ,0x01,0x04,0x04,0x2e,0x16,0x2c,0x68,0x74 ,0x74,0x70,0x3a,0x2f,0x2f,0x77,0x77,0x77 ,0x2e,0x69,0x70,0x73,0x2e,0x65,0x73,0x2f ,0x69,0x70,0x73,0x32,0x30,0x30,0x32,0x2f ,0x69,0x70,0x73,0x32,0x30,0x30,0x32,0x43 ,0x4c,0x41,0x53,0x45,0x41,0x31,0x2e,0x63 ,0x72,0x6c,0x30,0x40,0x06,0x09,0x60,0x86 ,0x48,0x01,0x86,0xf8,0x42,0x01,0x03,0x04 ,0x33,0x16,0x31,0x68,0x74,0x74,0x70,0x3a ,0x2f,0x2f,0x77,0x77,0x77,0x2e,0x69,0x70 ,0x73,0x2e,0x65,0x73,0x2f,0x69,0x70,0x73 ,0x32,0x30,0x30,0x32,0x2f,0x72,0x65,0x76 ,0x6f,0x63,0x61,0x74,0x69,0x6f,0x6e,0x43 ,0x4c,0x41,0x53,0x45,0x41,0x31,0x2e,0x68 ,0x74,0x6d,0x6c,0x3f,0x30,0x3d,0x06,0x09 ,0x60,0x86,0x48,0x01,0x86,0xf8,0x42,0x01 ,0x07,0x04,0x30,0x16,0x2e,0x68,0x74,0x74 ,0x70,0x3a,0x2f,0x2f,0x77,0x77,0x77,0x2e ,0x69,0x70,0x73,0x2e,0x65,0x73,0x2f,0x69 ,0x70,0x73,0x32,0x30,0x30,0x32,0x2f,0x72 ,0x65,0x6e,0x65,0x77,0x61,0x6c,0x43,0x4c ,0x41,0x53,0x45,0x41,0x31,0x2e,0x68,0x74 ,0x6d,0x6c,0x3f,0x30,0x3b,0x06,0x09,0x60 ,0x86,0x48,0x01,0x86,0xf8,0x42,0x01,0x08 ,0x04,0x2e,0x16,0x2c,0x68,0x74,0x74,0x70 ,0x3a,0x2f,0x2f,0x77,0x77,0x77,0x2e,0x69 ,0x70,0x73,0x2e,0x65,0x73,0x2f,0x69,0x70 ,0x73,0x32,0x30,0x30,0x32,0x2f,0x70,0x6f 
,0x6c,0x69,0x63,0x79,0x43,0x4c,0x41,0x53 ,0x45,0x41,0x31,0x2e,0x68,0x74,0x6d,0x6c ,0x30,0x75,0x06,0x03,0x55,0x1d,0x1f,0x04 ,0x6e,0x30,0x6c,0x30,0x32,0xa0,0x30,0xa0 ,0x2e,0x86,0x2c,0x68,0x74,0x74,0x70,0x3a ,0x2f,0x2f,0x77,0x77,0x77,0x2e,0x69,0x70 ,0x73,0x2e,0x65,0x73,0x2f,0x69,0x70,0x73 ,0x32,0x30,0x30,0x32,0x2f,0x69,0x70,0x73 ,0x32,0x30,0x30,0x32,0x43,0x4c,0x41,0x53 ,0x45,0x41,0x31,0x2e,0x63,0x72,0x6c,0x30 ,0x36,0xa0,0x34,0xa0,0x32,0x86,0x30,0x68 ,0x74,0x74,0x70,0x3a,0x2f,0x2f,0x77,0x77 ,0x77,0x62,0x61,0x63,0x6b,0x2e,0x69,0x70 ,0x73,0x2e,0x65,0x73,0x2f,0x69,0x70,0x73 ,0x32,0x30,0x30,0x32,0x2f,0x69,0x70,0x73 ,0x32,0x30,0x30,0x32,0x43,0x4c,0x41,0x53 ,0x45,0x41,0x31,0x2e,0x63,0x72,0x6c,0x30 ,0x2f,0x06,0x08,0x2b,0x06,0x01,0x05,0x05 ,0x07,0x01,0x01,0x04,0x23,0x30,0x21,0x30 ,0x1f,0x06,0x08,0x2b,0x06,0x01,0x05,0x05 ,0x07,0x30,0x01,0x86,0x13,0x68,0x74,0x74 ,0x70,0x3a,0x2f,0x2f,0x6f,0x63,0x73,0x70 ,0x2e,0x69,0x70,0x73,0x2e,0x65,0x73,0x2f ,0x30,0x0d,0x06,0x09,0x2a,0x86,0x48,0x86 ,0xf7,0x0d,0x01,0x01,0x05,0x05,0x00,0x03 ,0x81,0x81,0x00,0x7e,0xba,0x8a,0xac,0x80 ,0x00,0x84,0x15,0x0a,0xd5,0x98,0x51,0x0c ,0x64,0xc5,0x9c,0x02,0x58,0x83,0x66,0xca ,0xad,0x1e,0x07,0xcd,0x7e,0x6a,0xda,0x80 ,0x07,0xdf,0x03,0x34,0x4a,0x1c,0x93,0xc4 ,0x4b,0x58,0x20,0x35,0x36,0x71,0xed,0xa2 ,0x0a,0x35,0x12,0xa5,0xa6,0x65,0xa7,0x85 ,0x69,0x0a,0x0e,0xe3,0x61,0xee,0xea,0xbe ,0x28,0x93,0x33,0xd5,0xec,0xe8,0xbe,0xc4 ,0xdb,0x5f,0x7f,0xa8,0xf9,0x63,0x31,0xc8 ,0x6b,0x96,0xe2,0x29,0xc2,0x5b,0xa0,0xe7 ,0x97,0x36,0x9d,0x77,0x5e,0x31,0x6b,0xfe ,0xd3,0xa7,0xdb,0x2a,0xdb,0xdb,0x96,0x8b ,0x1f,0x66,0xde,0xb6,0x03,0xc0,0x2b,0xb3 ,0x78,0xd6,0x55,0x07,0xe5,0x8f,0x39,0x50 ,0xde,0x07,0x23,0x72,0xe6,0xbd,0x20,0x14 ,0x4b,0xb4,0x86}; const unsigned int NptTlsTrustAnchor_Base_0071_Size = 2043;
gpl-2.0
3Legs/w4118
arch/m68k/q40/config.c
779
7332
/* * arch/m68k/q40/config.c * * Copyright (C) 1999 Richard Zidlicky * * originally based on: * * linux/bvme/config.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file README.legal in the main directory of this archive * for more details. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/console.h> #include <linux/linkage.h> #include <linux/init.h> #include <linux/major.h> #include <linux/serial_reg.h> #include <linux/rtc.h> #include <linux/vt_kern.h> #include <linux/bcd.h> #include <asm/io.h> #include <asm/rtc.h> #include <asm/bootinfo.h> #include <asm/system.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> #include <asm/traps.h> #include <asm/machdep.h> #include <asm/q40_master.h> extern void q40_init_IRQ(void); static void q40_get_model(char *model); extern void q40_sched_init(irq_handler_t handler); static unsigned long q40_gettimeoffset(void); static int q40_hwclk(int, struct rtc_time *); static unsigned int q40_get_ss(void); static int q40_set_clock_mmss(unsigned long); static int q40_get_rtc_pll(struct rtc_pll_info *pll); static int q40_set_rtc_pll(struct rtc_pll_info *pll); extern void q40_mksound(unsigned int /*freq*/, unsigned int /*ticks*/); static void q40_mem_console_write(struct console *co, const char *b, unsigned int count); extern int ql_ticks; static struct console q40_console_driver = { .name = "debug", .write = q40_mem_console_write, .flags = CON_PRINTBUFFER, .index = -1, }; /* early debugging function:*/ extern char *q40_mem_cptr; /*=(char *)0xff020000;*/ static int _cpleft; static void q40_mem_console_write(struct console *co, const char *s, unsigned int count) { const char *p = s; if (count < _cpleft) { while (count-- > 0) { *q40_mem_cptr = *p++; q40_mem_cptr += 4; _cpleft--; } } } static int __init q40_debug_setup(char *arg) { /* useful for early debugging stages - writes kernel messages into SRAM */ if (MACH_IS_Q40 && !strncmp(arg, "mem", 3)) { /*printk("using NVRAM debug, q40_mem_cptr=%p\n",q40_mem_cptr);*/ _cpleft = 2000 - ((long)q40_mem_cptr-0xff020000) / 4; register_console(&q40_console_driver); } return 0; } early_param("debug", q40_debug_setup); #if 0 void printq40(char *str) { int l = strlen(str); char *p = q40_mem_cptr; while (l-- > 0 && _cpleft-- > 0) { *p = *str++; p += 4; } q40_mem_cptr = p; } #endif static int halted; #ifdef CONFIG_HEARTBEAT static void q40_heartbeat(int on) { if (halted) return; if (on) Q40_LED_ON(); else Q40_LED_OFF(); } #endif static void q40_reset(void) { halted = 1; printk("\n\n*******************************************\n" "Called q40_reset : press the RESET button!! 
\n" "*******************************************\n"); Q40_LED_ON(); while (1) ; } static void q40_halt(void) { halted = 1; printk("\n\n*******************\n" " Called q40_halt\n" "*******************\n"); Q40_LED_ON(); while (1) ; } static void q40_get_model(char *model) { sprintf(model, "Q40"); } static unsigned int serports[] = { 0x3f8,0x2f8,0x3e8,0x2e8,0 }; static void q40_disable_irqs(void) { unsigned i, j; j = 0; while ((i = serports[j++])) outb(0, i + UART_IER); master_outb(0, EXT_ENABLE_REG); master_outb(0, KEY_IRQ_ENABLE_REG); } void __init config_q40(void) { mach_sched_init = q40_sched_init; mach_init_IRQ = q40_init_IRQ; mach_gettimeoffset = q40_gettimeoffset; mach_hwclk = q40_hwclk; mach_get_ss = q40_get_ss; mach_get_rtc_pll = q40_get_rtc_pll; mach_set_rtc_pll = q40_set_rtc_pll; mach_set_clock_mmss = q40_set_clock_mmss; mach_reset = q40_reset; mach_get_model = q40_get_model; #if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE) mach_beep = q40_mksound; #endif #ifdef CONFIG_HEARTBEAT mach_heartbeat = q40_heartbeat; #endif mach_halt = q40_halt; /* disable a few things that SMSQ might have left enabled */ q40_disable_irqs(); /* no DMA at all, but ide-scsi requires it.. make sure * all physical RAM fits into the boundary - otherwise * allocator may play costly and useless tricks */ mach_max_dma_address = 1024*1024*1024; } int q40_parse_bootinfo(const struct bi_record *rec) { return 1; } static unsigned long q40_gettimeoffset(void) { return 5000 * (ql_ticks != 0); } /* * Looks like op is non-zero for setting the clock, and zero for * reading the clock. * * struct hwclk_time { * unsigned sec; 0..59 * unsigned min; 0..59 * unsigned hour; 0..23 * unsigned day; 1..31 * unsigned mon; 0..11 * unsigned year; 00... * int wday; 0..6, 0 is Sunday, -1 means unknown/don't set * }; */ static int q40_hwclk(int op, struct rtc_time *t) { if (op) { /* Write.... */ Q40_RTC_CTRL |= Q40_RTC_WRITE; Q40_RTC_SECS = bin2bcd(t->tm_sec); Q40_RTC_MINS = bin2bcd(t->tm_min); Q40_RTC_HOUR = bin2bcd(t->tm_hour); Q40_RTC_DATE = bin2bcd(t->tm_mday); Q40_RTC_MNTH = bin2bcd(t->tm_mon + 1); Q40_RTC_YEAR = bin2bcd(t->tm_year%100); if (t->tm_wday >= 0) Q40_RTC_DOW = bin2bcd(t->tm_wday+1); Q40_RTC_CTRL &= ~(Q40_RTC_WRITE); } else { /* Read.... */ Q40_RTC_CTRL |= Q40_RTC_READ; t->tm_year = bcd2bin (Q40_RTC_YEAR); t->tm_mon = bcd2bin (Q40_RTC_MNTH)-1; t->tm_mday = bcd2bin (Q40_RTC_DATE); t->tm_hour = bcd2bin (Q40_RTC_HOUR); t->tm_min = bcd2bin (Q40_RTC_MINS); t->tm_sec = bcd2bin (Q40_RTC_SECS); Q40_RTC_CTRL &= ~(Q40_RTC_READ); if (t->tm_year < 70) t->tm_year += 100; t->tm_wday = bcd2bin(Q40_RTC_DOW)-1; } return 0; } static unsigned int q40_get_ss(void) { return bcd2bin(Q40_RTC_SECS); } /* * Set the minutes and seconds from seconds value 'nowtime'. Fail if * clock is out by > 30 minutes. Logic lifted from atari code. */ static int q40_set_clock_mmss(unsigned long nowtime) { int retval = 0; short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60; int rtc_minutes; rtc_minutes = bcd2bin(Q40_RTC_MINS); if ((rtc_minutes < real_minutes ? 
real_minutes - rtc_minutes : rtc_minutes - real_minutes) < 30) { Q40_RTC_CTRL |= Q40_RTC_WRITE; Q40_RTC_MINS = bin2bcd(real_minutes); Q40_RTC_SECS = bin2bcd(real_seconds); Q40_RTC_CTRL &= ~(Q40_RTC_WRITE); } else retval = -1; return retval; } /* get and set PLL calibration of RTC clock */ #define Q40_RTC_PLL_MASK ((1<<5)-1) #define Q40_RTC_PLL_SIGN (1<<5) static int q40_get_rtc_pll(struct rtc_pll_info *pll) { int tmp = Q40_RTC_CTRL; pll->pll_value = tmp & Q40_RTC_PLL_MASK; if (tmp & Q40_RTC_PLL_SIGN) pll->pll_value = -pll->pll_value; pll->pll_max = 31; pll->pll_min = -31; pll->pll_posmult = 512; pll->pll_negmult = 256; pll->pll_clock = 125829120; return 0; } static int q40_set_rtc_pll(struct rtc_pll_info *pll) { if (!pll->pll_ctrl) { /* the docs are a bit unclear so I am doublesetting */ /* RTC_WRITE here ... */ int tmp = (pll->pll_value & 31) | (pll->pll_value<0 ? 32 : 0) | Q40_RTC_WRITE; Q40_RTC_CTRL |= Q40_RTC_WRITE; Q40_RTC_CTRL = tmp; Q40_RTC_CTRL &= ~(Q40_RTC_WRITE); return 0; } else return -EINVAL; }
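The RTC accessors above shuttle every field through BCD. A self-contained sketch of that encoding, equivalent to the kernel's bin2bcd()/bcd2bin() from <linux/bcd.h>, for readers unfamiliar with it:

/* Standalone illustration of the BCD packing used by q40_hwclk();
 * the kernel helpers bin2bcd()/bcd2bin() do exactly this. */
static unsigned char to_bcd(unsigned val)	/* val must be 0..99 */
{
	return ((val / 10) << 4) | (val % 10);
}

static unsigned from_bcd(unsigned char bcd)
{
	return (bcd >> 4) * 10 + (bcd & 0x0f);
}

/* e.g. to_bcd(59) == 0x59, from_bcd(0x23) == 23 */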
gpl-2.0
ckaestne/TypeChef-linux.2.6.33.3
drivers/gpio/xilinx_gpio.c
779
7174
/* * Xilinx gpio driver * * Copyright 2008 Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/errno.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/of_gpio.h> #include <linux/io.h> #include <linux/gpio.h> /* Register Offset Definitions */ #define XGPIO_DATA_OFFSET (0x0) /* Data register */ #define XGPIO_TRI_OFFSET (0x4) /* I/O direction register */ struct xgpio_instance { struct of_mm_gpio_chip mmchip; u32 gpio_state; /* GPIO state shadow register */ u32 gpio_dir; /* GPIO direction shadow register */ spinlock_t gpio_lock; /* Lock used for synchronization */ }; /** * xgpio_get - Read the specified signal of the GPIO device. * @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. * * This function reads the specified signal of the GPIO device. It returns 0 if * the signal is clear, 1 if the signal is set, or a negative value on error. */ static int xgpio_get(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); return (in_be32(mm_gc->regs + XGPIO_DATA_OFFSET) >> gpio) & 1; } /** * xgpio_set - Write the specified signal of the GPIO device. * @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. * @val: Value to be written to specified signal. * * This function writes the specified value into the specified signal of the * GPIO device. */ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long flags; struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct xgpio_instance *chip = container_of(mm_gc, struct xgpio_instance, mmchip); spin_lock_irqsave(&chip->gpio_lock, flags); /* Write to GPIO signal */ if (val) chip->gpio_state |= 1 << gpio; else chip->gpio_state &= ~(1 << gpio); out_be32(mm_gc->regs + XGPIO_DATA_OFFSET, chip->gpio_state); spin_unlock_irqrestore(&chip->gpio_lock, flags); } /** * xgpio_dir_in - Set the direction of the specified GPIO signal as input. * @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. * * This function sets the direction of the specified GPIO signal as input. * It returns 0 if the direction was set as input, otherwise it returns a * negative error value. */ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio) { unsigned long flags; struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct xgpio_instance *chip = container_of(mm_gc, struct xgpio_instance, mmchip); spin_lock_irqsave(&chip->gpio_lock, flags); /* Set the GPIO bit in shadow register and set direction as input */ chip->gpio_dir |= (1 << gpio); out_be32(mm_gc->regs + XGPIO_TRI_OFFSET, chip->gpio_dir); spin_unlock_irqrestore(&chip->gpio_lock, flags); return 0; } /** * xgpio_dir_out - Set the direction of the specified GPIO signal as output. * @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. * @val: Value to be written to specified signal. * * This function sets the direction of the specified GPIO signal as output. * If all GPIO signals of the GPIO chip are configured as input then it * returns an error, otherwise it returns 0. */ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long flags; struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct xgpio_instance *chip = container_of(mm_gc, struct xgpio_instance, mmchip); spin_lock_irqsave(&chip->gpio_lock, flags); /* Write state of GPIO signal */ if (val) chip->gpio_state |= 1 << gpio; else chip->gpio_state &= ~(1 << gpio); out_be32(mm_gc->regs + XGPIO_DATA_OFFSET, chip->gpio_state); /* Clear the GPIO bit in shadow register and set direction as output */ chip->gpio_dir &= (~(1 << gpio)); out_be32(mm_gc->regs + XGPIO_TRI_OFFSET, chip->gpio_dir); spin_unlock_irqrestore(&chip->gpio_lock, flags); return 0; } /** * xgpio_save_regs - Set initial values of GPIO pins * @mm_gc: pointer to memory mapped GPIO chip structure */ static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc) { struct xgpio_instance *chip = container_of(mm_gc, struct xgpio_instance, mmchip); out_be32(mm_gc->regs + XGPIO_DATA_OFFSET, chip->gpio_state); out_be32(mm_gc->regs + XGPIO_TRI_OFFSET, chip->gpio_dir); } /** * xgpio_of_probe - Probe method for the GPIO device. * @np: pointer to device tree node * * This function probes the GPIO device in the device tree. It initializes the * driver data structure. It returns 0 if the driver is bound to the GPIO * device, or a negative value if there is an error. */ static int __devinit xgpio_of_probe(struct device_node *np) { struct xgpio_instance *chip; struct of_gpio_chip *ofchip; int status = 0; const u32 *tree_info; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; ofchip = &chip->mmchip.of_gc; /* Update GPIO state shadow register with default value */ tree_info = of_get_property(np, "xlnx,dout-default", NULL); if (tree_info) chip->gpio_state = *tree_info; /* Update GPIO direction shadow register with default value */ chip->gpio_dir = 0xFFFFFFFF; /* By default, all pins are inputs */ tree_info = of_get_property(np, "xlnx,tri-default", NULL); if (tree_info) chip->gpio_dir = *tree_info; /* Check device node and parent device node for device width */ ofchip->gc.ngpio = 32; /* By default assume full GPIO controller */ tree_info = of_get_property(np, "xlnx,gpio-width", NULL); if (!tree_info) tree_info = of_get_property(np->parent, "xlnx,gpio-width", NULL); if (tree_info) ofchip->gc.ngpio = *tree_info; spin_lock_init(&chip->gpio_lock); ofchip->gpio_cells = 2; ofchip->gc.direction_input = xgpio_dir_in; ofchip->gc.direction_output = xgpio_dir_out; ofchip->gc.get = xgpio_get; ofchip->gc.set = xgpio_set; chip->mmchip.save_regs = xgpio_save_regs; /* Call the OF gpio helper to setup and register the GPIO device */ status = of_mm_gpiochip_add(np, &chip->mmchip); if (status) { kfree(chip); pr_err("%s: error in probe function with status %d\n", np->full_name, status); return status; } pr_info("XGpio: %s: registered\n", np->full_name); return 0; } static struct of_device_id xgpio_of_match[] __devinitdata = { { .compatible = "xlnx,xps-gpio-1.00.a", }, { /* end of list */ }, }; static int __init xgpio_init(void) { struct device_node *np; for_each_matching_node(np, xgpio_of_match) xgpio_of_probe(np); return 0; } /* Make sure we get initialized before anyone else tries to use us */ subsys_initcall(xgpio_init); /* No exit call at the moment as we cannot unregister GPIO chips */ MODULE_AUTHOR("Xilinx, Inc."); MODULE_DESCRIPTION("Xilinx GPIO driver"); MODULE_LICENSE("GPL");
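The driver above never reads its write-only data and direction registers back; it mutates the gpio_state/gpio_dir shadows under a spinlock and rewrites the whole word. A condensed sketch of that shadow-register pattern (illustrative only, not part of the driver):

/* Shadow-register pattern distilled from xgpio_set()/xgpio_dir_in():
 * mutate a cached copy under a lock, then write the full word out, so
 * write-only hardware registers never need to be read back. */
struct shadowed_reg {
	spinlock_t lock;
	u32 shadow;
	void __iomem *reg;
};

static void shadowed_update_bit(struct shadowed_reg *s, unsigned bit, int set)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	if (set)
		s->shadow |= 1U << bit;
	else
		s->shadow &= ~(1U << bit);
	out_be32(s->reg, s->shadow);
	spin_unlock_irqrestore(&s->lock, flags);
}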
gpl-2.0
schnitzeltony/linux
drivers/video/backlight/lm3639_bl.c
1803
10641
/* * Simple driver for Texas Instruments LM3639 Backlight + Flash LED driver chip * Copyright (C) 2012 Texas Instruments * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/leds.h> #include <linux/backlight.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/interrupt.h> #include <linux/regmap.h> #include <linux/platform_data/lm3639_bl.h> #define REG_DEV_ID 0x00 #define REG_CHECKSUM 0x01 #define REG_BL_CONF_1 0x02 #define REG_BL_CONF_2 0x03 #define REG_BL_CONF_3 0x04 #define REG_BL_CONF_4 0x05 #define REG_FL_CONF_1 0x06 #define REG_FL_CONF_2 0x07 #define REG_FL_CONF_3 0x08 #define REG_IO_CTRL 0x09 #define REG_ENABLE 0x0A #define REG_FLAG 0x0B #define REG_MAX REG_FLAG struct lm3639_chip_data { struct device *dev; struct lm3639_platform_data *pdata; struct backlight_device *bled; struct led_classdev cdev_flash; struct led_classdev cdev_torch; struct regmap *regmap; unsigned int bled_mode; unsigned int bled_map; unsigned int last_flag; }; /* initialize chip */ static int lm3639_chip_init(struct lm3639_chip_data *pchip) { int ret; unsigned int reg_val; struct lm3639_platform_data *pdata = pchip->pdata; /* input pins config. */ ret = regmap_update_bits(pchip->regmap, REG_BL_CONF_1, 0x08, pdata->pin_pwm); if (ret < 0) goto out; reg_val = (pdata->pin_pwm & 0x40) | pdata->pin_strobe | pdata->pin_tx; ret = regmap_update_bits(pchip->regmap, REG_IO_CTRL, 0x7C, reg_val); if (ret < 0) goto out; /* init brightness */ ret = regmap_write(pchip->regmap, REG_BL_CONF_4, pdata->init_brt_led); if (ret < 0) goto out; ret = regmap_write(pchip->regmap, REG_BL_CONF_3, pdata->init_brt_led); if (ret < 0) goto out; /* output pins config. */ if (!pdata->init_brt_led) { reg_val = pdata->fled_pins; reg_val |= pdata->bled_pins; } else { reg_val = pdata->fled_pins; reg_val |= pdata->bled_pins | 0x01; } ret = regmap_update_bits(pchip->regmap, REG_ENABLE, 0x79, reg_val); if (ret < 0) goto out; return ret; out: dev_err(pchip->dev, "i2c failed to access register\n"); return ret; } /* update and get brightness */ static int lm3639_bled_update_status(struct backlight_device *bl) { int ret; unsigned int reg_val; struct lm3639_chip_data *pchip = bl_get_data(bl); struct lm3639_platform_data *pdata = pchip->pdata; ret = regmap_read(pchip->regmap, REG_FLAG, &reg_val); if (ret < 0) goto out; if (reg_val != 0) dev_info(pchip->dev, "last flag is 0x%x\n", reg_val); /* pwm control */ if (pdata->pin_pwm) { if (pdata->pwm_set_intensity) pdata->pwm_set_intensity(bl->props.brightness, pdata->max_brt_led); else dev_err(pchip->dev, "No pwm control func. 
in plat-data\n"); return bl->props.brightness; } /* i2c control and set brightness */ ret = regmap_write(pchip->regmap, REG_BL_CONF_4, bl->props.brightness); if (ret < 0) goto out; ret = regmap_write(pchip->regmap, REG_BL_CONF_3, bl->props.brightness); if (ret < 0) goto out; if (!bl->props.brightness) ret = regmap_update_bits(pchip->regmap, REG_ENABLE, 0x01, 0x00); else ret = regmap_update_bits(pchip->regmap, REG_ENABLE, 0x01, 0x01); if (ret < 0) goto out; return bl->props.brightness; out: dev_err(pchip->dev, "i2c failed to access registers\n"); return bl->props.brightness; } static int lm3639_bled_get_brightness(struct backlight_device *bl) { int ret; unsigned int reg_val; struct lm3639_chip_data *pchip = bl_get_data(bl); struct lm3639_platform_data *pdata = pchip->pdata; if (pdata->pin_pwm) { if (pdata->pwm_get_intensity) bl->props.brightness = pdata->pwm_get_intensity(); else dev_err(pchip->dev, "No pwm control func. in plat-data\n"); return bl->props.brightness; } ret = regmap_read(pchip->regmap, REG_BL_CONF_1, &reg_val); if (ret < 0) goto out; if (reg_val & 0x10) ret = regmap_read(pchip->regmap, REG_BL_CONF_4, &reg_val); else ret = regmap_read(pchip->regmap, REG_BL_CONF_3, &reg_val); if (ret < 0) goto out; bl->props.brightness = reg_val; return bl->props.brightness; out: dev_err(pchip->dev, "i2c failed to access register\n"); return bl->props.brightness; } static const struct backlight_ops lm3639_bled_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = lm3639_bled_update_status, .get_brightness = lm3639_bled_get_brightness, }; /* backlight mapping mode */ static ssize_t lm3639_bled_mode_store(struct device *dev, struct device_attribute *devAttr, const char *buf, size_t size) { ssize_t ret; struct lm3639_chip_data *pchip = dev_get_drvdata(dev); unsigned int state; ret = kstrtouint(buf, 10, &state); if (ret) goto out_input; if (!state) ret = regmap_update_bits(pchip->regmap, REG_BL_CONF_1, 0x10, 0x00); else ret = regmap_update_bits(pchip->regmap, REG_BL_CONF_1, 0x10, 0x10); if (ret < 0) goto out; return size; out: dev_err(pchip->dev, "%s:i2c access fail to register\n", __func__); return ret; out_input: dev_err(pchip->dev, "%s:input conversion fail\n", __func__); return ret; } static DEVICE_ATTR(bled_mode, S_IWUSR, NULL, lm3639_bled_mode_store); /* torch */ static void lm3639_torch_brightness_set(struct led_classdev *cdev, enum led_brightness brightness) { int ret; unsigned int reg_val; struct lm3639_chip_data *pchip; pchip = container_of(cdev, struct lm3639_chip_data, cdev_torch); ret = regmap_read(pchip->regmap, REG_FLAG, &reg_val); if (ret < 0) goto out; if (reg_val != 0) dev_info(pchip->dev, "last flag is 0x%x\n", reg_val); /* brightness 0 means off state */ if (!brightness) { ret = regmap_update_bits(pchip->regmap, REG_ENABLE, 0x06, 0x00); if (ret < 0) goto out; return; } ret = regmap_update_bits(pchip->regmap, REG_FL_CONF_1, 0x70, (brightness - 1) << 4); if (ret < 0) goto out; ret = regmap_update_bits(pchip->regmap, REG_ENABLE, 0x06, 0x02); if (ret < 0) goto out; return; out: dev_err(pchip->dev, "i2c failed to access register\n"); } /* flash */ static void lm3639_flash_brightness_set(struct led_classdev *cdev, enum led_brightness brightness) { int ret; unsigned int reg_val; struct lm3639_chip_data *pchip; pchip = container_of(cdev, struct lm3639_chip_data, cdev_flash); ret = regmap_read(pchip->regmap, REG_FLAG, &reg_val); if (ret < 0) goto out; if (reg_val != 0) dev_info(pchip->dev, "last flag is 0x%x\n", reg_val); /* torch off before flash control */ ret =
regmap_update_bits(pchip->regmap, REG_ENABLE, 0x06, 0x00); if (ret < 0) goto out; /* brightness 0 means off state */ if (!brightness) return; ret = regmap_update_bits(pchip->regmap, REG_FL_CONF_1, 0x0F, brightness - 1); if (ret < 0) goto out; ret = regmap_update_bits(pchip->regmap, REG_ENABLE, 0x06, 0x06); if (ret < 0) goto out; return; out: dev_err(pchip->dev, "i2c failed to access register\n"); } static const struct regmap_config lm3639_regmap = { .reg_bits = 8, .val_bits = 8, .max_register = REG_MAX, }; static int lm3639_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret; struct lm3639_chip_data *pchip; struct lm3639_platform_data *pdata = dev_get_platdata(&client->dev); struct backlight_properties props; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(&client->dev, "i2c functionality check fail.\n"); return -EOPNOTSUPP; } if (pdata == NULL) { dev_err(&client->dev, "Needs Platform Data.\n"); return -ENODATA; } pchip = devm_kzalloc(&client->dev, sizeof(struct lm3639_chip_data), GFP_KERNEL); if (!pchip) return -ENOMEM; pchip->pdata = pdata; pchip->dev = &client->dev; pchip->regmap = devm_regmap_init_i2c(client, &lm3639_regmap); if (IS_ERR(pchip->regmap)) { ret = PTR_ERR(pchip->regmap); dev_err(&client->dev, "fail : allocate register map: %d\n", ret); return ret; } i2c_set_clientdata(client, pchip); /* chip initialize */ ret = lm3639_chip_init(pchip); if (ret < 0) { dev_err(&client->dev, "fail : chip init\n"); goto err_out; } /* backlight */ memset(&props, 0, sizeof(props)); /* zero unused fields before registration */ props.type = BACKLIGHT_RAW; props.brightness = pdata->init_brt_led; props.max_brightness = pdata->max_brt_led; pchip->bled = devm_backlight_device_register(pchip->dev, "lm3639_bled", pchip->dev, pchip, &lm3639_bled_ops, &props); if (IS_ERR(pchip->bled)) { dev_err(&client->dev, "fail : backlight register\n"); ret = PTR_ERR(pchip->bled); goto err_out; } ret = device_create_file(&(pchip->bled->dev), &dev_attr_bled_mode); if (ret < 0) { dev_err(&client->dev, "failed : add sysfs entries\n"); goto err_out; } /* flash */ pchip->cdev_flash.name = "lm3639_flash"; pchip->cdev_flash.max_brightness = 16; pchip->cdev_flash.brightness_set = lm3639_flash_brightness_set; ret = led_classdev_register((struct device *) &client->dev, &pchip->cdev_flash); if (ret < 0) { dev_err(&client->dev, "fail : flash register\n"); goto err_flash; } /* torch */ pchip->cdev_torch.name = "lm3639_torch"; pchip->cdev_torch.max_brightness = 8; pchip->cdev_torch.brightness_set = lm3639_torch_brightness_set; ret = led_classdev_register((struct device *) &client->dev, &pchip->cdev_torch); if (ret < 0) { dev_err(&client->dev, "fail : torch register\n"); goto err_torch; } return 0; err_torch: led_classdev_unregister(&pchip->cdev_flash); err_flash: device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode); err_out: return ret; } static int lm3639_remove(struct i2c_client *client) { struct lm3639_chip_data *pchip = i2c_get_clientdata(client); regmap_write(pchip->regmap, REG_ENABLE, 0x00); /* both classdevs are registered in probe, so unregister unconditionally */ led_classdev_unregister(&pchip->cdev_torch); led_classdev_unregister(&pchip->cdev_flash); if (pchip->bled) device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode); return 0; } static const struct i2c_device_id lm3639_id[] = { {LM3639_NAME, 0}, {} }; MODULE_DEVICE_TABLE(i2c, lm3639_id); static struct i2c_driver lm3639_i2c_driver = { .driver = { .name = LM3639_NAME, }, .probe = lm3639_probe, .remove = lm3639_remove, .id_table = lm3639_id, }; module_i2c_driver(lm3639_i2c_driver); MODULE_DESCRIPTION("Texas
Instruments Backlight+Flash LED driver for LM3639"); MODULE_AUTHOR("Daniel Jeong <gshark.jeong@gmail.com>"); MODULE_AUTHOR("Ldd Mlp <ldd-mlp@list.ti.com>"); MODULE_LICENSE("GPL v2");
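The enable/torch/flash sequencing above leans entirely on regmap_update_bits() touching only the masked bits of a register. A sketch of the equivalent open-coded read-modify-write (the real regmap helper also skips redundant writes, which this sketch omits):

/* What regmap_update_bits(map, reg, mask, val) does, open-coded for
 * clarity; illustration only, the driver uses the regmap helper. */
static int update_bits_sketch(struct regmap *map, unsigned int reg,
			      unsigned int mask, unsigned int val)
{
	unsigned int tmp;
	int ret = regmap_read(map, reg, &tmp);

	if (ret < 0)
		return ret;
	tmp = (tmp & ~mask) | (val & mask);
	return regmap_write(map, reg, tmp);
}

/* e.g. torch on: update_bits_sketch(map, REG_ENABLE, 0x06, 0x02);
 *      flash on: update_bits_sketch(map, REG_ENABLE, 0x06, 0x06); */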
gpl-2.0
chrisc93/bullhead
drivers/scsi/fnic/fnic_debugfs.c
2059
8886
/* * Copyright 2012 Cisco Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/debugfs.h> #include "fnic.h" static struct dentry *fnic_trace_debugfs_root; static struct dentry *fnic_trace_debugfs_file; static struct dentry *fnic_trace_enable; /* * fnic_trace_ctrl_open - Open the trace_enable file * @inode: The inode pointer. * @file: The file pointer to attach the trace enable/disable flag. * * Description: * This routine opens a debugfs file trace_enable. * * Returns: * This function returns zero if successful. */ static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp) { filp->private_data = inode->i_private; return 0; } /* * fnic_trace_ctrl_read - Read a trace_enable debugfs file * @filp: The file pointer to read from. * @ubuf: The buffer to copy the data to. * @cnt: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads the value of the variable fnic_tracing_enabled * and stores it into the local @buf. It will start reading file at @ppos and * copy up to @cnt of data to @ubuf from @buf. * * Returns: * This function returns the amount of data that was read. */ static ssize_t fnic_trace_ctrl_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; int len; len = sprintf(buf, "%u\n", fnic_tracing_enabled); return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); } /* * fnic_trace_ctrl_write - Write to trace_enable debugfs file * @filp: The file pointer to write from. * @ubuf: The buffer to copy the data from. * @cnt: The number of bytes to write. * @ppos: The position in the file to start writing to. * * Description: * This routine writes data from user buffer @ubuf to buffer @buf and * sets fnic_tracing_enabled value as per user input. * * Returns: * This function returns the amount of data that was written. */ static ssize_t fnic_trace_ctrl_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; unsigned long val; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = kstrtoul(buf, 10, &val); if (ret < 0) return ret; fnic_tracing_enabled = val; (*ppos)++; return cnt; } /* * fnic_trace_debugfs_open - Open the fnic trace log * @inode: The inode pointer * @file: The file pointer to attach the log output * * Description: * This routine is the entry point for the debugfs open file operation. * It allocates the necessary buffer for the log, fills the buffer from * the in-memory log and then returns a pointer to that log in * the private_data field in @file. * * Returns: * This function returns zero if successful. On error it will return * a negative error value.
*/ static int fnic_trace_debugfs_open(struct inode *inode, struct file *file) { fnic_dbgfs_t *fnic_dbg_prt; fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL); if (!fnic_dbg_prt) return -ENOMEM; fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE))); if (!fnic_dbg_prt->buffer) { kfree(fnic_dbg_prt); return -ENOMEM; } memset((void *)fnic_dbg_prt->buffer, 0, (3*(trace_max_pages * PAGE_SIZE))); fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt); file->private_data = fnic_dbg_prt; return 0; } /* * fnic_trace_debugfs_lseek - Seek through a debugfs file * @file: The file pointer to seek through. * @offset: The offset to seek to or the amount to seek by. * @howto: Indicates how to seek. * * Description: * This routine is the entry point for the debugfs lseek file operation. * The @howto parameter indicates whether @offset is the offset to directly * seek to, or if it is a value to seek forward or reverse by. This function * figures out what the new offset of the debugfs file will be and assigns * that value to the f_pos field of @file. * * Returns: * This function returns the new offset if successful and returns a negative * error if unable to process the seek. */ static loff_t fnic_trace_debugfs_lseek(struct file *file, loff_t offset, int howto) { fnic_dbgfs_t *fnic_dbg_prt = file->private_data; loff_t pos = -1; switch (howto) { case 0: pos = offset; break; case 1: pos = file->f_pos + offset; break; case 2: pos = fnic_dbg_prt->buffer_len + offset; } return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ? -EINVAL : (file->f_pos = pos); } /* * fnic_trace_debugfs_read - Read a debugfs file * @file: The file pointer to read from. * @ubuf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @pos: The position in the file to start reading from. * * Description: * This routine reads data from the buffer indicated in the private_data * field of @file. It will start reading at @pos and copy up to @nbytes of * data to @ubuf. * * Returns: * This function returns the amount of data that was read (this could be * less than @nbytes if the end of the file was reached). */ static ssize_t fnic_trace_debugfs_read(struct file *file, char __user *ubuf, size_t nbytes, loff_t *pos) { fnic_dbgfs_t *fnic_dbg_prt = file->private_data; int rc = 0; rc = simple_read_from_buffer(ubuf, nbytes, pos, fnic_dbg_prt->buffer, fnic_dbg_prt->buffer_len); return rc; } /* * fnic_trace_debugfs_release - Release the buffer used to store * debugfs file data * @inode: The inode pointer * @file: The file pointer that contains the buffer to release * * Description: * This routine frees the buffer that was allocated when the debugfs * file was opened. * * Returns: * This function returns zero. */ static int fnic_trace_debugfs_release(struct inode *inode, struct file *file) { fnic_dbgfs_t *fnic_dbg_prt = file->private_data; vfree(fnic_dbg_prt->buffer); kfree(fnic_dbg_prt); return 0; } static const struct file_operations fnic_trace_ctrl_fops = { .owner = THIS_MODULE, .open = fnic_trace_ctrl_open, .read = fnic_trace_ctrl_read, .write = fnic_trace_ctrl_write, }; static const struct file_operations fnic_trace_debugfs_fops = { .owner = THIS_MODULE, .open = fnic_trace_debugfs_open, .llseek = fnic_trace_debugfs_lseek, .read = fnic_trace_debugfs_read, .release = fnic_trace_debugfs_release, }; /* * fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging * * Description: * When Debugfs is configured this routine sets up the fnic debugfs * file system. 
If not already created, this routine will create the * fnic directory. It will create file trace to log fnic trace buffer * output into debugfs and it will also create file trace_enable to * control enable/disable of trace logging into trace buffer. */ int fnic_trace_debugfs_init(void) { int rc = -1; fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL); if (!fnic_trace_debugfs_root) { printk(KERN_DEBUG "Cannot create debugfs root\n"); return rc; } fnic_trace_enable = debugfs_create_file("tracing_enable", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, NULL, &fnic_trace_ctrl_fops); if (!fnic_trace_enable) { printk(KERN_DEBUG "Cannot create trace_enable file" " under debugfs"); return rc; } fnic_trace_debugfs_file = debugfs_create_file("trace", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, NULL, &fnic_trace_debugfs_fops); if (!fnic_trace_debugfs_file) { printk(KERN_DEBUG "Cannot create trace file under debugfs"); return rc; } rc = 0; return rc; } /* * fnic_trace_debugfs_terminate - Tear down debugfs infrastructure * * Description: * When Debugfs is configured this routine removes debugfs file system * elements that are specific to fnic trace logging. */ void fnic_trace_debugfs_terminate(void) { if (fnic_trace_debugfs_file) { debugfs_remove(fnic_trace_debugfs_file); fnic_trace_debugfs_file = NULL; } if (fnic_trace_enable) { debugfs_remove(fnic_trace_enable); fnic_trace_enable = NULL; } if (fnic_trace_debugfs_root) { debugfs_remove(fnic_trace_debugfs_root); fnic_trace_debugfs_root = NULL; } }
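fnic_trace_debugfs_lseek() above switches on the raw whence values 0/1/2. The same logic restated with the symbolic names, as a sketch rather than a replacement:

/* The whence handling of fnic_trace_debugfs_lseek(), rewritten with the
 * symbolic constants from <linux/fs.h> (0/1/2 == SEEK_SET/SEEK_CUR/SEEK_END). */
static loff_t lseek_sketch(struct file *file, loff_t offset, int whence,
			   loff_t buffer_len)
{
	loff_t pos = -1;

	switch (whence) {
	case SEEK_SET:
		pos = offset;
		break;
	case SEEK_CUR:
		pos = file->f_pos + offset;
		break;
	case SEEK_END:
		pos = buffer_len + offset;
		break;
	}
	if (pos < 0 || pos > buffer_len)
		return -EINVAL;
	return file->f_pos = pos;
}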
gpl-2.0
rofehr/linux-wetek
arch/powerpc/platforms/pseries/pseries_energy.c
2059
7017
/* * POWER platform energy management driver * Copyright (C) 2010 IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This pseries platform device driver provides access to * platform energy management capabilities. */ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/device.h> #include <linux/cpu.h> #include <linux/of.h> #include <asm/cputhreads.h> #include <asm/page.h> #include <asm/hvcall.h> #include <asm/firmware.h> #define MODULE_VERS "1.0" #define MODULE_NAME "pseries_energy" /* Driver flags */ static int sysfs_entries; /* Helper routines */ /* Helper routines to convert between drc_index and cpu numbers */ static u32 cpu_to_drc_index(int cpu) { struct device_node *dn = NULL; const int *indexes; int i; int rc = 1; u32 ret = 0; dn = of_find_node_by_path("/cpus"); if (dn == NULL) goto err; indexes = of_get_property(dn, "ibm,drc-indexes", NULL); if (indexes == NULL) goto err_of_node_put; /* Convert logical cpu number to core number */ i = cpu_core_index_of_thread(cpu); /* * The first element indexes[0] is the number of drc_indexes * returned in the list. Hence i+1 will get the drc_index * corresponding to core number i. */ WARN_ON(i > indexes[0]); ret = indexes[i + 1]; rc = 0; err_of_node_put: of_node_put(dn); err: if (rc) printk(KERN_WARNING "cpu_to_drc_index(%d) failed\n", cpu); return ret; } static int drc_index_to_cpu(u32 drc_index) { struct device_node *dn = NULL; const int *indexes; int i, cpu = 0; int rc = 1; dn = of_find_node_by_path("/cpus"); if (dn == NULL) goto err; indexes = of_get_property(dn, "ibm,drc-indexes", NULL); if (indexes == NULL) goto err_of_node_put; /* * First element in the array is the number of drc_indexes * returned. Search through the list to find the matching * drc_index and get the core number */ for (i = 0; i < indexes[0]; i++) { if (indexes[i + 1] == drc_index) break; } /* Convert core number to logical cpu number */ cpu = cpu_first_thread_of_core(i); rc = 0; err_of_node_put: of_node_put(dn); err: if (rc) printk(KERN_WARNING "drc_index_to_cpu(%d) failed\n", drc_index); return cpu; } /* * pseries hypervisor call H_BEST_ENERGY provides hints to OS on * preferred logical cpus to activate or deactivate for optimized * energy consumption.
*/ #define FLAGS_MODE1 0x004E200000080E01 #define FLAGS_MODE2 0x004E200000080401 #define FLAGS_ACTIVATE 0x100 static ssize_t get_best_energy_list(char *page, int activate) { int rc, cnt, i, cpu; unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; unsigned long flags = 0; u32 *buf_page; char *s = page; buf_page = (u32 *) get_zeroed_page(GFP_KERNEL); if (!buf_page) return -ENOMEM; flags = FLAGS_MODE1; if (activate) flags |= FLAGS_ACTIVATE; rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, 0, __pa(buf_page), 0, 0, 0, 0, 0, 0); if (rc != H_SUCCESS) { free_page((unsigned long) buf_page); return -EINVAL; } cnt = retbuf[0]; for (i = 0; i < cnt; i++) { cpu = drc_index_to_cpu(buf_page[2*i+1]); if ((cpu_online(cpu) && !activate) || (!cpu_online(cpu) && activate)) s += sprintf(s, "%d,", cpu); } if (s > page) { /* Something to show */ s--; /* Suppress last comma */ s += sprintf(s, "\n"); } free_page((unsigned long) buf_page); return s-page; } static ssize_t get_best_energy_data(struct device *dev, char *page, int activate) { int rc; unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; unsigned long flags = 0; flags = FLAGS_MODE2; if (activate) flags |= FLAGS_ACTIVATE; rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, cpu_to_drc_index(dev->id), 0, 0, 0, 0, 0, 0, 0); if (rc != H_SUCCESS) return -EINVAL; return sprintf(page, "%lu\n", retbuf[1] >> 32); } /* Wrapper functions */ static ssize_t cpu_activate_hint_list_show(struct device *dev, struct device_attribute *attr, char *page) { return get_best_energy_list(page, 1); } static ssize_t cpu_deactivate_hint_list_show(struct device *dev, struct device_attribute *attr, char *page) { return get_best_energy_list(page, 0); } static ssize_t percpu_activate_hint_show(struct device *dev, struct device_attribute *attr, char *page) { return get_best_energy_data(dev, page, 1); } static ssize_t percpu_deactivate_hint_show(struct device *dev, struct device_attribute *attr, char *page) { return get_best_energy_data(dev, page, 0); } /* * Create sysfs interface: * /sys/devices/system/cpu/pseries_activate_hint_list * /sys/devices/system/cpu/pseries_deactivate_hint_list * Comma separated list of cpus to activate or deactivate * /sys/devices/system/cpu/cpuN/pseries_activate_hint * /sys/devices/system/cpu/cpuN/pseries_deactivate_hint * Per-cpu value of the hint */ struct device_attribute attr_cpu_activate_hint_list = __ATTR(pseries_activate_hint_list, 0444, cpu_activate_hint_list_show, NULL); struct device_attribute attr_cpu_deactivate_hint_list = __ATTR(pseries_deactivate_hint_list, 0444, cpu_deactivate_hint_list_show, NULL); struct device_attribute attr_percpu_activate_hint = __ATTR(pseries_activate_hint, 0444, percpu_activate_hint_show, NULL); struct device_attribute attr_percpu_deactivate_hint = __ATTR(pseries_deactivate_hint, 0444, percpu_deactivate_hint_show, NULL); static int __init pseries_energy_init(void) { int cpu, err; struct device *cpu_dev; if (!firmware_has_feature(FW_FEATURE_BEST_ENERGY)) { printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n"); return 0; } /* Create the sysfs files */ err = device_create_file(cpu_subsys.dev_root, &attr_cpu_activate_hint_list); if (!err) err = device_create_file(cpu_subsys.dev_root, &attr_cpu_deactivate_hint_list); if (err) return err; for_each_possible_cpu(cpu) { cpu_dev = get_cpu_device(cpu); err = device_create_file(cpu_dev, &attr_percpu_activate_hint); if (err) break; err = device_create_file(cpu_dev, &attr_percpu_deactivate_hint); if (err) break; } if (err) return err; sysfs_entries = 1; /* Removed entries on cleanup */ return 0; } 
static void __exit pseries_energy_cleanup(void) { int cpu; struct device *cpu_dev; if (!sysfs_entries) return; /* Remove the sysfs files */ device_remove_file(cpu_subsys.dev_root, &attr_cpu_activate_hint_list); device_remove_file(cpu_subsys.dev_root, &attr_cpu_deactivate_hint_list); for_each_possible_cpu(cpu) { cpu_dev = get_cpu_device(cpu); sysfs_remove_file(&cpu_dev->kobj, &attr_percpu_activate_hint.attr); sysfs_remove_file(&cpu_dev->kobj, &attr_percpu_deactivate_hint.attr); } } module_init(pseries_energy_init); module_exit(pseries_energy_cleanup); MODULE_DESCRIPTION("Driver for pSeries platform energy management"); MODULE_AUTHOR("Vaidyanathan Srinivasan"); MODULE_LICENSE("GPL");
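Both converters above depend on the ibm,drc-indexes layout, where element 0 is the entry count and real data starts at index 1. A self-contained toy walk of that layout using a plain array in place of the device-tree property (the values are made up):

/* Toy illustration of the ibm,drc-indexes convention:
 * indexes[0] holds the number of entries, data starts at indexes[1]. */
#include <stdio.h>

int main(void)
{
	const int indexes[] = { 3, 0x1000, 0x1008, 0x1010 };
	int core;

	for (core = 0; core < indexes[0]; core++)
		printf("core %d -> drc_index 0x%x\n", core, indexes[core + 1]);
	return 0;
}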
gpl-2.0
SamsungGalaxyS6/kernel_samsung_exynos7420
fs/sysv/itree.c
2315
11800
/* * linux/fs/sysv/itree.c * * Handling of indirect blocks' trees. * AV, Sep--Dec 2000 */ #include <linux/buffer_head.h> #include <linux/mount.h> #include <linux/string.h> #include "sysv.h" enum {DIRECT = 10, DEPTH = 4}; /* Have triple indirect */ static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode) { mark_buffer_dirty_inode(bh, inode); if (IS_SYNC(inode)) sync_dirty_buffer(bh); } static int block_to_path(struct inode *inode, long block, int offsets[DEPTH]) { struct super_block *sb = inode->i_sb; struct sysv_sb_info *sbi = SYSV_SB(sb); int ptrs_bits = sbi->s_ind_per_block_bits; unsigned long indirect_blocks = sbi->s_ind_per_block, double_blocks = sbi->s_ind_per_block_2; int n = 0; if (block < 0) { printk("sysv_block_map: block < 0\n"); } else if (block < DIRECT) { offsets[n++] = block; } else if ( (block -= DIRECT) < indirect_blocks) { offsets[n++] = DIRECT; offsets[n++] = block; } else if ((block -= indirect_blocks) < double_blocks) { offsets[n++] = DIRECT+1; offsets[n++] = block >> ptrs_bits; offsets[n++] = block & (indirect_blocks - 1); } else if (((block -= double_blocks) >> (ptrs_bits * 2)) < indirect_blocks) { offsets[n++] = DIRECT+2; offsets[n++] = block >> (ptrs_bits * 2); offsets[n++] = (block >> ptrs_bits) & (indirect_blocks - 1); offsets[n++] = block & (indirect_blocks - 1); } else { /* nothing */; } return n; } static inline int block_to_cpu(struct sysv_sb_info *sbi, sysv_zone_t nr) { return sbi->s_block_base + fs32_to_cpu(sbi, nr); } typedef struct { sysv_zone_t *p; sysv_zone_t key; struct buffer_head *bh; } Indirect; static DEFINE_RWLOCK(pointers_lock); static inline void add_chain(Indirect *p, struct buffer_head *bh, sysv_zone_t *v) { p->key = *(p->p = v); p->bh = bh; } static inline int verify_chain(Indirect *from, Indirect *to) { while (from <= to && from->key == *from->p) from++; return (from > to); } static inline sysv_zone_t *block_end(struct buffer_head *bh) { return (sysv_zone_t*)((char*)bh->b_data + bh->b_size); } /* * Requires read_lock(&pointers_lock) or write_lock(&pointers_lock) */ static Indirect *get_branch(struct inode *inode, int depth, int offsets[], Indirect chain[], int *err) { struct super_block *sb = inode->i_sb; Indirect *p = chain; struct buffer_head *bh; *err = 0; add_chain(chain, NULL, SYSV_I(inode)->i_data + *offsets); if (!p->key) goto no_block; while (--depth) { int block = block_to_cpu(SYSV_SB(sb), p->key); bh = sb_bread(sb, block); if (!bh) goto failure; if (!verify_chain(chain, p)) goto changed; add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets); if (!p->key) goto no_block; } return NULL; changed: brelse(bh); *err = -EAGAIN; goto no_block; failure: *err = -EIO; no_block: return p; } static int alloc_branch(struct inode *inode, int num, int *offsets, Indirect *branch) { int blocksize = inode->i_sb->s_blocksize; int n = 0; int i; branch[0].key = sysv_new_block(inode->i_sb); if (branch[0].key) for (n = 1; n < num; n++) { struct buffer_head *bh; int parent; /* Allocate the next block */ branch[n].key = sysv_new_block(inode->i_sb); if (!branch[n].key) break; /* * Get buffer_head for parent block, zero it out and set * the pointer to new one, then send parent to disk. 
*/ parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key); bh = sb_getblk(inode->i_sb, parent); lock_buffer(bh); memset(bh->b_data, 0, blocksize); branch[n].bh = bh; branch[n].p = (sysv_zone_t*) bh->b_data + offsets[n]; *branch[n].p = branch[n].key; set_buffer_uptodate(bh); unlock_buffer(bh); dirty_indirect(bh, inode); } if (n == num) return 0; /* Allocation failed, free what we already allocated */ for (i = 1; i < n; i++) bforget(branch[i].bh); for (i = 0; i < n; i++) sysv_free_block(inode->i_sb, branch[i].key); return -ENOSPC; } static inline int splice_branch(struct inode *inode, Indirect chain[], Indirect *where, int num) { int i; /* Verify that place we are splicing to is still there and vacant */ write_lock(&pointers_lock); if (!verify_chain(chain, where-1) || *where->p) goto changed; *where->p = where->key; write_unlock(&pointers_lock); inode->i_ctime = CURRENT_TIME_SEC; /* had we spliced it onto indirect block? */ if (where->bh) dirty_indirect(where->bh, inode); if (IS_SYNC(inode)) sysv_sync_inode(inode); else mark_inode_dirty(inode); return 0; changed: write_unlock(&pointers_lock); for (i = 1; i < num; i++) bforget(where[i].bh); for (i = 0; i < num; i++) sysv_free_block(inode->i_sb, where[i].key); return -EAGAIN; } static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int err = -EIO; int offsets[DEPTH]; Indirect chain[DEPTH]; struct super_block *sb = inode->i_sb; Indirect *partial; int left; int depth = block_to_path(inode, iblock, offsets); if (depth == 0) goto out; reread: read_lock(&pointers_lock); partial = get_branch(inode, depth, offsets, chain, &err); read_unlock(&pointers_lock); /* Simplest case - block found, no allocation needed */ if (!partial) { got_it: map_bh(bh_result, sb, block_to_cpu(SYSV_SB(sb), chain[depth-1].key)); /* Clean up and exit */ partial = chain+depth-1; /* the whole chain */ goto cleanup; } /* Next simple case - plain lookup or failed read of indirect block */ if (!create || err == -EIO) { cleanup: while (partial > chain) { brelse(partial->bh); partial--; } out: return err; } /* * Indirect block might be removed by truncate while we were * reading it. Handling of that case (forget what we've got and * reread) is taken out of the main path. */ if (err == -EAGAIN) goto changed; left = (chain + depth) - partial; err = alloc_branch(inode, left, offsets+(partial-chain), partial); if (err) goto cleanup; if (splice_branch(inode, chain, partial, left) < 0) goto changed; set_buffer_new(bh_result); goto got_it; changed: while (partial > chain) { brelse(partial->bh); partial--; } goto reread; } static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q) { while (p < q) if (*p++) return 0; return 1; } static Indirect *find_shared(struct inode *inode, int depth, int offsets[], Indirect chain[], sysv_zone_t *top) { Indirect *partial, *p; int k, err; *top = 0; for (k = depth; k > 1 && !offsets[k-1]; k--) ; write_lock(&pointers_lock); partial = get_branch(inode, k, offsets, chain, &err); if (!partial) partial = chain + k-1; /* * If the branch acquired continuation since we've looked at it - * fine, it should all survive and (new) top doesn't belong to us. */ if (!partial->key && *partial->p) { write_unlock(&pointers_lock); goto no_top; } for (p=partial; p>chain && all_zeroes((sysv_zone_t*)p->bh->b_data,p->p); p--) ; /* * OK, we've found the last block that must survive. The rest of our * branch should be detached before unlocking. 
However, if that rest * of branch is all ours and does not grow immediately from the inode * it's easier to cheat and just decrement partial->p. */ if (p == chain + k - 1 && p > chain) { p->p--; } else { *top = *p->p; *p->p = 0; } write_unlock(&pointers_lock); while (partial > p) { brelse(partial->bh); partial--; } no_top: return partial; } static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q) { for ( ; p < q ; p++) { sysv_zone_t nr = *p; if (nr) { *p = 0; sysv_free_block(inode->i_sb, nr); mark_inode_dirty(inode); } } } static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth) { struct buffer_head * bh; struct super_block *sb = inode->i_sb; if (depth--) { for ( ; p < q ; p++) { int block; sysv_zone_t nr = *p; if (!nr) continue; *p = 0; block = block_to_cpu(SYSV_SB(sb), nr); bh = sb_bread(sb, block); if (!bh) continue; free_branches(inode, (sysv_zone_t*)bh->b_data, block_end(bh), depth); bforget(bh); sysv_free_block(sb, nr); mark_inode_dirty(inode); } } else free_data(inode, p, q); } void sysv_truncate (struct inode * inode) { sysv_zone_t *i_data = SYSV_I(inode)->i_data; int offsets[DEPTH]; Indirect chain[DEPTH]; Indirect *partial; sysv_zone_t nr = 0; int n; long iblock; unsigned blocksize; if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return; blocksize = inode->i_sb->s_blocksize; iblock = (inode->i_size + blocksize-1) >> inode->i_sb->s_blocksize_bits; block_truncate_page(inode->i_mapping, inode->i_size, get_block); n = block_to_path(inode, iblock, offsets); if (n == 0) return; if (n == 1) { free_data(inode, i_data+offsets[0], i_data + DIRECT); goto do_indirects; } partial = find_shared(inode, n, offsets, chain, &nr); /* Kill the top of shared branch (already detached) */ if (nr) { if (partial == chain) mark_inode_dirty(inode); else dirty_indirect(partial->bh, inode); free_branches(inode, &nr, &nr+1, (chain+n-1) - partial); } /* Clear the ends of indirect blocks on the shared branch */ while (partial > chain) { free_branches(inode, partial->p + 1, block_end(partial->bh), (chain+n-1) - partial); dirty_indirect(partial->bh, inode); brelse (partial->bh); partial--; } do_indirects: /* Kill the remaining (whole) subtrees (== subtrees deeper than...) 
*/ while (n < DEPTH) { nr = i_data[DIRECT + n - 1]; if (nr) { i_data[DIRECT + n - 1] = 0; mark_inode_dirty(inode); free_branches(inode, &nr, &nr+1, n); } n++; } inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; if (IS_SYNC(inode)) sysv_sync_inode (inode); else mark_inode_dirty(inode); } static unsigned sysv_nblocks(struct super_block *s, loff_t size) { struct sysv_sb_info *sbi = SYSV_SB(s); int ptrs_bits = sbi->s_ind_per_block_bits; unsigned blocks, res, direct = DIRECT, i = DEPTH; blocks = (size + s->s_blocksize - 1) >> s->s_blocksize_bits; res = blocks; while (--i && blocks > direct) { blocks = ((blocks - direct - 1) >> ptrs_bits) + 1; res += blocks; direct = 1; } return blocks; } int sysv_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct super_block *s = dentry->d_sb; generic_fillattr(dentry->d_inode, stat); stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size); stat->blksize = s->s_blocksize; return 0; } static int sysv_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page,get_block,wbc); } static int sysv_readpage(struct file *file, struct page *page) { return block_read_full_page(page,get_block); } int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len) { return __block_write_begin(page, pos, len, get_block); } static void sysv_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) { truncate_pagecache(inode, to, inode->i_size); sysv_truncate(inode); } } static int sysv_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, flags, pagep, get_block); if (unlikely(ret)) sysv_write_failed(mapping, pos + len); return ret; } static sector_t sysv_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,get_block); } const struct address_space_operations sysv_aops = { .readpage = sysv_readpage, .writepage = sysv_writepage, .write_begin = sysv_write_begin, .write_end = generic_write_end, .bmap = sysv_bmap };
gpl-2.0
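The sysv itree code above walks a logical block number down a tree of direct, single-, double- and triple-indirect pointers. As a self-contained sketch of that mapping (not the kernel function itself — the geometry of 10 direct slots and 256 pointers per indirect block is an assumption standing in for the superblock fields sbi->s_ind_per_block*), the following program prints the offset chain that block_to_path() would produce:

/* demo_itree.c -- illustrative only, not part of the filesystem code */
#include <stdio.h>

#define DEMO_DIRECT    10
#define DEMO_PTRS_BITS 8                  /* assume 256 pointers per block */
#define DEMO_PTRS      (1L << DEMO_PTRS_BITS)

static int demo_block_to_path(long block, int offsets[4])
{
	int n = 0;

	if (block < 0)
		return 0;
	if (block < DEMO_DIRECT) {
		offsets[n++] = block;
	} else if ((block -= DEMO_DIRECT) < DEMO_PTRS) {
		offsets[n++] = DEMO_DIRECT;              /* single indirect */
		offsets[n++] = block;
	} else if ((block -= DEMO_PTRS) < DEMO_PTRS * DEMO_PTRS) {
		offsets[n++] = DEMO_DIRECT + 1;          /* double indirect */
		offsets[n++] = block >> DEMO_PTRS_BITS;
		offsets[n++] = block & (DEMO_PTRS - 1);
	} else if (((block -= DEMO_PTRS * DEMO_PTRS) >> (2 * DEMO_PTRS_BITS)) < DEMO_PTRS) {
		offsets[n++] = DEMO_DIRECT + 2;          /* triple indirect */
		offsets[n++] = block >> (2 * DEMO_PTRS_BITS);
		offsets[n++] = (block >> DEMO_PTRS_BITS) & (DEMO_PTRS - 1);
		offsets[n++] = block & (DEMO_PTRS - 1);
	}
	return n;                                        /* depth, 0 = unmappable */
}

int main(void)
{
	long blocks[] = { 3, 12, 5000, 70000 };
	int offsets[4], i, j, depth;

	for (i = 0; i < 4; i++) {
		depth = demo_block_to_path(blocks[i], offsets);
		printf("block %5ld -> depth %d:", blocks[i], depth);
		for (j = 0; j < depth; j++)
			printf(" %d", offsets[j]);
		printf("\n");
	}
	return 0;
}

Block 5000, for example, lands in the double-indirect range: the chain {11, 18, 126} means slot 11 of the inode's i_data, then entry 18 of the first indirect block, then entry 126 of the second.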
lnfamous/Kernel_CyanogenMod9_Pico
arch/m68k/lib/memcpy.c
2571
1557
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/string.h> void *memcpy(void *to, const void *from, size_t n) { void *xto = to; size_t temp, temp1; if (!n) return xto; if ((long)to & 1) { char *cto = to; const char *cfrom = from; *cto++ = *cfrom++; to = cto; from = cfrom; n--; } if (n > 2 && (long)to & 2) { short *sto = to; const short *sfrom = from; *sto++ = *sfrom++; to = sto; from = sfrom; n -= 2; } temp = n >> 2; if (temp) { long *lto = to; const long *lfrom = from; #if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) for (; temp; temp--) *lto++ = *lfrom++; #else asm volatile ( " movel %2,%3\n" " andw #7,%3\n" " lsrl #3,%2\n" " negw %3\n" " jmp %%pc@(1f,%3:w:2)\n" "4: movel %0@+,%1@+\n" " movel %0@+,%1@+\n" " movel %0@+,%1@+\n" " movel %0@+,%1@+\n" " movel %0@+,%1@+\n" " movel %0@+,%1@+\n" " movel %0@+,%1@+\n" " movel %0@+,%1@+\n" "1: dbra %2,4b\n" " clrw %2\n" " subql #1,%2\n" " jpl 4b" : "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1) : "0" (lfrom), "1" (lto), "2" (temp)); #endif to = lto; from = lfrom; } if (n & 2) { short *sto = to; const short *sfrom = from; *sto++ = *sfrom++; to = sto; from = sfrom; } if (n & 1) { char *cto = to; const char *cfrom = from; *cto = *cfrom; } return xto; } EXPORT_SYMBOL(memcpy);
gpl-2.0
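The m68k memcpy above first nudges the destination onto a 2-byte and then a 4-byte boundary, moves the bulk as 32-bit words (via an unrolled dbra loop on non-ColdFire parts), and mops up the trailing halfword/byte. Below is a portable sketch of the same head/body/tail decomposition — illustrative only, assuming ordinary byte-addressable memory rather than the kernel's asm path:

/* demo_memcpy.c -- portable illustration of the alignment strategy */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

static void *demo_memcpy(void *to, const void *from, size_t n)
{
	unsigned char *d = to;
	const unsigned char *s = from;

	if (n && ((uintptr_t)d & 1)) {           /* align dest to 2 bytes */
		*d++ = *s++;
		n--;
	}
	if (n > 2 && ((uintptr_t)d & 2)) {       /* align dest to 4 bytes */
		memcpy(d, s, 2);
		d += 2; s += 2; n -= 2;
	}
	while (n >= 4) {                         /* bulk 32-bit copies */
		memcpy(d, s, 4);
		d += 4; s += 4; n -= 4;
	}
	if (n & 2) { memcpy(d, s, 2); d += 2; s += 2; }
	if (n & 1)                               /* final odd byte */
		*d = *s;
	return to;
}

int main(void)
{
	char src[32] = "alignment-aware copy example";
	char dst[32] = { 0 };

	demo_memcpy(dst + 1, src, 27);           /* deliberately odd destination */
	printf("%s\n", dst + 1);
	return 0;
}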
limbo127/KVMGT-kernel
drivers/media/rc/ir-sony-decoder.c
2571
4887
/* ir-sony-decoder.c - handle Sony IR Pulse/Space protocol * * Copyright (C) 2010 by David Härdeman <david@hardeman.nu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/bitrev.h> #include <linux/module.h> #include "rc-core-priv.h" #define SONY_UNIT 600000 /* ns */ #define SONY_HEADER_PULSE (4 * SONY_UNIT) #define SONY_HEADER_SPACE (1 * SONY_UNIT) #define SONY_BIT_0_PULSE (1 * SONY_UNIT) #define SONY_BIT_1_PULSE (2 * SONY_UNIT) #define SONY_BIT_SPACE (1 * SONY_UNIT) #define SONY_TRAILER_SPACE (10 * SONY_UNIT) /* minimum */ enum sony_state { STATE_INACTIVE, STATE_HEADER_SPACE, STATE_BIT_PULSE, STATE_BIT_SPACE, STATE_FINISHED, }; /** * ir_sony_decode() - Decode one Sony pulse or space * @dev: the struct rc_dev descriptor of the device * @ev: the struct ir_raw_event descriptor of the pulse/space * * This function returns -EINVAL if the pulse violates the state machine */ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev) { struct sony_dec *data = &dev->raw->sony; u32 scancode; u8 device, subdevice, function; if (!(dev->enabled_protocols & (RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20))) return 0; if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; return 0; } if (!geq_margin(ev.duration, SONY_UNIT, SONY_UNIT / 2)) goto out; IR_dprintk(2, "Sony decode started at state %d (%uus %s)\n", data->state, TO_US(ev.duration), TO_STR(ev.pulse)); switch (data->state) { case STATE_INACTIVE: if (!ev.pulse) break; if (!eq_margin(ev.duration, SONY_HEADER_PULSE, SONY_UNIT / 2)) break; data->count = 0; data->state = STATE_HEADER_SPACE; return 0; case STATE_HEADER_SPACE: if (ev.pulse) break; if (!eq_margin(ev.duration, SONY_HEADER_SPACE, SONY_UNIT / 2)) break; data->state = STATE_BIT_PULSE; return 0; case STATE_BIT_PULSE: if (!ev.pulse) break; data->bits <<= 1; if (eq_margin(ev.duration, SONY_BIT_1_PULSE, SONY_UNIT / 2)) data->bits |= 1; else if (!eq_margin(ev.duration, SONY_BIT_0_PULSE, SONY_UNIT / 2)) break; data->count++; data->state = STATE_BIT_SPACE; return 0; case STATE_BIT_SPACE: if (ev.pulse) break; if (!geq_margin(ev.duration, SONY_BIT_SPACE, SONY_UNIT / 2)) break; decrease_duration(&ev, SONY_BIT_SPACE); if (!geq_margin(ev.duration, SONY_UNIT, SONY_UNIT / 2)) { data->state = STATE_BIT_PULSE; return 0; } data->state = STATE_FINISHED; /* Fall through */ case STATE_FINISHED: if (ev.pulse) break; if (!geq_margin(ev.duration, SONY_TRAILER_SPACE, SONY_UNIT / 2)) break; switch (data->count) { case 12: if (!(dev->enabled_protocols & RC_BIT_SONY12)) { data->state = STATE_INACTIVE; return 0; } device = bitrev8((data->bits << 3) & 0xF8); subdevice = 0; function = bitrev8((data->bits >> 4) & 0xFE); break; case 15: if (!(dev->enabled_protocols & RC_BIT_SONY15)) { data->state = STATE_INACTIVE; return 0; } device = bitrev8((data->bits >> 0) & 0xFF); subdevice = 0; function = bitrev8((data->bits >> 7) & 0xFE); break; case 20: if (!(dev->enabled_protocols & RC_BIT_SONY20)) { data->state = STATE_INACTIVE; return 0; } device = bitrev8((data->bits >> 5) & 0xF8); subdevice = bitrev8((data->bits >> 0) & 0xFF); function = bitrev8((data->bits >> 12) & 0xFE); break; default: 
IR_dprintk(1, "Sony invalid bitcount %u\n", data->count); goto out; } scancode = device << 16 | subdevice << 8 | function; IR_dprintk(1, "Sony(%u) scancode 0x%05x\n", data->count, scancode); rc_keydown(dev, scancode, 0); data->state = STATE_INACTIVE; return 0; } out: IR_dprintk(1, "Sony decode failed at state %d (%uus %s)\n", data->state, TO_US(ev.duration), TO_STR(ev.pulse)); data->state = STATE_INACTIVE; return -EINVAL; } static struct ir_raw_handler sony_handler = { .protocols = RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20, .decode = ir_sony_decode, }; static int __init ir_sony_decode_init(void) { ir_raw_handler_register(&sony_handler); printk(KERN_INFO "IR Sony protocol handler initialized\n"); return 0; } static void __exit ir_sony_decode_exit(void) { ir_raw_handler_unregister(&sony_handler); } module_init(ir_sony_decode_init); module_exit(ir_sony_decode_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Härdeman <david@hardeman.nu>"); MODULE_DESCRIPTION("Sony IR protocol decoder");
gpl-2.0
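The decoder above never compares durations exactly: rc-core's eq_margin()/geq_margin() helpers accept a measurement within plus or minus a margin of the nominal value (here SONY_UNIT / 2). The standalone program below reimplements that idea — the helper is a stand-in for the rc-core one, reusing the same 600 us unit — and classifies a Sony bit pulse as 0 (one unit) or 1 (two units):

/* demo_sony_margin.c -- illustrative duration classifier */
#include <stdbool.h>
#include <stdio.h>

#define UNIT 600000U                       /* 600 us in ns, as SONY_UNIT */

static bool eq_margin(unsigned d, unsigned expected, unsigned margin)
{
	return d >= expected - margin && d <= expected + margin;
}

/* Returns 1 or 0 for a valid bit pulse, -1 if the pulse fits neither. */
static int classify_bit_pulse(unsigned duration)
{
	if (eq_margin(duration, 2 * UNIT, UNIT / 2))
		return 1;
	if (eq_margin(duration, 1 * UNIT, UNIT / 2))
		return 0;
	return -1;
}

int main(void)
{
	unsigned samples[] = { 610000, 1180000, 1600000 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%u ns -> %d\n", samples[i],
		       classify_bit_pulse(samples[i]));
	return 0;
}

The third sample falls outside both tolerance windows, which is exactly the case where the state machine above bails out to its "decode failed" path.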
mythos234/SimplKernel-LL-BOFJ
drivers/media/rc/ir-rc6-decoder.c
2571
7413
/* ir-rc6-decoder.c - A decoder for the RC6 IR protocol * * Copyright (C) 2010 by David Härdeman <david@hardeman.nu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "rc-core-priv.h" #include <linux/module.h> /* * This decoder currently supports: * RC6-0-16 (standard toggle bit in header) * RC6-6A-20 (no toggle bit) * RC6-6A-24 (no toggle bit) * RC6-6A-32 (MCE version with toggle bit in body) */ #define RC6_UNIT 444444 /* nanosecs */ #define RC6_HEADER_NBITS 4 /* not including toggle bit */ #define RC6_0_NBITS 16 #define RC6_6A_32_NBITS 32 #define RC6_6A_NBITS 128 /* Variable 8..128 */ #define RC6_PREFIX_PULSE (6 * RC6_UNIT) #define RC6_PREFIX_SPACE (2 * RC6_UNIT) #define RC6_BIT_START (1 * RC6_UNIT) #define RC6_BIT_END (1 * RC6_UNIT) #define RC6_TOGGLE_START (2 * RC6_UNIT) #define RC6_TOGGLE_END (2 * RC6_UNIT) #define RC6_SUFFIX_SPACE (6 * RC6_UNIT) #define RC6_MODE_MASK 0x07 /* for the header bits */ #define RC6_STARTBIT_MASK 0x08 /* for the header bits */ #define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */ #define RC6_6A_LCC_MASK 0xffff0000 /* RC6-6A-32 long customer code mask */ #define RC6_6A_MCE_CC 0x800f0000 /* MCE customer code */ #ifndef CHAR_BIT #define CHAR_BIT 8 /* Normally in <limits.h> */ #endif enum rc6_mode { RC6_MODE_0, RC6_MODE_6A, RC6_MODE_UNKNOWN, }; enum rc6_state { STATE_INACTIVE, STATE_PREFIX_SPACE, STATE_HEADER_BIT_START, STATE_HEADER_BIT_END, STATE_TOGGLE_START, STATE_TOGGLE_END, STATE_BODY_BIT_START, STATE_BODY_BIT_END, STATE_FINISHED, }; static enum rc6_mode rc6_mode(struct rc6_dec *data) { switch (data->header & RC6_MODE_MASK) { case 0: return RC6_MODE_0; case 6: if (!data->toggle) return RC6_MODE_6A; /* fall through */ default: return RC6_MODE_UNKNOWN; } } /** * ir_rc6_decode() - Decode one RC6 pulse or space * @dev: the struct rc_dev descriptor of the device * @ev: the struct ir_raw_event descriptor of the pulse/space * * This function returns -EINVAL if the pulse violates the state machine */ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev) { struct rc6_dec *data = &dev->raw->rc6; u32 scancode; u8 toggle; if (!(dev->enabled_protocols & (RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE))) return 0; if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; return 0; } if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2)) goto out; again: IR_dprintk(2, "RC6 decode started at state %i (%uus %s)\n", data->state, TO_US(ev.duration), TO_STR(ev.pulse)); if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2)) return 0; switch (data->state) { case STATE_INACTIVE: if (!ev.pulse) break; /* Note: larger margin on first pulse since each RC6_UNIT is quite short and some hardware takes some time to adjust to the signal */ if (!eq_margin(ev.duration, RC6_PREFIX_PULSE, RC6_UNIT)) break; data->state = STATE_PREFIX_SPACE; data->count = 0; return 0; case STATE_PREFIX_SPACE: if (ev.pulse) break; if (!eq_margin(ev.duration, RC6_PREFIX_SPACE, RC6_UNIT / 2)) break; data->state = STATE_HEADER_BIT_START; data->header = 0; return 0; case STATE_HEADER_BIT_START: if (!eq_margin(ev.duration, RC6_BIT_START, 
RC6_UNIT / 2)) break; data->header <<= 1; if (ev.pulse) data->header |= 1; data->count++; data->state = STATE_HEADER_BIT_END; return 0; case STATE_HEADER_BIT_END: if (!is_transition(&ev, &dev->raw->prev_ev)) break; if (data->count == RC6_HEADER_NBITS) data->state = STATE_TOGGLE_START; else data->state = STATE_HEADER_BIT_START; decrease_duration(&ev, RC6_BIT_END); goto again; case STATE_TOGGLE_START: if (!eq_margin(ev.duration, RC6_TOGGLE_START, RC6_UNIT / 2)) break; data->toggle = ev.pulse; data->state = STATE_TOGGLE_END; return 0; case STATE_TOGGLE_END: if (!is_transition(&ev, &dev->raw->prev_ev) || !geq_margin(ev.duration, RC6_TOGGLE_END, RC6_UNIT / 2)) break; if (!(data->header & RC6_STARTBIT_MASK)) { IR_dprintk(1, "RC6 invalid start bit\n"); break; } data->state = STATE_BODY_BIT_START; decrease_duration(&ev, RC6_TOGGLE_END); data->count = 0; data->body = 0; switch (rc6_mode(data)) { case RC6_MODE_0: data->wanted_bits = RC6_0_NBITS; break; case RC6_MODE_6A: data->wanted_bits = RC6_6A_NBITS; break; default: IR_dprintk(1, "RC6 unknown mode\n"); goto out; } goto again; case STATE_BODY_BIT_START: if (eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2)) { /* Discard LSB's that won't fit in data->body */ if (data->count++ < CHAR_BIT * sizeof data->body) { data->body <<= 1; if (ev.pulse) data->body |= 1; } data->state = STATE_BODY_BIT_END; return 0; } else if (RC6_MODE_6A == rc6_mode(data) && !ev.pulse && geq_margin(ev.duration, RC6_SUFFIX_SPACE, RC6_UNIT / 2)) { data->state = STATE_FINISHED; goto again; } break; case STATE_BODY_BIT_END: if (!is_transition(&ev, &dev->raw->prev_ev)) break; if (data->count == data->wanted_bits) data->state = STATE_FINISHED; else data->state = STATE_BODY_BIT_START; decrease_duration(&ev, RC6_BIT_END); goto again; case STATE_FINISHED: if (ev.pulse) break; switch (rc6_mode(data)) { case RC6_MODE_0: scancode = data->body; toggle = data->toggle; IR_dprintk(1, "RC6(0) scancode 0x%04x (toggle: %u)\n", scancode, toggle); break; case RC6_MODE_6A: if (data->count > CHAR_BIT * sizeof data->body) { IR_dprintk(1, "RC6 too many (%u) data bits\n", data->count); goto out; } scancode = data->body; if (data->count == RC6_6A_32_NBITS && (scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) { /* MCE RC */ toggle = (scancode & RC6_6A_MCE_TOGGLE_MASK) ? 1 : 0; scancode &= ~RC6_6A_MCE_TOGGLE_MASK; } else { toggle = 0; } IR_dprintk(1, "RC6(6A) scancode 0x%08x (toggle: %u)\n", scancode, toggle); break; default: IR_dprintk(1, "RC6 unknown mode\n"); goto out; } rc_keydown(dev, scancode, toggle); data->state = STATE_INACTIVE; return 0; } out: IR_dprintk(1, "RC6 decode failed at state %i (%uus %s)\n", data->state, TO_US(ev.duration), TO_STR(ev.pulse)); data->state = STATE_INACTIVE; return -EINVAL; } static struct ir_raw_handler rc6_handler = { .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE, .decode = ir_rc6_decode, }; static int __init ir_rc6_decode_init(void) { ir_raw_handler_register(&rc6_handler); printk(KERN_INFO "IR RC6 protocol handler initialized\n"); return 0; } static void __exit ir_rc6_decode_exit(void) { ir_raw_handler_unregister(&rc6_handler); } module_init(ir_rc6_decode_init); module_exit(ir_rc6_decode_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Härdeman <david@hardeman.nu>"); MODULE_DESCRIPTION("RC6 IR protocol decoder");
gpl-2.0
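For an RC6-6A-32 body, the decoder above treats bit 15 as a toggle only when the long customer code matches the MCE value, and strips it from the scancode before key lookup. A minimal demonstration of that masking, using the same mask constants (the sample body value is invented for illustration):

/* demo_rc6_mce.c -- illustrative MCE toggle extraction */
#include <stdio.h>
#include <stdint.h>

#define RC6_6A_MCE_TOGGLE_MASK 0x8000
#define RC6_6A_LCC_MASK        0xffff0000
#define RC6_6A_MCE_CC          0x800f0000

int main(void)
{
	uint32_t body = 0x800f8412;        /* hypothetical 32-bit RC6-6A body */
	uint32_t scancode = body;
	unsigned toggle = 0;

	if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
		toggle = (scancode & RC6_6A_MCE_TOGGLE_MASK) ? 1 : 0;
		scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
	}
	printf("scancode 0x%08x toggle %u\n", scancode, toggle);
	return 0;
}

With the sample body the customer code matches, so the output is scancode 0x800f0412 with toggle 1 — clearing the toggle bit is what lets repeated presses of the same key map to one scancode.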
myjang0507/Polaris-slte-
drivers/media/usb/dvb-usb/a800.c
4363
5586
/* DVB USB framework compliant Linux driver for the AVerMedia AverTV DVB-T * USB2.0 (A800) DVB-T receiver. * * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de) * * Thanks to * - AVerMedia who kindly provided information and * - Glen Harris who suffered from my mistakes during development. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "dibusb.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (rc=1 (or-able))." DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); #define deb_rc(args...) dprintk(debug,0x01,args) static int a800_power_ctrl(struct dvb_usb_device *d, int onoff) { /* do nothing for the AVerMedia */ return 0; } /* assure to put cold to 0 for iManufacturer == 1 */ static int a800_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { *cold = udev->descriptor.iManufacturer != 1; return 0; } static struct rc_map_table rc_map_a800_table[] = { { 0x0201, KEY_MODE }, /* SOURCE */ { 0x0200, KEY_POWER2 }, /* POWER */ { 0x0205, KEY_1 }, /* 1 */ { 0x0206, KEY_2 }, /* 2 */ { 0x0207, KEY_3 }, /* 3 */ { 0x0209, KEY_4 }, /* 4 */ { 0x020a, KEY_5 }, /* 5 */ { 0x020b, KEY_6 }, /* 6 */ { 0x020d, KEY_7 }, /* 7 */ { 0x020e, KEY_8 }, /* 8 */ { 0x020f, KEY_9 }, /* 9 */ { 0x0212, KEY_LEFT }, /* L / DISPLAY */ { 0x0211, KEY_0 }, /* 0 */ { 0x0213, KEY_RIGHT }, /* R / CH RTN */ { 0x0217, KEY_CAMERA }, /* SNAP SHOT */ { 0x0210, KEY_LAST }, /* 16-CH PREV */ { 0x021e, KEY_VOLUMEDOWN }, /* VOL DOWN */ { 0x020c, KEY_ZOOM }, /* FULL SCREEN */ { 0x021f, KEY_VOLUMEUP }, /* VOL UP */ { 0x0214, KEY_MUTE }, /* MUTE */ { 0x0208, KEY_AUDIO }, /* AUDIO */ { 0x0219, KEY_RECORD }, /* RECORD */ { 0x0218, KEY_PLAY }, /* PLAY */ { 0x021b, KEY_STOP }, /* STOP */ { 0x021a, KEY_PLAYPAUSE }, /* TIMESHIFT / PAUSE */ { 0x021d, KEY_BACK }, /* << / RED */ { 0x021c, KEY_FORWARD }, /* >> / YELLOW */ { 0x0203, KEY_TEXT }, /* TELETEXT */ { 0x0204, KEY_EPG }, /* EPG */ { 0x0215, KEY_MENU }, /* MENU */ { 0x0303, KEY_CHANNELUP }, /* CH UP */ { 0x0302, KEY_CHANNELDOWN }, /* CH DOWN */ { 0x0301, KEY_FIRST }, /* |<< / GREEN */ { 0x0300, KEY_LAST }, /* >>| / BLUE */ }; static int a800_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { int ret; u8 *key = kmalloc(5, GFP_KERNEL); if (!key) return -ENOMEM; if (usb_control_msg(d->udev,usb_rcvctrlpipe(d->udev,0), 0x04, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, key, 5, 2000) != 5) { ret = -ENODEV; goto out; } /* call the universal NEC remote processor, to find out the key's state and event */ dvb_usb_nec_rc_key_to_event(d,key,event,state); if (key[0] != 0) deb_rc("key: %*ph\n", 5, key); ret = 0; out: kfree(key); return ret; } /* USB Driver stuff */ static struct dvb_usb_device_properties a800_properties; static int a800_probe(struct usb_interface *intf, const struct usb_device_id *id) { return dvb_usb_device_init(intf, &a800_properties, THIS_MODULE, NULL, adapter_nr); } /* do not change the order of the ID table */ static struct usb_device_id a800_table [] = { /* 00 */ { USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_DVBT_USB2_COLD) }, /* 01 */ { USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_DVBT_USB2_WARM) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, a800_table); static struct dvb_usb_device_properties 
a800_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-avertv-a800-02.fw", .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .streaming_ctrl = dibusb2_0_streaming_ctrl, .pid_filter = dibusb_pid_filter, .pid_filter_ctrl = dibusb_pid_filter_ctrl, .frontend_attach = dibusb_dib3000mc_frontend_attach, .tuner_attach = dibusb_dib3000mc_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 7, .endpoint = 0x06, .u = { .bulk = { .buffersize = 4096, } } }, }}, .size_of_priv = sizeof(struct dibusb_state), }, }, .power_ctrl = a800_power_ctrl, .identify_state = a800_identify_state, .rc.legacy = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_map_table = rc_map_a800_table, .rc_map_size = ARRAY_SIZE(rc_map_a800_table), .rc_query = a800_rc_query, }, .i2c_algo = &dibusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 1, .devices = { { "AVerMedia AverTV DVB-T USB 2.0 (A800)", { &a800_table[0], NULL }, { &a800_table[1], NULL }, }, } }; static struct usb_driver a800_driver = { .name = "dvb_usb_a800", .probe = a800_probe, .disconnect = dvb_usb_device_exit, .id_table = a800_table, }; module_usb_driver(a800_driver); MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); MODULE_DESCRIPTION("AVerMedia AverTV DVB-T USB 2.0 (A800)"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
gpl-2.0
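a800_rc_query() feeds the raw 5-byte response through the common NEC processing, yielding a 16-bit scancode (custom byte in the high 8 bits) that rc-core then resolves through rc_map_a800_table. The toy lookup below works in the same spirit — the struct and table are illustrative stand-ins for rc-core's rc_map_table, with key names as strings instead of KEY_* codes:

/* demo_rc_map.c -- illustrative scancode-to-key lookup */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_map_entry {
	uint16_t scancode;
	const char *key;                   /* stand-in for a KEY_* code */
};

static const struct demo_map_entry demo_map[] = {
	{ 0x0200, "KEY_POWER2" },
	{ 0x0205, "KEY_1" },
	{ 0x0214, "KEY_MUTE" },
};

static const char *demo_lookup(uint16_t scancode)
{
	size_t i;

	for (i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++)
		if (demo_map[i].scancode == scancode)
			return demo_map[i].key;
	return "KEY_RESERVED";             /* unmapped scancodes are ignored */
}

int main(void)
{
	printf("0x0214 -> %s\n", demo_lookup(0x0214));
	return 0;
}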
konstantinkeller/kernel_lge_hammerhead
sound/pci/lola/lola_mixer.c
5643
26156
/* * Support for Digigram Lola PCI-e boards * * Copyright (c) 2011 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/io.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/tlv.h> #include "lola.h" static int __devinit lola_init_pin(struct lola *chip, struct lola_pin *pin, int dir, int nid) { unsigned int val; int err; pin->nid = nid; err = lola_read_param(chip, nid, LOLA_PAR_AUDIO_WIDGET_CAP, &val); if (err < 0) { printk(KERN_ERR SFX "Can't read wcaps for 0x%x\n", nid); return err; } val &= 0x00f00fff; /* test TYPE and bits 0..11 */ if (val == 0x00400200) /* Type = 4, Digital = 1 */ pin->is_analog = false; else if (val == 0x0040000a && dir == CAPT) /* Dig=0, InAmp/ovrd */ pin->is_analog = true; else if (val == 0x0040000c && dir == PLAY) /* Dig=0, OutAmp/ovrd */ pin->is_analog = true; else { printk(KERN_ERR SFX "Invalid wcaps 0x%x for 0x%x\n", val, nid); return -EINVAL; } /* analog parameters only following, so continue in case of Digital pin */ if (!pin->is_analog) return 0; if (dir == PLAY) err = lola_read_param(chip, nid, LOLA_PAR_AMP_OUT_CAP, &val); else err = lola_read_param(chip, nid, LOLA_PAR_AMP_IN_CAP, &val); if (err < 0) { printk(KERN_ERR SFX "Can't read AMP-caps for 0x%x\n", nid); return err; } pin->amp_mute = LOLA_AMP_MUTE_CAPABLE(val); pin->amp_step_size = LOLA_AMP_STEP_SIZE(val); pin->amp_num_steps = LOLA_AMP_NUM_STEPS(val); if (pin->amp_num_steps) { /* zero as mute state */ pin->amp_num_steps++; pin->amp_step_size++; } pin->amp_offset = LOLA_AMP_OFFSET(val); err = lola_codec_read(chip, nid, LOLA_VERB_GET_MAX_LEVEL, 0, 0, &val, NULL); if (err < 0) { printk(KERN_ERR SFX "Can't get MAX_LEVEL 0x%x\n", nid); return err; } pin->max_level = val & 0x3ff; /* 10 bits */ pin->config_default_reg = 0; pin->fixed_gain_list_len = 0; pin->cur_gain_step = 0; return 0; } int __devinit lola_init_pins(struct lola *chip, int dir, int *nidp) { int i, err, nid; nid = *nidp; for (i = 0; i < chip->pin[dir].num_pins; i++, nid++) { err = lola_init_pin(chip, &chip->pin[dir].pins[i], dir, nid); if (err < 0) return err; if (chip->pin[dir].pins[i].is_analog) chip->pin[dir].num_analog_pins++; } *nidp = nid; return 0; } void lola_free_mixer(struct lola *chip) { if (chip->mixer.array_saved) vfree(chip->mixer.array_saved); } int __devinit lola_init_mixer_widget(struct lola *chip, int nid) { unsigned int val; int err; err = lola_read_param(chip, nid, LOLA_PAR_AUDIO_WIDGET_CAP, &val); if (err < 0) { printk(KERN_ERR SFX "Can't read wcaps for 0x%x\n", nid); return err; } if ((val & 0xfff00000) != 0x02f00000) { /* test SubType and Type */ snd_printdd("No valid mixer widget\n"); return 0; } chip->mixer.nid = nid; chip->mixer.caps = val; chip->mixer.array = (struct lola_mixer_array __iomem *) 
(chip->bar[BAR1].remap_addr + LOLA_BAR1_SOURCE_GAIN_ENABLE); /* reserve memory to copy mixer data for sleep mode transitions */ chip->mixer.array_saved = vmalloc(sizeof(struct lola_mixer_array)); /* mixer matrix sources are physical input data and play streams */ chip->mixer.src_stream_outs = chip->pcm[PLAY].num_streams; chip->mixer.src_phys_ins = chip->pin[CAPT].num_pins; /* mixer matrix destinations are record streams and physical output */ chip->mixer.dest_stream_ins = chip->pcm[CAPT].num_streams; chip->mixer.dest_phys_outs = chip->pin[PLAY].num_pins; /* mixer matrix may have unused areas between PhysIn and * Play or Record and PhysOut zones */ chip->mixer.src_stream_out_ofs = chip->mixer.src_phys_ins + LOLA_MIXER_SRC_INPUT_PLAY_SEPARATION(val); chip->mixer.dest_phys_out_ofs = chip->mixer.dest_stream_ins + LOLA_MIXER_DEST_REC_OUTPUT_SEPARATION(val); /* example : MixerMatrix of LoLa881 (LoLa16161 uses unused zones) * +-+ 0-------8------16-------8------16 * | | | | | | | * |s| | INPUT | | INPUT | | * | |->| -> |unused | -> |unused | * |r| |CAPTURE| | OUTPUT| | * | | | MIX | | MIX | | * |c| 8-------------------------------- * | | | | | | | * | | | | | | | * |g| |unused |unused |unused |unused | * | | | | | | | * |a| | | | | | * | | 16------------------------------- * |i| | | | | | * | | | PLAYBK| | PLAYBK| | * |n|->| -> |unused | -> |unused | * | | |CAPTURE| | OUTPUT| | * | | | MIX | | MIX | | * |a| 8-------------------------------- * |r| | | | | | * |r| | | | | | * |a| |unused |unused |unused |unused | * |y| | | | | | * | | | | | | | * +++ 16--|---------------|------------ * +---V---------------V-----------+ * | dest_mix_gain_enable array | * +-------------------------------+ */ /* example : MixerMatrix of LoLa280 * +-+ 0-------8-2 * | | | | | * |s| | INPUT | | INPUT * |r|->| -> | | -> * |c| |CAPTURE| | <- OUTPUT * | | | MIX | | MIX * |g| 8---------- * |a| | | | * |i| | PLAYBK| | PLAYBACK * |n|->| -> | | -> * | | |CAPTURE| | <- OUTPUT * |a| | MIX | | MIX * |r| 8---|----|- * |r| +---V----V-------------------+ * |a| | dest_mix_gain_enable array | * |y| +----------------------------+ */ if (chip->mixer.src_stream_out_ofs > MAX_AUDIO_INOUT_COUNT || chip->mixer.dest_phys_out_ofs > MAX_STREAM_IN_COUNT) { printk(KERN_ERR SFX "Invalid mixer widget size\n"); return -EINVAL; } chip->mixer.src_mask = ((1U << chip->mixer.src_phys_ins) - 1) | (((1U << chip->mixer.src_stream_outs) - 1) << chip->mixer.src_stream_out_ofs); chip->mixer.dest_mask = ((1U << chip->mixer.dest_stream_ins) - 1) | (((1U << chip->mixer.dest_phys_outs) - 1) << chip->mixer.dest_phys_out_ofs); snd_printdd("Mixer src_mask=%x, dest_mask=%x\n", chip->mixer.src_mask, chip->mixer.dest_mask); return 0; } static int lola_mixer_set_src_gain(struct lola *chip, unsigned int id, unsigned short gain, bool on) { unsigned int oldval, val; if (!(chip->mixer.src_mask & (1 << id))) return -EINVAL; oldval = val = readl(&chip->mixer.array->src_gain_enable); if (on) val |= (1 << id); else val &= ~(1 << id); /* test if values unchanged */ if ((val == oldval) && (gain == readw(&chip->mixer.array->src_gain[id]))) return 0; snd_printdd("lola_mixer_set_src_gain (id=%d, gain=%d) enable=%x\n", id, gain, val); writew(gain, &chip->mixer.array->src_gain[id]); writel(val, &chip->mixer.array->src_gain_enable); lola_codec_flush(chip); /* inform micro-controller about the new source gain */ return lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_SOURCE_GAIN, id, 0); } #if 0 /* not used */ static int lola_mixer_set_src_gains(struct lola *chip, unsigned int mask, 
unsigned short *gains) { int i; if ((chip->mixer.src_mask & mask) != mask) return -EINVAL; for (i = 0; i < LOLA_MIXER_DIM; i++) { if (mask & (1 << i)) { writew(*gains, &chip->mixer.array->src_gain[i]); gains++; } } writel(mask, &chip->mixer.array->src_gain_enable); lola_codec_flush(chip); if (chip->mixer.caps & LOLA_PEAK_METER_CAN_AGC_MASK) { /* update for all srcs at once */ return lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_SOURCE_GAIN, 0x80, 0); } /* update manually */ for (i = 0; i < LOLA_MIXER_DIM; i++) { if (mask & (1 << i)) { lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_SOURCE_GAIN, i, 0); } } return 0; } #endif /* not used */ static int lola_mixer_set_mapping_gain(struct lola *chip, unsigned int src, unsigned int dest, unsigned short gain, bool on) { unsigned int val; if (!(chip->mixer.src_mask & (1 << src)) || !(chip->mixer.dest_mask & (1 << dest))) return -EINVAL; if (on) writew(gain, &chip->mixer.array->dest_mix_gain[dest][src]); val = readl(&chip->mixer.array->dest_mix_gain_enable[dest]); if (on) val |= (1 << src); else val &= ~(1 << src); writel(val, &chip->mixer.array->dest_mix_gain_enable[dest]); lola_codec_flush(chip); return lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_MIX_GAIN, src, dest); } #if 0 /* not used */ static int lola_mixer_set_dest_gains(struct lola *chip, unsigned int id, unsigned int mask, unsigned short *gains) { int i; if (!(chip->mixer.dest_mask & (1 << id)) || (chip->mixer.src_mask & mask) != mask) return -EINVAL; for (i = 0; i < LOLA_MIXER_DIM; i++) { if (mask & (1 << i)) { writew(*gains, &chip->mixer.array->dest_mix_gain[id][i]); gains++; } } writel(mask, &chip->mixer.array->dest_mix_gain_enable[id]); lola_codec_flush(chip); /* update for all dests at once */ return lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_DESTINATION_GAIN, id, 0); } #endif /* not used */ /* */ static int set_analog_volume(struct lola *chip, int dir, unsigned int idx, unsigned int val, bool external_call); int lola_setup_all_analog_gains(struct lola *chip, int dir, bool mute) { struct lola_pin *pin; int idx, max_idx; pin = chip->pin[dir].pins; max_idx = chip->pin[dir].num_pins; for (idx = 0; idx < max_idx; idx++) { if (pin[idx].is_analog) { unsigned int val = mute ? 
0 : pin[idx].cur_gain_step; /* set volume and do not save the value */ set_analog_volume(chip, dir, idx, val, false); } } return lola_codec_flush(chip); } void lola_save_mixer(struct lola *chip) { /* mute analog output */ if (chip->mixer.array_saved) { /* store contents of mixer array */ memcpy_fromio(chip->mixer.array_saved, chip->mixer.array, sizeof(*chip->mixer.array)); } lola_setup_all_analog_gains(chip, PLAY, true); /* output mute */ } void lola_restore_mixer(struct lola *chip) { int i; /*lola_reset_setups(chip);*/ if (chip->mixer.array_saved) { /* restore contents of mixer array */ memcpy_toio(chip->mixer.array, chip->mixer.array_saved, sizeof(*chip->mixer.array)); /* inform micro-controller about all restored values * and ignore return values */ for (i = 0; i < chip->mixer.src_phys_ins; i++) lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_SOURCE_GAIN, i, 0); for (i = 0; i < chip->mixer.src_stream_outs; i++) lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_SOURCE_GAIN, chip->mixer.src_stream_out_ofs + i, 0); for (i = 0; i < chip->mixer.dest_stream_ins; i++) lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_DESTINATION_GAIN, i, 0); for (i = 0; i < chip->mixer.dest_phys_outs; i++) lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_DESTINATION_GAIN, chip->mixer.dest_phys_out_ofs + i, 0); lola_codec_flush(chip); } } /* */ static int set_analog_volume(struct lola *chip, int dir, unsigned int idx, unsigned int val, bool external_call) { struct lola_pin *pin; int err; if (idx >= chip->pin[dir].num_pins) return -EINVAL; pin = &chip->pin[dir].pins[idx]; if (!pin->is_analog || pin->amp_num_steps <= val) return -EINVAL; if (external_call && pin->cur_gain_step == val) return 0; if (external_call) lola_codec_flush(chip); snd_printdd("set_analog_volume (dir=%d idx=%d, volume=%d)\n", dir, idx, val); err = lola_codec_write(chip, pin->nid, LOLA_VERB_SET_AMP_GAIN_MUTE, val, 0); if (err < 0) return err; if (external_call) pin->cur_gain_step = val; return 0; } int lola_set_src_config(struct lola *chip, unsigned int src_mask, bool update) { int ret = 0; int success = 0; int n, err; /* SRC can be activated and the dwInputSRCMask is valid? */ if ((chip->input_src_caps_mask & src_mask) != src_mask) return -EINVAL; /* handle all even Inputs - SRC is a stereo setting !!! 
*/ for (n = 0; n < chip->pin[CAPT].num_pins; n += 2) { unsigned int mask = 3U << n; /* handle the stereo case */ unsigned int new_src, src_state; if (!(chip->input_src_caps_mask & mask)) continue; /* if one IO needs SRC, both stereo IO will get SRC */ new_src = (src_mask & mask) != 0; if (update) { src_state = (chip->input_src_mask & mask) != 0; if (src_state == new_src) continue; /* nothing to change for this IO */ } err = lola_codec_write(chip, chip->pcm[CAPT].streams[n].nid, LOLA_VERB_SET_SRC, new_src, 0); if (!err) success++; else ret = err; } if (success) ret = lola_codec_flush(chip); if (!ret) chip->input_src_mask = src_mask; return ret; } /* */ static int init_mixer_values(struct lola *chip) { int i; /* all sample rate converters on */ lola_set_src_config(chip, (1 << chip->pin[CAPT].num_pins) - 1, false); /* clear all mixer matrix settings */ memset_io(chip->mixer.array, 0, sizeof(*chip->mixer.array)); /* inform firmware about all updated matrix columns - capture part */ for (i = 0; i < chip->mixer.dest_stream_ins; i++) lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_DESTINATION_GAIN, i, 0); /* inform firmware about all updated matrix columns - output part */ for (i = 0; i < chip->mixer.dest_phys_outs; i++) lola_codec_write(chip, chip->mixer.nid, LOLA_VERB_SET_DESTINATION_GAIN, chip->mixer.dest_phys_out_ofs + i, 0); /* set all digital input source (master) gains to 0dB */ for (i = 0; i < chip->mixer.src_phys_ins; i++) lola_mixer_set_src_gain(chip, i, 336, true); /* 0dB */ /* set all digital playback source (master) gains to 0dB */ for (i = 0; i < chip->mixer.src_stream_outs; i++) lola_mixer_set_src_gain(chip, i + chip->mixer.src_stream_out_ofs, 336, true); /* 0dB */ /* set gain value 0dB diagonally in matrix - part INPUT -> CAPTURE */ for (i = 0; i < chip->mixer.dest_stream_ins; i++) { int src = i % chip->mixer.src_phys_ins; lola_mixer_set_mapping_gain(chip, src, i, 336, true); } /* set gain value 0dB diagonally in matrix , part PLAYBACK -> OUTPUT * (LoLa280 : playback channel 0,2,4,6 linked to output channel 0) * (LoLa280 : playback channel 1,3,5,7 linked to output channel 1) */ for (i = 0; i < chip->mixer.src_stream_outs; i++) { int src = chip->mixer.src_stream_out_ofs + i; int dst = chip->mixer.dest_phys_out_ofs + i % chip->mixer.dest_phys_outs; lola_mixer_set_mapping_gain(chip, src, dst, 336, true); } return 0; } /* * analog mixer control element */ static int lola_analog_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct lola *chip = snd_kcontrol_chip(kcontrol); int dir = kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = chip->pin[dir].num_pins; uinfo->value.integer.min = 0; uinfo->value.integer.max = chip->pin[dir].pins[0].amp_num_steps; return 0; } static int lola_analog_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); int dir = kcontrol->private_value; int i; for (i = 0; i < chip->pin[dir].num_pins; i++) ucontrol->value.integer.value[i] = chip->pin[dir].pins[i].cur_gain_step; return 0; } static int lola_analog_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); int dir = kcontrol->private_value; int i, err; for (i = 0; i < chip->pin[dir].num_pins; i++) { err = set_analog_volume(chip, dir, i, ucontrol->value.integer.value[i], true); if (err < 0) return err; } return 0; } static int lola_analog_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int 
size, unsigned int __user *tlv) { struct lola *chip = snd_kcontrol_chip(kcontrol); int dir = kcontrol->private_value; unsigned int val1, val2; struct lola_pin *pin; if (size < 4 * sizeof(unsigned int)) return -ENOMEM; pin = &chip->pin[dir].pins[0]; val2 = pin->amp_step_size * 25; val1 = -1 * (int)pin->amp_offset * (int)val2; #ifdef TLV_DB_SCALE_MUTE val2 |= TLV_DB_SCALE_MUTE; #endif if (put_user(SNDRV_CTL_TLVT_DB_SCALE, tlv)) return -EFAULT; if (put_user(2 * sizeof(unsigned int), tlv + 1)) return -EFAULT; if (put_user(val1, tlv + 2)) return -EFAULT; if (put_user(val2, tlv + 3)) return -EFAULT; return 0; } static struct snd_kcontrol_new lola_analog_mixer __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK), .info = lola_analog_vol_info, .get = lola_analog_vol_get, .put = lola_analog_vol_put, .tlv.c = lola_analog_vol_tlv, }; static int __devinit create_analog_mixer(struct lola *chip, int dir, char *name) { if (!chip->pin[dir].num_pins) return 0; /* no analog volumes on digital only adapters */ if (chip->pin[dir].num_pins != chip->pin[dir].num_analog_pins) return 0; lola_analog_mixer.name = name; lola_analog_mixer.private_value = dir; return snd_ctl_add(chip->card, snd_ctl_new1(&lola_analog_mixer, chip)); } /* * Hardware sample rate converter on digital input */ static int lola_input_src_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct lola *chip = snd_kcontrol_chip(kcontrol); uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = chip->pin[CAPT].num_pins; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } static int lola_input_src_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); int i; for (i = 0; i < chip->pin[CAPT].num_pins; i++) ucontrol->value.integer.value[i] = !!(chip->input_src_mask & (1 << i)); return 0; } static int lola_input_src_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); int i; unsigned int mask; mask = 0; for (i = 0; i < chip->pin[CAPT].num_pins; i++) if (ucontrol->value.integer.value[i]) mask |= 1 << i; return lola_set_src_config(chip, mask, true); } static struct snd_kcontrol_new lola_input_src_mixer __devinitdata = { .name = "Digital SRC Capture Switch", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .info = lola_input_src_info, .get = lola_input_src_get, .put = lola_input_src_put, }; /* * Lola16161 or Lola881 can have Hardware sample rate converters * on its digital input pins */ static int __devinit create_input_src_mixer(struct lola *chip) { if (!chip->input_src_caps_mask) return 0; return snd_ctl_add(chip->card, snd_ctl_new1(&lola_input_src_mixer, chip)); } /* * src gain mixer */ static int lola_src_gain_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { unsigned int count = (kcontrol->private_value >> 8) & 0xff; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = count; uinfo->value.integer.min = 0; uinfo->value.integer.max = 409; return 0; } static int lola_src_gain_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); unsigned int ofs = kcontrol->private_value & 0xff; unsigned int count = (kcontrol->private_value >> 8) & 0xff; unsigned int mask, i; mask = readl(&chip->mixer.array->src_gain_enable); for (i = 0; i < count; i++) { unsigned int idx = ofs + i; unsigned short 
val; if (!(chip->mixer.src_mask & (1 << idx))) return -EINVAL; if (mask & (1 << idx)) val = readw(&chip->mixer.array->src_gain[idx]) + 1; else val = 0; ucontrol->value.integer.value[i] = val; } return 0; } static int lola_src_gain_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); unsigned int ofs = kcontrol->private_value & 0xff; unsigned int count = (kcontrol->private_value >> 8) & 0xff; int i, err; for (i = 0; i < count; i++) { unsigned int idx = ofs + i; unsigned short val = ucontrol->value.integer.value[i]; if (val) val--; err = lola_mixer_set_src_gain(chip, idx, val, !!val); if (err < 0) return err; } return 0; } /* raw value: 0 = -84dB, 336 = 0dB, 408=18dB, incremented 1 for mute */ static const DECLARE_TLV_DB_SCALE(lola_src_gain_tlv, -8425, 25, 1); static struct snd_kcontrol_new lola_src_gain_mixer __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .info = lola_src_gain_info, .get = lola_src_gain_get, .put = lola_src_gain_put, .tlv.p = lola_src_gain_tlv, }; static int __devinit create_src_gain_mixer(struct lola *chip, int num, int ofs, char *name) { lola_src_gain_mixer.name = name; lola_src_gain_mixer.private_value = ofs + (num << 8); return snd_ctl_add(chip->card, snd_ctl_new1(&lola_src_gain_mixer, chip)); } #if 0 /* not used */ /* * destination gain (matrix-like) mixer */ static int lola_dest_gain_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { unsigned int src_num = (kcontrol->private_value >> 8) & 0xff; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = src_num; uinfo->value.integer.min = 0; uinfo->value.integer.max = 433; return 0; } static int lola_dest_gain_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); unsigned int src_ofs = kcontrol->private_value & 0xff; unsigned int src_num = (kcontrol->private_value >> 8) & 0xff; unsigned int dst_ofs = (kcontrol->private_value >> 16) & 0xff; unsigned int dst, mask, i; dst = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id) + dst_ofs; mask = readl(&chip->mixer.array->dest_mix_gain_enable[dst]); for (i = 0; i < src_num; i++) { unsigned int src = src_ofs + i; unsigned short val; if (!(chip->mixer.src_mask & (1 << src))) return -EINVAL; if (mask & (1 << dst)) val = readw(&chip->mixer.array->dest_mix_gain[dst][src]) + 1; else val = 0; ucontrol->value.integer.value[i] = val; } return 0; } static int lola_dest_gain_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct lola *chip = snd_kcontrol_chip(kcontrol); unsigned int src_ofs = kcontrol->private_value & 0xff; unsigned int src_num = (kcontrol->private_value >> 8) & 0xff; unsigned int dst_ofs = (kcontrol->private_value >> 16) & 0xff; unsigned int dst, mask; unsigned short gains[MAX_STREAM_COUNT]; int i, num; mask = 0; num = 0; for (i = 0; i < src_num; i++) { unsigned short val = ucontrol->value.integer.value[i]; if (val) { gains[num++] = val - 1; mask |= 1 << i; } } mask <<= src_ofs; dst = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id) + dst_ofs; return lola_mixer_set_dest_gains(chip, dst, mask, gains); } static const DECLARE_TLV_DB_SCALE(lola_dest_gain_tlv, -8425, 25, 1); static struct snd_kcontrol_new lola_dest_gain_mixer __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .info = lola_dest_gain_info, .get = lola_dest_gain_get, .put = 
lola_dest_gain_put, .tlv.p = lola_dest_gain_tlv, }; static int __devinit create_dest_gain_mixer(struct lola *chip, int src_num, int src_ofs, int num, int ofs, char *name) { lola_dest_gain_mixer.count = num; lola_dest_gain_mixer.name = name; lola_dest_gain_mixer.private_value = src_ofs + (src_num << 8) + (ofs << 16) + (num << 24); return snd_ctl_add(chip->card, snd_ctl_new1(&lola_dest_gain_mixer, chip)); } #endif /* not used */ /* */ int __devinit lola_create_mixer(struct lola *chip) { int err; err = create_analog_mixer(chip, PLAY, "Analog Playback Volume"); if (err < 0) return err; err = create_analog_mixer(chip, CAPT, "Analog Capture Volume"); if (err < 0) return err; err = create_input_src_mixer(chip); if (err < 0) return err; err = create_src_gain_mixer(chip, chip->mixer.src_phys_ins, 0, "Digital Capture Volume"); if (err < 0) return err; err = create_src_gain_mixer(chip, chip->mixer.src_stream_outs, chip->mixer.src_stream_out_ofs, "Digital Playback Volume"); if (err < 0) return err; #if 0 /* FIXME: buggy mixer matrix handling */ err = create_dest_gain_mixer(chip, chip->mixer.src_phys_ins, 0, chip->mixer.dest_stream_ins, 0, "Line Capture Volume"); if (err < 0) return err; err = create_dest_gain_mixer(chip, chip->mixer.src_stream_outs, chip->mixer.src_stream_out_ofs, chip->mixer.dest_stream_ins, 0, "Stream-Loopback Capture Volume"); if (err < 0) return err; err = create_dest_gain_mixer(chip, chip->mixer.src_phys_ins, 0, chip->mixer.dest_phys_outs, chip->mixer.dest_phys_out_ofs, "Line-Loopback Playback Volume"); if (err < 0) return err; err = create_dest_gain_mixer(chip, chip->mixer.src_stream_outs, chip->mixer.src_stream_out_ofs, chip->mixer.dest_phys_outs, chip->mixer.dest_phys_out_ofs, "Stream Playback Volume"); if (err < 0) return err; #endif /* FIXME */ return init_mixer_values(chip); }
gpl-2.0
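The "raw value: 0 = -84dB, 336 = 0dB, 408=18dB, incremented 1 for mute" comment and DECLARE_TLV_DB_SCALE(lola_src_gain_tlv, -8425, 25, 1) above fit together like this: the ALSA control value runs 0..409, value 0 is mute, each step is 0.25 dB, and the hardware register receives value minus one. A small arithmetic check of that mapping (illustrative, not driver code):

/* demo_lola_tlv.c -- checking the gain step to dB mapping */
#include <stdio.h>

int main(void)
{
	/* control values as seen by ALSA: 0 mutes, 1..409 map onto the scale */
	int values[] = { 0, 1, 337, 409 };
	int i, raw, cdb;

	for (i = 0; i < 4; i++) {
		if (values[i] == 0) {
			printf("value %3d -> mute\n", values[i]);
			continue;
		}
		raw = values[i] - 1;              /* what lands in src_gain[] */
		cdb = -8425 + 25 * values[i];     /* centi-dB, per the TLV */
		printf("value %3d -> raw %3d -> %d.%02d dB\n",
		       values[i], raw, cdb / 100,
		       (cdb < 0 ? -cdb : cdb) % 100);
	}
	return 0;
}

Control value 337 maps to raw register value 336 at 0.00 dB and value 409 to raw 408 at 18.00 dB, matching the driver comment; lola_src_gain_put() performs the same minus-one adjustment before writing the register.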
poondog/kangaroo-m7-mkIV
arch/sh/boards/board-polaris.c
8459
3264
/* * June 2006 steve.glendinning@smsc.com * * Polaris-specific resource declaration * */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/smsc911x.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/machvec.h> #include <asm/heartbeat.h> #include <cpu/gpio.h> #include <mach-se/mach/se.h> #define BCR2 (0xFFFFFF62) #define WCR2 (0xFFFFFF66) #define AREA5_WAIT_CTRL (0x1C00) #define WAIT_STATES_10 (0x7) static struct resource smsc911x_resources[] = { [0] = { .name = "smsc911x-memory", .start = PA_EXT5, .end = PA_EXT5 + 0x1fff, .flags = IORESOURCE_MEM, }, [1] = { .name = "smsc911x-irq", .start = IRQ0_IRQ, .end = IRQ0_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct smsc911x_platform_config smsc911x_config = { .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, .flags = SMSC911X_USE_32BIT, .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct platform_device smsc911x_device = { .name = "smsc911x", .id = 0, .num_resources = ARRAY_SIZE(smsc911x_resources), .resource = smsc911x_resources, .dev = { .platform_data = &smsc911x_config, }, }; static unsigned char heartbeat_bit_pos[] = { 0, 1, 2, 3 }; static struct heartbeat_data heartbeat_data = { .bit_pos = heartbeat_bit_pos, .nr_bits = ARRAY_SIZE(heartbeat_bit_pos), }; static struct resource heartbeat_resource = { .start = PORT_PCDR, .end = PORT_PCDR, .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, }; static struct platform_device heartbeat_device = { .name = "heartbeat", .id = -1, .dev = { .platform_data = &heartbeat_data, }, .num_resources = 1, .resource = &heartbeat_resource, }; static struct platform_device *polaris_devices[] __initdata = { &smsc911x_device, &heartbeat_device, }; static int __init polaris_initialise(void) { u16 wcr, bcr_mask; printk(KERN_INFO "Configuring Polaris external bus\n"); /* Configure area 5 with 2 wait states */ wcr = __raw_readw(WCR2); wcr &= (~AREA5_WAIT_CTRL); wcr |= (WAIT_STATES_10 << 10); __raw_writew(wcr, WCR2); /* Configure area 5 for 32-bit access */ bcr_mask = __raw_readw(BCR2); bcr_mask |= 1 << 10; __raw_writew(bcr_mask, BCR2); return platform_add_devices(polaris_devices, ARRAY_SIZE(polaris_devices)); } arch_initcall(polaris_initialise); static struct ipr_data ipr_irq_table[] = { /* External IRQs */ { IRQ0_IRQ, 0, 0, 1, }, /* IRQ0 */ { IRQ1_IRQ, 0, 4, 1, }, /* IRQ1 */ }; static unsigned long ipr_offsets[] = { INTC_IPRC }; static struct ipr_desc ipr_irq_desc = { .ipr_offsets = ipr_offsets, .nr_offsets = ARRAY_SIZE(ipr_offsets), .ipr_data = ipr_irq_table, .nr_irqs = ARRAY_SIZE(ipr_irq_table), .chip = { .name = "sh7709-ext", }, }; static void __init init_polaris_irq(void) { /* Disable all interrupts */ __raw_writew(0, BCR_ILCRA); __raw_writew(0, BCR_ILCRB); __raw_writew(0, BCR_ILCRC); __raw_writew(0, BCR_ILCRD); __raw_writew(0, BCR_ILCRE); __raw_writew(0, BCR_ILCRF); __raw_writew(0, BCR_ILCRG); register_ipr_controller(&ipr_irq_desc); } static struct sh_machine_vector mv_polaris __initmv = { .mv_name = "Polaris", .mv_nr_irqs = 61, .mv_init_irq = init_polaris_irq, };
gpl-2.0
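polaris_initialise() above uses the classic read-modify-write pattern on WCR2: read the register, clear the area-5 wait-state field with its mask, OR in the new setting at the field's shift, and write it back. The same sequence on a plain variable, with the mask constants copied from the board file (the initial value is made up, and printf stands in for __raw_writew):

/* demo_rmw.c -- illustrative read-modify-write of a bus control field */
#include <stdio.h>
#include <stdint.h>

#define AREA5_WAIT_CTRL 0x1C00            /* wait-state field of WCR2 */
#define WAIT_STATES_10  0x7

int main(void)
{
	uint16_t wcr = 0xAAAA;            /* pretend __raw_readw(WCR2) gave this */

	wcr &= (uint16_t)~AREA5_WAIT_CTRL;    /* clear the area-5 wait field */
	wcr |= WAIT_STATES_10 << 10;          /* install the new wait-state code */
	printf("WCR2 <- 0x%04x\n", wcr);      /* would be __raw_writew(wcr, WCR2) */
	return 0;
}

The separate BCR2 update in the board file follows the same pattern with a single-bit mask (1 << 10) to select 32-bit access for area 5.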
AOKP/kernel_semc_msm7x30
drivers/media/video/msm_semc/seix006.c
12
88687
/* * Copyright (c) 2008-2009 QUALCOMM USA, INC. * Copyright (C) 2010 Sony Ericsson Mobile Communications AB. * * All source code in this file is licensed under the following license * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, you can find it at http://www.fsf.org */ #include <linux/delay.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/kthread.h> #include <media/msm_camera.h> #include <mach/gpio.h> #include <mach/camera.h> #include <mach/board.h> #include <mach/vreg.h> #include "seix006.h" #include "seix006_cam_devdrv_table.h" /* Some Robyn specific functions, also dependent on addition to board.h, not * necessary for Zeus at the moment are removed via the following define. */ #define USE_ZEUS_POWER_MANAGEMENT #ifdef SEDBG #ifdef CDBG #undef CDBG #endif #define CDBG(fmt, args...) printk(KERN_INFO "msm_cam_seix006: " fmt, ##args) #endif /* SEDBG */ #define SEIX006_I2C_NAME "seix006" #define SEIX006_MSM_CAMERA_NAME "msm_camera_seix006" /* ******** Local functions ************* */ static int32_t seix006_gpio_access(int gpio_pin, int dir); #ifndef USE_ZEUS_POWER_MANAGEMENT static int32_t seix006_resource_enable(struct msm_camera_sensor_pwr *resource); static int32_t seix006_resource_disable(struct msm_camera_sensor_pwr *resource); #endif static int32_t seix006_sensor_on(void); static int32_t seix006_sensor_init(void); static void seix006_sensor_off(void); static int seix006_sensor_open(const struct msm_camera_sensor_info *data); static int seix006_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id); static int __exit seix006_i2c_remove(struct i2c_client *client); static int32_t seix006_i2c_write(uint16_t address, uint8_t data_length, const uint8_t * data); static int32_t seix006_i2c_read(uint16_t address, uint8_t length, uint8_t * data); static int32_t seix006_set_sensor_mode(struct sensor_cfg_data cfg_data); static int32_t seix006_monitor_config(void); static int32_t seix006_raw_snapshot_config(void); static int32_t seix006_raw_rgb_snapshot_config(void); static int32_t seix006_raw_snapshot_start(void); static int32_t seix006_snapshot_config(void); static int32_t seix006_half_release_config(void); static int32_t seix006_set_test_pattern(enum set_test_pattern_t mode); static int32_t seix006_set_iso(uint16_t iso_mode); static int32_t seix006_set_focus_mode(enum camera_focus_mode focus_mode); static int32_t seix006_update_focus_mode(enum camera_focus_mode focus_mode); static int32_t seix006_set_scene(enum camera_scene scene); static int32_t seix006_update_scene(enum camera_scene start_scene); static int32_t seix006_set_dimensions(struct camera_dimension_t dimension); static int32_t seix006_set_preview_dimension(struct camera_preview_dimension_t dimension); static int32_t seix006_get_af_status(enum camera_af_status *status); static int32_t seix006_get_exif(struct cam_ctrl_exif_params_t *exif); static int32_t seix006_read_vendor_data(void); static void seix006_add_bytes_in_switched_endian(uint8_t * 
p_package_buf, uint8_t pkg_position, uint8_t size_of_data, uint8_t * Data); static int32_t seix006_send_reg_table(const struct reg_entry *table, uint32_t tables_to_send); static int32_t seix006_check_msts(uint8_t value, uint32_t timeout); static int32_t seix006_check_bsts(uint8_t value, uint32_t timeout); static int32_t seix006_check_afsts(uint8_t value, uint32_t timeout); static int32_t seix006_refresh_monitor(uint32_t timeout); static int32_t seix006_cam_is_assist_light_needed(int *result); static int32_t seix006_set_framerate(uint16_t fps); static int32_t seix006_write_calibration_data(struct seix006_calibration_data); static int init_thread(void* data); static int autoflash_enable(int onoff); static int autoflash_strobe(int onoff); static int autoflash_adjust(void); static int32_t seix006_movie_config(void); static int32_t seix006_raw_rgb_stream_config(void); static long seix006_set_effect(int mode, int effect); static long seix006_set_exposure_mode(int exp_mode); static long seix006_set_wb(int wb_type); static long seix006_set_sharpness(uint8_t value); static long seix006_set_contrast(uint8_t value); static long seix006_set_img_quality(uint8_t value); static long seix006_set_brightness(uint8_t value); static long seix006_set_exposure_compensation(int8_t value); static int seix006_get_capture_started(void); static long seix006_set_flash(uint8_t flash_mode); static long setix006_set_led_state(uint8_t led_state); /* ********** Local variables/structs ************ */ static struct seix006_ctrl_t *seix006_ctrl = NULL; static DECLARE_WAIT_QUEUE_HEAD(seix006_wait_queue); DEFINE_SEMAPHORE(seix006_sem); static int is_capture_started = 0; enum { FLASH_OFF, FLASH_AUTO, FLASH_ON }; static int camera_flash = FLASH_OFF; static int camera_variable_frame_rate = 1; /* 1 if continous auto focus is active */ static int camera_continous_autofocus; DEFINE_MUTEX(seix006_capture_lock); /* * Switches places of Most Significant Bit (MSB) and Least Significant * * Bit (LSB) before adding the data to the package buffer * */ static void seix006_add_bytes_in_switched_endian(uint8_t *p_package_buf, uint8_t pkg_position, uint8_t size_of_data, uint8_t *Data) { int MSB_byte_number, byte_number; for (MSB_byte_number = size_of_data - 1, byte_number = 0; MSB_byte_number >= 0; --MSB_byte_number, ++byte_number) { memcpy(p_package_buf + pkg_position + byte_number, Data + MSB_byte_number, sizeof(uint8_t)); } } /** * I2C Device ID Structure Body. * */ static const struct i2c_device_id seix006_id[] = { { SEIX006_I2C_NAME, 0}, { } }; /** * I2C Device Structure Body. * */ static struct i2c_driver seix006_driver = { .id_table = seix006_id, .probe = seix006_i2c_probe, .remove = __exit_p(seix006_i2c_remove), .driver = { .name = SEIX006_I2C_NAME, }, }; /** * Precess IOCTL messages. 
* */ int seix006_sensor_config(void __user *argp) { int32_t ret = 0; struct sensor_cfg_data cfg_data; CDBG("seix006_sensor_config [S]\n"); ret = copy_from_user(&cfg_data, (void *) argp, sizeof(struct sensor_cfg_data)); if (ret) { CDBG("seix006_sensor_config copy_from_user failed\n"); return -EFAULT; } if (cfg_data.cfgtype == CFG_SET_MODE && cfg_data.mode == SENSOR_PREVIEW_MODE && seix006_ctrl->dev_mode == CAMERA_MODE_MONITOR && seix006_ctrl->init_complete) { CDBG("Already running in monitor"); return 0; } down(&seix006_sem); CDBG("seix006_sensor_config cfgtype = %d\n", cfg_data.cfgtype); switch (cfg_data.cfgtype) { case CFG_SET_MODE: ret = seix006_set_sensor_mode(cfg_data); break; case CFG_SET_TEST_PATTERN: ret = seix006_set_test_pattern(cfg_data.cfg.set_test_pattern); break; case CFG_SET_ISO: ret = seix006_set_iso(cfg_data.cfg.iso_mode); break; case CFG_GET_AF_STATUS: ret = seix006_get_af_status(&cfg_data.cfg.af_status); break; case CFG_GET_EXIF: ret = seix006_get_exif(&cfg_data.cfg.exif); break; case CFG_SET_SCENE: ret = seix006_set_scene(cfg_data.cfg.scene); break; case CFG_SET_DIMENSION: ret = seix006_set_dimensions(cfg_data.cfg.dimension); break; case CFG_SET_SENSOR_DIMENSION: ret = seix006_set_preview_dimension(cfg_data.cfg.preview_dimension); break; case CFG_GET_AF_ASSIST_LIGHT: ret = seix006_cam_is_assist_light_needed((int *) &cfg_data.rs); break; case CFG_SET_FPS: ret = seix006_set_framerate(cfg_data.cfg.fps.f_mult); break; case CFG_SET_EFFECT: ret = seix006_set_effect(cfg_data.mode, cfg_data.cfg.effect); break; case CFG_SET_CONTRAST: ret = seix006_set_contrast(cfg_data.cfg.contrast); break; case CFG_SET_BRIGHTNESS: ret = seix006_set_brightness(cfg_data.cfg.brightness); break; case CFG_SET_SHARPNESS: ret = seix006_set_sharpness(cfg_data.cfg.sharpness); break; case CFG_SET_WB: ret = seix006_set_wb(cfg_data.cfg.wb_type); break; case CFG_SET_IMG_QUALITY: ret = seix006_set_img_quality(cfg_data.cfg.quality); break; case CFG_SET_EXPOSURE_COMPENSATION: ret = seix006_set_exposure_compensation(cfg_data.cfg.ev); break; case CFG_SET_EXPOSURE_MODE: ret = seix006_set_exposure_mode(cfg_data.cfg.exp_mode); break; case CFG_SET_FLASH: ret = seix006_set_flash(cfg_data.cfg.flashled); break; default: CDBG("seix006_sensor_config cfgtype failed\n"); ret = -EFAULT; break; } up(&seix006_sem); ret = copy_to_user((void *) argp, &cfg_data, sizeof(struct sensor_cfg_data)); if (ret) { CDBG("seix006_sensor_config copy_to_user failed\n"); return -EFAULT; } CDBG("seix006_sensor_config [E]\n"); return ret; } /** * Release * */ int seix006_sensor_release(void) { CDBG("seix006_sensor_release [S]\n"); down(&seix006_sem); if(seix006_ctrl->opened) { seix006_sensor_off(); seix006_ctrl->opened = 0; } seix006_ctrl->dev_mode = CAMERA_MODE_STANDBY; up(&seix006_sem); CDBG("seix006_sensor_release [E]\n"); return 0; } /** * Exit * */ void seix006_exit(void) { CDBG("seix006_exit [S]\n"); i2c_del_driver(&seix006_driver); CDBG("seix006_exit [E]\n"); } /** * Probe * */ static int seix006_camera_probe(const struct msm_camera_sensor_info *info, struct msm_sensor_ctrl *s) { int rc = 0; CDBG("seix006_camera_probe [S]\n"); seix006_ctrl = kzalloc(sizeof(struct seix006_ctrl_t), GFP_KERNEL); if (NULL == seix006_ctrl || NULL == info) { CDBG("seix006_cam_probe memory allocation failed\n"); rc = -EINVAL; goto probe_done; } seix006_ctrl->sensordata = (struct msm_camera_sensor_info*) info; rc = i2c_add_driver(&seix006_driver); if (IS_ERR_VALUE(rc)) { CDBG("seix failed i2c_add_driver\n"); kfree(seix006_ctrl); goto probe_done; } CDBG("seix pass 
i2c_add_driver\n"); seix006_ctrl->dev_mode = CAMERA_MODE_STANDBY; /* Power on sensor */ rc = seix006_sensor_on(); if (rc < 0) { CDBG("seix006_camera_probe sensor_on failed\n"); goto probe_done; } /* sensor power on - gpio */ rc = seix006_gpio_access(seix006_ctrl->sensordata->sensor_reset, 0); if (rc) { CDBG("seix006_probe CAM_RESET_N release failed\n"); goto probe_done; } udelay(5); /* (STANDBY_N = low) */ rc = seix006_gpio_access(seix006_ctrl->sensordata->sensor_pwd, 0); if (rc) { CDBG("seix006_probe STANDBY failed\n"); goto probe_done; } mdelay(2); /* CAM_RESET_N = high */ rc = seix006_gpio_access(seix006_ctrl->sensordata->sensor_reset, 1); if (rc) { CDBG("seix006_probe CAM_RESET_N release failed\n"); goto probe_done; } mdelay(20); rc = seix006_gpio_access(seix006_ctrl->sensordata->sensor_pwd, 1); if (rc) { CDBG("seix006_probe STANDBY failed\n"); goto probe_done; } mdelay(10); rc = seix006_read_vendor_data(); if (rc != 0) { CDBG("seix006_read_vendor_data failed\n"); goto probe_done; } CDBG("<<<<<<<<<< Camera vendor [%d] >>>>>>>>>>>\n", seix006_ctrl->vendor_id); CDBG("<<<<<<<<<< Camera revision[%d] >>>>>>>>>>>\n", seix006_ctrl->camera_revision); s->s_init = seix006_sensor_open; s->s_release = seix006_sensor_release; s->s_config = seix006_sensor_config; s->s_get_capture_started = seix006_get_capture_started; probe_done: if (seix006_ctrl) { seix006_sensor_off(); } CDBG("seix006_camera_probe [E] %d\n", rc); return rc; } static int32_t seix006_read_vendor_data(void) { int32_t rc = 0; uint32_t data = 0; uint8_t v_flag_otp0 = 0; uint8_t v_flag_otp1 = 0; enum seix006_otp otp_id = OTP_NO_DATA_WRITTEN; uint32_t c_nr_1 = 0; uint32_t c_nr_2 = 0; uint8_t c_nr_res = 0; uint32_t c_nb = 0; uint32_t c_pr = 0; uint32_t c_pb = 0; uint16_t inormr_k = 0x10FB; uint16_t inormb_k = 0x10AB; uint16_t iawbprer_k = 0x013C; uint16_t iawbpreb_k = 0x0241; uint16_t inormr_s = 0x10BD; uint16_t inormb_s = 0x0EE0; uint16_t iawbprer_s = 0x0146; uint16_t iawbpreb_s = 0x0239; CDBG("seix006_read_vendor_data [S]\n"); rc = seix006_i2c_read(SEIX006_USERCTRL_0000, BYTE_2, (uint8_t *) & data); seix006_ctrl->camera_revision = data & LOW_BIT_32; CDBG("ROM 0x0000: 0x%x", seix006_ctrl->camera_revision); rc += seix006_i2c_read(SEIX006_USERCTRL_0250, BYTE_4, (uint8_t *) & data); if ((data & SEIX006_V_FLAG_OTP_MASK) > 0) v_flag_otp0 = 1; CDBG("---------------0\n"); rc += seix006_i2c_read(SEIX006_USERCTRL_025C, BYTE_4, (uint8_t *) & data); if ((data & SEIX006_V_FLAG_OTP_MASK) > 0) v_flag_otp1 = 1; if (v_flag_otp0 == 0) { if (v_flag_otp1 == 0) otp_id = OTP_NO_DATA_WRITTEN; else otp_id = OTP_1; } else { if (v_flag_otp1 == 0) otp_id = OTP_0; else otp_id = OTP_1; } CDBG("---------------v_flag_otp0=%d v_flag_otp1=%d 1\n", v_flag_otp0, v_flag_otp1); switch (otp_id) { case OTP_NO_DATA_WRITTEN: CDBG("seix006_read_vendor_data failed! No valid OTP\n"); return -1; case OTP_0: rc += seix006_i2c_read(SEIX006_USERCTRL_0258, BYTE_4, (uint8_t *) & data); CDBG("seix006_read_vendor_data OTP_0\n"); break; case OTP_1: rc += seix006_i2c_read(SEIX006_USERCTRL_0264, BYTE_4, (uint8_t *) & data); CDBG("seix006_read_vendor_data OTP_1\n"); break; default: return -1; } if ((data & SEIX006_VENDOR_ID_OTP_MASK) == 0) { seix006_ctrl->vendor_id = VENDOR_ID_0; } else { seix006_ctrl->vendor_id = VENDOR_ID_1; } CDBG("%s seix006_ctrl->vendor_id=%d", __FUNCTION__, seix006_ctrl->vendor_id); if (seix006_ctrl->camera_revision != SEIX006_CAM_REV_ES1) { switch (otp_id) { case OTP_NO_DATA_WRITTEN: CDBG("seix006_read_initial_values failed! 
No valid OTP\n"); return -1; case OTP_0: rc += seix006_i2c_read(SEIX006_USERCTRL_0258, BYTE_4, (uint8_t *) & data); seix006_ctrl->calibration_data.shd_index = ((data >> 20) & SEIX006_SHD_MASK); rc += seix006_i2c_read(SEIX006_USERCTRL_0250, BYTE_4, (uint8_t *) & data); c_nr_1 = (data >> 26); rc += seix006_i2c_read(SEIX006_USERCTRL_0254, BYTE_4, (uint8_t *) & data); c_nr_2 = ((data & SEIX006_WB_DATA_OTPB0_2_MASK) << 6); break; case OTP_1: rc += seix006_i2c_read(SEIX006_USERCTRL_0264, BYTE_4, (uint8_t *) & data); seix006_ctrl->calibration_data.shd_index = ((data >> 20) & SEIX006_SHD_MASK); rc += seix006_i2c_read(SEIX006_USERCTRL_025C, BYTE_4, (uint8_t *) & data); c_nr_1 = (data >> 26); rc += seix006_i2c_read(SEIX006_USERCTRL_0260, BYTE_4, (uint8_t *) & data); c_nr_2 = ((data & SEIX006_WB_DATA_OTPB0_2_MASK) << 6); break; default: return -1; break; } c_nr_res = (c_nr_1 + c_nr_2); c_nb = ((data >> 2) & LOW_LOW_BIT_32); c_pr = ((data >> 10) & LOW_LOW_BIT_32); c_pb = ((data >> 18) & LOW_LOW_BIT_32); if (seix006_ctrl->vendor_id == VENDOR_ID_0) { seix006_ctrl->calibration_data.normr = (inormr_k * (128 + c_nr_res)) / 256; seix006_ctrl->calibration_data.normb = (inormb_k * (128 + c_nb)) / 256; seix006_ctrl->calibration_data.awbprer = (iawbprer_k * (128 + c_pr)) / 256; seix006_ctrl->calibration_data.awbpreb = (iawbpreb_k * (128 + c_pb)) / 256; } else if (seix006_ctrl->vendor_id == VENDOR_ID_1) { seix006_ctrl->calibration_data.normr = (inormr_s * (128 + c_nr_res)) / 256; seix006_ctrl->calibration_data.normb = (inormb_s * (128 + c_nb)) / 256; seix006_ctrl->calibration_data.awbprer = (iawbprer_s * (128 + c_pr)) / 256; seix006_ctrl->calibration_data.awbpreb = (iawbpreb_s * (128 + c_pb)) / 256; } switch (otp_id) { case OTP_NO_DATA_WRITTEN: CDBG("seix006_read_initial_values failed! 
No valid OTP\n"); return -1; case OTP_0: rc += seix006_i2c_read(SEIX006_USERCTRL_0250, BYTE_4, (uint8_t *) & data); seix006_ctrl->calibration_data.otp_inf = ((data >> 5) & SEIX006_AF_MASK); seix006_ctrl->calibration_data.otp_macro = ((data >> 16) & SEIX006_AF_MASK); break; case OTP_1: rc += seix006_i2c_read(SEIX006_USERCTRL_025C, BYTE_4, (uint8_t *) & data); seix006_ctrl->calibration_data.otp_inf = ((data >> 5) & SEIX006_AF_MASK); seix006_ctrl->calibration_data.otp_macro = ((data >> 16) & SEIX006_AF_MASK); break; default: return -1; } seix006_ctrl->calibration_data.af_c = (((((8 * seix006_ctrl->calibration_data.otp_macro) - (8 * seix006_ctrl->calibration_data.otp_inf)) / 6) + 6) / 8); seix006_ctrl->calibration_data.af_d = seix006_ctrl->calibration_data.af_c / 4; seix006_ctrl->calibration_data.af_e = seix006_ctrl->calibration_data.otp_inf + (seix006_ctrl->calibration_data.af_c * 8); seix006_ctrl->calibration_data.af_i = seix006_ctrl->calibration_data.otp_inf + (seix006_ctrl->calibration_data.af_c * 5); seix006_ctrl->calibration_data.af_j = seix006_ctrl->calibration_data.af_c * 2; seix006_ctrl->calibration_data.af_k = seix006_ctrl->calibration_data.af_d / 2; seix006_ctrl->calibration_data.af_l = seix006_ctrl->calibration_data.af_d * 4; seix006_ctrl->calibration_data.af_m = seix006_ctrl->calibration_data.otp_inf + (seix006_ctrl->calibration_data.af_c * 6); seix006_ctrl->calibration_data.af_g_k = 1023; seix006_ctrl->calibration_data.af_g_s = 255; } CDBG("--------------- 6\n"); return rc; } /** * Set calibration data * */ static int32_t seix006_write_calibration_data(struct seix006_calibration_data calibration_data) { int32_t ret = 0; seix006_i2c_write(SEIX006_ADJ_4A04, BYTE_2, (uint8_t *) & calibration_data.normr); seix006_i2c_write(SEIX006_ADJ_4A06, BYTE_2, (uint8_t *) & calibration_data.normb); seix006_i2c_write(SEIX006_ADJ_4A08, BYTE_2, (uint8_t *) & calibration_data.awbprer); seix006_i2c_write(SEIX006_ADJ_4A0A, BYTE_2, (uint8_t *) & calibration_data.awbpreb); seix006_i2c_write(SEIX006_AF_4876, BYTE_2, (uint8_t *) & calibration_data.otp_inf); seix006_i2c_write(SEIX006_AF_487A, BYTE_2, (uint8_t *) & calibration_data.otp_inf); seix006_i2c_write(SEIX006_AF_486C, BYTE_2, (uint8_t *) & calibration_data.af_c); seix006_i2c_write(SEIX006_AF_4870, BYTE_2, (uint8_t *) & calibration_data.af_c); seix006_i2c_write(SEIX006_AF_486E, BYTE_2, (uint8_t *) & calibration_data.af_d); seix006_i2c_write(SEIX006_AF_4872, BYTE_2, (uint8_t *) & calibration_data.af_d); if (seix006_ctrl->vendor_id == VENDOR_ID_0) { if (calibration_data.af_e > calibration_data.af_g_k) { calibration_data.af_e = calibration_data.af_g_k; } seix006_i2c_write(SEIX006_AF_4878, BYTE_2, (uint8_t *) & calibration_data.af_e); seix006_i2c_write(SEIX006_AF_4880, BYTE_2, (uint8_t *) & calibration_data.af_m); seix006_i2c_write(SEIX006_AF_495E, BYTE_2, (uint8_t *) & calibration_data.af_e); } if (seix006_ctrl->vendor_id == VENDOR_ID_1) { if (calibration_data.af_e > calibration_data.af_g_s) { calibration_data.af_e = calibration_data.af_g_s; } seix006_i2c_write(SEIX006_AF_4878, BYTE_2, (uint8_t *) & calibration_data.af_e); seix006_i2c_write(SEIX006_AF_4880, BYTE_2, (uint8_t *) & calibration_data.af_m); seix006_i2c_write(SEIX006_AF_495E, BYTE_2, (uint8_t *) & calibration_data.af_e); } seix006_i2c_write(SEIX006_AF_487E, BYTE_2, (uint8_t *) & calibration_data.otp_inf); seix006_i2c_write(SEIX006_AF_487C, BYTE_2, (uint8_t *) & calibration_data.af_i); seix006_i2c_write(SEIX006_AF_4844, BYTE_2, (uint8_t *) & calibration_data.af_j); 
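/* The af_* values programmed above and below come straight from the OTP
 * focus calibration read in seix006_read_vendor_data(). As a worked
 * example (illustrative numbers only, not taken from a real module):
 * with otp_inf = 100 and otp_macro = 400,
 *   af_c = (((8*400 - 8*100) / 6) + 6) / 8 = 50,
 * so af_d = 12, af_e = 100 + 50*8 = 500, af_i = 100 + 50*5 = 350,
 * af_j = 100, af_k = 6, af_l = 48 and af_m = 100 + 50*6 = 400.
 * af_e is clamped to af_g_k (1023) or af_g_s (255) above, depending on
 * the vendor, before being written to the sensor. */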
seix006_i2c_write(SEIX006_AF_486A, BYTE_2, (uint8_t *) & calibration_data.otp_inf); seix006_i2c_write(SEIX006_AF_4960, BYTE_2, (uint8_t *) & calibration_data.otp_inf); seix006_i2c_write(SEIX006_AF_4822, BYTE_2, (uint8_t *) & calibration_data.af_k); seix006_i2c_write(SEIX006_AF_4824, BYTE_2, (uint8_t *) & calibration_data.af_d); seix006_i2c_write(SEIX006_AF_4838, BYTE_2, (uint8_t *) & calibration_data.af_l); return ret; } /** * Set sensor mode * */ static int32_t seix006_set_sensor_mode(struct sensor_cfg_data cfg_data) { int32_t ret = 0; CDBG("seix006_set_sensor_mode [S]\n"); switch (cfg_data.mode) { case SENSOR_PREVIEW_MODE: ret = seix006_monitor_config(); CDBG("seix006_set_sensor_mode SENSOR_PREVIEW_MODE done\n"); break; case SENSOR_SNAPSHOT_MODE: ret = seix006_snapshot_config(); CDBG("seix006_set_sensor_mode SENSOR_SNAPSHOT_MODE done\n"); break; case SENSOR_RAW_SNAPSHOT_MODE: ret = seix006_raw_snapshot_config(); CDBG("seix006_set_sensor_mode SENSOR_RAW_SNAPSHOT_MODE\n"); break; case SENSOR_RAW_SNAPSHOT_START: ret = seix006_raw_snapshot_start(); CDBG("seix006_set_sensor_mode SENSOR_RAW_SNAPSHOT_START\n"); break; case SENSOR_HALF_RELEASE_MODE: if (seix006_ctrl->dev_mode == CAMERA_MODE_HALF_RELEASE) { CDBG("seix006_set_sensor_mode CAMERA_MODE_HALF_RELEASE already in half release mode\n"); /* This occurs if application calls autofocus again * without leaving half release mode. The sensor * requires us to go to monitor mode before a new * focus can be performed */ seix006_monitor_config(); /* Delay is necessary to allow a few frames to reach * the sensor so it can adapt to changed light * conditions etc */ msleep(1000); } seix006_set_focus_mode(cfg_data.cfg.focus_mode); /* check if asked to set focus mode only or start auto focus */ if (cfg_data.rs) break; ret = seix006_half_release_config(); CDBG("seix006_set_sensor_mode SENSOR_HALF_RELEASE_MODE done\n"); break; case SENSOR_RAW_RGB_STREAM_MODE: ret = seix006_raw_rgb_stream_config(); CDBG("seix006_set_sensor_mode SENSOR_RAW_RGB_STREAM_MODE\n"); break; case SENSOR_MOVIE_MODE: ret = seix006_movie_config(); CDBG("seix006_set_sensor_mode SENSOR_MOVIE_MODE done\n"); break; case SENSOR_RAW_RGB_SNAPSHOT_MODE: ret = seix006_raw_rgb_snapshot_config(); CDBG("seix006_set_sensor_mode SENSOR_RAW_RGB_SNAPSHOT_MODE\n"); break; default: CDBG("seix006_set_sensor_mode failed\n"); ret = -EINVAL; break; } CDBG("seix006_set_sensor_mode [E] ret[%d]\n", ret); return ret; } /** * Set monitor mode * */ static int32_t seix006_monitor_config() { int32_t ret = 0; uint8_t afclr = 0x01; char data; uint8_t datawb = 0; CDBG("seix006_monitor_config [S]\n"); if (seix006_ctrl->dev_mode == CAMERA_MODE_MONITOR) { CDBG("seix006_monitor_config Already in monitor mode. 
Do nothing...\n"); goto monitor_done; } ret = seix006_i2c_read(SEIX006_USERCTRL_0010, BYTE_1, &data); if (data == SEIX006_MSTS_HR_VAL || seix006_ctrl->autoflash_assist_light_on) { switch (seix006_ctrl->scene) { case SENSOR_SCENE_TWILIGHT: case SENSOR_SCENE_TWILIGHT_PORTRAIT: if (seix006_ctrl->autoflash_assist_light_on) ret = setix006_set_led_state(MSM_CAMERA_LED_OFF); ret = seix006_set_regs(seix006_hr_reset); if (ret) { CDBG("seix006_hr_reset failed\n"); return ret; } break; case SENSOR_SCENE_AUTO: case SENSOR_SCENE_SPORTS: case SENSOR_SCENE_BEACH: case SENSOR_SCENE_SNOW: case SENSOR_SCENE_PORTRAIT: if (seix006_ctrl->autoflash_assist_light_on) { ret = setix006_set_led_state(MSM_CAMERA_LED_OFF); ret = seix006_i2c_read(0x0102, BYTE_1, &datawb); if (datawb == 0) { datawb = 0x20; ret |= seix006_i2c_write(0x0102, BYTE_1, &datawb); } ret |= seix006_set_regs(seix006_hr_LED_reset); if (ret) { CDBG("seix006_hr_LED_reset failed\n"); return ret; } } else { ret = seix006_set_regs(seix006_hr_auto_reset); if (ret) { CDBG("seix006_hr__auto_reset failed\n"); return ret; } } break; default: CDBG("seix006_hr_release already set\n"); } seix006_i2c_write(0x4885, BYTE_1, &afclr); ret = seix006_check_afsts(SEIX006_AFSTS_VAL, SEIX006_POLLING_TIMES); if (ret) { CDBG("seix006_monitor_config seix006_check_afsts failed\n"); } } ret = seix006_set_regs(seix006_mode_monitor); if (ret) { CDBG("seix006_monitor_config failed\n"); return ret; } /* set frame rate mode in case overwritten by snapshot */ /* snapshot sets it to variable */ if (camera_variable_frame_rate == 0) seix006_set_regs(seix006_framerate_fixed); /* wait MODESEL_FIX to 0 */ ret = seix006_check_msts(SEIX006_MSTS_MON_VAL, SEIX006_POLLING_TIMES); if (ret) CDBG("seix006_monitor_config seix006_check_msts failed\n"); else CDBG("seix006_monitor_config Now in monitor\n"); seix006_ctrl->dev_mode = CAMERA_MODE_MONITOR; seix006_ctrl->autoflash_used = FALSE; seix006_ctrl->autoflash_assist_light_on = FALSE; seix006_ctrl->autoflash_poll_reg_x2AA = FALSE; monitor_done: if (!seix006_ctrl->init_complete) { kthread_run(init_thread, NULL, "sensor_init"); } CDBG("seix006_monitor_config [E] ret[%d]\n", ret); return ret; } /** * RGB RAW snapshot config * */ static int32_t seix006_raw_rgb_snapshot_config() { int32_t ret; uint8_t data = 0; int32_t count = 0; CDBG("seix006_raw_rgb_snapshot_config [S]\n"); /* Change to capture mode */ ret = seix006_send_reg_table(seix006_mode_capture_RGB, sizeof_seix006_mode_capture_RGB/sizeof(struct reg_entry)); if(ret) { CDBG("seix006_raw_rgb_snapshot_config send_reg_table failed\n"); return ret; } /* wait MODESEL_FIX to 2 */ ret = seix006_check_msts(SEIX006_MSTS_CAP_VAL, SEIX006_POLLING_TIMES); if(ret) { CDBG ("seix006_raw_rgb_snapshot_config seix006_check_msts failed\n"); /* Continue silently */ } while(++count <= 20) { if (count == 20) { CDBG("seix006_raw_rgb_snapshot_config move to RGB mode failed\n"); return -EFAULT; } ret = seix006_i2c_read(0x004, BYTE_1, &data); if (ret) { CDBG("seix006_raw_rgb_snapshot_config: i2c_read failed\n"); return ret; } CDBG("seix006_raw_rgb_snapshot_config: data = 0x%x\n",data); if (0 != (data & 0x10)) { /*Sensor has moved to RGB snapshot mode*/ break; } mdelay(10); } is_capture_started = 1; CDBG("seix006_raw_rgb_snapshot_config [E] ret[%d]\n",ret); seix006_ctrl->dev_mode = CAMERA_MODE_CAPTURE; return ret; } /** * RAW snapshot config * */ static int32_t seix006_raw_snapshot_config() { int ret = 0; CDBG("seix006_raw_snapshot_config [S]\n"); ret = seix006_set_regs(seix006_prepare_mode_capture); if (ret) { 
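/* Bail out before switching modes: the capture-preparation table never
 * reached the sensor, so continuing would start the capture sequence on
 * stale register settings. */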
CDBG("seix006_raw_snapshot_config failed\n"); return ret; } if (seix006_ctrl->scene == SENSOR_SCENE_TWILIGHT || seix006_ctrl->scene == SENSOR_SCENE_TWILIGHT_PORTRAIT) { ret = seix006_check_bsts(SEIX006_BSTS_VAL, SEIX006_POLLING_TIMES); if (ret) { CDBG("seix006_monitor_config seix006_check_bsts failed\n"); /* continue silently */ } } is_capture_started = 0; mdelay(250); CDBG("seix006_raw_snapshot_config [E]\n"); return ret; } /* Capture started flag*/ static int seix006_get_capture_started() { int capture_started = 1; mutex_lock(&seix006_capture_lock); #ifdef DBG_SEMC_READ_CAP_STS ret = seix006_i2c_read(0x004, BYTE_1, &data); if (ret) { CDBG("seix006_get_capture_started: i2c_read failed\n"); mutex_unlock(&seix006_capture_lock); return capture_started; } CDBG("seix006_get_capture_started: data = 0x%x\n",data); capture_started = ((data & 0x3) == 2); #else capture_started = is_capture_started; CDBG("seix006_get_capture_started %d\n",capture_started); #endif mutex_unlock(&seix006_capture_lock); return capture_started; } /** * RAW snapshot start * */ static int32_t seix006_raw_snapshot_start() { int32_t i = 0; int32_t ret = 0; CDBG("seix006_raw_snapshot_start [S]\n"); /* if flash used, and autoflash_adjust() calculation was done */ if (seix006_ctrl->autoflash_assist_light_on && !seix006_ctrl->autoflash_poll_reg_x2AA) { /** * USE calculated values computed in autoflash_adjust() */ CDBG(".. FLASH is on: sending AE/AWB offset values to register" " 0x282=%d, 0x445c=%d, 0x445e=%d\n", seix006_ctrl->autoflash_cmds[0][1], seix006_ctrl->autoflash_cmds[1][1], seix006_ctrl->autoflash_cmds[2][1]); for (i = 0; i < 3; i++) { ret = seix006_i2c_write( seix006_ctrl->autoflash_cmds[i][0], BYTE_2, (uint8_t *) &(seix006_ctrl->autoflash_cmds[i][1])); if (ret) { CDBG("Failed writing I2C register 0x%4.4x: %d", seix006_ctrl->autoflash_cmds[i][0], ret); return ret; } } } mutex_lock(&seix006_capture_lock); ret = seix006_set_regs(seix006_mode_capture); if (ret) { CDBG("seix006_raw_snapshot_start set_regs failed\n"); mutex_unlock(&seix006_capture_lock); return ret; } /* wait MODESEL_FIX to 2 */ ret = seix006_check_msts(SEIX006_MSTS_CAP_VAL, SEIX006_POLLING_TIMES); if(ret) { CDBG ("seix006_raw_snapshot_config seix006_check_msts failed\n"); /* Continue silently */ ret = 0; } seix006_ctrl->dev_mode = CAMERA_MODE_CAPTURE; is_capture_started = 1; mutex_unlock(&seix006_capture_lock); CDBG("seix006_raw_snapshot_start [E] ret[%d]\n", ret); return ret; } /** * Snapshot config - config sensor as output YUV format * */ static int32_t seix006_snapshot_config() { int32_t ret; CDBG("seix006_snapshot_config [S]\n"); /* Change to capture mode */ ret = seix006_set_regs(seix006_mode_capture_YUV); if (ret) { CDBG("snapshot_config: set_regs failed\n"); return ret; } /* wait MODESEL_FIX to 2 */ ret = seix006_check_msts(SEIX006_MSTS_CAP_VAL, SEIX006_POLLING_TIMES); if(ret) { CDBG ("seix006_snapshot_config seix006_check_msts failed\n"); /* Continue silently */ } is_capture_started = 1; CDBG("seix006_snapshot_config [E] ret[%d]\n", ret); seix006_ctrl->dev_mode = CAMERA_MODE_CAPTURE; return ret; } /** * Half release config * */ static int32_t seix006_half_release_config() { int32_t ret = 0; int onoff = 0; uint8_t datawb = 0; CDBG("seix006_half_release_config [S]\n"); /** * Only Flash LED related code, runs only if CFG_GET_AF_ASSIST_LIGHT was invoked */ if (seix006_ctrl->autoflash_used) { onoff = seix006_ctrl->autoflash_assist_light_on; ret = autoflash_enable(onoff); if(ret) { CDBG("autoflash_enable(%d) failed, ret: %d", onoff, ret); } ret = 
autoflash_strobe(onoff); if (ret) { CDBG("autoflash_strobe(%d) failed, ret: %d", onoff, ret); } /* if flash needed, then AF polling will continue sequence, set flag to TRUE */ if (onoff) { CDBG("autoflash_assist_light_on, set autoflash_poll_reg_x2AA to TRUE!\n"); seix006_ctrl->autoflash_poll_reg_x2AA = TRUE; seix006_ctrl->aeawb_timeout = jiffies + 2 * HZ; } } else { seix006_ctrl->autoflash_poll_reg_x2AA = FALSE; ret = autoflash_enable(FALSE); if (ret) { CDBG("autoflash_enable(OFF) failed, ret: %d", ret); } } switch (seix006_ctrl->scene) { case SENSOR_SCENE_TWILIGHT: case SENSOR_SCENE_TWILIGHT_PORTRAIT: if (seix006_ctrl->autoflash_assist_light_on) ret = setix006_set_led_state(MSM_CAMERA_LED_HIGH); ret = seix006_set_regs(seix006_hr_twilight); if (ret) { CDBG("seix006_hr_twilight failed\n"); return ret; } break; case SENSOR_SCENE_AUTO: case SENSOR_SCENE_SPORTS: case SENSOR_SCENE_BEACH: case SENSOR_SCENE_SNOW: case SENSOR_SCENE_LANDSCAPE: case SENSOR_SCENE_PORTRAIT: CDBG("seix006_half_release_config focus %d\n", seix006_ctrl->focus_mode); if (seix006_ctrl->focus_mode == SENSOR_FOCUS_MODE_FIXED) { if (seix006_ctrl->autoflash_assist_light_on) { ret = setix006_set_led_state(MSM_CAMERA_LED_HIGH); mdelay(1500); } CDBG("seix006_half_release_config SENSOR_FOCUS_MODE_FIXED [E]\n"); datawb = 0x00; seix006_i2c_write(0x6D77, BYTE_1, &datawb); return ret; } if (seix006_ctrl->autoflash_assist_light_on) { ret = setix006_set_led_state(MSM_CAMERA_LED_HIGH); ret |= seix006_i2c_read(0x0102, BYTE_1, &datawb); if (datawb == 0x20) { datawb = 0; ret |= seix006_i2c_write(0x0102, BYTE_1, &datawb); } ret |= seix006_set_regs(seix006_hr_LED); if (ret) { CDBG("seix006_hr_LED failed\n"); return ret; } } else { ret = seix006_set_regs(seix006_hr_auto_start); if (ret) { CDBG("seix006_hr_auto_start failed\n"); return ret; } } break; default: ret = seix006_set_regs(seix006_mode_half_release); if(ret) { CDBG("seix006_mode_half_release failed\n"); return ret; } break; } /* wait MODESEL_FIX to 1 */ ret = seix006_check_msts(SEIX006_MSTS_HR_VAL, SEIX006_POLLING_TIMES); if(ret) { CDBG("seix006_half_release_config seix006_check_msts failed\n"); return ret; } seix006_ctrl->dev_mode = CAMERA_MODE_HALF_RELEASE; CDBG("seix006_half_release_config [E]\n"); return ret; } /** * Get AF status * */ static int32_t seix006_get_af_status(enum camera_af_status* status) { int8_t data = -1; uint8_t datawb = 0; int32_t ret; CDBG("seix006_get_af_status [S]\n"); *status = SENSOR_AF_IN_PROGRESS; if (seix006_ctrl->autoflash_poll_reg_x2AA) { if (time_before(jiffies, seix006_ctrl->aeawb_timeout)) { ret = autoflash_adjust(); return ret; } else { CDBG(".. 
seix006_get_af_status: TIMED OUT waiting for AWB/AE, do AF status poll only!"); } } ret = seix006_i2c_read(0x6D77, 1, &data); if(ret) { CDBG("seix006_get_af_status failed\n"); } else { if (data != 0x02){ switch (seix006_ctrl->scene) { case SENSOR_SCENE_TWILIGHT: case SENSOR_SCENE_TWILIGHT_PORTRAIT: ret = setix006_set_led_state(MSM_CAMERA_LED_OFF); ret |= seix006_i2c_read(0x0102, BYTE_1, &datawb); if (datawb == 0x20) { datawb = 0; ret |= seix006_i2c_write(0x0102, BYTE_1, &datawb); } ret |= seix006_set_regs(seix006_hr_reset); if(ret) { CDBG("seix006_hr_reset failed\n"); return ret; } break; case SENSOR_SCENE_AUTO: case SENSOR_SCENE_SPORTS: case SENSOR_SCENE_BEACH: case SENSOR_SCENE_SNOW: case SENSOR_SCENE_PORTRAIT: if (seix006_ctrl->autoflash_assist_light_on) { ret = setix006_set_led_state(MSM_CAMERA_LED_OFF); ret |= seix006_set_regs(seix006_hr_LED_reset); if (ret) { CDBG("seix006_hr_LED_reset failed\n"); return ret; } } else { ret = seix006_set_regs(seix006_hr_auto_reset); if (ret) { CDBG("seix006_hr__auto_reset failed\n"); return ret; } } break; default: CDBG("seix006_hr_release already set\n"); } } switch (data) { case 0x00: default: *status = SENSOR_AF_FAILED; CDBG("seix006_get_af_status : SENSOR_AF_FAILED\n"); break; case 0x01: CDBG("seix006_get_af_status : SENSOR_AF_SUCCESS\n"); *status = SENSOR_AF_SUCCESS; break; case 0x02: CDBG("seix006_get_af_status : SENSOR_AF_IN_PROGRESS\n"); *status = SENSOR_AF_IN_PROGRESS; break; } } CDBG("seix006_get_af_status [E] ret[%d]\n", ret); return ret; } /** * Get EXIF data * */ static int32_t seix006_get_exif(struct cam_ctrl_exif_params_t *exif) { int32_t ret = 0; int8_t data_8 = -1; uint16_t data_16_low = -1; uint16_t data_16_high = -1; CDBG("seix006_get_exif [S]\n"); ret = seix006_i2c_read(0x00F0, BYTE_1, &data_8); if(ret) { CDBG("seix006_get_exif failed\n"); return ret; } exif->iso_speed_index = data_8; ret = seix006_i2c_read(0x00F2, BYTE_2, (int8_t*)&data_16_low); if(ret) { CDBG("seix006_get_exif failed\n"); return ret; } ret = seix006_i2c_read(0x00F4, BYTE_2, (int8_t*)&data_16_high); if(ret) { CDBG("seix006_get_exif failed\n"); return ret; } CDBG("data_16_low %d\n", data_16_low); CDBG("data_16_high %d\n", data_16_high); exif->shutter_speed = (data_16_high << SHIFT_16) | data_16_low; CDBG("Shutter speed %d us\n", exif->shutter_speed); exif->camera_revision = seix006_ctrl->camera_revision; CDBG("Camera revision %d\n", exif->camera_revision); exif->flash_fired = seix006_ctrl->autoflash_assist_light_on; CDBG("Flash fired %d\n", exif->flash_fired); CDBG("seix006_get_exif [X]\n"); return ret; } /** * Set test pattern on/off * */ static int32_t seix006_set_test_pattern(enum set_test_pattern_t mode) { int32_t ret; CDBG("seix006_set_test_pattern [S]\n"); if (mode == TEST_PATTERN_ON) { ret = seix006_set_regs(seix006_test_pattern_on); } else { ret = seix006_set_regs(seix006_test_pattern_off); } if (ret) { CDBG("seix006_set_test_pattern send_reg_table failed\n"); return ret; } CDBG("seix006_set_test_pattern [E]\n"); return ret; } /** * Set ISO mode * */ static int32_t seix006_set_iso(uint16_t iso_mode) { int32_t ret = 0; CDBG("seix006_set_iso [S] iso=%d\n", iso_mode); /* iso_mode is set to the actual ISO value, e.g. 200, 800 ... 
*/ /* if 0 or any value not supported should set the sensor to AUTO (0 is used in user space to represent AUTO) */ switch (iso_mode) { case 100: ret = seix006_set_regs(seix006_iso_100); break; case 200: ret = seix006_set_regs(seix006_iso_200); break; case 400: ret = seix006_set_regs(seix006_iso_400); break; case 800: ret = seix006_set_regs(seix006_iso_800); break; case 1600: ret = seix006_set_regs(seix006_iso_1600); break; case 0: default: ret = seix006_set_regs(seix006_iso_auto); break; } CDBG("seix006_set_iso [E]\n"); return ret; } static int32_t seix006_update_scan_range(enum camera_focus_mode focus_mode, enum camera_scene scene) { int32_t ret = 0; int update_register = 0; CDBG("seix006_update_scan_range [S]\n"); switch (scene) { case SENSOR_SCENE_AUTO: case SENSOR_SCENE_BEACH: case SENSOR_SCENE_SNOW: case SENSOR_SCENE_LANDSCAPE: case SENSOR_SCENE_PORTRAIT: case SENSOR_SCENE_DOCUMENT: if (seix006_ctrl->scan_range_reg != SEIX006_SCAN_RANGE_REG_AUTO) { seix006_ctrl->scan_range_reg = SEIX006_SCAN_RANGE_REG_AUTO; update_register = 1; } break; case SENSOR_SCENE_TWILIGHT: case SENSOR_SCENE_TWILIGHT_PORTRAIT: if (seix006_ctrl->scan_range_reg != SEIX006_SCAN_RANGE_REG_TWILIGHT) { seix006_ctrl->scan_range_reg = SEIX006_SCAN_RANGE_REG_TWILIGHT; update_register = 1; } break; case SENSOR_SCENE_SPORTS: if (seix006_ctrl->scan_range_reg != SEIX006_SCAN_RANGE_REG_SPORTS) { seix006_ctrl->scan_range_reg = SEIX006_SCAN_RANGE_REG_SPORTS; update_register = 1; } break; default: CDBG("seix006_update_scan_range scene %d not supported.\n", scene); break; } if (focus_mode == SENSOR_FOCUS_MODE_MACRO) { if (seix006_ctrl->scan_range_val != SEIX006_SCAN_RANGE_MACRO) { seix006_ctrl->scan_range_val = SEIX006_SCAN_RANGE_MACRO; update_register = 1; } } else { if (seix006_ctrl->scan_range_val != SEIX006_SCAN_RANGE_AUTO) { seix006_ctrl->scan_range_val = SEIX006_SCAN_RANGE_AUTO; update_register = 1; } } if (update_register == 1) { CDBG("seix006_update_scan_range setting reg %d to value %d\n", seix006_ctrl->scan_range_reg, seix006_ctrl->scan_range_val); ret = seix006_i2c_write(seix006_ctrl->scan_range_reg, BYTE_2, (uint8_t *) &seix006_ctrl->scan_range_val); } CDBG("seix006_update_scan_range [E]\n"); return ret; } /** * Set focus mode * */ static int32_t seix006_set_focus_mode(enum camera_focus_mode focus_mode) { int32_t ret = 0; CDBG("seix006_set_focus_mode [S]\n"); if (seix006_ctrl->focus_mode != focus_mode) { seix006_ctrl->focus_mode = focus_mode; if (seix006_ctrl->init_complete) ret = seix006_update_focus_mode(focus_mode); } CDBG("seix006_set_focus_mode [E] ret[%d]\n", ret); return ret; } /** * Update focus_mode * */ static int32_t seix006_update_focus_mode(enum camera_focus_mode focus_mode) { int32_t ret = 0; CDBG("seix006_update_focus_mode [S]\n"); switch (focus_mode) { case SENSOR_FOCUS_MODE_AUTO: case SENSOR_FOCUS_MODE_MACRO: default: CDBG("seix006_update_focus_mode setting focus mode auto\n"); ret = seix006_set_regs(seix006_focus_mode_auto); if (!ret) { ret = seix006_set_regs(seix006_primary_focus_window_auto); CDBG("seix006_update_focus_mode setting focus window auto\n"); } if (seix006_ctrl->af_4838_val != seix006_ctrl->calibration_data.af_d * 4) { seix006_ctrl->af_4838_val = seix006_ctrl->calibration_data.af_d * 4; seix006_i2c_write(SEIX006_AF_4838, BYTE_2, (uint8_t *) &seix006_ctrl->af_4838_val); } camera_continous_autofocus = 0; break; case SENSOR_FOCUS_MODE_CONTINUOUS: CDBG("seix006_update_focus_mode setting focus mode continuous\n"); ret = seix006_set_regs(seix006_focus_mode_continuous); if (!ret) { ret = 
seix006_set_regs(seix006_primary_focus_window_continuous); CDBG("seix006_update_focus_mode setting focus window continuous\n"); camera_continous_autofocus = 1; } if (seix006_ctrl->af_4838_val != seix006_ctrl->calibration_data.af_d * 2) { seix006_ctrl->af_4838_val = seix006_ctrl->calibration_data.af_d * 2; seix006_i2c_write(SEIX006_AF_4838, BYTE_2, (uint8_t *) &seix006_ctrl->af_4838_val); } break; case SENSOR_FOCUS_MODE_FIXED: CDBG("seix006_update_focus_mode focus mode not supported by camera\n"); break; } seix006_update_scan_range(focus_mode, seix006_ctrl->scene); CDBG("seix006_update_focus_mode [E]\n"); return ret; } /** * Set scene * */ static int32_t seix006_set_scene(enum camera_scene scene) { int32_t ret = 0; CDBG("seix006_set_scene [S]\n"); if (seix006_ctrl->scene != scene) { seix006_ctrl->scene = scene; if (seix006_ctrl->init_complete) { ret = seix006_update_scene(scene); } } CDBG("seix006_set_scene [E] ret[%d]\n", ret); return ret; } /** * Update scene * */ static int32_t seix006_update_scene(enum camera_scene scene) { int32_t ret = 0; CDBG("seix006_update_scene [S]\n"); CDBG("seix006_update_scene Requested scene = %d\n",scene); switch (scene) { case SENSOR_SCENE_AUTO: default: CDBG("seix006 Setting normal scene mode\n"); ret = seix006_set_regs(seix006_GEN_scene_normal); break; case SENSOR_SCENE_MACRO: CDBG("seix006 Setting macro scene mode\n"); ret = seix006_set_regs(seix006_GEN_scene_macro); break; case SENSOR_SCENE_TWILIGHT: CDBG("seix006 Setting twilight landscape scene mode\n"); ret = seix006_set_regs(seix006_GEN_scene_twilight); break; case SENSOR_SCENE_SPORTS: CDBG("seix006 Setting sports scene mode\n"); ret = seix006_set_regs(seix006_GEN_scene_sports); break; case SENSOR_SCENE_BEACH: case SENSOR_SCENE_SNOW: CDBG("seix006 Setting beach/snow scene mode\n"); ret = seix006_set_regs(seix006_GEN_scene_beach_and_snow); break; case SENSOR_SCENE_LANDSCAPE: CDBG("seix006 Setting landscape scene mode\n"); ret = seix006_set_regs(seix006_GEN_scene_landscape); break; case SENSOR_SCENE_PORTRAIT: CDBG("seix006 Setting portrait scene mode\n"); ret = seix006_set_regs(seix006_GEN_scene_portrait); break; case SENSOR_SCENE_TWILIGHT_PORTRAIT: CDBG("seix006 Setting twilight portrait scene mode\n"); ret = seix006_set_regs(seix006_GEN_scene_twilight_portrait); break; case SENSOR_SCENE_DOCUMENT: CDBG("seix006 Setting document scene mode\n"); ret = seix006_set_regs(seix006_GEN_scene_document); break; } CDBG("seix006_update_scene [E] ret[%d]\n", ret); return ret; } static int seix006_cam_is_assist_light_needed(int *result) { int32_t ret = 0; uint16_t reg_x288, reg_x26A, reg_x26C; int16_t reg_x284; int A; CDBG("seix006_cam_is_assist_light_needed [S]\n"); ret = seix006_i2c_read(0x288, BYTE_2, (int8_t *) & reg_x288); if (!ret) ret = seix006_i2c_read(0x284, BYTE_2, (int8_t *) & reg_x284); if (!ret) ret = seix006_i2c_read(0x26A, BYTE_2, (int8_t *) & reg_x26A); if (!ret) ret = seix006_i2c_read(0x26C, BYTE_2, (int8_t *) & reg_x26C); CDBG("seix006_cam_is_assist_light_needed, reg 0x288: %d, 0x284: %d, " "0x26A: %d, 0x26C: %d\n", reg_x288, reg_x284, reg_x26A, reg_x26C); if (ret) { CDBG("seix006_cam_is_assist_light_needed failed, result=%d\n", ret); return ret; } A = reg_x288 + reg_x284; CDBG("seix006_cam_is_assist_light_needed: A = (reg x288 + reg x284) = %d\n", A); seix006_ctrl->autoflash_used = TRUE; seix006_ctrl->autoflash_assist_light_on = camera_flash == FLASH_AUTO ? 
(A < SEIX006_FLASH_NEEDED_AE_MIN) : camera_flash; seix006_ctrl->autoflash_reg_x288 = reg_x288; seix006_ctrl->autoflash_reg_x284 = reg_x284; seix006_ctrl->autoflash_reg_x26A = reg_x26A; seix006_ctrl->autoflash_reg_x26C = reg_x26C; /* return TRUE or FALSE */ *result = seix006_ctrl->autoflash_assist_light_on; CDBG("seix006_cam_is_assist_light_needed: %s, [E] ret[%d]\n", (*result ? "TRUE" : "FALSE"), ret); return ret; } static int32_t seix006_set_preview_dimension(struct camera_preview_dimension_t dimension) { int32_t ret = 0; uint16_t current_width = 0; CDBG("seix006_set_preview_dimension [S]\n"); seix006_i2c_read(0x0022, BYTE_2, (uint8_t *) &current_width); if (dimension.sensor_width == 640 && dimension.sensor_height == 480) { if (current_width == 640) { CDBG("VGA preview size already set\n"); } else { CDBG("Setting VGA preview size\n"); ret = seix006_send_reg_table( seix006_vf_resolution_640x480, sizeof_seix006_vf_resolution_640x480 / sizeof(struct reg_entry)); if (ret) CDBG("Setting VGA preview size failed\n"); else ret = seix006_refresh_monitor(SEIX006_POLLING_TIMES); } } else if (dimension.sensor_width == 800 && dimension.sensor_height == 480) { if (current_width == 800) { CDBG("WVGA preview size already set\n"); } else { CDBG("Setting WVGA preview size\n"); ret = seix006_send_reg_table( seix006_vf_resolution_800x480, sizeof_seix006_vf_resolution_800x480 / sizeof(struct reg_entry)); if (ret) CDBG("Setting WVGA preview size failed\n"); else ret = seix006_refresh_monitor(SEIX006_POLLING_TIMES); } } else if (dimension.sensor_width == 1280 && dimension.sensor_height == 720) { if (current_width == 1280) { CDBG("HD720 preview size already set\n"); } else { CDBG("Setting HD720 preview size 1280x720\n"); ret = seix006_send_reg_table( seix006_vf_resolution_1280x720, sizeof_seix006_vf_resolution_1280x720 / sizeof(struct reg_entry)); if (ret) CDBG("Setting HD720 preview size failed\n"); else ret = seix006_refresh_monitor( SEIX006_POLLING_TIMES); } } seix006_i2c_read(0x0022, BYTE_2, (int8_t *) &current_width); CDBG("seix006_set_preview_dimension New sensor output is %dx%d", current_width, (current_width == 1280 ? 
720 : 480)); CDBG("seix006_set_preview_dimension [E] ret[%d]\n", ret); return ret; } static int32_t seix006_set_dimensions(struct camera_dimension_t dimension) { int32_t ret = 0; CDBG("seix006_set_dimensions [S]\n"); if (dimension.picture_width == 640 && dimension.picture_height == 480) { CDBG("Setting VGA snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_640x480, sizeof_seix006_snapshot_resolution_640x480 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 800 && dimension.picture_height == 480) { CDBG("Setting WVGA snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_800x480, sizeof_seix006_snapshot_resolution_800x480 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 176 && dimension.picture_height == 144) { CDBG("Setting 176x144 snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_176x144, sizeof_seix006_snapshot_resolution_176x144 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 320 && dimension.picture_height == 240) { CDBG("Setting 320x240 snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_320x240, sizeof_seix006_snapshot_resolution_320x240 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 352 && dimension.picture_height == 288) { CDBG("Setting 352x288 snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_352x288, sizeof_seix006_snapshot_resolution_352x288 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 800 && dimension.picture_height == 600) { CDBG("Setting 800x600 snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_800x600, sizeof_seix006_snapshot_resolution_800x600 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 1024 && dimension.picture_height == 768) { CDBG("Setting 1024x768 snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_1024x768, sizeof_seix006_snapshot_resolution_1024x768 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 1280 && dimension.picture_height == 720) { CDBG("Setting 1280x720 snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_1280x720, sizeof_seix006_snapshot_resolution_1280x720 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 1280 && dimension.picture_height == 960) { CDBG("Setting 1280x960 snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_1280x960, sizeof_seix006_snapshot_resolution_1280x960 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 1632 && dimension.picture_height == 1224) { CDBG("Setting 1632x1224 snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_1632x1224, sizeof_seix006_snapshot_resolution_1632x1224 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 1280 && dimension.picture_height == 768) { CDBG("Setting 1280x768 snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_1280x768, sizeof_seix006_snapshot_resolution_1280x768 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 1600 && dimension.picture_height == 1200) { CDBG("Setting 1600x1200 snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_1600x1200, sizeof_seix006_snapshot_resolution_1600x1200 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 1920 && dimension.picture_height == 1080) { CDBG("Setting 1920x1080 snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_1920x1080, 
sizeof_seix006_snapshot_resolution_1920x1080 / sizeof(struct reg_entry)); } else if (dimension.picture_width == 2048 && dimension.picture_height == 1536) { CDBG("Setting 2048x1536 snapshot size\n"); ret = seix006_send_reg_table( seix006_snapshot_resolution_2048x1536, sizeof_seix006_snapshot_resolution_2048x1536 / sizeof(struct reg_entry)); } else { CDBG("Setting 5MP snapshot size, input %d x %d\n", dimension.picture_width, dimension.picture_height); ret = seix006_send_reg_table( seix006_snapshot_resolution_2592x1944, sizeof_seix006_snapshot_resolution_2592x1944 / sizeof(struct reg_entry)); } if (dimension.thumbnail_width == 800 && dimension.thumbnail_height == 480) { CDBG("Setting WVGA thumbnail size\n"); ret = seix006_send_reg_table(seix006_thumbnail_size_WVGA, sizeof_seix006_thumbnail_size_WVGA / sizeof(struct reg_entry)); } else if (dimension.thumbnail_width == 640 && dimension.thumbnail_height == 480) { CDBG("Setting VGA thumbnail size\n"); ret = seix006_send_reg_table(seix006_thumbnail_size_VGA, sizeof_seix006_thumbnail_size_VGA / sizeof(struct reg_entry)); } else if (dimension.thumbnail_width == 176 && dimension.thumbnail_height == 144) { CDBG("Setting QCIF thumbnail size\n"); ret = seix006_send_reg_table(seix006_thumbnail_size_QCIF, sizeof_seix006_thumbnail_size_QCIF / sizeof(struct reg_entry)); } else if (dimension.thumbnail_width == 512 && dimension.thumbnail_height == 384) { CDBG("Setting 512x384 thumbnail size\n"); ret = seix006_send_reg_table(seix006_thumbnail_size_512x384, sizeof_seix006_thumbnail_size_512x384 / sizeof(struct reg_entry)); } else if (dimension.thumbnail_width == 512 && dimension.thumbnail_height == 288) { CDBG("Setting 512x288 thumbnail size\n"); ret = seix006_send_reg_table(seix006_thumbnail_size_512x288, sizeof_seix006_thumbnail_size_512x288 / sizeof(struct reg_entry)); } else if (dimension.thumbnail_width == 480 && dimension.thumbnail_height == 288) { CDBG("Setting 480x288 thumbnail size\n"); ret = seix006_send_reg_table(seix006_thumbnail_size_480x288, sizeof_seix006_thumbnail_size_480x288 / sizeof(struct reg_entry)); } else if (dimension.thumbnail_width == 432 && dimension.thumbnail_height == 288) { CDBG("Setting 432x288 thumbnail size\n"); ret = seix006_send_reg_table(seix006_thumbnail_size_432x288, sizeof_seix006_thumbnail_size_432x288 / sizeof(struct reg_entry)); } else if (dimension.thumbnail_width == 352 && dimension.thumbnail_height == 288) { CDBG("Setting 352x288 thumbnail size\n"); ret = seix006_send_reg_table(seix006_thumbnail_size_352x288, sizeof_seix006_thumbnail_size_352x288 / sizeof(struct reg_entry)); } else if (dimension.thumbnail_width == 400 && dimension.thumbnail_height == 240) { CDBG("Setting WQVGA thumbnail size\n"); ret = seix006_send_reg_table(seix006_thumbnail_size_WQVGA, sizeof_seix006_thumbnail_size_WQVGA / sizeof(struct reg_entry)); } else { CDBG("Setting QVGA thumbnail size, input %d x %d\n", dimension.thumbnail_width, dimension.thumbnail_height); ret = seix006_send_reg_table(seix006_thumbnail_size_QVGA, sizeof_seix006_thumbnail_size_QVGA / sizeof(struct reg_entry)); } CDBG("seix006_set_dimensions [E] ret[%d]\n", ret); return ret; } static int32_t seix006_set_framerate(uint16_t fps) { int32_t ret = 0; uint8_t register_value = 0x0; CDBG("seix006_set_framerate fps=%d [S]\n", fps); /* Block reset of focus on state change */ if (camera_continous_autofocus == 1) { seix006_i2c_write(0x4884, BYTE_1, &register_value); CDBG("seix006_set_framerate block refocus"); } if (fps == 0) { CDBG("seix006_set_framerate set variable"); 
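/* fps == 0 is the userspace convention for "variable" frame rate: the
 * sensor may stretch frame time in low light. The flag is remembered in
 * camera_variable_frame_rate so that seix006_monitor_config() does not
 * force the fixed-rate table back on when returning to monitor mode. */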
camera_variable_frame_rate = 1; ret = seix006_set_regs(seix006_framerate_variable); } else if (fps == 30) { CDBG("seix006_set_framerate set 30"); camera_variable_frame_rate = 0; ret = seix006_set_regs(seix006_framerate_30); ret |= seix006_set_regs(seix006_framerate_fixed); } else if (fps == 15) { CDBG("seix006_set_framerate set 15"); camera_variable_frame_rate = 0; ret = seix006_set_regs(seix006_framerate_15); ret |= seix006_set_regs(seix006_framerate_fixed); } else { CDBG("seix006_set_framerate error, %d fps not supported by camera\n", fps); ret = -EFAULT; } /* Start and wait for state change */ CDBG("seix006_set_framerate restart"); seix006_refresh_monitor(1000); /* Restore reset focus setting on state change */ register_value = 0x1; seix006_i2c_write(0x4884, BYTE_1, &register_value); CDBG("seix006_set_framerate [E] ret[%d]\n", ret); return ret; } /** * Access GPIO */ static int32_t seix006_gpio_access(int gpio_pin, int dir) { int rc = 0; CDBG("seix006_gpio_access [S]\n"); rc = gpio_request(gpio_pin, "seix006_camera"); if (!rc) { gpio_direction_output(gpio_pin, dir); } gpio_free(gpio_pin); CDBG("seix006_gpio_access [E] rc[%d]\n", rc); return rc; } /* Robyn specific functions, also dependant on addition to board.h, not necessary for Zeus at the moment */ #ifndef USE_ZEUS_POWER_MANAGEMENT /** * Enable a resource (GPIO or VREG) */ static int32_t seix006_resource_enable(struct msm_camera_sensor_pwr *resource) { int32_t ret; if (!resource) { CDBG("seix006_resource_enable argument is NULL.\n"); return 1; } switch (resource->type) { case MSM_CAMERA_SENSOR_PWR_GPIO: CDBG("seix006_resource_enable GPIO[%d]\n", resource->resource.number); ret = seix006_gpio_access(resource->resource.number, TRUE); break; case MSM_CAMERA_SENSOR_PWR_VREG: CDBG("seix006_resource_enable VREG[%s]\n", resource->resource.name); ret = vreg_enable(vreg_get(0, resource->resource.name)); break; default: CDBG("seix006_resource_enable invalid resource type[%d]\n", resource->type); ret = 1; break; } return ret; } /** * Disable a resource (GPIO or VREG) */ static int32_t seix006_resource_disable(struct msm_camera_sensor_pwr *resource) { int32_t ret; if (!resource) { CDBG("seix006_resource_disable argument is NULL.\n"); return 1; } switch (resource->type) { case MSM_CAMERA_SENSOR_PWR_GPIO: CDBG("seix006_resource_disable GPIO[%d]\n", resource->resource.number); ret = seix006_gpio_access(resource->resource.number, FALSE); break; case MSM_CAMERA_SENSOR_PWR_VREG: CDBG("seix006_resource_disable VREG[%s]\n", resource->resource.name); ret = vreg_disable(vreg_get(0, resource->resource.name)); break; default: CDBG("seix006_resource_disable invalid resource type[%d]\n", resource->type); ret = 1; break; } return ret; } #endif /* USE_ZEUS_POWER_MANAGEMENT */ /** * Power on sensor * */ static int32_t seix006_sensor_on(void) { #ifdef USE_ZEUS_POWER_MANAGEMENT CDBG("seix006_sensor_on [S]\n"); msm_camio_clk_enable(CAMIO_VFE_CLK); msm_camio_clk_enable(CAMIO_MDC_CLK); msm_camio_clk_enable(CAMIO_VFE_MDC_CLK); /* Output CAM_MCLK(19.2MHz) */ msm_camio_clk_rate_set(SEIX006_DEFAULT_CLOCK_RATE); CDBG("seix006_sensor_on [E]\n"); return 0; #else int32_t ret = 0; CDBG("seix006_sensor_on [S]\n"); /* Power on VCAM_SD12(GPI117 = High) 1,2V Core */ ret = seix006_resource_enable(&seix006_ctrl->sensordata->vcam_sd12); if (ret) { CDBG("seix006_sensor_on Power on VCAM_SD12 failed\n"); return ret; } mdelay(5); /* Power on VCAM_IO(PM7540/REG_GP4) 2,6V */ ret = seix006_resource_enable(&seix006_ctrl->sensordata->vcam_io); if (ret) { CDBG("seix006_sensor_on Power on 
VCAM_IO failed\n"); return ret; } mdelay(5); /* Power on VCAM_SA28(PM7540/RFRX2) */ ret = seix006_resource_enable(&seix006_ctrl->sensordata->vcam_sa28); if (ret) { CDBG("seix006_sensor_on Power on VCAM_SA28 failed\n"); return ret; } mdelay(5); /* Power on AF(PM7540/RFTX) */ ret = seix006_resource_enable(&seix006_ctrl->sensordata->vcam_af30); if (ret) { CDBG("seix006_sensor_on Power on VCAM_AF failed\n"); return ret; } mdelay(5); CDBG("seix006_sensor_on [E] ret[%d]\n", ret); return ret; #endif /* USE_ZEUS_POWER_MANAGEMENT */ } /** * Initialize sensor * */ static int32_t seix006_sensor_init(void) { int32_t ret = 0; CDBG("seix006_sensor_init [S] %d\n",seix006_ctrl->camera_revision); if (seix006_ctrl->camera_revision == SEIX006_CAM_REV_ES1) { ret = seix006_set_regs(seix006_GEN_period_1_ES1); if (ret) { CDBG("seix006_set_regs seix006_GEN_Period_1_ES1 failed\n"); } } else if (seix006_ctrl->camera_revision == SEIX006_CAM_REV_ES2) { ret = seix006_set_regs(seix006_GEN_period_1_ES2); mdelay(5); if (ret) { CDBG("seix006_set_regs seix006_GEN_Period_1_ES2 failed\n"); } ret = seix006_write_calibration_data(seix006_ctrl->calibration_data); if (ret) { CDBG("seix006_write_calibration_data failed\n"); } } seix006_ctrl->dev_mode = CAMERA_MODE_MONITOR; seix006_ctrl->scene = SENSOR_SCENE_AUTO; seix006_ctrl->focus_mode = SENSOR_FOCUS_MODE_AUTO; seix006_ctrl->init_complete = 0; seix006_ctrl->af_4838_val = seix006_ctrl->calibration_data.af_l; seix006_ctrl->scan_range_reg = SEIX006_SCAN_RANGE_REG_AUTO; seix006_ctrl->scan_range_val = SEIX006_SCAN_RANGE_AUTO; /* set continous autofocus flag to off by default */ camera_continous_autofocus = 0; CDBG("seix006_sensor_init [E] ret[%d]\n", ret); return ret; } /** * Power off sensor * */ static void seix006_sensor_off(void) { #ifdef USE_ZEUS_POWER_MANAGEMENT int32_t ret = 0; CDBG("seix006_sensor_off [S]\n"); /* Power off = LOW */ ret = seix006_gpio_access(seix006_ctrl->sensordata->sensor_pwd, 0); if(ret) { CDBG("seix006_sensor_off Power off STANDBY failed\n"); } mdelay(5); /* CAM_RESET_N = LOW */ ret = seix006_gpio_access(seix006_ctrl->sensordata->sensor_reset, 0); if(ret) { CDBG("seix006_sensor_off CAM_RESET_N release failed\n"); } mdelay(20); /* Output CAM_MCLK(0MHz) */ msm_camio_clk_rate_set(0); msm_camio_clk_disable(CAMIO_VFE_CLK); msm_camio_clk_disable(CAMIO_MDC_CLK); msm_camio_clk_disable(CAMIO_VFE_MDC_CLK); mdelay(5); CDBG("seix006_sensor_off [E]\n"); #else int32_t ret = 0; CDBG("seix006_sensor_off [S]\n"); /* Power off STANDBY (GPIO2 = LOW) */ ret = seix006_resource_disable(&seix006_ctrl->sensordata->standby); if (ret) { CDBG("seix006_sensor_off Power off STANDBY failed\n"); } mdelay(5); /* CAM_RESET_N release(GPIO0 = LOW) */ ret = seix006_gpio_access(seix006_ctrl->sensordata->sensor_reset, FALSE); if (ret) { CDBG("seix006_sensor_off CAM_RESET_N release failed\n"); } msleep(20); /* Output CAM_MCLK(0MHz) */ msm_camio_clk_rate_set(0); msm_camio_clk_disable(CAMIO_VFE_CLK); mdelay(5); /* Power off VCAM_AF */ ret = seix006_resource_disable(&seix006_ctrl->sensordata->vcam_af30); if (ret) { CDBG("seix006_sensor_off Power off VCAM_AF failed\n"); } mdelay(5); /* Power off VCAM_L2(GPIO43 = LOW) */ ret = seix006_resource_disable(&seix006_ctrl->sensordata->vcam_sa28); if (ret) { CDBG("seix006_sensor_off Power off VCAM_L2 failed\n"); } msleep(250); /* Power off VCAM_IO(PM7540/REG_RFRX2) */ ret = seix006_resource_disable(&seix006_ctrl->sensordata->vcam_io); if (ret) { CDBG("seix006_sensor_off Power off VCAM_IO failed\n"); } msleep(150); /* Power off VCAM_SD(GPIO142 = LOW) */ ret 
= seix006_resource_disable(&seix006_ctrl->sensordata->vcam_sd12); if (ret) { CDBG("seix006_sensor_off Power off VCAM_SD failed\n"); } CDBG("seix006_sensor_off [E]\n"); #endif /* USE_ZEUS_POWER_MANAGEMENT */ } /** * Open Processing. * */ static int seix006_sensor_open(const struct msm_camera_sensor_info *data) { #ifdef USE_ZEUS_POWER_MANAGEMENT int32_t ret = 0; CDBG("%s <--\n", __FUNCTION__); down(&seix006_sem); CDBG("seix006_open [S]\n"); if (seix006_ctrl->opened) { CDBG("seix006_open already opened\n"); ret = 0; goto open_done; } ret = seix006_sensor_on(); if (ret) { CDBG("seix006_open sensor_on failed\n"); goto open_done; } msm_camio_camif_pad_reg_reset(); /* Moved from vfe_31_init(...) */ mdelay(40); /* STANDBY low then high before reg settings */ ret = seix006_gpio_access(seix006_ctrl->sensordata->sensor_pwd, 0); if(ret) { CDBG("seix006_sensor_open STANDBY failed\n"); goto open_done; } mdelay(5); /* CAM_RESET_N release(GPI89 = High) */ ret = seix006_gpio_access(seix006_ctrl->sensordata->sensor_reset, 1); if (ret) { CDBG("seix006_sensor_open CAM_RESET_N release failed\n"); goto open_done; } mdelay(20); ret = seix006_sensor_init(); if (ret) { CDBG("seix006_open sensor_init failed\n"); goto open_done; } mdelay(5); /** * End of Init sensor */ /* STANDBY (GPIO0 = High) */ ret = seix006_gpio_access(seix006_ctrl->sensordata->sensor_pwd, 1); mdelay(10); if(ret) { CDBG("seix006_sensor_open STANDBY failed\n"); /* goto open_done; */ } open_done: if(ret) { CDBG("seix006_sensor_going to be off\n"); seix006_sensor_off(); ret = -EFAULT; } else { seix006_ctrl->opened = 1; } up(&seix006_sem); CDBG("%s <-- ret[%d]\n",__FUNCTION__, ret); return ret; #else int32_t ret = 0; down(&seix006_sem); CDBG("seix006_open [S]\n"); if (seix006_ctrl->opened) { CDBG("seix006_open already opened\n"); ret = 0; goto open_done; } ret = seix006_sensor_on(); if (ret) { CDBG("seix006_open sensor_on failed\n"); goto open_done; } msm_camio_clk_enable(CAMIO_VFE_CLK); msm_camio_clk_enable(CAMIO_MDC_CLK); msm_camio_clk_enable(CAMIO_VFE_MDC_CLK); /* Output CAM_MCLK(19.2MHz) */ msm_camio_clk_rate_set(SEIX006_DEFAULT_CLOCK_RATE); msm_camio_camif_pad_reg_reset(); msleep(40); /* CAM_RESET_N release(GPI89 = High) */ ret = seix006_gpio_access(seix006_ctrl->sensordata->sensor_reset, TRUE); if (ret) { CDBG("seix006_sensor_open CAM_RESET_N release failed\n"); goto open_done; } msleep(20); ret = seix006_sensor_init(); if (ret) { CDBG("seix006_open sensor_init failed\n"); goto open_done; } mdelay(5); /* STANDBY (GPIO0 = High) */ ret = seix006_resource_enable(&seix006_ctrl->sensordata->standby); mdelay(10); if (ret) { CDBG("seix006_sensor_open STANDBY failed\n"); goto open_done; } open_done: if (ret) { seix006_sensor_off(); ret = -EFAULT; } else { seix006_ctrl->opened = 1; } up(&seix006_sem); CDBG("seix006_open [E]\n"); return ret; #endif /* USE_ZEUS_POWER_MANAGEMENT */ } /** * Probe Processing. 
* */ static int seix006_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret = 0; CDBG("seix006_i2c_probe [S]\n"); if(!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { CDBG("seix006_probe i2c_check_functionality failed\n"); kfree(seix006_ctrl->sensorw); seix006_ctrl->sensorw = NULL; return -ENOTSUPP; } seix006_ctrl->sensorw = kzalloc(sizeof(struct seix006_work), GFP_KERNEL); if(NULL == seix006_ctrl->sensorw) { CDBG("seix006_probe sensorw failed\n"); kfree(seix006_ctrl->sensorw); seix006_ctrl->sensorw = NULL; return -ENOMEM; } i2c_set_clientdata(client, seix006_ctrl->sensorw); seix006_ctrl->client = client; CDBG("seix006_seix006_ctrl->client->addr %d\n", seix006_ctrl->client->addr); /* Initialize the MSM_CAMI2C Chip */ init_waitqueue_head(&seix006_wait_queue); CDBG("seix006_probe [E] ret[%d]\n", ret); return ret; } /** * Remove Processing. * */ static int __exit seix006_i2c_remove(struct i2c_client *client) { struct seix006_work_t *sensorw = i2c_get_clientdata(client); CDBG("seix006_remove [S]\n"); free_irq(client->irq, sensorw); seix006_ctrl->client = NULL; kfree(sensorw); CDBG("seix006_remove [E]\n"); return 0; } /** * Single or Sequential Write to Random Location. * */ static int32_t seix006_i2c_write(uint16_t address, uint8_t data_length, const uint8_t * data) { uint8_t i2c_package[SEIX006_I2C_MAX_BYTES + SEIX006_I2C_WRITE_FOOTPRINT]; uint8_t package_length = 0; if (data_length > SEIX006_I2C_MAX_BYTES) { CDBG("seix006_i2c_write length[%d] failed\n", data_length); return -EFAULT; } /* Add 2 byte Register Address in switched endian */ seix006_add_bytes_in_switched_endian(i2c_package, 0, SEIX006_I2C_WRITE_FOOTPRINT, (uint8_t *) & address); if (data != NULL) { memcpy(i2c_package + SEIX006_I2C_WRITE_FOOTPRINT, data, data_length); /* Add Data */ } package_length = SEIX006_I2C_WRITE_FOOTPRINT + data_length; if (i2c_master_send(seix006_ctrl->client, i2c_package, package_length) != package_length) { CDBG("seix006_i2c_write i2c_master_send failed\n"); return -EIO; } return 0; } /** * Single or Sequential Read to Random Location. 
* */ static int32_t seix006_i2c_read(uint16_t address, uint8_t length, uint8_t * data) { int32_t ret = 0; CDBG("seix006_i2c_read [S]\n"); if (!data) { CDBG("seix006_i2c_read *data failed\n"); return -EFAULT; } ret = seix006_i2c_write(address, 0, NULL); if (ret < 0) { CDBG("seix006_i2c_read i2c_write failed\n"); return ret; } if (i2c_master_recv(seix006_ctrl->client, data, length) < 0) { CDBG("seix006_i2c_read i2c_master_recv failed\n"); return -EIO; } CDBG("seix006_i2c_read [E]\n"); return 0; } static int32_t seix006_send_reg_table(const struct reg_entry *table, uint32_t tables_to_send) { int32_t ret = 0; uint32_t table_index = 0; uint32_t data_index = 0; uint32_t j; uint8_t data_buffer[SEIX006_I2C_MAX_BYTES] = {0}; uint32_t grouped_data = 0; /* The amount of continued data to group with one i2c write */ uint8_t grouped_tables = 0; CDBG("seix006_send_reg_table [tables: %d] [S]\n", tables_to_send); if(!table || tables_to_send == 0) { CDBG("seix006_send_reg_table *table tables_to_send[%d] failed\n", tables_to_send); return -EFAULT; } /* Will loop thru entire table and send all data, if possible data will be grouped */ while(table_index < tables_to_send) { grouped_data = 0; grouped_tables = 0; switch(table[table_index].reg_bits) { case REG_BITS_8: { /* Gather the amount of data we can send in one i2c stream, if next address is one incremented the i2c write will continue to write at next address automaticly when sending more data then one register can hold. Only group 8 bits registers since we can't be sure that when writing 32 bits register that is is really 32 bits or 4 grouped 8 bits.*/ while( ((table_index + grouped_tables + 1) < tables_to_send) && /* Only if there are more tables to be found */ (grouped_data < (SEIX006_I2C_MAX_BYTES - 1)) && /* Make sure we only send MAX allowed bytes */ ((table[table_index + grouped_tables + 1].address) == /* Only if next tables address is */ (table[table_index + grouped_tables].address + 1)) && /* one incremented address as current */ (table[table_index + grouped_tables + 1].reg_bits == REG_BITS_8) /* Only if next table is the same amount of bit holder */ ) { grouped_data++; grouped_tables++; } /* Load all tables, default to one */ for(j = 0; j < grouped_tables + 1; j++) data_buffer[j] = table[table_index + j].data & LOW_LOW_BIT_32; } break; case REG_BITS_16: while( ((table_index + grouped_tables + 1) < tables_to_send) && /* Only if there are more tables to be found */ (grouped_data < (SEIX006_I2C_MAX_BYTES - 2)) && /* Make sure we only send MAX allowed bytes */ ((table[table_index + grouped_tables + 1].address) == /* Only if next tables address is */ (table[table_index + grouped_tables].address + 2)) && /* one incremented address as current */ (table[table_index + grouped_tables + 1].reg_bits == REG_BITS_16) /* Only if next table is the same amount of bit holder */ ) { grouped_data +=2; grouped_tables++; } /* Load all tables, default to one */ for (j = 0, data_index = 0; j < grouped_tables + 1; j++, data_index += 2) { data_buffer[data_index] = (table[table_index + j].data & LOW_HIGH_BIT_32) >> SHIFT_8; data_buffer[data_index + 1] = (table[table_index + j].data & LOW_LOW_BIT_32); } grouped_data++; /* To hold the "extra" byte compared to REG_BITS_8 */ break; case REG_BITS_32: while( ((table_index + grouped_tables + 1) < tables_to_send) && /* Only if there are more tables to be found */ (grouped_data < (SEIX006_I2C_MAX_BYTES - 4)) && /* Make sure we only send MAX allowed bytes */ ((table[table_index + grouped_tables + 1].address) == /* Only if next tables 
address is */ (table[table_index + grouped_tables].address + 4)) && /* one incremented address as current */ (table[table_index + grouped_tables + 1].reg_bits == REG_BITS_32) /* Only if next table is the same amount of bit holder */ ) { grouped_data += 4; grouped_tables++; } /* Load all tables, default to one*/ for(j = 0, data_index = 0; j < grouped_tables + 1; j++, data_index += 4) { data_buffer[data_index] = (table[table_index + j].data & HIGH_HIGH_BIT_32) >> SHIFT_24; data_buffer[data_index + 1] = (table[table_index + j].data & HIGH_LOW_BIT_32) >> SHIFT_16; data_buffer[data_index + 2] = (table[table_index + j].data & LOW_HIGH_BIT_32) >> SHIFT_8; data_buffer[data_index + 3] = (table[table_index + j].data & LOW_LOW_BIT_32); } grouped_data += 3; /* To hold the "three extra" bytes compared to REG_BITS_8 */ break; default: CDBG("seix006_send_reg_table wrong reg_bits\n"); break; } ret = seix006_i2c_write(table[table_index].address, grouped_data + 1, &data_buffer[0]); if(ret) { CDBG("seix006_send_reg_table i2c_write failed\n"); break; } mdelay(4); table_index += grouped_tables + 1; } CDBG("seix006_send_reg_table [E] ret[%d]\n", ret); return ret; } static int32_t seix006_check_msts(uint8_t value, uint32_t timeout) { char data; uint32_t nRetry = 0; int32_t ret = 0; while (nRetry < timeout) { ret = seix006_i2c_read(SEIX006_USERCTRL_0010, BYTE_1, &data); if (ret) { CDBG("seix006_check_msts: i2c_read failed\n"); return ret; } CDBG("seix006_check_msts: %X\n", data); if ( data == value ) { CDBG("seix006_check_msts match %X\n", value); return 0; } mdelay(SEIX006_POLLING_PERIOD); nRetry++; } CDBG("seix006_check_msts: timeout \n"); return 1; } static int32_t seix006_refresh_monitor(uint32_t timeout) { char data; uint32_t nRetry = 0; int32_t ret = 0; /* tell sensor to restart monitor mode after settings has been changed */ ret = seix006_send_reg_table(seix006_MONI_REFRESH_F, sizeof_seix006_MONI_REFRESH_F/sizeof(struct reg_entry)); if (ret) { CDBG("seix006_refresh_monitor: i2c_write failed\n"); return ret; } /* 0x0012 returns to 0 after the sensor has finished the restart */ while (nRetry < timeout) { ret = seix006_i2c_read(0x0012, BYTE_1, (int8_t *) &data); if (ret) { CDBG("seix006_refresh_monitor: i2c_read failed\n"); return ret; } if (data == 0x0) { CDBG("seix006_refresh_monitor done\n"); return 0; } mdelay(SEIX006_POLLING_PERIOD); nRetry++; } CDBG("seix006_refresh_monitor: timeout \n"); return 1; } static int32_t seix006_check_bsts(uint8_t value, uint32_t timeout) { char data; uint32_t nRetry = 0; int32_t ret = 0; while (nRetry < timeout) { ret = seix006_i2c_read(SEIX006_USERCTRL_0004, BYTE_1, &data); if (ret) { CDBG("seix006_check_bsts: i2c_read failed\n"); return ret; } CDBG("seix006_check_bsts: %X\n", data); if (data == value) { CDBG("seix006_check_bsts match %X\n", value); return 0; } mdelay(SEIX006_POLLING_PERIOD); nRetry++; } CDBG("seix006_check_bsts: timeout \n"); return 1; } static int32_t seix006_check_afsts(uint8_t value, uint32_t timeout) { char data; uint32_t nRetry = 0; int32_t ret = 0; while (nRetry < timeout) { ret = seix006_i2c_read(SEIX006_SOUT_6D76, BYTE_1, &data); if (ret) { CDBG("seix006_check_afsts: i2c_read failed\n"); return ret; } CDBG("seix006_check_afsts: %X\n", data); if (data == value) { CDBG("seix006_check_afsts match %X\n", value); return 0; } mdelay(SEIX006_POLLING_PERIOD); nRetry++; } CDBG("seix006_check_afsts: timeout \n"); return 1; } static int autoflash_enable(int onoff) { const uint16_t cmds[3][2] = { {0x027D, 0x05}, {0x028C, 0x01}, {0x0097, 0x02} }; uint8_t zero; 
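/* cmds[] above lists the three byte-wide registers that arm the
 * auto-flash measurement sequence; the values are taken from the vendor
 * register tables, so the per-register meaning is an assumption here.
 * Enabling writes the listed value to each register, disabling writes
 * zero to all three. */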
int nret, i; nret = -1; zero = 0; for (i = 0; i < 3; i++) { if (onoff) { /* enable */ nret = seix006_i2c_write(cmds[i][0], BYTE_1, (uint8_t *) & (cmds[i][1])); } else { /* disable - all are 0 */ nret = seix006_i2c_write(cmds[i][0], BYTE_1, &zero); } /* break if ioctl fails */ if (nret) { break; } } return nret; } static int autoflash_adjust() { static const uint16_t CMDS[3][2] = { {0x0282, 0xffff}, {0x445c, 0xffff}, {0x445e, 0xffff} }; uint8_t reg_x2AA; uint16_t reg_x28A; int16_t reg_x286; uint16_t reg_x26E; uint16_t reg_x270; uint16_t reg_6C26; uint16_t reg_6C28; int nret, AEO, SR, SB; nret = seix006_i2c_read(0x02AA, BYTE_1, &reg_x2AA); if (nret) { CDBG("autoflash_adjust: Failed reading I2C reg 0x02AA: %d\n", nret); return nret; } if (reg_x2AA != 0) { CDBG("autoflash_adjust: Camera is still busy, reg. 0x02AA=%d\n", reg_x2AA); return 0; } reg_x28A = 0; reg_x286 = 0; reg_x26E = 0; reg_x270 = 0; reg_6C26 = 0; reg_6C28 = 0; AEO = 0; SR = 0; SB = 0; CDBG("autoflash_adjust: Camera finished AE adjust, reg. 0x02AA=%d\n", reg_x2AA); if ((nret = seix006_i2c_read(0x28A, BYTE_2, (uint8_t *) & reg_x28A)) || (nret = seix006_i2c_read(0x286, BYTE_2, (uint8_t *) & reg_x286)) || (nret = seix006_i2c_read(0x26E, BYTE_2, (uint8_t *) & reg_x26E)) || (nret = seix006_i2c_read(0x270, BYTE_2, (uint8_t *) & reg_x270)) || (nret = seix006_i2c_read(0x6C26, BYTE_2, (uint8_t *) & reg_6C26)) || (nret = seix006_i2c_read(0x6C28, BYTE_2, (uint8_t *) & reg_6C28))) { CDBG("autoflash_adjust: Failed reading camera I2C register: %d", nret); return nret; } CDBG(".. set autoflash_poll_reg_x2AA to FALSE!\n"); seix006_ctrl->autoflash_poll_reg_x2AA = FALSE; memcpy(&(seix006_ctrl->autoflash_cmds[0][0]), CMDS, sizeof(CMDS)); { int A, B, C, D, E, F, CR, CB, R1, R2, KL; int K, RF, RM; A = seix006_ctrl->autoflash_reg_x288 + seix006_ctrl->autoflash_reg_x284; C = seix006_ctrl->autoflash_reg_x26A; D = seix006_ctrl->autoflash_reg_x26C; B = reg_x28A + reg_x286; E = reg_x26E; F = reg_x270; CR = reg_6C26; CB = reg_6C28; R1 = 4000; R2 = 10000; KL = 500; if ((B - A) >= 5000) { AEO = -2317 - reg_x286; } else { AEO = -AEO_table[(B - A) / 10] - reg_x286; } RF = (1000 * (B - A)) / A; RM = ((373 * RF * RF) - (4137 * RF)) / 1000 + 224; if (RM < R1) { K = 1000; } if (RM > R1 && RM < R2) { K = 1000 - (((RM - R1) * (1000 - KL)) / (R2 - R1)); } if (RM > R2) { K = KL; } SR = ((K * (1172 - ((C * 334) / 1000))) / 1000) - 200; SB = 1788 - ((D * 345) / 1000); } /* these are stored in device header structure, and reused in .._raw_snapshot_config */ seix006_ctrl->autoflash_cmds[0][1] = AEO; seix006_ctrl->autoflash_cmds[1][1] = SR; seix006_ctrl->autoflash_cmds[2][1] = SB; return 0; } static int autoflash_strobe(int onoff) { int nret; uint8_t tmp; tmp = (onoff ? 
0x09 : 0x08); nret = seix006_i2c_write(0x069, BYTE_1, &tmp); return nret; } static int __seix006_probe(struct platform_device *pdev) { printk(KERN_INFO "%s \n", __func__); return msm_camera_drv_start(pdev, seix006_camera_probe); } static struct platform_driver msm_camera_driver = { .probe = __seix006_probe, .driver = { .name = SEIX006_MSM_CAMERA_NAME, .owner = THIS_MODULE, }, }; static int __init seix006_init(void) { printk(KERN_INFO "%s \n", __func__); return platform_driver_register(&msm_camera_driver); } module_init(seix006_init); static int init_thread(void* data) { int32_t ret = 0; CDBG("Camera init thread started\n"); down(&seix006_sem); if ( seix006_ctrl->vendor_id == VENDOR_ID_0 ) { if (seix006_ctrl->camera_revision == SEIX006_CAM_REV_ES1) { ret = seix006_set_regs(seix006_vendor_0_period_2_ES1); if(ret) { CDBG("seix006_set_regs seix006_KM0_period_2_ES1 failed\n"); } ret = seix006_set_regs(seix006_vendor_0_period_3_ES1); if(ret) { CDBG("seix006_set_regs seix006_vendor_0_period_3_ES1 failed\n"); } } else if (seix006_ctrl->camera_revision == SEIX006_CAM_REV_ES2) { if (seix006_ctrl->calibration_data.shd_index == 1) { ret = seix006_set_regs(seix006_vendor_0_SHD_1_ES2); } if(ret) { CDBG("seix006_set_regs seix006_vendor_0_SHD_1_ES2 failed\n"); } if (seix006_ctrl->calibration_data.shd_index == 2) { ret = seix006_set_regs(seix006_vendor_0_SHD_2_ES2); } if(ret) { CDBG("seix006_set_regs seix006_vendor_0_SHD_2_ES2 failed\n"); } if (seix006_ctrl->calibration_data.shd_index == 3) { ret = seix006_set_regs(seix006_vendor_0_SHD_3_ES2); } if(ret) { CDBG("seix006_set_regs seix006_vendor_0_SHD_3_ES2 failed\n"); } ret = seix006_set_regs(seix006_vendor_0_period_2_ES2); if(ret) { CDBG("seix006_set_regs seix006_vendor_0_period_2_ES2 failed\n"); } ret = seix006_set_regs(seix006_vendor_0_period_3_ES2); if(ret) { CDBG("seix006_set_regs seix006_vendor_0_period_3_ES2 failed\n"); } CDBG("seix006_send_reg_table seix006_vendor_0_period_3_ES2 GOT IT\n"); } } else if (seix006_ctrl->vendor_id == VENDOR_ID_1) { if ( seix006_ctrl->camera_revision == SEIX006_CAM_REV_ES1) { ret = seix006_set_regs(seix006_vendor_1_period_2_ES1); if(ret) { CDBG("seix006_set_regs seix006_vendor_1_period_2_ES1 failed\n"); } ret = seix006_set_regs(seix006_vendor_1_period_3_ES1); if(ret) { CDBG("seix006_set_regs seix006_vendor_1_period_3_ES1 failed\n"); } } else if (seix006_ctrl->camera_revision == SEIX006_CAM_REV_ES2) { if (seix006_ctrl->calibration_data.shd_index == 1) { ret = seix006_set_regs(seix006_vendor_1_SHD_1_ES2); } if(ret) { CDBG("seix006_set_regs seix006_vendor_1_SHD_1_ES2 failed\n"); } if (seix006_ctrl->calibration_data.shd_index == 2) { ret = seix006_set_regs(seix006_vendor_1_SHD_2_ES2); } if(ret) { CDBG("seix006_set_regs seix006_vendor_1_SHD_2_ES2 failed\n"); } if (seix006_ctrl->calibration_data.shd_index == 3) { ret = seix006_set_regs(seix006_vendor_1_SHD_3_ES2); } if(ret) { CDBG("seix006_set_regs seix006_vendor_1_SHD_3_ES2 failed\n"); } ret = seix006_set_regs(seix006_vendor_1_period_2_ES2); if(ret) { CDBG("seix006_set_regs seix006_vendor_1_period_2_ES2 failed\n"); } ret = seix006_set_regs(seix006_vendor_1_period_3_ES2); if(ret) { CDBG("seix006_set_regs seix006_vendor_1_period_3_ES2 failed\n"); } } } ret = seix006_update_scene(seix006_ctrl->scene); seix006_ctrl->init_complete = 1; up(&seix006_sem); CDBG("Camera init thread end"); return ret; } static int32_t seix006_raw_rgb_stream_config() { int32_t ret; /* config the rgb stream regitser */ uint8_t data = 0x04; /* RGB mode */ CDBG("seix006_raw_rgb_stream_config [S]\n"); 
seix006_i2c_write(0x001E, BYTE_1, &data); ret = seix006_send_reg_table(seix006_mode_movie, sizeof_seix006_mode_movie/sizeof(struct reg_entry)); if(ret) { CDBG("seix006_raw_rgb_stream_config failed\n"); return ret; } /* wait MODESEL_FIX to 3 */ ret = seix006_check_msts(SEIX006_MSTS_MOV_VAL, SEIX006_POLLING_TIMES); if(ret) { CDBG("seix006_raw_rgb_stream_config " "seix006_check_msts failed\n"); /* Continue silently */ } seix006_ctrl->dev_mode = CAMERA_MODE_MOVIE; CDBG("seix006_raw_rgb_stream_config [E] ret[%d]\n", ret); return ret; } static int32_t seix006_movie_config() { int32_t ret = 0; CDBG("seix006_movie_config [S]\n"); if (seix006_ctrl->dev_mode == CAMERA_MODE_MOVIE) { CDBG("seix006_movie_config Already in movie mode. Do nothing...\n"); goto movie_done; } ret = seix006_send_reg_table(seix006_mode_movie, sizeof_seix006_mode_movie/sizeof(struct reg_entry)); if(ret) { CDBG("seix006_movie_config failed\n"); return ret; } /* wait MODESEL_FIX to 3 */ ret = seix006_check_msts(SEIX006_MSTS_MOV_VAL, SEIX006_POLLING_TIMES); if(ret) { CDBG("seix006_movie_config seix006_check_msts failed\n"); /* Continue silently */ } seix006_ctrl->dev_mode = CAMERA_MODE_MOVIE; movie_done: if (!seix006_ctrl->init_complete) { kthread_run(init_thread, NULL, "sensor_init"); } CDBG("seix006_movie_config [E] ret[%d]\n",ret); return ret; } static long seix006_set_exposure_compensation(int8_t value) { long rc = 0; uint8_t register_value = 0; CDBG("seix006_set_exposure_compensation [S] value=%d\n",value); if(value < -2 || value > 2) { CDBG("seix006_set_exposure_compensation %d is an invalid value, has to be between -2 to +2 \n",value); return -EINVAL; } register_value = value * 3; rc = seix006_i2c_write(0x0080, BYTE_1, &register_value); CDBG("seix006_set_exposure_compensation [E] ret[%ld]\n",rc); return rc; } static long seix006_set_brightness(uint8_t value) { long rc = 0; CDBG("seix006_set_brightness [S] %d\n",value); rc = seix006_i2c_write(0x0060, BYTE_1, &value); CDBG("seix006_set_brightness [E] ret[%ld]\n",rc); return rc; } static long seix006_set_img_quality(uint8_t value) { long rc = 0; CDBG("seix006_set_img_quality [S] %d\n",value); switch(value) { case 0: CDBG("seix006_set_img_quality STANDARD\n"); break; case 1: CDBG("seix006_set_img_quality FINE\n"); break; case 2: CDBG("seix006_set_img_quality SUPER FINE\n"); break; default: CDBG("seix006_set_img_quality Incorrect value\n"); rc = -EINVAL; } if(rc == 0) { rc = seix006_i2c_write(0x204, BYTE_1, &value); } CDBG("seix006_set_img_quality [E] ret[%ld]\n",rc); return rc; } static long seix006_set_contrast(uint8_t value) { long rc = 0; CDBG("seix006_set_contrast [S] %d\n",value); rc = seix006_i2c_write(0x0061, BYTE_1, &value); CDBG("seix006_set_contrast [E] ret[%ld]\n",rc); return rc; } static long seix006_set_sharpness(uint8_t value) { long rc = 0; CDBG("seix006_set_sharpness [S] %d\n",value); rc = seix006_i2c_write(0x0062, BYTE_1, &value); CDBG("seix006_set_sharpness [E] ret[%ld]\n",rc); return rc; } static long seix006_set_wb(int wb_type) { uint8_t data = 0; long rc = 0; CDBG("seix006_set_wb [S] wb_type %d\n",wb_type); switch (wb_type) { case CAMERA_WBTYPE_AUTO: data = 0x20; break; case CAMERA_WBTYPE_INCANDESCENT: data = 0x28; break; case CAMERA_WBTYPE_FLUORESCENT: data = 0x27; break; case CAMERA_WBTYPE_DAYLIGHT: data = 0x24; break; case CAMERA_WBTYPE_CLOUDY_DAYLIGHT: data = 0x26; break; default: data = 0x20; } rc = seix006_i2c_write(0x0102, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x0102 data %d ret[%ld]\n",data, rc); return rc; } rc = 
seix006_i2c_write(0x0107, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x0107 data %d ret[%ld]\n",data, rc); return rc; } rc = seix006_i2c_write(0x010C, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x010C data %d ret[%ld]\n",data, rc); return rc; } rc = seix006_i2c_write(0x0111, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x0111 data %d ret[%ld]\n",data, rc); return rc; } rc = seix006_i2c_write(0x0116, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x0116 data %d ret[%ld]\n",data, rc); return rc; } rc = seix006_i2c_write(0x011B, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x011B data %d ret[%ld]\n",data, rc); return rc; } rc = seix006_i2c_write(0x0120, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x0120 data %d ret[%ld]\n",data, rc); return rc; } rc = seix006_i2c_write(0x0125, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x0125 data %d ret[%ld]\n",data, rc); return rc; } return rc; } static long seix006_set_exposure_mode(int exp_mode) { uint8_t data = 0; uint8_t mode = 0; long rc = 0; CDBG("seix006_set_exposure_mode [S] exp_mode %d\n",exp_mode); switch (exp_mode) { case CAMERA_AUTO_EXPOSURE_FRAME_AVG: mode = 0x0; break; case CAMERA_AUTO_EXPOSURE_CENTER_WEIGHTED: mode = 0x1; break; case CAMERA_AUTO_EXPOSURE_SPOT_METERING: mode = 0x2; break; default: return rc; } data = 0x40 + mode; rc = seix006_i2c_write(0x0104, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x0102 data %d ret[%ld]\n",data, rc); return rc; } data = 0x4C + mode; rc = seix006_i2c_write(0x0109, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x0107 data %d ret[%ld]\n",data, rc); return rc; } data = 0x40 + mode; rc = seix006_i2c_write(0x010E, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x010C data %d ret[%ld]\n",data, rc); return rc; } data = 0x40 + mode; rc = seix006_i2c_write(0x0113, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x0111 data %d ret[%ld]\n",data, rc); return rc; } data = mode; rc = seix006_i2c_write(0x0118, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x0116 data %d ret[%ld]\n",data, rc); return rc; } data = 0xCC + mode; rc = seix006_i2c_write(0x011D, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x011B data %d ret[%ld]\n",data, rc); return rc; } data = mode; rc = seix006_i2c_write(0x0122, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x0120 data %d ret[%ld]\n",data, rc); return rc; } data = 0xC0 + mode; rc = seix006_i2c_write(0x0127, BYTE_1, &data); if(rc) { CDBG("seix006_set_wb [X] Failed 0x0125 data %d ret[%ld]\n",data, rc); return rc; } return rc; } static long seix006_set_effect(int mode, int effect) { uint8_t data = 0; long rc = 0; CDBG("seix006_set_effect [S] effect %d\n",effect); switch (effect) { case CAMERA_EFFECT_OFF: data = 0x0; break; case CAMERA_EFFECT_MONO: data = 0x4; break; case CAMERA_EFFECT_NEGATIVE: data = 0x2; break; case CAMERA_EFFECT_SOLARIZE: data = 0x1; break; case CAMERA_EFFECT_SEPIA: data = 0x3; break; default: data = 0x0; } rc = seix006_i2c_write(0x005F, BYTE_1, &data); CDBG("seix006_set_effect [E] data %d ret[%ld]\n",data, rc); return rc; } static long seix006_set_flash(uint8_t flash_mode) { long rc = 0; CDBG("seix006_set_flash [S] data %d ret[%ld]\n", flash_mode, rc); camera_flash = flash_mode; return rc; } static long setix006_set_led_state(uint8_t led_state) { long rc = 0; CDBG("setix006_set_led_state [S] led state %d\n", led_state); if (seix006_ctrl->sensordata->flash_data->flash_src == NULL) return -ENODEV; rc = 
msm_camera_flash_set_led_state(seix006_ctrl->sensordata->flash_data,
		MSM_CAMERA_LED_OFF);
	if (rc)
		pr_err("setix006_set_led_state ret[%ld]\n", rc);
	rc = msm_camera_flash_set_led_state(seix006_ctrl->sensordata->flash_data,
		led_state);
	CDBG("setix006_set_led_state [E] led state %d ret[%ld]\n", led_state, rc);
	return rc;
}
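The three polling helpers above (seix006_check_msts, _bsts, _afsts) are the same loop with different registers: read a status byte, compare against the expected value, sleep SEIX006_POLLING_PERIOD, and give up after `timeout` tries. A minimal self-contained sketch of that pattern; read_reg() and sleep_ms() are hypothetical stand-ins for the driver's I2C read and mdelay(), and the simulated register values are invented for the demo:

#include <stdint.h>
#include <stdio.h>

/* Simulated register: reports busy (0x00) twice, then ready (0x03) --
 * a stand-in for seix006_i2c_read() polling a status register. */
static int read_reg(uint16_t addr, uint8_t *out)
{
    static int calls;
    (void)addr;
    *out = (++calls < 3) ? 0x00 : 0x03;
    return 0;
}

static void sleep_ms(unsigned ms) { (void)ms; /* mdelay() stand-in */ }

/* Poll until the register reads 'expected': 0 on match, 1 on timeout,
 * negative on bus error -- mirroring the seix006_check_* helpers. */
static int poll_until(uint16_t addr, uint8_t expected,
                      unsigned retries, unsigned period_ms)
{
    uint8_t data;
    for (unsigned i = 0; i < retries; i++) {
        int ret = read_reg(addr, &data);
        if (ret)
            return ret;            /* I/O error: bail out immediately */
        if (data == expected)
            return 0;              /* status matched */
        sleep_ms(period_ms);
    }
    return 1;                      /* timed out */
}

int main(void)
{
    printf("poll result: %d\n", poll_until(0x0010, 0x03, 20, 1));
    return 0;
}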
gpl-2.0
Declipe/TrinityCore
src/server/scripts/Kalimdor/Maraudon/boss_noxxion.cpp
12
4290
/*
 * This file is part of the TrinityCore Project. See AUTHORS file for Copyright information
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/* ScriptData
SDName: Boss_Noxxion
SD%Complete: 100
SDComment:
SDCategory: Maraudon
EndScriptData */

#include "ScriptMgr.h"
#include "maraudon.h"
#include "ScriptedCreature.h"

enum Spells
{
    SPELL_TOXICVOLLEY = 21687,
    SPELL_UPPERCUT    = 22916
};

class boss_noxxion : public CreatureScript
{
public:
    boss_noxxion() : CreatureScript("boss_noxxion") { }

    CreatureAI* GetAI(Creature* creature) const override
    {
        return GetMaraudonAI<boss_noxxionAI>(creature);
    }

    struct boss_noxxionAI : public ScriptedAI
    {
        boss_noxxionAI(Creature* creature) : ScriptedAI(creature)
        {
            Initialize();
        }

        void Initialize()
        {
            ToxicVolleyTimer = 7000;
            UppercutTimer = 16000;
            AddsTimer = 19000;
            InvisibleTimer = 15000; //Too much too low?
            Invisible = false;
        }

        uint32 ToxicVolleyTimer;
        uint32 UppercutTimer;
        uint32 AddsTimer;
        uint32 InvisibleTimer;
        bool Invisible;

        void Reset() override
        {
            Initialize();
        }

        void JustEngagedWith(Unit* /*who*/) override { }

        void SummonAdds(Unit* victim)
        {
            if (Creature* Add = DoSpawnCreature(13456, float(irand(-7, 7)), float(irand(-7, 7)), 0, 0, TEMPSUMMON_TIMED_OR_CORPSE_DESPAWN, 90s))
                Add->AI()->AttackStart(victim);
        }

        void UpdateAI(uint32 diff) override
        {
            if (Invisible && InvisibleTimer <= diff)
            {
                //Become visible again
                me->SetFaction(FACTION_MONSTER);
                me->RemoveUnitFlag(UNIT_FLAG_UNINTERACTIBLE);
                //Noxxion model
                me->SetDisplayId(11172);
                Invisible = false;
                //me->m_canMove = true;
            }
            else if (Invisible)
            {
                InvisibleTimer -= diff;
                //Do nothing while invisible
                return;
            }

            //Return since we have no target
            if (!UpdateVictim())
                return;

            //ToxicVolleyTimer
            if (ToxicVolleyTimer <= diff)
            {
                DoCastVictim(SPELL_TOXICVOLLEY);
                ToxicVolleyTimer = 9000;
            }
            else
                ToxicVolleyTimer -= diff;

            //UppercutTimer
            if (UppercutTimer <= diff)
            {
                DoCastVictim(SPELL_UPPERCUT);
                UppercutTimer = 12000;
            }
            else
                UppercutTimer -= diff;

            //AddsTimer
            if (!Invisible && AddsTimer <= diff)
            {
                //Interrupt any spell casting
                //me->m_canMove = true;
                me->InterruptNonMeleeSpells(false);
                me->SetFaction(FACTION_FRIENDLY);
                me->SetUnitFlag(UNIT_FLAG_UNINTERACTIBLE);
                // Invisible Model
                me->SetDisplayId(11686);
                SummonAdds(me->GetVictim());
                SummonAdds(me->GetVictim());
                SummonAdds(me->GetVictim());
                SummonAdds(me->GetVictim());
                SummonAdds(me->GetVictim());
                Invisible = true;
                InvisibleTimer = 15000;
                AddsTimer = 40000;
            }
            else
                AddsTimer -= diff;

            DoMeleeAttackIfReady();
        }
    };
};

void AddSC_boss_noxxion()
{
    new boss_noxxion();
}
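Every ability in boss_noxxionAI::UpdateAI() runs on the same countdown idiom: if the accumulated frame delta `diff` reaches the timer, act and re-arm; otherwise subtract the delta and wait. A reduced sketch of just that idiom, in plain C with a toy fire() callback; the 7000/9000 ms values are the ones used for SPELL_TOXICVOLLEY above, while the 500 ms frame step is invented for the demo:

#include <stdint.h>
#include <stdio.h>

/* One expired-or-decrement countdown, as used for ToxicVolleyTimer,
 * UppercutTimer and AddsTimer in UpdateAI(diff). */
static uint32_t tick_timer(uint32_t timer, uint32_t diff,
                           uint32_t rearm, void (*fire)(void))
{
    if (timer <= diff) {
        fire();              /* cast the spell / summon adds */
        return rearm;        /* re-arm for the next cycle */
    }
    return timer - diff;     /* still counting down */
}

static void toxic_volley(void) { puts("SPELL_TOXICVOLLEY"); }

int main(void)
{
    uint32_t timer = 7000;   /* initial value from Initialize() */
    for (int frame = 0; frame < 20; frame++)
        timer = tick_timer(timer, 500, 9000, toxic_volley); /* 500 ms/frame */
    return 0;
}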
gpl-2.0
rhuitl/uClinux
user/tcsh/tw.comp.c
12
14340
/* $Header: /src/pub/tcsh/tw.comp.c,v 1.31 1998/10/25 15:10:49 christos Exp $ */ /* * tw.comp.c: File completion builtin */ /*- * Copyright (c) 1980, 1991 The Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "sh.h" RCSID("$Id: tw.comp.c,v 1.31 1998/10/25 15:10:49 christos Exp $") #include "tw.h" #include "ed.h" #include "tc.h" /* #define TDEBUG */ struct varent completions; static int tw_result __P((Char *, Char **)); static Char **tw_find __P((Char *, struct varent *, int)); static Char *tw_tok __P((Char *)); static bool tw_pos __P((Char *, int)); static void tw_pr __P((Char **)); static int tw_match __P((Char *, Char *)); static void tw_prlist __P((struct varent *)); static Char *tw_dollar __P((Char *,Char **, int, Char *, int, const char *)); /* docomplete(): * Add or list completions in the completion list */ /*ARGSUSED*/ void docomplete(v, t) Char **v; struct command *t; { register struct varent *vp; register Char *p; USE(t); v++; p = *v++; if (p == 0) tw_prlist(&completions); else if (*v == 0) { vp = adrof1(strip(p), &completions); if (vp) tw_pr(vp->vec), xputchar('\n'); } else set1(strip(p), saveblk(v), &completions, VAR_READWRITE); } /* end docomplete */ /* douncomplete(): * Remove completions from the completion list */ /*ARGSUSED*/ void douncomplete(v, t) Char **v; struct command *t; { USE(t); unset1(v, &completions); } /* end douncomplete */ /* tw_prlist(): * Pretty print a list of variables */ static void tw_prlist(p) struct varent *p; { register struct varent *c; if (setintr) #ifdef BSDSIGS (void) sigsetmask(sigblock((sigmask_t) 0) & ~sigmask(SIGINT)); #else /* BSDSIGS */ (void) sigrelse(SIGINT); #endif /* BSDSIGS */ for (;;) { while (p->v_left) p = p->v_left; x: if (p->v_parent == 0) /* is it the header? 
*/ return; xprintf("%s\t", short2str(p->v_name)); tw_pr(p->vec); xputchar('\n'); if (p->v_right) { p = p->v_right; continue; } do { c = p; p = p->v_parent; } while (p->v_right == c); goto x; } } /* end tw_prlist */ /* tw_pr(): * Pretty print a completion, adding single quotes around * a completion argument and collapsing multiple spaces to one. */ static void tw_pr(cmp) Char **cmp; { bool sp, osp; Char *ptr; for (; *cmp; cmp++) { xputchar('\''); for (osp = 0, ptr = *cmp; *ptr; ptr++) { sp = Isspace(*ptr); if (sp && osp) continue; xputchar(*ptr); osp = sp; } xputchar('\''); if (cmp[1]) xputchar(' '); } } /* end tw_pr */ /* tw_find(): * Find the first matching completion. * For commands we only look at names that start with - */ static Char ** tw_find(nam, vp, cmd) Char *nam; register struct varent *vp; int cmd; { register Char **rv; for (vp = vp->v_left; vp; vp = vp->v_right) { if (vp->v_left && (rv = tw_find(nam, vp, cmd)) != NULL) return rv; if (cmd) { if (vp->v_name[0] != '-') continue; if (Gmatch(nam, &vp->v_name[1]) && vp->vec != NULL) return vp->vec; } else if (Gmatch(nam, vp->v_name) && vp->vec != NULL) return vp->vec; } return NULL; } /* end tw_find */ /* tw_pos(): * Return true if the position is within the specified range */ static bool tw_pos(ran, wno) Char *ran; int wno; { Char *p; if (ran[0] == '*' && ran[1] == '\0') return 1; for (p = ran; *p && *p != '-'; p++) continue; if (*p == '\0') /* range == <number> */ return wno == getn(ran); if (ran == p) /* range = - <number> */ return wno <= getn(&ran[1]); *p++ = '\0'; if (*p == '\0') /* range = <number> - */ return getn(ran) <= wno; else /* range = <number> - <number> */ return (getn(ran) <= wno) && (wno <= getn(p)); } /* end tw_pos */ /* tw_tok(): * Return the next word from string, unquoteing it. */ static Char * tw_tok(str) Char *str; { static Char *bf = NULL; if (str != NULL) bf = str; /* skip leading spaces */ for (; *bf && Isspace(*bf); bf++) continue; for (str = bf; *bf && !Isspace(*bf); bf++) { if (ismeta(*bf)) return INVPTR; *bf = *bf & ~QUOTE; } if (*bf != '\0') *bf++ = '\0'; return *str ? str : NULL; } /* end tw_tok */ /* tw_match(): * Match a string against the pattern given. * and return the number of matched characters * in a prefix of the string. */ static int tw_match(str, pat) Char *str, *pat; { Char *estr; int rv = Gnmatch(str, pat, &estr); #ifdef TDEBUG xprintf("Gnmatch(%s, ", short2str(str)); xprintf("%s, ", short2str(pat)); xprintf("%s) = %d [%d]\n", short2str(estr), rv, estr - str); #endif /* TDEBUG */ return (int) (rv ? 
estr - str : -1); } /* tw_result(): * Return what the completion action should be depending on the * string */ static int tw_result(act, pat) Char *act, **pat; { int looking; static Char* res = NULL; if (res != NULL) xfree((ptr_t) res), res = NULL; switch (act[0] & ~QUOTE) { case 'X': looking = TW_COMPLETION; break; case 'S': looking = TW_SIGNAL; break; case 'a': looking = TW_ALIAS; break; case 'b': looking = TW_BINDING; break; case 'c': looking = TW_COMMAND; break; case 'C': looking = TW_PATH | TW_COMMAND; break; case 'd': looking = TW_DIRECTORY; break; case 'D': looking = TW_PATH | TW_DIRECTORY; break; case 'e': looking = TW_ENVVAR; break; case 'f': looking = TW_FILE; break; #ifdef COMPAT case 'p': #endif /* COMPAT */ case 'F': looking = TW_PATH | TW_FILE; break; case 'g': looking = TW_GRPNAME; break; case 'j': looking = TW_JOB; break; case 'l': looking = TW_LIMIT; break; case 'n': looking = TW_NONE; break; case 's': looking = TW_SHELLVAR; break; case 't': looking = TW_TEXT; break; case 'T': looking = TW_PATH | TW_TEXT; break; case 'v': looking = TW_VARIABLE; break; case 'u': looking = TW_USER; break; case 'x': looking = TW_EXPLAIN; break; case '$': *pat = res = Strsave(&act[1]); (void) strip(res); return(TW_VARLIST); case '(': *pat = res = Strsave(&act[1]); if ((act = Strchr(res, ')')) != NULL) *act = '\0'; (void) strip(res); return TW_WORDLIST; case '`': res = Strsave(act); if ((act = Strchr(&res[1], '`')) != NULL) *++act = '\0'; if (didfds == 0) { /* * Make sure that we have some file descriptors to * play with, so that the processes have at least 0, 1, 2 * open */ (void) dcopy(SHIN, 0); (void) dcopy(SHOUT, 1); (void) dcopy(SHDIAG, 2); } if ((act = globone(res, G_APPEND)) != NULL) { xfree((ptr_t) res), res = NULL; *pat = res = Strsave(act); xfree((ptr_t) act); return TW_WORDLIST; } return TW_ZERO; default: stderror(ERR_COMPCOM, short2str(act)); return TW_ZERO; } switch (act[1] & ~QUOTE) { case '\0': return looking; case ':': *pat = res = Strsave(&act[2]); (void) strip(res); return looking; default: stderror(ERR_COMPCOM, short2str(act)); return TW_ZERO; } } /* end tw_result */ /* tw_dollar(): * Expand $<n> args in buffer */ static Char * tw_dollar(str, wl, nwl, buffer, sep, msg) Char *str, **wl; int nwl; Char *buffer; int sep; const char *msg; { Char *sp, *bp = buffer, *ebp = &buffer[MAXPATHLEN]; for (sp = str; *sp && *sp != sep && bp < ebp;) if (sp[0] == '$' && sp[1] == ':' && Isdigit(sp[sp[2] == '-' ? 
3 : 2])) { int num, neg = 0; sp += 2; if (*sp == '-') { neg = 1; sp++; } for (num = *sp++ - '0'; Isdigit(*sp); num += 10 * num + *sp++ - '0') continue; if (neg) num = nwl - num - 1; if (num >= 0 && num < nwl) { Char *ptr; for (ptr = wl[num]; *ptr && bp < ebp - 1; *bp++ = *ptr++) continue; } } else *bp++ = *sp++; *bp = '\0'; if (*sp++ == sep) return sp; stderror(ERR_COMPMIS, sep, msg, short2str(str)); return --sp; } /* end tw_dollar */ /* tw_complete(): * Return the appropriate completion for the command * * valid completion strings are: * p/<range>/<completion>/[<suffix>/] positional * c/<pattern>/<completion>/[<suffix>/] current word ignore pattern * C/<pattern>/<completion>/[<suffix>/] current word with pattern * n/<pattern>/<completion>/[<suffix>/] next word * N/<pattern>/<completion>/[<suffix>/] next-next word */ int tw_complete(line, word, pat, looking, suf) Char *line, **word, **pat; int looking, *suf; { Char buf[MAXPATHLEN + 1], **vec, *ptr; Char *wl[MAXPATHLEN/6]; static Char nomatch[2] = { (Char) ~0, 0x00 }; int wordno, n; copyn(buf, line, MAXPATHLEN); /* find the command */ if ((wl[0] = tw_tok(buf)) == NULL || wl[0] == INVPTR) return TW_ZERO; /* * look for hardwired command completions using a globbing * search and for arguments using a normal search. */ if ((vec = tw_find(wl[0], &completions, (looking == TW_COMMAND))) == NULL) return looking; /* tokenize the line one more time :-( */ for (wordno = 1; (wl[wordno] = tw_tok(NULL)) != NULL && wl[wordno] != INVPTR; wordno++) continue; if (wl[wordno] == INVPTR) /* Found a meta character */ return TW_ZERO; /* de-activate completions */ #ifdef TDEBUG { int i; for (i = 0; i < wordno; i++) xprintf("'%s' ", short2str(wl[i])); xprintf("\n"); } #endif /* TDEBUG */ /* if the current word is empty move the last word to the next */ if (**word == '\0') { wl[wordno] = *word; wordno++; } wl[wordno] = NULL; #ifdef TDEBUG xprintf("\r\n"); xprintf(" w#: %d\n", wordno); xprintf("line: %s\n", short2str(line)); xprintf(" cmd: %s\n", short2str(wl[0])); xprintf("word: %s\n", short2str(*word)); xprintf("last: %s\n", wordno - 2 >= 0 ? short2str(wl[wordno-2]) : "n/a"); xprintf("this: %s\n", wordno - 1 >= 0 ? short2str(wl[wordno-1]) : "n/a"); #endif /* TDEBUG */ for (;vec != NULL && (ptr = vec[0]) != NULL; vec++) { Char ran[MAXPATHLEN+1],/* The pattern or range X/<range>/XXXX/ */ com[MAXPATHLEN+1],/* The completion X/XXXXX/<completion>/ */ *pos = NULL; /* scratch pointer */ int cmd, sep; /* the command and separator characters */ if (ptr[0] == '\0') continue; #ifdef TDEBUG xprintf("match %s\n", short2str(ptr)); #endif /* TDEBUG */ switch (cmd = ptr[0]) { case 'N': pos = (wordno - 3 < 0) ? nomatch : wl[wordno - 3]; break; case 'n': pos = (wordno - 2 < 0) ? nomatch : wl[wordno - 2]; break; case 'c': case 'C': pos = (wordno - 1 < 0) ? nomatch : wl[wordno - 1]; break; case 'p': break; default: stderror(ERR_COMPINV, CGETS(27, 1, "command"), cmd); return TW_ZERO; } sep = ptr[1]; if (!Ispunct(sep)) { stderror(ERR_COMPINV, CGETS(27, 2, "separator"), sep); return TW_ZERO; } ptr = tw_dollar(&ptr[2], wl, wordno, ran, sep, CGETS(27, 3, "pattern")); if (ran[0] == '\0') /* check for empty pattern (disallowed) */ { stderror(ERR_COMPINC, cmd == 'p' ? 
CGETS(27, 4, "range") : CGETS(27, 3, "pattern"), ""); return TW_ZERO; } ptr = tw_dollar(ptr, wl, wordno, com, sep, CGETS(27, 5, "completion")); if (*ptr != '\0') { if (*ptr == sep) *suf = ~0; else *suf = *ptr; } else *suf = '\0'; #ifdef TDEBUG xprintf("command: %c\nseparator: %c\n", cmd, sep); xprintf("pattern: %s\n", short2str(ran)); xprintf("completion: %s\n", short2str(com)); xprintf("suffix: "); switch (*suf) { case 0: xprintf("*auto suffix*\n"); break; case ~0: xprintf("*no suffix*\n"); break; default: xprintf("%c\n", *suf); break; } #endif /* TDEBUG */ switch (cmd) { case 'p': /* positional completion */ #ifdef TDEBUG xprintf("p: tw_pos(%s, %d) = ", short2str(ran), wordno - 1); xprintf("%d\n", tw_pos(ran, wordno - 1)); #endif /* TDEBUG */ if (!tw_pos(ran, wordno - 1)) continue; return tw_result(com, pat); case 'N': /* match with the next-next word */ case 'n': /* match with the next word */ case 'c': /* match with the current word */ case 'C': #ifdef TDEBUG xprintf("%c: ", cmd); #endif /* TDEBUG */ if ((n = tw_match(pos, ran)) < 0) continue; if (cmd == 'c') *word += n; return tw_result(com, pat); default: return TW_ZERO; /* Cannot happen */ } } *suf = '\0'; return TW_ZERO; } /* end tw_complete */
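tw_pos() above accepts five range spellings: `*`, `N`, `-N`, `N-` and `N-M`. A plain-C restatement of that parse, with strchr() and atoi() standing in for tcsh's Char strings and getn(); behavior for the five accepted forms matches the original, and edge cases beyond them are not modeled:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Does word number 'wno' fall inside 'ran'?  Accepts "*", "N",
 * "-N", "N-" and "N-M", mirroring tw_pos(). */
static int pos_match(const char *ran, int wno)
{
    const char *dash = strchr(ran, '-');

    if (ran[0] == '*' && ran[1] == '\0')
        return 1;                          /* any position */
    if (dash == NULL)
        return wno == atoi(ran);           /* "N": exact position */
    if (dash == ran)
        return wno <= atoi(ran + 1);       /* "-N": up to N */
    if (dash[1] == '\0')
        return atoi(ran) <= wno;           /* "N-": from N on */
    return atoi(ran) <= wno && wno <= atoi(dash + 1);  /* "N-M" */
}

int main(void)
{
    printf("%d %d %d\n", pos_match("*", 7),     /* 1 */
                         pos_match("2-4", 3),   /* 1 */
                         pos_match("-2", 5));   /* 0 */
    return 0;
}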
gpl-2.0
capturePointer/codelite
codelite_terminal/my_config.cpp
12
2348
#include "my_config.h" #include <wx/font.h> #include <wx/settings.h> #include <wx/stdpaths.h> #include <wx/filename.h> MyConfig::MyConfig() : wxFileConfig("", "", (wxStandardPaths::Get().GetUserDataDir() + "/codelite-terminal.ini"), "", wxCONFIG_USE_LOCAL_FILE) { wxFileName fn(wxStandardPaths::Get().GetUserDataDir(), "codelite-terminal.ini"); fn.Mkdir(wxS_DIR_DEFAULT, wxPATH_MKDIR_FULL); } MyConfig::~MyConfig() { Save(); } wxPoint MyConfig::GetTerminalPosition() const { wxPoint pt(100, 100); Read("frame_position_x", &pt.x); Read("frame_position_y", &pt.y); return pt; } wxSize MyConfig::GetTerminalSize() const { wxSize size(600, 400); Read("frame_size_width", &size.x); Read("frame_size_height", &size.y); return size; } void MyConfig::SetTerminalPosition(const wxPoint& pt) { Write("frame_position_x", pt.x); Write("frame_position_y", pt.y); } void MyConfig::SetTerminalSize(const wxSize& size) { Write("frame_size_width", size.x); Write("frame_size_height", size.y); } void MyConfig::Save() { Flush(); } wxColour MyConfig::GetBgColour() const { wxString col; if(Read("bg_colour", &col) && !col.IsEmpty()) { wxColour colour(col); return colour; } return *wxBLACK; } wxColour MyConfig::GetFgColour() const { wxString col; if(Read("fg_colour", &col) && !col.IsEmpty()) { wxColour colour(col); return colour; } return *wxWHITE; } void MyConfig::SetBgColour(const wxColour& col) { Write("bg_colour", col.GetAsString()); } void MyConfig::SetFgColour(const wxColour& col) { Write("fg_colour", col.GetAsString()); } wxFont MyConfig::GetFont() const { wxFont defaultFont = wxSystemSettings::GetFont(wxSYS_ANSI_FIXED_FONT); defaultFont.SetFamily(wxFONTFAMILY_TELETYPE); // read the attributes wxString facename; int pointSize; Read("font_facename", &facename, defaultFont.GetFaceName()); Read("font_size", &pointSize, defaultFont.GetPointSize()); wxFont f(wxFontInfo(pointSize).FaceName(facename).Family(wxFONTFAMILY_TELETYPE)); return f; } void MyConfig::SetFont(const wxFont& font) { Write("font_facename", font.GetFaceName()); Write("font_size", font.GetPointSize()); }
gpl-2.0
119/aircam-openwrt
build_dir/toolchain-arm_v5te_gcc-linaro_uClibc-0.9.32_eabi/uClibc-0.9.32/libc/inet/rpc/svc.c
12
13025
/* * Sun RPC is a product of Sun Microsystems, Inc. and is provided for * unrestricted use provided that this legend is included on all tape * media and as a part of the software program in whole or part. Users * may copy or modify Sun RPC without charge, but are not authorized * to license or distribute it to anyone else except as part of a product or * program developed by the user. * * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE. * * Sun RPC is provided with no support and without any obligation on the * part of Sun Microsystems, Inc. to assist in its use, correction, * modification or enhancement. * * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC * OR ANY PART THEREOF. * * In no event will Sun Microsystems, Inc. be liable for any lost revenue * or profits or other special, indirect and consequential damages, even if * Sun has been advised of the possibility of such damages. * * Sun Microsystems, Inc. * 2550 Garcia Avenue * Mountain View, California 94043 */ /* * svc.c, Server-side remote procedure call interface. * * There are two sets of procedures here. The xprt routines are * for handling transport handles. The svc routines handle the * list of service routines. * * Copyright (C) 1984, Sun Microsystems, Inc. */ #define __FORCE_GLIBC #include <features.h> #include <errno.h> #include <unistd.h> #include <string.h> #include "rpc_private.h" #include <rpc/svc.h> #include <rpc/pmap_clnt.h> #include <sys/poll.h> /* used by svc_[max_]pollfd */ /* used by svc_fdset */ #ifdef __UCLIBC_HAS_THREADS__ #define xports (*(SVCXPRT ***)&RPC_THREAD_VARIABLE(svc_xports_s)) #else static SVCXPRT **xports; #endif #define NULL_SVC ((struct svc_callout *)0) #define RQCRED_SIZE 400 /* this size is excessive */ /* The services list Each entry represents a set of procedures (an rpc program). The dispatch routine takes request structs and runs the appropriate procedure. */ struct svc_callout { struct svc_callout *sc_next; rpcprog_t sc_prog; rpcvers_t sc_vers; void (*sc_dispatch) (struct svc_req *, SVCXPRT *); }; #ifdef __UCLIBC_HAS_THREADS__ #define svc_head (*(struct svc_callout **)&RPC_THREAD_VARIABLE(svc_head_s)) #else static struct svc_callout *svc_head; #endif /* *************** SVCXPRT related stuff **************** */ /* Activate a transport handle. */ void xprt_register (SVCXPRT *xprt) { register int sock = xprt->xp_sock; register int i; if (xports == NULL) { xports = (SVCXPRT **) malloc (_rpc_dtablesize () * sizeof (SVCXPRT *)); if (xports == NULL) /* Don´t add handle */ return; } if (sock < _rpc_dtablesize ()) { xports[sock] = xprt; if (sock < FD_SETSIZE) FD_SET (sock, &svc_fdset); /* Check if we have an empty slot */ for (i = 0; i < svc_max_pollfd; ++i) if (svc_pollfd[i].fd == -1) { svc_pollfd[i].fd = sock; svc_pollfd[i].events = (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND); return; } ++svc_max_pollfd; svc_pollfd = realloc (svc_pollfd, sizeof (struct pollfd) * svc_max_pollfd); if (svc_pollfd == NULL) /* Out of memory */ return; svc_pollfd[svc_max_pollfd - 1].fd = sock; svc_pollfd[svc_max_pollfd - 1].events = (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND); } } libc_hidden_def(xprt_register) /* De-activate a transport handle. 
*/ void xprt_unregister (SVCXPRT *xprt) { register int sock = xprt->xp_sock; register int i; if ((sock < _rpc_dtablesize ()) && (xports[sock] == xprt)) { xports[sock] = (SVCXPRT *) 0; if (sock < FD_SETSIZE) FD_CLR (sock, &svc_fdset); for (i = 0; i < svc_max_pollfd; ++i) if (svc_pollfd[i].fd == sock) svc_pollfd[i].fd = -1; } } libc_hidden_def(xprt_unregister) /* ********************** CALLOUT list related stuff ************* */ /* Search the callout list for a program number, return the callout struct. */ static struct svc_callout * svc_find (rpcprog_t prog, rpcvers_t vers, struct svc_callout **prev) { register struct svc_callout *s, *p; p = NULL_SVC; for (s = svc_head; s != NULL_SVC; s = s->sc_next) { if ((s->sc_prog == prog) && (s->sc_vers == vers)) goto done; p = s; } done: *prev = p; return s; } /* Add a service program to the callout list. The dispatch routine will be called when a rpc request for this program number comes in. */ bool_t svc_register (SVCXPRT * xprt, rpcprog_t prog, rpcvers_t vers, void (*dispatch) (struct svc_req *, SVCXPRT *), rpcproc_t protocol) { struct svc_callout *prev; register struct svc_callout *s; if ((s = svc_find (prog, vers, &prev)) != NULL_SVC) { if (s->sc_dispatch == dispatch) goto pmap_it; /* he is registering another xptr */ return FALSE; } s = (struct svc_callout *) mem_alloc (sizeof (struct svc_callout)); if (s == (struct svc_callout *) 0) return FALSE; s->sc_prog = prog; s->sc_vers = vers; s->sc_dispatch = dispatch; s->sc_next = svc_head; svc_head = s; pmap_it: /* now register the information with the local binder service */ if (protocol) return pmap_set (prog, vers, protocol, xprt->xp_port); return TRUE; } libc_hidden_def(svc_register) /* Remove a service program from the callout list. */ void svc_unregister (rpcprog_t prog, rpcvers_t vers) { struct svc_callout *prev; register struct svc_callout *s; if ((s = svc_find (prog, vers, &prev)) == NULL_SVC) return; if (prev == NULL_SVC) svc_head = s->sc_next; else prev->sc_next = s->sc_next; s->sc_next = NULL_SVC; mem_free ((char *) s, (u_int) sizeof (struct svc_callout)); /* now unregister the information with the local binder service */ pmap_unset (prog, vers); } libc_hidden_def(svc_unregister) /* ******************* REPLY GENERATION ROUTINES ************ */ /* Send a reply to an rpc request */ bool_t svc_sendreply (register SVCXPRT *xprt, xdrproc_t xdr_results, caddr_t xdr_location) { struct rpc_msg rply; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = xprt->xp_verf; rply.acpted_rply.ar_stat = SUCCESS; rply.acpted_rply.ar_results.where = xdr_location; rply.acpted_rply.ar_results.proc = xdr_results; return SVC_REPLY (xprt, &rply); } libc_hidden_def(svc_sendreply) /* No procedure error reply */ void svcerr_noproc (register SVCXPRT *xprt) { struct rpc_msg rply; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = xprt->xp_verf; rply.acpted_rply.ar_stat = PROC_UNAVAIL; SVC_REPLY (xprt, &rply); } /* Can't decode args error reply */ void svcerr_decode (register SVCXPRT *xprt) { struct rpc_msg rply; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = xprt->xp_verf; rply.acpted_rply.ar_stat = GARBAGE_ARGS; SVC_REPLY (xprt, &rply); } libc_hidden_def(svcerr_decode) /* Some system error */ void svcerr_systemerr (register SVCXPRT *xprt) { struct rpc_msg rply; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = xprt->xp_verf; rply.acpted_rply.ar_stat = 
SYSTEM_ERR; SVC_REPLY (xprt, &rply); } /* Authentication error reply */ void svcerr_auth (SVCXPRT *xprt, enum auth_stat why) { struct rpc_msg rply; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_DENIED; rply.rjcted_rply.rj_stat = AUTH_ERROR; rply.rjcted_rply.rj_why = why; SVC_REPLY (xprt, &rply); } libc_hidden_def(svcerr_auth) /* Auth too weak error reply */ void svcerr_weakauth (SVCXPRT *xprt) { svcerr_auth (xprt, AUTH_TOOWEAK); } /* Program unavailable error reply */ void svcerr_noprog (register SVCXPRT *xprt) { struct rpc_msg rply; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = xprt->xp_verf; rply.acpted_rply.ar_stat = PROG_UNAVAIL; SVC_REPLY (xprt, &rply); } libc_hidden_def(svcerr_noprog) /* Program version mismatch error reply */ void svcerr_progvers (register SVCXPRT *xprt, rpcvers_t low_vers, rpcvers_t high_vers) { struct rpc_msg rply; rply.rm_direction = REPLY; rply.rm_reply.rp_stat = MSG_ACCEPTED; rply.acpted_rply.ar_verf = xprt->xp_verf; rply.acpted_rply.ar_stat = PROG_MISMATCH; rply.acpted_rply.ar_vers.low = low_vers; rply.acpted_rply.ar_vers.high = high_vers; SVC_REPLY (xprt, &rply); } libc_hidden_def(svcerr_progvers) /* ******************* SERVER INPUT STUFF ******************* */ /* * Get server side input from some transport. * * Statement of authentication parameters management: * This function owns and manages all authentication parameters, specifically * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and * the "cooked" credentials (rqst->rq_clntcred). * However, this function does not know the structure of the cooked * credentials, so it make the following assumptions: * a) the structure is contiguous (no pointers), and * b) the cred structure size does not exceed RQCRED_SIZE bytes. * In all events, all three parameters are freed upon exit from this routine. * The storage is trivially management on the call stack in user land, but * is mallocated in kernel land. */ void svc_getreq_common (const int fd) { enum xprt_stat stat; struct rpc_msg msg; register SVCXPRT *xprt; char cred_area[2 * MAX_AUTH_BYTES + RQCRED_SIZE]; msg.rm_call.cb_cred.oa_base = cred_area; msg.rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]); xprt = xports[fd]; /* Do we control fd? 
*/ if (xprt == NULL) return; /* now receive msgs from xprtprt (support batch calls) */ do { if (SVC_RECV (xprt, &msg)) { /* now find the exported program and call it */ struct svc_callout *s; struct svc_req r; enum auth_stat why; rpcvers_t low_vers; rpcvers_t high_vers; int prog_found; r.rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]); r.rq_xprt = xprt; r.rq_prog = msg.rm_call.cb_prog; r.rq_vers = msg.rm_call.cb_vers; r.rq_proc = msg.rm_call.cb_proc; r.rq_cred = msg.rm_call.cb_cred; /* first authenticate the message */ /* Check for null flavor and bypass these calls if possible */ if (msg.rm_call.cb_cred.oa_flavor == AUTH_NULL) { r.rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor; r.rq_xprt->xp_verf.oa_length = 0; } else if ((why = _authenticate (&r, &msg)) != AUTH_OK) { svcerr_auth (xprt, why); goto call_done; } /* now match message with a registered service */ prog_found = FALSE; low_vers = 0 - 1; high_vers = 0; for (s = svc_head; s != NULL_SVC; s = s->sc_next) { if (s->sc_prog == r.rq_prog) { if (s->sc_vers == r.rq_vers) { (*s->sc_dispatch) (&r, xprt); goto call_done; } /* found correct version */ prog_found = TRUE; if (s->sc_vers < low_vers) low_vers = s->sc_vers; if (s->sc_vers > high_vers) high_vers = s->sc_vers; } /* found correct program */ } /* if we got here, the program or version is not served ... */ if (prog_found) svcerr_progvers (xprt, low_vers, high_vers); else svcerr_noprog (xprt); /* Fall through to ... */ } call_done: if ((stat = SVC_STAT (xprt)) == XPRT_DIED) { SVC_DESTROY (xprt); break; } } while (stat == XPRT_MOREREQS); } libc_hidden_def(svc_getreq_common) void svc_getreqset (fd_set *readfds) { register u_int32_t mask; register u_int32_t *maskp; register int setsize; register int sock; register int bit; setsize = _rpc_dtablesize (); maskp = (u_int32_t *) readfds->fds_bits; for (sock = 0; sock < setsize; sock += 32) for (mask = *maskp++; (bit = ffs (mask)); mask ^= (1 << (bit - 1))) svc_getreq_common (sock + bit - 1); } libc_hidden_def(svc_getreqset) void svc_getreq (int rdfds) { fd_set readfds; FD_ZERO (&readfds); readfds.fds_bits[0] = rdfds; svc_getreqset (&readfds); } libc_hidden_def(svc_getreq) void svc_getreq_poll (struct pollfd *pfdp, int pollretval) { register int i; register int fds_found; for (i = fds_found = 0; i < svc_max_pollfd && fds_found < pollretval; ++i) { register struct pollfd *p = &pfdp[i]; if (p->fd != -1 && p->revents) { /* fd has input waiting */ ++fds_found; if (p->revents & POLLNVAL) xprt_unregister (xports[p->fd]); else svc_getreq_common (p->fd); } } } libc_hidden_def(svc_getreq_poll) #ifdef __UCLIBC_HAS_THREADS__ void attribute_hidden __rpc_thread_svc_cleanup (void) { struct svc_callout *svcp; while ((svcp = svc_head) != NULL) svc_unregister (svcp->sc_prog, svcp->sc_vers); } #endif /* __UCLIBC_HAS_THREADS__ */
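svc_getreqset() above scans each 32-bit word of the fd_set with ffs(), jumping straight to the next set bit and clearing it with an XOR rather than testing every fd in turn. That inner loop, isolated into a runnable sketch (the sample mask is invented):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>   /* ffs() */

/* Visit every set bit in 'mask', lowest first, exactly as the
 * svc_getreqset() inner loop does for each 32-bit fds_bits word;
 * 'base' is the fd number of bit 0 in this word. */
static void visit_set_bits(uint32_t mask, int base)
{
    int bit;
    while ((bit = ffs((int)mask)) != 0) {
        printf("fd %d is ready\n", base + bit - 1);
        mask ^= 1u << (bit - 1);   /* clear the bit just handled */
    }
}

int main(void)
{
    visit_set_bits(0x8050u, 0);    /* bits 4, 6 and 15 -> fds 4, 6, 15 */
    return 0;
}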
gpl-2.0
puuksl/tptresized
720/src/simulation/SaveRenderer.cpp
12
4070
#include "SaveRenderer.h" #include "client/GameSave.h" #include "graphics/Graphics.h" #include "Simulation.h" #include "graphics/Renderer.h" #include "gui/search/Thumbnail.h" SaveRenderer::SaveRenderer(){ g = new Graphics(); sim = new Simulation(); ren = new Renderer(g, sim); ren->decorations_enable = true; ren->blackDecorations = true; #if defined(OGLR) || defined(OGLI) glEnable(GL_TEXTURE_2D); glGenTextures(1, &fboTex); glBindTexture(GL_TEXTURE_2D, fboTex); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, XRES, YRES, 0, GL_RGBA, GL_FLOAT, NULL); glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST); glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST); //FBO glGenFramebuffers(1, &fbo); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo); glEnable(GL_BLEND); glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, fboTex, 0); glBindTexture(GL_TEXTURE_2D, 0); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); // Reset framebuffer binding glDisable(GL_TEXTURE_2D); #endif } VideoBuffer * SaveRenderer::Render(GameSave * save, bool decorations, bool fire) { int width, height; VideoBuffer * tempThumb; width = save->blockWidth; height = save->blockHeight; bool doCollapse = save->Collapsed(); g->Acquire(); g->Clear(); sim->clear_sim(); if(!sim->Load(save)) { ren->decorations_enable = true; ren->blackDecorations = !decorations; #if defined(OGLR) || defined(OGLI) pixel * pData = NULL; unsigned char * texData = NULL; glTranslated(0, MENUSIZE, 0); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo); glClearColor(0.0f, 0.0f, 0.0f, 1.0f); glClear(GL_COLOR_BUFFER_BIT); ren->clearScreen(1.0f); ren->ClearAccumulation(); #ifdef OGLR ren->RenderBegin(); ren->RenderEnd(); #else if (fire) { int frame = 15; while(frame) { frame--; ren->render_parts(); ren->render_fire(); ren->clearScreen(1.0f); } } ren->RenderBegin(); ren->RenderEnd(); #endif glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); glTranslated(0, -MENUSIZE, 0); glEnable( GL_TEXTURE_2D ); glBindTexture(GL_TEXTURE_2D, fboTex); pData = new pixel[XRES*YRES]; texData = new unsigned char[(XRES*YRES)*PIXELSIZE]; std::fill(texData, texData+(XRES*YRES)*PIXELSIZE, 0xDD); glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, texData); glDisable(GL_TEXTURE_2D); for(int x = 0; x < width*CELL; x++) { for(int y = 0; y < height*CELL; y++) { unsigned char red = texData[((((YRES-1-y)*XRES)+x)*4)]; unsigned char green = texData[((((YRES-1-y)*XRES)+x)*4)+1]; unsigned char blue = texData[((((YRES-1-y)*XRES)+x)*4)+2]; pData[(y*(width*CELL))+x] = PIXRGBA(red, green, blue, 255); } } tempThumb = new VideoBuffer(pData, width*CELL, height*CELL); delete[] pData; delete[] texData; pData = NULL; #else pixel * pData = NULL; pixel * dst; pixel * src = g->vid; ren->ClearAccumulation(); if (fire) { int frame = 15; while(frame) { frame--; ren->render_parts(); ren->render_fire(); ren->clearScreen(1.0f); } } ren->RenderBegin(); ren->RenderEnd(); pData = (pixel *)malloc(PIXELSIZE * ((width*CELL)*(height*CELL))); dst = pData; for(int i = 0; i < height*CELL; i++) { memcpy(dst, src, (width*CELL)*PIXELSIZE); dst+=(width*CELL);///PIXELSIZE; src+=WINDOWW; } tempThumb = new VideoBuffer(pData, width*CELL, height*CELL); if(pData) free(pData); #endif } if(doCollapse) save->Collapse(); g->Release(); return tempThumb; } VideoBuffer * SaveRenderer::Render(unsigned char * saveData, int dataSize, bool decorations, bool fire) { GameSave * tempSave; try { tempSave = new GameSave((char*)saveData, dataSize); } catch (std::exception & e) { //Todo: make this look a little less shit VideoBuffer * 
buffer = new VideoBuffer(64, 64);
		buffer->BlendCharacter(32, 32, 'x', 255, 255, 255, 255);
		return buffer;
	}
	VideoBuffer * thumb = Render(tempSave, decorations, fire);
	delete tempSave;
	return thumb;
}

SaveRenderer::~SaveRenderer()
{
}
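In the OGLR/OGLI branch of SaveRenderer::Render(), the texData-to-pData loop flips the image vertically because glGetTexImage() hands rows back bottom-up: destination row y reads source row height-1-y. A minimal sketch of that flip-copy over a toy RGBA buffer (the sizes and pixel values are invented):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy an RGBA image while flipping it vertically: destination row y
 * comes from source row (height-1-y), the same index arithmetic as
 * the readback loop in SaveRenderer::Render(). */
static void flip_vertical(uint32_t *dst, const uint32_t *src,
                          int width, int height)
{
    for (int y = 0; y < height; y++)
        memcpy(&dst[y * width],
               &src[(height - 1 - y) * width],
               (size_t)width * sizeof(uint32_t));
}

int main(void)
{
    const uint32_t src[6] = { 1, 2, 3, 4, 5, 6 };   /* 3x2: two rows */
    uint32_t dst[6];
    flip_vertical(dst, src, 3, 2);
    for (int i = 0; i < 6; i++)
        printf("%u ", dst[i]);                      /* 4 5 6 1 2 3 */
    printf("\n");
    return 0;
}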
gpl-2.0
smx-smx/dsl-n55u-bender
release/src/router/udhcpd/script.c
12
9802
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ /* script.c * * Functions to call the DHCP client notification scripts * * Russ Dill <Russ.Dill@asu.edu> July 2001 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <string.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <sys/types.h> #include <sys/wait.h> #include <errno.h> #include "options.h" #include "dhcpd.h" #include "dhcpc.h" #include "packet.h" #include "options.h" #include "debug.h" /* get a rough idea of how long an option will be (rounding up...) */ static int max_option_length[] = { [OPTION_IP] = sizeof("255.255.255.255 "), [OPTION_IP_PAIR] = sizeof("255.255.255.255 ") * 2, [OPTION_STRING] = 1, [OPTION_BOOLEAN] = sizeof("yes "), [OPTION_U8] = sizeof("255 "), [OPTION_U16] = sizeof("65535 "), [OPTION_S16] = sizeof("-32768 "), [OPTION_U32] = sizeof("4294967295 "), [OPTION_S32] = sizeof("-2147483684 "), [OPTION_6RD] = sizeof("32 128 FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF 255.255.255.255 ") }; static int upper_length(int length, struct dhcp_option *option) { return max_option_length[option->flags & TYPE_MASK] * (length / option_lengths[option->flags & TYPE_MASK]); } static int sprintip(char *dest, char *pre, unsigned char *ip) { return sprintf(dest, "%s%d.%d.%d.%d ", pre, ip[0], ip[1], ip[2], ip[3]); } static int sprintip6(char *dest, unsigned char *ip) { int i, len = 0; for (i = 0; i < 16; i+=2) { if (i>0) dest[len++]=':'; sprintf(dest + len, "%02X%02X", ip[i], ip[i+1]); len += 4; } dest[len] = '\0'; return len; } /* Fill dest with the text of option 'option'. */ static void fill_options(char *dest, unsigned char *option, struct dhcp_option *type_p) { int type, optlen; u_int16_t val_u16; int16_t val_s16; u_int32_t val_u32; int32_t val_s32; int len = option[OPT_LEN - 2]; dest += sprintf(dest, "%s=", type_p->name); type = type_p->flags & TYPE_MASK; optlen = option_lengths[type]; for(;;) { switch (type) { case OPTION_IP_PAIR: dest += sprintip(dest, "", option); *(dest++) = '/'; option += 4; optlen = 4; case OPTION_IP: /* Works regardless of host byte order. */ dest += sprintip(dest, "", option); break; case OPTION_BOOLEAN: dest += sprintf(dest, *option ? 
"yes " : "no "); break; case OPTION_U8: dest += sprintf(dest, "%u ", *option); break; case OPTION_U16: memcpy(&val_u16, option, 2); dest += sprintf(dest, "%u ", ntohs(val_u16)); break; case OPTION_S16: memcpy(&val_s16, option, 2); dest += sprintf(dest, "%d ", ntohs(val_s16)); break; case OPTION_U32: memcpy(&val_u32, option, 4); dest += sprintf(dest, "%lu ", (unsigned long) ntohl(val_u32)); break; case OPTION_S32: memcpy(&val_s32, option, 4); dest += sprintf(dest, "%ld ", (long) ntohl(val_s32)); break; case OPTION_STRING: memcpy(dest, option, len); dest[len] = '\0'; return; /* Short circuit this case */ case OPTION_6RD: /* Option binary format: * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | OPTION_6RD | option-length | IPv4MaskLen | 6rdPrefixLen | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | * | 6rdPrefix | * | (16 octets) | * | | * | | * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 6rdBRIPv4Address(es) | * . . * . . * . . * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * We convert it to a string "IPv4MaskLen 6rdPrefixLen 6rdPrefix 6rdBRIPv4Address" */ /* Sanity check: ensure that our length is at least 22 bytes, that * IPv4MaskLen is <= 32, 6rdPrefixLen <= 128 and that the sum of * (32 - IPv4MaskLen) + 6rdPrefixLen is less than or equal to 128. * If any of these requirements is not fulfilled, return with empty * value. */ if ((len >= 22) && (*option <= 32) && (*(option+1) <= 128) && (((32 - *option) + *(option+1)) <= 128)) { /* IPv4MaskLen */ dest += sprintf(dest, "%u ", *option++); len--; /* 6rdPrefixLen */ dest += sprintf(dest, "%u ", *option++); len--; /* 6rdPrefix */ dest += sprintip6(dest, option); option += 16; len -= 16; /* 6rdBRIPv4Addresses */ while (len >= 4) { dest += sprintip(dest, " ", option); option += 4; len -= 4; /* the code to determine the option size fails to work with * lengths that are not a multiple of the minimum length, * adding all advertised 6rdBRIPv4Addresses here would * overflow the destination buffer, therefore skip the rest * for now */ break; } } return; // Short circuit this case } option += optlen; len -= optlen; if (len <= 0) break; } } static char *find_env(const char *prefix, char *defaultstr) { extern char **environ; char **ptr; const int len = strlen(prefix); for (ptr = environ; *ptr != NULL; ptr++) { if (strncmp(prefix, *ptr, len) == 0) return *ptr; } return defaultstr; } /* put all the paramaters into an environment */ static char **fill_envp(struct dhcpMessage *packet) { int num_options = 0; int i, j; char **envp; unsigned char *temp; char over = 0; if (packet == NULL) num_options = 0; else { for (i = 0; options[i].code; i++) if (get_option(packet, options[i].code)) num_options++; if (packet->siaddr) num_options++; if ((temp = get_option(packet, DHCP_OPTION_OVER))) over = *temp; if (!(over & FILE_FIELD) && packet->file[0]) num_options++; if (!(over & SNAME_FIELD) && packet->sname[0]) num_options++; } envp = xmalloc((num_options + 5) * sizeof(char *)); envp[0] = xmalloc(sizeof("interface=") + strlen(client_config.interface)); sprintf(envp[0], "interface=%s", client_config.interface); envp[1] = find_env("PATH", "PATH=/bin:/usr/bin:/sbin:/usr/sbin"); envp[2] = find_env("HOME", "HOME=/"); if (packet == NULL) { envp[3] = NULL; return envp; } envp[3] = xmalloc(sizeof("ip=255.255.255.255")); sprintip(envp[3], "ip=", (unsigned char *) &packet->yiaddr); for (i = 0, j = 4; 
options[i].code; i++) { if ((temp = get_option(packet, options[i].code))) { envp[j] = xmalloc(upper_length(temp[OPT_LEN - 2], &options[i]) + strlen(options[i].name) + 2); fill_options(envp[j], temp, &options[i]); j++; } } if (packet->siaddr) { envp[j] = xmalloc(sizeof("siaddr=255.255.255.255")); sprintip(envp[j++], "siaddr=", (unsigned char *) &packet->siaddr); } if (!(over & FILE_FIELD) && packet->file[0]) { /* watch out for invalid packets */ packet->file[sizeof(packet->file) - 1] = '\0'; envp[j] = xmalloc(sizeof("boot_file=") + strlen(packet->file)); sprintf(envp[j++], "boot_file=%s", packet->file); } if (!(over & SNAME_FIELD) && packet->sname[0]) { /* watch out for invalid packets */ packet->sname[sizeof(packet->sname) - 1] = '\0'; envp[j] = xmalloc(sizeof("sname=") + strlen(packet->sname)); sprintf(envp[j++], "sname=%s", packet->sname); } envp[j] = NULL; return envp; } /* Call a script with a par file and env vars */ void run_script(struct dhcpMessage *packet, const char *name) { int pid; char **envp; if (client_config.script == NULL) return; /* call script */ pid = fork(); if (pid) { waitpid(pid, NULL, 0); return; } else if (pid == 0) { envp = fill_envp(packet); /* close fd's? */ /* exec script */ DEBUG(LOG_INFO, "execle'ing %s", client_config.script); execle(client_config.script, client_config.script, name, NULL, envp); LOG(LOG_ERR, "script %s failed: %s", client_config.script, strerror(errno)); exit(1); } }
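run_script() above is the classic synchronous hook: fork(), have the child execle() the script with a purpose-built envp, and waitpid() in the parent. The same shape stripped of the DHCP specifics; note this sketch also reports a failed fork() separately, whereas the original folds pid == -1 into the parent branch. The script path and environment strings here are invented:

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

/* Run 'script' with one argument and a custom environment, blocking
 * until it exits -- the fork/execle/waitpid shape of run_script(). */
static void run_hook(const char *script, const char *arg, char *const envp[])
{
    pid_t pid = fork();

    if (pid < 0) {
        perror("fork");
    } else if (pid == 0) {
        execle(script, script, arg, (char *)NULL, envp);
        perror("execle");          /* only reached if exec failed */
        _exit(1);
    } else {
        waitpid(pid, NULL, 0);     /* parent blocks until the hook exits */
    }
}

int main(void)
{
    char *envp[] = { "interface=eth0", "ip=192.0.2.10", NULL };
    run_hook("/bin/echo", "bound", envp);   /* stand-in for the dhcpc script */
    return 0;
}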
gpl-2.0
eldarerathis/android_kernel_msm
drivers/net/wireless/wcnss/wcnss_vreg.c
268
14957
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/regulator/consumer.h> #include <linux/mfd/pm8xxx/pm8921.h> #include <linux/mfd/pm8xxx/gpio.h> #include <linux/wcnss_wlan.h> #include <linux/semaphore.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/clk.h> #include <mach/msm_xo.h> #include <mach/msm_iomap.h> static void __iomem *msm_wcnss_base; static LIST_HEAD(power_on_lock_list); static DEFINE_MUTEX(list_lock); static DEFINE_SEMAPHORE(wcnss_power_on_lock); static int auto_detect; #define MSM_RIVA_PHYS 0x03204000 #define MSM_PRONTO_PHYS 0xfb21b000 #define RIVA_PMU_OFFSET 0x28 #define PRONTO_PMU_OFFSET 0x1004 #define RIVA_SPARE_OFFSET 0x0b4 #define PRONTO_SPARE_OFFSET 0x1088 #define NVBIN_DLND_BIT BIT(25) #define PRONTO_IRIS_REG_READ_OFFSET 0x1134 #define PRONTO_IRIS_REG_CHIP_ID 0x04 #define WCNSS_PMU_CFG_IRIS_XO_CFG BIT(3) #define WCNSS_PMU_CFG_IRIS_XO_EN BIT(4) #define WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP BIT(5) #define WCNSS_PMU_CFG_IRIS_XO_CFG_STS BIT(6) /* 1: in progress, 0: done */ #define WCNSS_PMU_CFG_IRIS_XO_READ BIT(9) #define WCNSS_PMU_CFG_IRIS_XO_READ_STS BIT(10) #define WCNSS_PMU_CFG_IRIS_XO_MODE 0x6 #define WCNSS_PMU_CFG_IRIS_XO_MODE_48 (3 << 1) #define VREG_NULL_CONFIG 0x0000 #define VREG_GET_REGULATOR_MASK 0x0001 #define VREG_SET_VOLTAGE_MASK 0x0002 #define VREG_OPTIMUM_MODE_MASK 0x0004 #define VREG_ENABLE_MASK 0x0008 #define WCNSS_INVALID_IRIS_REG 0xbaadbaad struct vregs_info { const char * const name; int state; const int nominal_min; const int low_power_min; const int max_voltage; const int uA_load; struct regulator *regulator; }; /* IRIS regulators for Riva hardware */ static struct vregs_info iris_vregs_riva[] = { {"iris_vddxo", VREG_NULL_CONFIG, 1800000, 0, 1800000, 10000, NULL}, {"iris_vddrfa", VREG_NULL_CONFIG, 1300000, 0, 1300000, 100000, NULL}, {"iris_vddpa", VREG_NULL_CONFIG, 2900000, 0, 3000000, 515000, NULL}, {"iris_vdddig", VREG_NULL_CONFIG, 1200000, 0, 1225000, 10000, NULL}, }; /* WCNSS regulators for Riva hardware */ static struct vregs_info riva_vregs[] = { /* Riva */ {"riva_vddmx", VREG_NULL_CONFIG, 1050000, 0, 1150000, 0, NULL}, {"riva_vddcx", VREG_NULL_CONFIG, 1050000, 0, 1150000, 0, NULL}, {"riva_vddpx", VREG_NULL_CONFIG, 1800000, 0, 1800000, 0, NULL}, }; /* IRIS regulators for Pronto hardware */ static struct vregs_info iris_vregs_pronto[] = { {"qcom,iris-vddxo", VREG_NULL_CONFIG, 1800000, 0, 1800000, 10000, NULL}, {"qcom,iris-vddrfa", VREG_NULL_CONFIG, 1300000, 0, 1300000, 100000, NULL}, {"qcom,iris-vddpa", VREG_NULL_CONFIG, 2900000, 0, 3000000, 515000, NULL}, {"qcom,iris-vdddig", VREG_NULL_CONFIG, 1225000, 0, 1300000, 10000, NULL}, }; /* WCNSS regulators for Pronto hardware */ static struct vregs_info pronto_vregs[] = { {"qcom,pronto-vddmx", VREG_NULL_CONFIG, 950000, 0, 1150000, 0, NULL}, {"qcom,pronto-vddcx", VREG_NULL_CONFIG, 900000, 0, 1150000, 0, NULL}, {"qcom,pronto-vddpx", VREG_NULL_CONFIG, 1800000, 0, 1800000, 
0, NULL}, }; struct host_driver { char name[20]; struct list_head list; }; enum { WCNSS_XO_48MHZ = 1, WCNSS_XO_19MHZ, WCNSS_XO_INVALID, }; enum { IRIS_3660, /* also 3660A and 3680 */ IRIS_3620 }; int xo_auto_detect(u32 reg) { reg >>= 30; switch (reg) { case IRIS_3660: return WCNSS_XO_48MHZ; case IRIS_3620: return WCNSS_XO_19MHZ; default: return WCNSS_XO_INVALID; } } static int configure_iris_xo(struct device *dev, bool use_48mhz_xo, int on) { u32 reg = 0; u32 iris_reg = WCNSS_INVALID_IRIS_REG; int rc = 0; int size = 0; int pmu_offset = 0; int spare_offset = 0; unsigned long wcnss_phys_addr; void __iomem *pmu_conf_reg; void __iomem *spare_reg; void __iomem *iris_read_reg; struct clk *clk; struct clk *clk_rf = NULL; if (wcnss_hardware_type() == WCNSS_PRONTO_HW) { wcnss_phys_addr = MSM_PRONTO_PHYS; pmu_offset = PRONTO_PMU_OFFSET; spare_offset = PRONTO_SPARE_OFFSET; size = 0x3000; clk = clk_get(dev, "xo"); if (IS_ERR(clk)) { pr_err("Couldn't get xo clock\n"); return PTR_ERR(clk); } } else { wcnss_phys_addr = MSM_RIVA_PHYS; pmu_offset = RIVA_PMU_OFFSET; spare_offset = RIVA_SPARE_OFFSET; size = SZ_256; clk = clk_get(dev, "cxo"); if (IS_ERR(clk)) { pr_err("Couldn't get cxo clock\n"); return PTR_ERR(clk); } } if (on) { msm_wcnss_base = ioremap(wcnss_phys_addr, size); if (!msm_wcnss_base) { pr_err("ioremap wcnss physical failed\n"); goto fail; } /* Enable IRIS XO */ rc = clk_prepare_enable(clk); if (rc) { pr_err("clk enable failed\n"); goto fail; } /* NV bit is set to indicate that platform driver is capable * of doing NV download. */ pr_debug("wcnss: Indicate NV bin download\n"); spare_reg = msm_wcnss_base + spare_offset; reg = readl_relaxed(spare_reg); reg |= NVBIN_DLND_BIT; writel_relaxed(reg, spare_reg); pmu_conf_reg = msm_wcnss_base + pmu_offset; writel_relaxed(0, pmu_conf_reg); reg = readl_relaxed(pmu_conf_reg); reg |= WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP | WCNSS_PMU_CFG_IRIS_XO_EN; writel_relaxed(reg, pmu_conf_reg); if (wcnss_xo_auto_detect_enabled()) { iris_read_reg = msm_wcnss_base + PRONTO_IRIS_REG_READ_OFFSET; iris_reg = readl_relaxed(iris_read_reg); } if (iris_reg != WCNSS_INVALID_IRIS_REG) { iris_reg &= 0xffff; iris_reg |= PRONTO_IRIS_REG_CHIP_ID; writel_relaxed(iris_reg, iris_read_reg); /* Iris read */ reg = readl_relaxed(pmu_conf_reg); reg |= WCNSS_PMU_CFG_IRIS_XO_READ; writel_relaxed(reg, pmu_conf_reg); /* Wait for PMU_CFG.iris_reg_read_sts */ while (readl_relaxed(pmu_conf_reg) & WCNSS_PMU_CFG_IRIS_XO_READ_STS) cpu_relax(); iris_reg = readl_relaxed(iris_read_reg); auto_detect = xo_auto_detect(iris_reg); /* Reset iris read bit */ reg &= ~WCNSS_PMU_CFG_IRIS_XO_READ; } else if (wcnss_xo_auto_detect_enabled()) /* Default to 48 MHZ */ auto_detect = WCNSS_XO_48MHZ; else auto_detect = WCNSS_XO_INVALID; /* Clear XO_MODE[b2:b1] bits. 
Clear implies 19.2 MHz TCXO */ reg &= ~(WCNSS_PMU_CFG_IRIS_XO_MODE); if ((use_48mhz_xo && auto_detect == WCNSS_XO_INVALID) || auto_detect == WCNSS_XO_48MHZ) reg |= WCNSS_PMU_CFG_IRIS_XO_MODE_48; writel_relaxed(reg, pmu_conf_reg); /* Start IRIS XO configuration */ reg |= WCNSS_PMU_CFG_IRIS_XO_CFG; writel_relaxed(reg, pmu_conf_reg); /* Wait for XO configuration to finish */ while (readl_relaxed(pmu_conf_reg) & WCNSS_PMU_CFG_IRIS_XO_CFG_STS) cpu_relax(); /* Stop IRIS XO configuration */ reg &= ~(WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP | WCNSS_PMU_CFG_IRIS_XO_CFG); writel_relaxed(reg, pmu_conf_reg); clk_disable_unprepare(clk); if ((!use_48mhz_xo && auto_detect == WCNSS_XO_INVALID) || auto_detect == WCNSS_XO_19MHZ) { clk_rf = clk_get(dev, "rf_clk"); if (IS_ERR(clk_rf)) { pr_err("Couldn't get rf_clk\n"); goto fail; } rc = clk_prepare_enable(clk_rf); if (rc) { pr_err("clk_rf enable failed\n"); goto fail; } } } else if ((!use_48mhz_xo && auto_detect == WCNSS_XO_INVALID) || auto_detect == WCNSS_XO_19MHZ) { clk_rf = clk_get(dev, "rf_clk"); if (IS_ERR(clk_rf)) { pr_err("Couldn't get rf_clk\n"); goto fail; } clk_disable_unprepare(clk_rf); } /* Add some delay for XO to settle */ msleep(20); fail: clk_put(clk); if (clk_rf != NULL) clk_put(clk_rf); return rc; } /* Helper routine to turn off all WCNSS & IRIS vregs */ static void wcnss_vregs_off(struct vregs_info regulators[], uint size) { int i, rc = 0; /* Regulators need to be turned off in the reverse order */ for (i = (size-1); i >= 0; i--) { if (regulators[i].state == VREG_NULL_CONFIG) continue; /* Remove PWM mode */ if (regulators[i].state & VREG_OPTIMUM_MODE_MASK) { rc = regulator_set_optimum_mode( regulators[i].regulator, 0); if (rc < 0) pr_err("regulator_set_optimum_mode(%s) failed (%d)\n", regulators[i].name, rc); } /* Set voltage to lowest level */ if (regulators[i].state & VREG_SET_VOLTAGE_MASK) { rc = regulator_set_voltage(regulators[i].regulator, regulators[i].low_power_min, regulators[i].max_voltage); if (rc) pr_err("regulator_set_voltage(%s) failed (%d)\n", regulators[i].name, rc); } /* Disable regulator */ if (regulators[i].state & VREG_ENABLE_MASK) { rc = regulator_disable(regulators[i].regulator); if (rc < 0) pr_err("vreg %s disable failed (%d)\n", regulators[i].name, rc); } /* Free the regulator source */ if (regulators[i].state & VREG_GET_REGULATOR_MASK) regulator_put(regulators[i].regulator); regulators[i].state = VREG_NULL_CONFIG; } } /* Common helper routine to turn on all WCNSS & IRIS vregs */ static int wcnss_vregs_on(struct device *dev, struct vregs_info regulators[], uint size) { int i, rc = 0, reg_cnt; for (i = 0; i < size; i++) { /* Get regulator source */ regulators[i].regulator = regulator_get(dev, regulators[i].name); if (IS_ERR(regulators[i].regulator)) { rc = PTR_ERR(regulators[i].regulator); pr_err("regulator get of %s failed (%d)\n", regulators[i].name, rc); goto fail; } regulators[i].state |= VREG_GET_REGULATOR_MASK; reg_cnt = regulator_count_voltages(regulators[i].regulator); /* Set voltage to nominal. Exclude switches e.g.
LVS */ if ((regulators[i].nominal_min || regulators[i].max_voltage) && (reg_cnt > 0)) { rc = regulator_set_voltage(regulators[i].regulator, regulators[i].nominal_min, regulators[i].max_voltage); if (rc) { pr_err("regulator_set_voltage(%s) failed (%d)\n", regulators[i].name, rc); goto fail; } regulators[i].state |= VREG_SET_VOLTAGE_MASK; } /* Vote for PWM/PFM mode if needed */ if (regulators[i].uA_load && (reg_cnt > 0)) { rc = regulator_set_optimum_mode(regulators[i].regulator, regulators[i].uA_load); if (rc < 0) { pr_err("regulator_set_optimum_mode(%s) failed (%d)\n", regulators[i].name, rc); goto fail; } regulators[i].state |= VREG_OPTIMUM_MODE_MASK; } /* Enable the regulator */ rc = regulator_enable(regulators[i].regulator); if (rc) { pr_err("vreg %s enable failed (%d)\n", regulators[i].name, rc); goto fail; } regulators[i].state |= VREG_ENABLE_MASK; } return rc; fail: wcnss_vregs_off(regulators, size); return rc; } static void wcnss_iris_vregs_off(enum wcnss_hw_type hw_type) { switch (hw_type) { case WCNSS_RIVA_HW: wcnss_vregs_off(iris_vregs_riva, ARRAY_SIZE(iris_vregs_riva)); break; case WCNSS_PRONTO_HW: wcnss_vregs_off(iris_vregs_pronto, ARRAY_SIZE(iris_vregs_pronto)); break; default: pr_err("%s invalid hardware %d\n", __func__, hw_type); } } static int wcnss_iris_vregs_on(struct device *dev, enum wcnss_hw_type hw_type) { int ret = -1; switch (hw_type) { case WCNSS_RIVA_HW: ret = wcnss_vregs_on(dev, iris_vregs_riva, ARRAY_SIZE(iris_vregs_riva)); break; case WCNSS_PRONTO_HW: ret = wcnss_vregs_on(dev, iris_vregs_pronto, ARRAY_SIZE(iris_vregs_pronto)); break; default: pr_err("%s invalid hardware %d\n", __func__, hw_type); } return ret; } static void wcnss_core_vregs_off(enum wcnss_hw_type hw_type) { switch (hw_type) { case WCNSS_RIVA_HW: wcnss_vregs_off(riva_vregs, ARRAY_SIZE(riva_vregs)); break; case WCNSS_PRONTO_HW: wcnss_vregs_off(pronto_vregs, ARRAY_SIZE(pronto_vregs)); break; default: pr_err("%s invalid hardware %d\n", __func__, hw_type); } } static int wcnss_core_vregs_on(struct device *dev, enum wcnss_hw_type hw_type) { int ret = -1; switch (hw_type) { case WCNSS_RIVA_HW: ret = wcnss_vregs_on(dev, riva_vregs, ARRAY_SIZE(riva_vregs)); break; case WCNSS_PRONTO_HW: ret = wcnss_vregs_on(dev, pronto_vregs, ARRAY_SIZE(pronto_vregs)); break; default: pr_err("%s invalid hardware %d\n", __func__, hw_type); } return ret; } int wcnss_wlan_power(struct device *dev, struct wcnss_wlan_config *cfg, enum wcnss_opcode on) { int rc = 0; enum wcnss_hw_type hw_type = wcnss_hardware_type(); if (on) { down(&wcnss_power_on_lock); /* RIVA regulator settings */ rc = wcnss_core_vregs_on(dev, hw_type); if (rc) goto fail_wcnss_on; /* IRIS regulator settings */ rc = wcnss_iris_vregs_on(dev, hw_type); if (rc) goto fail_iris_on; /* Configure IRIS XO */ rc = configure_iris_xo(dev, cfg->use_48mhz_xo, WCNSS_WLAN_SWITCH_ON); if (rc) goto fail_iris_xo; up(&wcnss_power_on_lock); } else { configure_iris_xo(dev, cfg->use_48mhz_xo, WCNSS_WLAN_SWITCH_OFF); wcnss_iris_vregs_off(hw_type); wcnss_core_vregs_off(hw_type); } return rc; fail_iris_xo: wcnss_iris_vregs_off(hw_type); fail_iris_on: wcnss_core_vregs_off(hw_type); fail_wcnss_on: up(&wcnss_power_on_lock); return rc; } EXPORT_SYMBOL(wcnss_wlan_power); /* * During SSR WCNSS should not be 'powered on' until all the host drivers * finish their shutdown routines. Host drivers use below APIs to * synchronize power-on. WCNSS will not be 'powered on' until all the * requests(to lock power-on) are freed. 
*/ int wcnss_req_power_on_lock(char *driver_name) { struct host_driver *node; if (!driver_name) goto err; node = kmalloc(sizeof(struct host_driver), GFP_KERNEL); if (!node) goto err; strlcpy(node->name, driver_name, sizeof(node->name)); mutex_lock(&list_lock); /* Lock when the first request is added */ if (list_empty(&power_on_lock_list)) down(&wcnss_power_on_lock); list_add(&node->list, &power_on_lock_list); mutex_unlock(&list_lock); return 0; err: return -EINVAL; } EXPORT_SYMBOL(wcnss_req_power_on_lock); int wcnss_free_power_on_lock(char *driver_name) { int ret = -1; struct host_driver *node; mutex_lock(&list_lock); list_for_each_entry(node, &power_on_lock_list, list) { if (!strncmp(node->name, driver_name, sizeof(node->name))) { list_del(&node->list); kfree(node); ret = 0; break; } } /* unlock when the last host driver frees the lock */ if (list_empty(&power_on_lock_list)) up(&wcnss_power_on_lock); mutex_unlock(&list_lock); return ret; } EXPORT_SYMBOL(wcnss_free_power_on_lock);
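/*
 * A minimal usage sketch (illustrative only, not from this file): how a
 * hypothetical host driver could use the two exported helpers above to
 * hold off WCNSS power-on while its SSR shutdown work runs. The driver
 * name is made up, and the helpers are assumed to be declared in
 * <linux/wcnss_wlan.h>.
 */
#include <linux/wcnss_wlan.h>

static char example_host_name[] = "example_host";

static void example_host_ssr_shutdown(void)
{
	/* The first request added takes the semaphore, blocking power-on. */
	if (wcnss_req_power_on_lock(example_host_name))
		return;	/* -EINVAL: bad name or allocation failure */

	/* ... driver-specific shutdown work would run here ... */

	/* The last request freed releases the semaphore again. */
	wcnss_free_power_on_lock(example_host_name);
}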
gpl-2.0
AshishNamdev/linux
sound/soc/codecs/cs4265.c
268
16754
/* * cs4265.c -- CS4265 ALSA SoC audio driver * * Copyright 2014 Cirrus Logic, Inc. * * Author: Paul Handrigan <paul.handrigan@cirrus.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/initval.h> #include <sound/tlv.h> #include "cs4265.h" struct cs4265_private { struct regmap *regmap; struct gpio_desc *reset_gpio; u8 format; u32 sysclk; }; static const struct reg_default cs4265_reg_defaults[] = { { CS4265_PWRCTL, 0x0F }, { CS4265_DAC_CTL, 0x08 }, { CS4265_ADC_CTL, 0x00 }, { CS4265_MCLK_FREQ, 0x00 }, { CS4265_SIG_SEL, 0x40 }, { CS4265_CHB_PGA_CTL, 0x00 }, { CS4265_CHA_PGA_CTL, 0x00 }, { CS4265_ADC_CTL2, 0x19 }, { CS4265_DAC_CHA_VOL, 0x00 }, { CS4265_DAC_CHB_VOL, 0x00 }, { CS4265_DAC_CTL2, 0xC0 }, { CS4265_SPDIF_CTL1, 0x00 }, { CS4265_SPDIF_CTL2, 0x00 }, { CS4265_INT_MASK, 0x00 }, { CS4265_STATUS_MODE_MSB, 0x00 }, { CS4265_STATUS_MODE_LSB, 0x00 }, }; static bool cs4265_readable_register(struct device *dev, unsigned int reg) { switch (reg) { case CS4265_CHIP_ID ... CS4265_SPDIF_CTL2: return true; default: return false; } } static bool cs4265_volatile_register(struct device *dev, unsigned int reg) { switch (reg) { case CS4265_INT_STATUS: return true; default: return false; } } static DECLARE_TLV_DB_SCALE(pga_tlv, -1200, 50, 0); static DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 0); static const char * const digital_input_mux_text[] = { "SDIN1", "SDIN2" }; static SOC_ENUM_SINGLE_DECL(digital_input_mux_enum, CS4265_SIG_SEL, 7, digital_input_mux_text); static const struct snd_kcontrol_new digital_input_mux = SOC_DAPM_ENUM("Digital Input Mux", digital_input_mux_enum); static const char * const mic_linein_text[] = { "MIC", "LINEIN" }; static SOC_ENUM_SINGLE_DECL(mic_linein_enum, CS4265_ADC_CTL2, 0, mic_linein_text); static const char * const cam_mode_text[] = { "One Byte", "Two Byte" }; static SOC_ENUM_SINGLE_DECL(cam_mode_enum, CS4265_SPDIF_CTL1, 5, cam_mode_text); static const char * const cam_mono_stereo_text[] = { "Stereo", "Mono" }; static SOC_ENUM_SINGLE_DECL(spdif_mono_stereo_enum, CS4265_SPDIF_CTL2, 2, cam_mono_stereo_text); static const char * const mono_select_text[] = { "Channel A", "Channel B" }; static SOC_ENUM_SINGLE_DECL(spdif_mono_select_enum, CS4265_SPDIF_CTL2, 0, mono_select_text); static const struct snd_kcontrol_new mic_linein_mux = SOC_DAPM_ENUM("ADC Input Capture Mux", mic_linein_enum); static const struct snd_kcontrol_new loopback_ctl = SOC_DAPM_SINGLE("Switch", CS4265_SIG_SEL, 1, 1, 0); static const struct snd_kcontrol_new spdif_switch = SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 0, 0); static const struct snd_kcontrol_new dac_switch = SOC_DAPM_SINGLE("Switch", CS4265_PWRCTL, 1, 1, 0); static const struct snd_kcontrol_new cs4265_snd_controls[] = { SOC_DOUBLE_R_SX_TLV("PGA Volume", CS4265_CHA_PGA_CTL, CS4265_CHB_PGA_CTL, 0, 0x28, 0x30, pga_tlv), SOC_DOUBLE_R_TLV("DAC Volume", CS4265_DAC_CHA_VOL, CS4265_DAC_CHB_VOL, 0, 0xFF, 1, dac_tlv), SOC_SINGLE("De-emp 44.1kHz Switch", CS4265_DAC_CTL, 1, 1, 0), 
SOC_SINGLE("DAC INV Switch", CS4265_DAC_CTL2, 5, 1, 0), SOC_SINGLE("DAC Zero Cross Switch", CS4265_DAC_CTL2, 6, 1, 0), SOC_SINGLE("DAC Soft Ramp Switch", CS4265_DAC_CTL2, 7, 1, 0), SOC_SINGLE("ADC HPF Switch", CS4265_ADC_CTL, 1, 1, 0), SOC_SINGLE("ADC Zero Cross Switch", CS4265_ADC_CTL2, 3, 1, 1), SOC_SINGLE("ADC Soft Ramp Switch", CS4265_ADC_CTL2, 7, 1, 0), SOC_SINGLE("E to F Buffer Disable Switch", CS4265_SPDIF_CTL1, 6, 1, 0), SOC_ENUM("C Data Access", cam_mode_enum), SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2, 3, 1, 0), SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum), SOC_SINGLE("MMTLR Data Switch", 0, 1, 1, 0), SOC_ENUM("Mono Channel Select", spdif_mono_select_enum), SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24), }; static const struct snd_soc_dapm_widget cs4265_dapm_widgets[] = { SND_SOC_DAPM_INPUT("LINEINL"), SND_SOC_DAPM_INPUT("LINEINR"), SND_SOC_DAPM_INPUT("MICL"), SND_SOC_DAPM_INPUT("MICR"), SND_SOC_DAPM_AIF_OUT("DOUT", NULL, 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SPDIFOUT", NULL, 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_MUX("ADC Mux", SND_SOC_NOPM, 0, 0, &mic_linein_mux), SND_SOC_DAPM_ADC("ADC", NULL, CS4265_PWRCTL, 2, 1), SND_SOC_DAPM_PGA("Pre-amp MIC", CS4265_PWRCTL, 3, 1, NULL, 0), SND_SOC_DAPM_MUX("Input Mux", SND_SOC_NOPM, 0, 0, &digital_input_mux), SND_SOC_DAPM_MIXER("SDIN1 Input Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("SDIN2 Input Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("SPDIF Transmitter", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_SWITCH("Loopback", SND_SOC_NOPM, 0, 0, &loopback_ctl), SND_SOC_DAPM_SWITCH("SPDIF", SND_SOC_NOPM, 0, 0, &spdif_switch), SND_SOC_DAPM_SWITCH("DAC", CS4265_PWRCTL, 1, 1, &dac_switch), SND_SOC_DAPM_AIF_IN("DIN1", NULL, 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("DIN2", NULL, 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TXIN", NULL, 0, CS4265_SPDIF_CTL2, 5, 1), SND_SOC_DAPM_OUTPUT("LINEOUTL"), SND_SOC_DAPM_OUTPUT("LINEOUTR"), }; static const struct snd_soc_dapm_route cs4265_audio_map[] = { {"DIN1", NULL, "DAI1 Playback"}, {"DIN2", NULL, "DAI2 Playback"}, {"SDIN1 Input Mixer", NULL, "DIN1"}, {"SDIN2 Input Mixer", NULL, "DIN2"}, {"Input Mux", "SDIN1", "SDIN1 Input Mixer"}, {"Input Mux", "SDIN2", "SDIN2 Input Mixer"}, {"DAC", "Switch", "Input Mux"}, {"SPDIF", "Switch", "Input Mux"}, {"LINEOUTL", NULL, "DAC"}, {"LINEOUTR", NULL, "DAC"}, {"SPDIFOUT", NULL, "SPDIF"}, {"ADC Mux", "LINEIN", "LINEINL"}, {"ADC Mux", "LINEIN", "LINEINR"}, {"ADC Mux", "MIC", "MICL"}, {"ADC Mux", "MIC", "MICR"}, {"ADC", NULL, "ADC Mux"}, {"DOUT", NULL, "ADC"}, {"DAI1 Capture", NULL, "DOUT"}, {"DAI2 Capture", NULL, "DOUT"}, /* Loopback */ {"Loopback", "Switch", "ADC"}, {"DAC", NULL, "Loopback"}, }; struct cs4265_clk_para { u32 mclk; u32 rate; u8 fm_mode; /* values 1, 2, or 4 */ u8 mclkdiv; }; static const struct cs4265_clk_para clk_map_table[] = { /*32k*/ {8192000, 32000, 0, 0}, {12288000, 32000, 0, 1}, {16384000, 32000, 0, 2}, {24576000, 32000, 0, 3}, {32768000, 32000, 0, 4}, /*44.1k*/ {11289600, 44100, 0, 0}, {16934400, 44100, 0, 1}, {22579200, 44100, 0, 2}, {33868000, 44100, 0, 3}, {45158400, 44100, 0, 4}, /*48k*/ {12288000, 48000, 0, 0}, {18432000, 48000, 0, 1}, {24576000, 48000, 0, 2}, {36864000, 48000, 0, 3}, {49152000, 48000, 0, 4}, /*64k*/ {8192000, 64000, 1, 0}, {12288000, 64000, 1, 1}, {16934400, 64000, 1, 2}, {24576000, 64000, 1, 3}, {32768000, 64000, 1, 4}, /* 88.2k */ {11289600, 88200, 1, 0}, {16934400, 88200, 1, 1}, {22579200, 88200, 1, 2}, {33868000, 88200, 1, 3}, {45158400, 88200, 1, 4}, /* 96k 
*/ {12288000, 96000, 1, 0}, {18432000, 96000, 1, 1}, {24576000, 96000, 1, 2}, {36864000, 96000, 1, 3}, {49152000, 96000, 1, 4}, /* 128k */ {8192000, 128000, 2, 0}, {12288000, 128000, 2, 1}, {16934400, 128000, 2, 2}, {24576000, 128000, 2, 3}, {32768000, 128000, 2, 4}, /* 176.4k */ {11289600, 176400, 2, 0}, {16934400, 176400, 2, 1}, {22579200, 176400, 2, 2}, {33868000, 176400, 2, 3}, {49152000, 176400, 2, 4}, /* 192k */ {12288000, 192000, 2, 0}, {18432000, 192000, 2, 1}, {24576000, 192000, 2, 2}, {36864000, 192000, 2, 3}, {49152000, 192000, 2, 4}, }; static int cs4265_get_clk_index(int mclk, int rate) { int i; for (i = 0; i < ARRAY_SIZE(clk_map_table); i++) { if (clk_map_table[i].rate == rate && clk_map_table[i].mclk == mclk) return i; } return -EINVAL; } static int cs4265_set_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct cs4265_private *cs4265 = snd_soc_codec_get_drvdata(codec); int i; if (clk_id != 0) { dev_err(codec->dev, "Invalid clk_id %d\n", clk_id); return -EINVAL; } for (i = 0; i < ARRAY_SIZE(clk_map_table); i++) { if (clk_map_table[i].mclk == freq) { cs4265->sysclk = freq; return 0; } } cs4265->sysclk = 0; dev_err(codec->dev, "Invalid freq parameter %d\n", freq); return -EINVAL; } static int cs4265_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; struct cs4265_private *cs4265 = snd_soc_codec_get_drvdata(codec); u8 iface = 0; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: snd_soc_update_bits(codec, CS4265_ADC_CTL, CS4265_ADC_MASTER, CS4265_ADC_MASTER); break; case SND_SOC_DAIFMT_CBS_CFS: snd_soc_update_bits(codec, CS4265_ADC_CTL, CS4265_ADC_MASTER, 0); break; default: return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: iface |= SND_SOC_DAIFMT_I2S; break; case SND_SOC_DAIFMT_RIGHT_J: iface |= SND_SOC_DAIFMT_RIGHT_J; break; case SND_SOC_DAIFMT_LEFT_J: iface |= SND_SOC_DAIFMT_LEFT_J; break; default: return -EINVAL; } cs4265->format = iface; return 0; } static int cs4265_digital_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; if (mute) { snd_soc_update_bits(codec, CS4265_DAC_CTL, CS4265_DAC_CTL_MUTE, CS4265_DAC_CTL_MUTE); snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, CS4265_SPDIF_CTL2_MUTE, CS4265_SPDIF_CTL2_MUTE); } else { snd_soc_update_bits(codec, CS4265_DAC_CTL, CS4265_DAC_CTL_MUTE, 0); snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, CS4265_SPDIF_CTL2_MUTE, 0); } return 0; } static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct cs4265_private *cs4265 = snd_soc_codec_get_drvdata(codec); int index; if (substream->stream == SNDRV_PCM_STREAM_CAPTURE && ((cs4265->format & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_RIGHT_J)) return -EINVAL; index = cs4265_get_clk_index(cs4265->sysclk, params_rate(params)); if (index >= 0) { snd_soc_update_bits(codec, CS4265_ADC_CTL, CS4265_ADC_FM, clk_map_table[index].fm_mode << 6); snd_soc_update_bits(codec, CS4265_MCLK_FREQ, CS4265_MCLK_FREQ_MASK, clk_map_table[index].mclkdiv << 4); } else { dev_err(codec->dev, "can't get correct mclk\n"); return -EINVAL; } switch (cs4265->format & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: snd_soc_update_bits(codec, CS4265_DAC_CTL, CS4265_DAC_CTL_DIF, (1 << 4)); snd_soc_update_bits(codec, CS4265_ADC_CTL, CS4265_ADC_DIF, (1 
<< 4)); snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, CS4265_SPDIF_CTL2_DIF, (1 << 6)); break; case SND_SOC_DAIFMT_RIGHT_J: if (params_width(params) == 16) { snd_soc_update_bits(codec, CS4265_DAC_CTL, CS4265_DAC_CTL_DIF, (2 << 4)); snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, CS4265_SPDIF_CTL2_DIF, (2 << 6)); } else { snd_soc_update_bits(codec, CS4265_DAC_CTL, CS4265_DAC_CTL_DIF, (3 << 4)); snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, CS4265_SPDIF_CTL2_DIF, (3 << 6)); } break; case SND_SOC_DAIFMT_LEFT_J: snd_soc_update_bits(codec, CS4265_DAC_CTL, CS4265_DAC_CTL_DIF, 0); snd_soc_update_bits(codec, CS4265_ADC_CTL, CS4265_ADC_DIF, 0); snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, CS4265_SPDIF_CTL2_DIF, 0); break; default: return -EINVAL; } return 0; } static int cs4265_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: snd_soc_update_bits(codec, CS4265_PWRCTL, CS4265_PWRCTL_PDN, 0); break; case SND_SOC_BIAS_STANDBY: snd_soc_update_bits(codec, CS4265_PWRCTL, CS4265_PWRCTL_PDN, CS4265_PWRCTL_PDN); break; case SND_SOC_BIAS_OFF: snd_soc_update_bits(codec, CS4265_PWRCTL, CS4265_PWRCTL_PDN, CS4265_PWRCTL_PDN); break; } return 0; } #define CS4265_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000) #define CS4265_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE | \ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE) static const struct snd_soc_dai_ops cs4265_ops = { .hw_params = cs4265_pcm_hw_params, .digital_mute = cs4265_digital_mute, .set_fmt = cs4265_set_fmt, .set_sysclk = cs4265_set_sysclk, }; static struct snd_soc_dai_driver cs4265_dai[] = { { .name = "cs4265-dai1", .playback = { .stream_name = "DAI1 Playback", .channels_min = 1, .channels_max = 2, .rates = CS4265_RATES, .formats = CS4265_FORMATS, }, .capture = { .stream_name = "DAI1 Capture", .channels_min = 1, .channels_max = 2, .rates = CS4265_RATES, .formats = CS4265_FORMATS, }, .ops = &cs4265_ops, }, { .name = "cs4265-dai2", .playback = { .stream_name = "DAI2 Playback", .channels_min = 1, .channels_max = 2, .rates = CS4265_RATES, .formats = CS4265_FORMATS, }, .capture = { .stream_name = "DAI2 Capture", .channels_min = 1, .channels_max = 2, .rates = CS4265_RATES, .formats = CS4265_FORMATS, }, .ops = &cs4265_ops, }, }; static const struct snd_soc_codec_driver soc_codec_cs4265 = { .set_bias_level = cs4265_set_bias_level, .component_driver = { .controls = cs4265_snd_controls, .num_controls = ARRAY_SIZE(cs4265_snd_controls), .dapm_widgets = cs4265_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(cs4265_dapm_widgets), .dapm_routes = cs4265_audio_map, .num_dapm_routes = ARRAY_SIZE(cs4265_audio_map), }, }; static const struct regmap_config cs4265_regmap = { .reg_bits = 8, .val_bits = 8, .max_register = CS4265_MAX_REGISTER, .reg_defaults = cs4265_reg_defaults, .num_reg_defaults = ARRAY_SIZE(cs4265_reg_defaults), .readable_reg = cs4265_readable_register, .volatile_reg = cs4265_volatile_register, .cache_type = REGCACHE_RBTREE, }; static int cs4265_i2c_probe(struct i2c_client *i2c_client, const struct i2c_device_id *id) { struct cs4265_private *cs4265; int ret = 0; unsigned int devid = 0; unsigned int reg; cs4265 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs4265_private), GFP_KERNEL); if (cs4265 == NULL) return -ENOMEM; cs4265->regmap = devm_regmap_init_i2c(i2c_client, &cs4265_regmap); if 
(IS_ERR(cs4265->regmap)) { ret = PTR_ERR(cs4265->regmap); dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret); return ret; } cs4265->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(cs4265->reset_gpio)) return PTR_ERR(cs4265->reset_gpio); if (cs4265->reset_gpio) { mdelay(1); gpiod_set_value_cansleep(cs4265->reset_gpio, 1); } i2c_set_clientdata(i2c_client, cs4265); ret = regmap_read(cs4265->regmap, CS4265_CHIP_ID, &reg); if (ret) { dev_err(&i2c_client->dev, "Failed to read chip ID: %d\n", ret); return ret; } devid = reg & CS4265_CHIP_ID_MASK; if (devid != CS4265_CHIP_ID_VAL) { ret = -ENODEV; dev_err(&i2c_client->dev, "CS4265 Device ID (%X). Expected %X\n", devid, CS4265_CHIP_ID_VAL); return ret; } dev_info(&i2c_client->dev, "CS4265 Version %x\n", reg & CS4265_REV_ID_MASK); regmap_write(cs4265->regmap, CS4265_PWRCTL, 0x0F); ret = snd_soc_register_codec(&i2c_client->dev, &soc_codec_cs4265, cs4265_dai, ARRAY_SIZE(cs4265_dai)); return ret; } static int cs4265_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct of_device_id cs4265_of_match[] = { { .compatible = "cirrus,cs4265", }, { } }; MODULE_DEVICE_TABLE(of, cs4265_of_match); static const struct i2c_device_id cs4265_id[] = { { "cs4265", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, cs4265_id); static struct i2c_driver cs4265_i2c_driver = { .driver = { .name = "cs4265", .of_match_table = cs4265_of_match, }, .id_table = cs4265_id, .probe = cs4265_i2c_probe, .remove = cs4265_i2c_remove, }; module_i2c_driver(cs4265_i2c_driver); MODULE_DESCRIPTION("ASoC CS4265 driver"); MODULE_AUTHOR("Paul Handrigan, Cirrus Logic Inc, <paul.handrigan@cirrus.com>"); MODULE_LICENSE("GPL");
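/*
 * A minimal machine-driver sketch (illustrative; the function name and
 * the 12.288 MHz MCLK are assumptions, not part of this driver): the
 * calls a sound card would make so that cs4265_set_sysclk() and
 * cs4265_set_fmt() above receive a configuration that clk_map_table
 * supports.
 */
#include <sound/pcm_params.h>
#include <sound/soc.h>

static int example_cs4265_hw_params(struct snd_pcm_substream *substream,
				    struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	int ret;

	/*
	 * 12.288 MHz appears in clk_map_table for the 32/48/64/96/128/192
	 * kHz rate families; cs4265_set_sysclk() only accepts clk_id 0.
	 */
	ret = snd_soc_dai_set_sysclk(codec_dai, 0, 12288000,
				     SND_SOC_CLOCK_IN);
	if (ret < 0)
		return ret;

	/* I2S data, normal clocks, codec as bit/frame clock slave. */
	return snd_soc_dai_set_fmt(codec_dai,
				   SND_SOC_DAIFMT_I2S |
				   SND_SOC_DAIFMT_NB_NF |
				   SND_SOC_DAIFMT_CBS_CFS);
}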
gpl-2.0
sbu-fsl/fuse-kernel-instrumentation
tools/lguest/lguest.c
524
97190
/*P:100 * This is the Launcher code, a simple program which lays out the "physical" * memory for the new Guest by mapping the kernel image and the virtual * devices, then opens /dev/lguest to tell the kernel about the Guest and * control it. :*/ #define _LARGEFILE64_SOURCE #define _GNU_SOURCE #include <stdio.h> #include <string.h> #include <unistd.h> #include <err.h> #include <stdint.h> #include <stdlib.h> #include <elf.h> #include <sys/mman.h> #include <sys/param.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/wait.h> #include <sys/eventfd.h> #include <fcntl.h> #include <stdbool.h> #include <errno.h> #include <ctype.h> #include <sys/socket.h> #include <sys/ioctl.h> #include <sys/time.h> #include <time.h> #include <netinet/in.h> #include <net/if.h> #include <linux/sockios.h> #include <linux/if_tun.h> #include <sys/uio.h> #include <termios.h> #include <getopt.h> #include <assert.h> #include <sched.h> #include <limits.h> #include <stddef.h> #include <signal.h> #include <pwd.h> #include <grp.h> #include <sys/user.h> #include <linux/pci_regs.h> #ifndef VIRTIO_F_ANY_LAYOUT #define VIRTIO_F_ANY_LAYOUT 27 #endif /*L:110 * We can ignore the 43 include files we need for this program, but I do want * to draw attention to the use of kernel-style types. * * As Linus said, "C is a Spartan language, and so should your naming be." I * like these abbreviations, so we define them here. Note that u64 is always * unsigned long long, which works on all Linux systems: this means that we can * use %llu in printf for any u64. */ typedef unsigned long long u64; typedef uint32_t u32; typedef uint16_t u16; typedef uint8_t u8; /*:*/ #define VIRTIO_CONFIG_NO_LEGACY #define VIRTIO_PCI_NO_LEGACY #define VIRTIO_BLK_NO_LEGACY #define VIRTIO_NET_NO_LEGACY /* Use in-kernel ones, which defines VIRTIO_F_VERSION_1 */ #include "../../include/uapi/linux/virtio_config.h" #include "../../include/uapi/linux/virtio_net.h" #include "../../include/uapi/linux/virtio_blk.h" #include "../../include/uapi/linux/virtio_console.h" #include "../../include/uapi/linux/virtio_rng.h" #include <linux/virtio_ring.h> #include "../../include/uapi/linux/virtio_pci.h" #include <asm/bootparam.h> #include "../../include/linux/lguest_launcher.h" #define BRIDGE_PFX "bridge:" #ifndef SIOCBRADDIF #define SIOCBRADDIF 0x89a2 /* add interface to bridge */ #endif /* We can have up to 256 pages for devices. */ #define DEVICE_PAGES 256 /* This will occupy 3 pages: it must be a power of 2. */ #define VIRTQUEUE_NUM 256 /*L:120 * verbose is both a global flag and a macro. The C preprocessor allows * this, and although I wouldn't recommend it, it works quite nicely here. */ static bool verbose; #define verbose(args...) \ do { if (verbose) printf(args); } while(0) /*:*/ /* The pointer to the start of guest memory. */ static void *guest_base; /* The maximum guest physical address allowed, and maximum possible. */ static unsigned long guest_limit, guest_max, guest_mmio; /* The /dev/lguest file descriptor. */ static int lguest_fd; /* a per-cpu variable indicating whose vcpu is currently running */ static unsigned int __thread cpu_id; /* 5 bit device number in the PCI_CONFIG_ADDR => 32 only */ #define MAX_PCI_DEVICES 32 /* This is our list of devices. */ struct device_list { /* Counter to assign interrupt numbers. */ unsigned int next_irq; /* Counter to print out convenient device numbers. */ unsigned int device_num; /* PCI devices. */ struct device *pci[MAX_PCI_DEVICES]; }; /* The list of Guest devices, based on command line arguments. 
*/ static struct device_list devices; struct virtio_pci_cfg_cap { struct virtio_pci_cap cap; u32 pci_cfg_data; /* Data for BAR access. */ }; struct virtio_pci_mmio { struct virtio_pci_common_cfg cfg; u16 notify; u8 isr; u8 padding; /* Device-specific configuration follows this. */ }; /* This is the layout (little-endian) of the PCI config space. */ struct pci_config { u16 vendor_id, device_id; u16 command, status; u8 revid, prog_if, subclass, class; u8 cacheline_size, lat_timer, header_type, bist; u32 bar[6]; u32 cardbus_cis_ptr; u16 subsystem_vendor_id, subsystem_device_id; u32 expansion_rom_addr; u8 capabilities, reserved1[3]; u32 reserved2; u8 irq_line, irq_pin, min_grant, max_latency; /* Now, this is the linked capability list. */ struct virtio_pci_cap common; struct virtio_pci_notify_cap notify; struct virtio_pci_cap isr; struct virtio_pci_cap device; struct virtio_pci_cfg_cap cfg_access; }; /* The device structure describes a single device. */ struct device { /* The name of this device, for --verbose. */ const char *name; /* Any queues attached to this device */ struct virtqueue *vq; /* Is it operational */ bool running; /* Has it written FEATURES_OK but not re-checked it? */ bool wrote_features_ok; /* PCI configuration */ union { struct pci_config config; u32 config_words[sizeof(struct pci_config) / sizeof(u32)]; }; /* Features we offer, and those accepted. */ u64 features, features_accepted; /* Device-specific config hangs off the end of this. */ struct virtio_pci_mmio *mmio; /* PCI MMIO resources (all in BAR0) */ size_t mmio_size; u32 mmio_addr; /* Device-specific data. */ void *priv; }; /* The virtqueue structure describes a queue attached to a device. */ struct virtqueue { struct virtqueue *next; /* Which device owns me. */ struct device *dev; /* Name for printing errors. */ const char *name; /* The actual ring of buffers. */ struct vring vring; /* The information about this virtqueue (we only use queue_size on) */ struct virtio_pci_common_cfg pci_config; /* Last available index we saw. */ u16 last_avail_idx; /* How many are used since we sent last irq? */ unsigned int pending_used; /* Eventfd where Guest notifications arrive. */ int eventfd; /* Function for the thread which is servicing this virtqueue. */ void (*service)(struct virtqueue *vq); pid_t thread; }; /* Remember the arguments to the program so we can "reboot" */ static char **main_args; /* The original tty settings to restore on exit. */ static struct termios orig_term; /* * We have to be careful with barriers: our devices are all run in separate * threads and so we need to make sure that changes visible to the Guest happen * in precise order. */ #define wmb() __asm__ __volatile__("" : : : "memory") #define rmb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory") #define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory") /* Wrapper for the last available index. Makes it easier to change. */ #define lg_last_avail(vq) ((vq)->last_avail_idx) /* * The virtio configuration space is defined to be little-endian. x86 is * little-endian too, but it's nice to be explicit so we have these helpers. */ #define cpu_to_le16(v16) (v16) #define cpu_to_le32(v32) (v32) #define cpu_to_le64(v64) (v64) #define le16_to_cpu(v16) (v16) #define le32_to_cpu(v32) (v32) #define le64_to_cpu(v64) (v64) /* * A real device would ignore weird/non-compliant driver behaviour. We * stop and flag it, to help debugging Linux problems. */ #define bad_driver(d, fmt, ...) 
\ errx(1, "%s: bad driver: " fmt, (d)->name, ## __VA_ARGS__) #define bad_driver_vq(vq, fmt, ...) \ errx(1, "%s vq %s: bad driver: " fmt, (vq)->dev->name, \ vq->name, ## __VA_ARGS__) /* Is this iovec empty? */ static bool iov_empty(const struct iovec iov[], unsigned int num_iov) { unsigned int i; for (i = 0; i < num_iov; i++) if (iov[i].iov_len) return false; return true; } /* Take len bytes from the front of this iovec. */ static void iov_consume(struct device *d, struct iovec iov[], unsigned num_iov, void *dest, unsigned len) { unsigned int i; for (i = 0; i < num_iov; i++) { unsigned int used; used = iov[i].iov_len < len ? iov[i].iov_len : len; if (dest) { memcpy(dest, iov[i].iov_base, used); dest += used; } iov[i].iov_base += used; iov[i].iov_len -= used; len -= used; } if (len != 0) bad_driver(d, "iovec too short!"); } /*L:100 * The Launcher code itself takes us out into userspace, that scary place where * pointers run wild and free! Unfortunately, like most userspace programs, * it's quite boring (which is why everyone likes to hack on the kernel!). * Perhaps if you make up an Lguest Drinking Game at this point, it will get * you through this section. Or, maybe not. * * The Launcher sets up a big chunk of memory to be the Guest's "physical" * memory and stores it in "guest_base". In other words, Guest physical == * Launcher virtual with an offset. * * This can be tough to get your head around, but usually it just means that we * use these trivial conversion functions when the Guest gives us its * "physical" addresses: */ static void *from_guest_phys(unsigned long addr) { return guest_base + addr; } static unsigned long to_guest_phys(const void *addr) { return (addr - guest_base); } /*L:130 * Loading the Kernel. * * We start with couple of simple helper routines. open_or_die() avoids * error-checking code cluttering the callers: */ static int open_or_die(const char *name, int flags) { int fd = open(name, flags); if (fd < 0) err(1, "Failed to open %s", name); return fd; } /* map_zeroed_pages() takes a number of pages. */ static void *map_zeroed_pages(unsigned int num) { int fd = open_or_die("/dev/zero", O_RDONLY); void *addr; /* * We use a private mapping (ie. if we write to the page, it will be * copied). We allocate an extra two pages PROT_NONE to act as guard * pages against read/write attempts that exceed allocated space. */ addr = mmap(NULL, getpagesize() * (num+2), PROT_NONE, MAP_PRIVATE, fd, 0); if (addr == MAP_FAILED) err(1, "Mmapping %u pages of /dev/zero", num); if (mprotect(addr + getpagesize(), getpagesize() * num, PROT_READ|PROT_WRITE) == -1) err(1, "mprotect rw %u pages failed", num); /* * One neat mmap feature is that you can close the fd, and it * stays mapped. */ close(fd); /* Return address after PROT_NONE page */ return addr + getpagesize(); } /* Get some bytes which won't be mapped into the guest. */ static unsigned long get_mmio_region(size_t size) { unsigned long addr = guest_mmio; size_t i; if (!size) return addr; /* Size has to be a power of 2 (and multiple of 16) */ for (i = 1; i < size; i <<= 1); guest_mmio += i; return addr; } /* * This routine is used to load the kernel or initrd. It tries mmap, but if * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries), * it falls back to reading the memory in. */ static void map_at(int fd, void *addr, unsigned long offset, unsigned long len) { ssize_t r; /* * We map writable even though for some segments are marked read-only. * The kernel really wants to be writable: it patches its own * instructions. 
* * MAP_PRIVATE means that the page won't be copied until a write is * done to it. This allows us to share untouched memory between * Guests. */ if (mmap(addr, len, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED) return; /* pread does a seek and a read in one shot: saves a few lines. */ r = pread(fd, addr, len, offset); if (r != len) err(1, "Reading offset %lu len %lu gave %zi", offset, len, r); } /* * This routine takes an open vmlinux image, which is in ELF, and maps it into * the Guest memory. ELF = Embedded Linking Format, which is the format used * by all modern binaries on Linux including the kernel. * * The ELF headers give *two* addresses: a physical address, and a virtual * address. We use the physical address; the Guest will map itself to the * virtual address. * * We return the starting address. */ static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr) { Elf32_Phdr phdr[ehdr->e_phnum]; unsigned int i; /* * Sanity checks on the main ELF header: an x86 executable with a * reasonable number of correctly-sized program headers. */ if (ehdr->e_type != ET_EXEC || ehdr->e_machine != EM_386 || ehdr->e_phentsize != sizeof(Elf32_Phdr) || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr)) errx(1, "Malformed elf header"); /* * An ELF executable contains an ELF header and a number of "program" * headers which indicate which parts ("segments") of the program to * load where. */ /* We read in all the program headers at once: */ if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0) err(1, "Seeking to program headers"); if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr)) err(1, "Reading program headers"); /* * Try all the headers: there are usually only three. A read-only one, * a read-write one, and a "note" section which we don't load. */ for (i = 0; i < ehdr->e_phnum; i++) { /* If this isn't a loadable segment, we ignore it */ if (phdr[i].p_type != PT_LOAD) continue; verbose("Section %i: size %i addr %p\n", i, phdr[i].p_memsz, (void *)phdr[i].p_paddr); /* We map this section of the file at its physical address. */ map_at(elf_fd, from_guest_phys(phdr[i].p_paddr), phdr[i].p_offset, phdr[i].p_filesz); } /* The entry point is given in the ELF header. */ return ehdr->e_entry; } /*L:150 * A bzImage, unlike an ELF file, is not meant to be loaded. You're supposed * to jump into it and it will unpack itself. We used to have to perform some * hairy magic because the unpacking code scared me. * * Fortunately, Jeremy Fitzhardinge convinced me it wasn't that hard and wrote * a small patch to jump over the tricky bits in the Guest, so now we just read * the funky header so we know where in the file to load, and away we go! */ static unsigned long load_bzimage(int fd) { struct boot_params boot; int r; /* Modern bzImages get loaded at 1M. */ void *p = from_guest_phys(0x100000); /* * Go back to the start of the file and read the header. It should be * a Linux boot header (see Documentation/x86/boot.txt) */ lseek(fd, 0, SEEK_SET); read(fd, &boot, sizeof(boot)); /* Inside the setup_hdr, we expect the magic "HdrS" */ if (memcmp(&boot.hdr.header, "HdrS", 4) != 0) errx(1, "This doesn't look like a bzImage to me"); /* Skip over the extra sectors of the header. */ lseek(fd, (boot.hdr.setup_sects+1) * 512, SEEK_SET); /* Now read everything into memory. in nice big chunks. */ while ((r = read(fd, p, 65536)) > 0) p += r; /* Finally, code32_start tells us where to enter the kernel. 
*/ return boot.hdr.code32_start; } /*L:140 * Loading the kernel is easy when it's a "vmlinux", but most kernels * come wrapped up in the self-decompressing "bzImage" format. With a little * work, we can load those, too. */ static unsigned long load_kernel(int fd) { Elf32_Ehdr hdr; /* Read in the first few bytes. */ if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr)) err(1, "Reading kernel"); /* If it's an ELF file, it starts with "\177ELF" */ if (memcmp(hdr.e_ident, ELFMAG, SELFMAG) == 0) return map_elf(fd, &hdr); /* Otherwise we assume it's a bzImage, and try to load it. */ return load_bzimage(fd); } /* * This is a trivial little helper to align pages. Andi Kleen hated it because * it calls getpagesize() twice: "it's dumb code." * * Kernel guys get really het up about optimization, even when it's not * necessary. I leave this code as a reaction against that. */ static inline unsigned long page_align(unsigned long addr) { /* Add upwards and truncate downwards. */ return ((addr + getpagesize()-1) & ~(getpagesize()-1)); } /*L:180 * An "initial ram disk" is a disk image loaded into memory along with the * kernel which the kernel can use to boot from without needing any drivers. * Most distributions now use this as standard: the initrd contains the code to * load the appropriate driver modules for the current machine. * * Importantly, James Morris works for RedHat, and Fedora uses initrds for its * kernels. He sent me this (and tells me when I break it). */ static unsigned long load_initrd(const char *name, unsigned long mem) { int ifd; struct stat st; unsigned long len; ifd = open_or_die(name, O_RDONLY); /* fstat() is needed to get the file size. */ if (fstat(ifd, &st) < 0) err(1, "fstat() on initrd '%s'", name); /* * We map the initrd at the top of memory, but mmap wants it to be * page-aligned, so we round the size up for that. */ len = page_align(st.st_size); map_at(ifd, from_guest_phys(mem - len), 0, st.st_size); /* * Once a file is mapped, you can close the file descriptor. It's a * little odd, but quite useful. */ close(ifd); verbose("mapped initrd %s size=%lu @ %p\n", name, len, (void*)mem-len); /* We return the initrd size. */ return len; } /*:*/ /* * Simple routine to roll all the commandline arguments together with spaces * between them. */ static void concat(char *dst, char *args[]) { unsigned int i, len = 0; for (i = 0; args[i]; i++) { if (i) { strcat(dst+len, " "); len++; } strcpy(dst+len, args[i]); len += strlen(args[i]); } /* In case it's empty. */ dst[len] = '\0'; } /*L:185 * This is where we actually tell the kernel to initialize the Guest. We * saw the arguments it expects when we looked at initialize() in lguest_user.c: * the base of Guest "physical" memory, the top physical page to allow and the * entry point for the Guest. */ static void tell_kernel(unsigned long start) { unsigned long args[] = { LHREQ_INITIALIZE, (unsigned long)guest_base, guest_limit / getpagesize(), start, (guest_mmio+getpagesize()-1) / getpagesize() }; verbose("Guest: %p - %p (%#lx, MMIO %#lx)\n", guest_base, guest_base + guest_limit, guest_limit, guest_mmio); lguest_fd = open_or_die("/dev/lguest", O_RDWR); if (write(lguest_fd, args, sizeof(args)) < 0) err(1, "Writing to /dev/lguest"); } /*:*/ /*L:200 * Device Handling. * * When the Guest gives us a buffer, it sends an array of addresses and sizes. 
 * We need to make sure it's not trying to reach into the Launcher itself, so
 * we have a convenient routine which checks it and exits with an error message
 * if something funny is going on:
 */
static void *_check_pointer(struct device *d,
			    unsigned long addr, unsigned int size,
			    unsigned int line)
{
	/*
	 * Check whether the requested address and size exceed the allocated
	 * memory, or whether addr + size wraps around.
	 */
	if ((addr + size) > guest_limit || (addr + size) < addr)
		bad_driver(d, "%s:%i: Invalid address %#lx",
			   __FILE__, line, addr);
	/*
	 * We return a pointer for the caller's convenience, now we know it's
	 * safe to use.
	 */
	return from_guest_phys(addr);
}
/* A macro which transparently hands the line number to the real function. */
#define check_pointer(d,addr,size) _check_pointer(d, addr, size, __LINE__)

/*
 * Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain, or vq->vring.num if we're
 * at the end.
 */
static unsigned next_desc(struct device *d, struct vring_desc *desc,
			  unsigned int i, unsigned int max)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc[i].flags & VRING_DESC_F_NEXT))
		return max;

	/* Check they're not leading us off the end of the descriptors. */
	next = desc[i].next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	wmb();

	if (next >= max)
		bad_driver(d, "Desc next is %u", next);

	return next;
}

/*
 * This actually sends the interrupt for this virtqueue, if we've used a
 * buffer.
 */
static void trigger_irq(struct virtqueue *vq)
{
	unsigned long buf[] = { LHREQ_IRQ, vq->dev->config.irq_line };

	/* Don't inform them if nothing used. */
	if (!vq->pending_used)
		return;
	vq->pending_used = 0;

	/*
	 * 2.4.7.1:
	 *
	 *  If the VIRTIO_F_EVENT_IDX feature bit is not negotiated:
	 *    The driver MUST set flags to 0 or 1.
	 */
	if (vq->vring.avail->flags > 1)
		bad_driver_vq(vq, "avail->flags = %u\n", vq->vring.avail->flags);

	/*
	 * 2.4.7.2:
	 *
	 *  If the VIRTIO_F_EVENT_IDX feature bit is not negotiated:
	 *
	 *     - The device MUST ignore the used_event value.
	 *     - After the device writes a descriptor index into the used ring:
	 *         - If flags is 1, the device SHOULD NOT send an interrupt.
	 *         - If flags is 0, the device MUST send an interrupt.
	 */
	if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
		return;
	}

	/*
	 * 4.1.4.5.1:
	 *
	 *  If MSI-X capability is disabled, the device MUST set the Queue
	 *  Interrupt bit in ISR status before sending a virtqueue notification
	 *  to the driver.
	 */
	vq->dev->mmio->isr = 0x1;

	/* Send the Guest an interrupt to tell them we used something up. */
	if (write(lguest_fd, buf, sizeof(buf)) != 0)
		err(1, "Triggering irq %i", vq->dev->config.irq_line);
}

/*
 * This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function waits if necessary, and returns the descriptor number found.
 */
static unsigned wait_for_vq_desc(struct virtqueue *vq,
				 struct iovec iov[],
				 unsigned int *out_num, unsigned int *in_num)
{
	unsigned int i, head, max;
	struct vring_desc *desc;
	u16 last_avail = lg_last_avail(vq);

	/*
	 * 2.4.7.1:
	 *
	 *   The driver MUST handle spurious interrupts from the device.
	 *
	 * That's why this is a while loop.
	 */

	/* There's nothing available?
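	 *
	 * (avail->idx is a free-running count of buffers the Guest has ever
	 * added, and avail->ring[idx % num] holds each new head.  So when our
	 * last_avail catches up with avail->idx, we've consumed everything
	 * the Guest has published so far.)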
	 */
	while (last_avail == vq->vring.avail->idx) {
		u64 event;

		/*
		 * Since we're about to sleep, now is a good time to tell the
		 * Guest about what we've used up to now.
		 */
		trigger_irq(vq);

		/* OK, now we need to know about added descriptors. */
		vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;

		/*
		 * They could have slipped one in as we were doing that: make
		 * sure it's written, then check again.
		 */
		mb();
		if (last_avail != vq->vring.avail->idx) {
			vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
			break;
		}

		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
			errx(1, "Event read failed?");

		/* We don't need to be notified again. */
		vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
	}

	/* Check it isn't doing very strange things with descriptor numbers. */
	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
		bad_driver_vq(vq, "Guest moved used index from %u to %u",
			      last_avail, vq->vring.avail->idx);

	/*
	 * Make sure we read the descriptor number *after* we read the ring
	 * update; don't let the cpu or compiler change the order.
	 */
	rmb();

	/*
	 * Grab the next descriptor number they're advertising, and increment
	 * the index we've seen.
	 */
	head = vq->vring.avail->ring[last_avail % vq->vring.num];
	lg_last_avail(vq)++;

	/* If their number is silly, that's a fatal mistake. */
	if (head >= vq->vring.num)
		bad_driver_vq(vq, "Guest says index %u is available", head);

	/* When we start there are no input or output descriptors. */
	*out_num = *in_num = 0;

	max = vq->vring.num;
	desc = vq->vring.desc;
	i = head;

	/*
	 * We have to read the descriptor after we read the descriptor number,
	 * but there's a data dependency there so the CPU shouldn't reorder
	 * that: no rmb() required.
	 */
	do {
		/*
		 * If this is an indirect entry, then this buffer contains a
		 * descriptor table which we handle as if it's any normal
		 * descriptor chain.
		 */
		if (desc[i].flags & VRING_DESC_F_INDIRECT) {
			/*
			 * 2.4.5.3.1:
			 *
			 *  The driver MUST NOT set the VIRTQ_DESC_F_INDIRECT
			 *  flag unless the VIRTIO_F_INDIRECT_DESC feature was
			 *  negotiated.
			 */
			if (!(vq->dev->features_accepted &
			      (1<<VIRTIO_RING_F_INDIRECT_DESC)))
				bad_driver_vq(vq, "vq indirect not negotiated");

			/*
			 * 2.4.5.3.1:
			 *
			 *   The driver MUST NOT set the VIRTQ_DESC_F_INDIRECT
			 *   flag within an indirect descriptor (ie. only one
			 *   table per descriptor).
			 */
			if (desc != vq->vring.desc)
				bad_driver_vq(vq, "Indirect within indirect");

			/*
			 * Proposed update VIRTIO-134 spells this out:
			 *
			 *   A driver MUST NOT set both VIRTQ_DESC_F_INDIRECT
			 *   and VIRTQ_DESC_F_NEXT in flags.
			 */
			if (desc[i].flags & VRING_DESC_F_NEXT)
				bad_driver_vq(vq, "indirect and next together");

			if (desc[i].len % sizeof(struct vring_desc))
				bad_driver_vq(vq,
					      "Invalid size for indirect table");
			/*
			 * 2.4.5.3.2:
			 *
			 *   The device MUST ignore the write-only flag
			 *   (flags&VIRTQ_DESC_F_WRITE) in the descriptor that
			 *   refers to an indirect table.
			 *
			 * We ignore it here: :)
			 */

			max = desc[i].len / sizeof(struct vring_desc);
			desc = check_pointer(vq->dev, desc[i].addr, desc[i].len);
			i = 0;

			/* 2.4.5.3.1:
			 *
			 *  A driver MUST NOT create a descriptor chain longer
			 *  than the Queue Size of the device.
			 */
			if (max > vq->pci_config.queue_size)
				bad_driver_vq(vq,
					      "indirect has too many entries");
		}

		/* Grab the first descriptor, and check it's OK. */
		iov[*out_num + *in_num].iov_len = desc[i].len;
		iov[*out_num + *in_num].iov_base
			= check_pointer(vq->dev, desc[i].addr, desc[i].len);
		/* If this is an input descriptor, increment that count.
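		 * (VRING_DESC_F_WRITE is from the device's point of view: we
		 * may write that buffer, so it's Guest input; without the
		 * flag we may only read it, so it's Guest output.)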
*/ if (desc[i].flags & VRING_DESC_F_WRITE) (*in_num)++; else { /* * If it's an output descriptor, they're all supposed * to come before any input descriptors. */ if (*in_num) bad_driver_vq(vq, "Descriptor has out after in"); (*out_num)++; } /* If we've got too many, that implies a descriptor loop. */ if (*out_num + *in_num > max) bad_driver_vq(vq, "Looped descriptor"); } while ((i = next_desc(vq->dev, desc, i, max)) != max); return head; } /* * After we've used one of their buffers, we tell the Guest about it. Sometime * later we'll want to send them an interrupt using trigger_irq(); note that * wait_for_vq_desc() does that for us if it has to wait. */ static void add_used(struct virtqueue *vq, unsigned int head, int len) { struct vring_used_elem *used; /* * The virtqueue contains a ring of used buffers. Get a pointer to the * next entry in that used ring. */ used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num]; used->id = head; used->len = len; /* Make sure buffer is written before we update index. */ wmb(); vq->vring.used->idx++; vq->pending_used++; } /* And here's the combo meal deal. Supersize me! */ static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len) { add_used(vq, head, len); trigger_irq(vq); } /* * The Console * * We associate some data with the console for our exit hack. */ struct console_abort { /* How many times have they hit ^C? */ int count; /* When did they start? */ struct timeval start; }; /* This is the routine which handles console input (ie. stdin). */ static void console_input(struct virtqueue *vq) { int len; unsigned int head, in_num, out_num; struct console_abort *abort = vq->dev->priv; struct iovec iov[vq->vring.num]; /* Make sure there's a descriptor available. */ head = wait_for_vq_desc(vq, iov, &out_num, &in_num); if (out_num) bad_driver_vq(vq, "Output buffers in console in queue?"); /* Read into it. This is where we usually wait. */ len = readv(STDIN_FILENO, iov, in_num); if (len <= 0) { /* Ran out of input? */ warnx("Failed to get console input, ignoring console."); /* * For simplicity, dying threads kill the whole Launcher. So * just nap here. */ for (;;) pause(); } /* Tell the Guest we used a buffer. */ add_used_and_trigger(vq, head, len); /* * Three ^C within one second? Exit. * * This is such a hack, but works surprisingly well. Each ^C has to * be in a buffer by itself, so they can't be too fast. But we check * that we get three within about a second, so they can't be too * slow. */ if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) { abort->count = 0; return; } abort->count++; if (abort->count == 1) gettimeofday(&abort->start, NULL); else if (abort->count == 3) { struct timeval now; gettimeofday(&now, NULL); /* Kill all Launcher processes with SIGINT, like normal ^C */ if (now.tv_sec <= abort->start.tv_sec+1) kill(0, SIGINT); abort->count = 0; } } /* This is the routine which handles console output (ie. stdout). */ static void console_output(struct virtqueue *vq) { unsigned int head, out, in; struct iovec iov[vq->vring.num]; /* We usually wait in here, for the Guest to give us something. */ head = wait_for_vq_desc(vq, iov, &out, &in); if (in) bad_driver_vq(vq, "Input buffers in console output queue?"); /* writev can return a partial write, so we loop here. 
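	 *
	 * (For example, if writev() reports 100 bytes written but the iov
	 * holds 300, iov_consume() advances the iov_base/iov_len pairs past
	 * those first 100 bytes, so the next writev() carries on where we
	 * left off.)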
	 */
	while (!iov_empty(iov, out)) {
		int len = writev(STDOUT_FILENO, iov, out);
		if (len <= 0) {
			warn("Write to stdout gave %i (%d)", len, errno);
			break;
		}
		iov_consume(vq->dev, iov, out, NULL, len);
	}

	/*
	 * We're finished with that buffer: if we're going to sleep,
	 * wait_for_vq_desc() will prod the Guest with an interrupt.
	 */
	add_used(vq, head, 0);
}

/*
 * The Network
 *
 * Handling output for network is also simple: we get all the output buffers
 * and write them to /dev/net/tun.
 */
struct net_info {
	int tunfd;
};

static void net_output(struct virtqueue *vq)
{
	struct net_info *net_info = vq->dev->priv;
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];

	/* We usually wait in here for the Guest to give us a packet. */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (in)
		bad_driver_vq(vq, "Input buffers in net output queue?");
	/*
	 * Send the whole thing through to /dev/net/tun.  It expects the exact
	 * same format: what a coincidence!
	 */
	if (writev(net_info->tunfd, iov, out) < 0)
		warnx("Write to tun failed (%d)?", errno);

	/*
	 * Done with that one; wait_for_vq_desc() will send the interrupt if
	 * all packets are processed.
	 */
	add_used(vq, head, 0);
}

/*
 * Handling network input is a bit trickier, because I've tried to optimize it.
 *
 * First we have a helper routine which tells us if reading from this file
 * descriptor (ie. the /dev/net/tun device) would block:
 */
static bool will_block(int fd)
{
	fd_set fdset;
	struct timeval zero = { 0, 0 };
	FD_ZERO(&fdset);
	FD_SET(fd, &fdset);
	return select(fd+1, &fdset, NULL, NULL, &zero) != 1;
}

/*
 * This handles packets coming in from the tun device to our Guest.  Like all
 * service routines, it gets called again as soon as it returns, so you don't
 * see a while(1) loop here.
 */
static void net_input(struct virtqueue *vq)
{
	int len;
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];
	struct net_info *net_info = vq->dev->priv;

	/*
	 * Get a descriptor to write an incoming packet into.  This will also
	 * send an interrupt if they're out of descriptors.
	 */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (out)
		bad_driver_vq(vq, "Output buffers in net input queue?");

	/*
	 * If it looks like we'll block reading from the tun device, send them
	 * an interrupt.
	 */
	if (vq->pending_used && will_block(net_info->tunfd))
		trigger_irq(vq);

	/*
	 * Read in the packet.  This is where we normally wait (when there's no
	 * incoming network traffic).
	 */
	len = readv(net_info->tunfd, iov, in);
	if (len <= 0)
		warn("Failed to read from tun (%d).", errno);

	/*
	 * Mark that packet buffer as used, but don't interrupt here.  We want
	 * to wait until we've done as much work as we can.
	 */
	add_used(vq, head, len);
}
/*:*/

/* This is the helper to create threads: run the service routine in a loop. */
static int do_thread(void *_vq)
{
	struct virtqueue *vq = _vq;

	for (;;)
		vq->service(vq);
	return 0;
}

/*
 * When a child dies, we kill our entire process group with SIGTERM.  This
 * also has the side effect that the shell restores the console for us!
 */
static void kill_launcher(int signal)
{
	kill(0, SIGTERM);
}

static void reset_vq_pci_config(struct virtqueue *vq)
{
	vq->pci_config.queue_size = VIRTQUEUE_NUM;
	vq->pci_config.queue_enable = 0;
}

static void reset_device(struct device *dev)
{
	struct virtqueue *vq;

	verbose("Resetting device %s\n", dev->name);

	/* Clear any features they've acked. */
	dev->features_accepted = 0;

	/* We're going to be explicitly killing threads, so ignore them. */
	signal(SIGCHLD, SIG_IGN);

	/*
	 * 4.1.4.3.1:
	 *
	 *   The device MUST present a 0 in queue_enable on reset.
	 *
	 * This means we set it here, and reset the saved ones in every vq.
	 */
	dev->mmio->cfg.queue_enable = 0;

	/* Get rid of the virtqueue threads */
	for (vq = dev->vq; vq; vq = vq->next) {
		vq->last_avail_idx = 0;
		reset_vq_pci_config(vq);
		if (vq->thread != (pid_t)-1) {
			kill(vq->thread, SIGTERM);
			waitpid(vq->thread, NULL, 0);
			vq->thread = (pid_t)-1;
		}
	}
	dev->running = false;
	dev->wrote_features_ok = false;

	/* Now we care if threads die. */
	signal(SIGCHLD, (void *)kill_launcher);
}

static void cleanup_devices(void)
{
	unsigned int i;

	for (i = 1; i < MAX_PCI_DEVICES; i++) {
		struct device *d = devices.pci[i];
		if (!d)
			continue;
		reset_device(d);
	}

	/* If we saved off the original terminal settings, restore them now. */
	if (orig_term.c_lflag & (ISIG|ICANON|ECHO))
		tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
}

/*L:217
 * We do PCI.  This is mainly done to let us test the kernel virtio PCI
 * code.
 */

/* Linux expects a PCI host bridge: ours is a dummy, and first on the bus. */
static struct device pci_host_bridge;

static void init_pci_host_bridge(void)
{
	pci_host_bridge.name = "PCI Host Bridge";
	pci_host_bridge.config.class = 0x06; /* bridge */
	pci_host_bridge.config.subclass = 0; /* host bridge */
	devices.pci[0] = &pci_host_bridge;
}

/* The IO ports used to read the PCI config space. */
#define PCI_CONFIG_ADDR 0xCF8
#define PCI_CONFIG_DATA 0xCFC

/*
 * Not really portable, but does help readability: this is what the Guest
 * writes to the PCI_CONFIG_ADDR IO port.
 */
union pci_config_addr {
	struct {
		unsigned mbz: 2;
		unsigned offset: 6;
		unsigned funcnum: 3;
		unsigned devnum: 5;
		unsigned busnum: 8;
		unsigned reserved: 7;
		unsigned enabled : 1;
	} bits;
	u32 val;
};

/*
 * We cache what they wrote to the address port, so we know what they're
 * talking about when they access the data port.
 */
static union pci_config_addr pci_config_addr;

static struct device *find_pci_device(unsigned int index)
{
	return devices.pci[index];
}

/* PCI can do 1, 2 and 4 byte reads; we handle that here. */
static void ioread(u16 off, u32 v, u32 mask, u32 *val)
{
	assert(off < 4);
	assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
	*val = (v >> (off * 8)) & mask;
}

/* PCI can do 1, 2 and 4 byte writes; we handle that here. */
static void iowrite(u16 off, u32 v, u32 mask, u32 *dst)
{
	assert(off < 4);
	assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
	*dst &= ~(mask << (off * 8));
	*dst |= (v & mask) << (off * 8);
}

/*
 * Where a PCI_CONFIG_DATA access lands depends on the previous write to
 * PCI_CONFIG_ADDR.
 */
static struct device *dev_and_reg(u32 *reg)
{
	if (!pci_config_addr.bits.enabled)
		return NULL;

	if (pci_config_addr.bits.funcnum != 0)
		return NULL;

	if (pci_config_addr.bits.busnum != 0)
		return NULL;

	if (pci_config_addr.bits.offset * 4 >= sizeof(struct pci_config))
		return NULL;

	*reg = pci_config_addr.bits.offset;
	return find_pci_device(pci_config_addr.bits.devnum);
}

/*
 * We can get invalid combinations of values while they're writing, so we
 * only fault if they try to write with some invalid bar/offset/length.
 */
static bool valid_bar_access(struct device *d,
			     struct virtio_pci_cfg_cap *cfg_access)
{
	/* We only have 1 bar (BAR0) */
	if (cfg_access->cap.bar != 0)
		return false;

	/* Check it's within BAR0. */
	if (cfg_access->cap.offset >= d->mmio_size
	    || cfg_access->cap.offset + cfg_access->cap.length > d->mmio_size)
		return false;

	/* Check length is 1, 2 or 4.
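	 *
	 * (e.g. a Guest wanting to peek at 2 bytes at offset 10 of BAR0 sets
	 * cap.bar = 0, cap.offset = 10, cap.length = 2; 10 is a multiple of
	 * 2, so the alignment check below passes too.)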
	 */
	if (cfg_access->cap.length != 1
	    && cfg_access->cap.length != 2
	    && cfg_access->cap.length != 4)
		return false;

	/*
	 * 4.1.4.7.2:
	 *
	 *  The driver MUST NOT write a cap.offset which is not a multiple of
	 *  cap.length (ie. all accesses MUST be aligned).
	 */
	if (cfg_access->cap.offset % cfg_access->cap.length != 0)
		return false;

	/* All checks passed: the access falls within a word of BAR0. */
	return true;
}

/* Is this accessing the PCI config address port? */
static bool is_pci_addr_port(u16 port)
{
	return port >= PCI_CONFIG_ADDR && port < PCI_CONFIG_ADDR + 4;
}

static bool pci_addr_iowrite(u16 port, u32 mask, u32 val)
{
	iowrite(port - PCI_CONFIG_ADDR, val, mask,
		&pci_config_addr.val);
	verbose("PCI%s: %#x/%x: bus %u dev %u func %u reg %u\n",
		pci_config_addr.bits.enabled ? "" : " DISABLED",
		val, mask,
		pci_config_addr.bits.busnum,
		pci_config_addr.bits.devnum,
		pci_config_addr.bits.funcnum,
		pci_config_addr.bits.offset);
	return true;
}

static void pci_addr_ioread(u16 port, u32 mask, u32 *val)
{
	ioread(port - PCI_CONFIG_ADDR, pci_config_addr.val, mask, val);
}

/* Is this accessing the PCI config data port? */
static bool is_pci_data_port(u16 port)
{
	return port >= PCI_CONFIG_DATA && port < PCI_CONFIG_DATA + 4;
}

static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask);

static bool pci_data_iowrite(u16 port, u32 mask, u32 val)
{
	u32 reg, portoff;
	struct device *d = dev_and_reg(&reg);

	/* Complain if they don't belong to a device. */
	if (!d)
		return false;

	/* They can do 1 byte writes, etc. */
	portoff = port - PCI_CONFIG_DATA;

	/*
	 * PCI uses a weird way to determine the BAR size: the OS
	 * writes all 1's, and sees which ones stick.
	 */
	if (&d->config_words[reg] == &d->config.bar[0]) {
		int i;

		iowrite(portoff, val, mask, &d->config.bar[0]);
		for (i = 0; (1 << i) < d->mmio_size; i++)
			d->config.bar[0] &= ~(1 << i);
		return true;
	} else if ((&d->config_words[reg] > &d->config.bar[0]
		    && &d->config_words[reg] <= &d->config.bar[6])
		   || &d->config_words[reg] == &d->config.expansion_rom_addr) {
		/* Allow writing to any other BAR, or expansion ROM */
		iowrite(portoff, val, mask, &d->config_words[reg]);
		return true;
		/* We let them override latency timer and cacheline size */
	} else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) {
		/* Only let them change the first two fields. */
		if (mask == 0xFFFFFFFF)
			mask = 0xFFFF;
		iowrite(portoff, val, mask, &d->config_words[reg]);
		return true;
	} else if (&d->config_words[reg] == (void *)&d->config.command
		   && mask == 0xFFFF) {
		/* Ignore command writes. */
		return true;
	} else if (&d->config_words[reg]
		   == (void *)&d->config.cfg_access.cap.bar
		   || &d->config_words[reg]
		   == &d->config.cfg_access.cap.length
		   || &d->config_words[reg]
		   == &d->config.cfg_access.cap.offset) {
		/*
		 * The VIRTIO_PCI_CAP_PCI_CFG capability
		 * provides a backdoor to access the MMIO
		 * regions without mapping them.  Weird, but
		 * useful.
		 */
		iowrite(portoff, val, mask, &d->config_words[reg]);
		return true;
	} else if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) {
		u32 write_mask;

		/*
		 * 4.1.4.7.1:
		 *
		 *  Upon detecting driver write access to pci_cfg_data, the
		 *  device MUST execute a write access at offset cap.offset at
		 *  BAR selected by cap.bar using the first cap.length bytes
		 *  from pci_cfg_data.
		 */

		/* Must be bar 0 */
		if (!valid_bar_access(d, &d->config.cfg_access))
			return false;

		iowrite(portoff, val, mask, &d->config.cfg_access.pci_cfg_data);

		/*
		 * Now emulate a write.  The mask we use is set by
		 * len, *not* this write!
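		 *
		 * (cap.length 1, 2 and 4 give write masks 0xFF, 0xFFFF and
		 * 0xFFFFFFFF respectively: (1ULL << 8*length) - 1, exactly
		 * as computed below.)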
*/ write_mask = (1ULL<<(8*d->config.cfg_access.cap.length)) - 1; verbose("Window writing %#x/%#x to bar %u, offset %u len %u\n", d->config.cfg_access.pci_cfg_data, write_mask, d->config.cfg_access.cap.bar, d->config.cfg_access.cap.offset, d->config.cfg_access.cap.length); emulate_mmio_write(d, d->config.cfg_access.cap.offset, d->config.cfg_access.pci_cfg_data, write_mask); return true; } /* * 4.1.4.1: * * The driver MUST NOT write into any field of the capability * structure, with the exception of those with cap_type * VIRTIO_PCI_CAP_PCI_CFG... */ return false; } static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask); static void pci_data_ioread(u16 port, u32 mask, u32 *val) { u32 reg; struct device *d = dev_and_reg(&reg); if (!d) return; /* Read through the PCI MMIO access window is special */ if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) { u32 read_mask; /* * 4.1.4.7.1: * * Upon detecting driver read access to pci_cfg_data, the * device MUST execute a read access of length cap.length at * offset cap.offset at BAR selected by cap.bar and store the * first cap.length bytes in pci_cfg_data. */ /* Must be bar 0 */ if (!valid_bar_access(d, &d->config.cfg_access)) bad_driver(d, "Invalid cfg_access to bar%u, offset %u len %u", d->config.cfg_access.cap.bar, d->config.cfg_access.cap.offset, d->config.cfg_access.cap.length); /* * Read into the window. The mask we use is set by * len, *not* this read! */ read_mask = (1ULL<<(8*d->config.cfg_access.cap.length))-1; d->config.cfg_access.pci_cfg_data = emulate_mmio_read(d, d->config.cfg_access.cap.offset, read_mask); verbose("Window read %#x/%#x from bar %u, offset %u len %u\n", d->config.cfg_access.pci_cfg_data, read_mask, d->config.cfg_access.cap.bar, d->config.cfg_access.cap.offset, d->config.cfg_access.cap.length); } ioread(port - PCI_CONFIG_DATA, d->config_words[reg], mask, val); } /*L:216 * This is where we emulate a handful of Guest instructions. It's ugly * and we used to do it in the kernel but it grew over time. */ /* * We use the ptrace syscall's pt_regs struct to talk about registers * to lguest: these macros convert the names to the offsets. 
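 *
 * For example, getreg(eip) expands to
 * getreg_off(offsetof(struct user_regs_struct, eip)): the LHREQ_GETREG
 * write tells the kernel which saved register we mean by its byte offset,
 * and the following read hands its value back.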
 */
#define getreg(name) getreg_off(offsetof(struct user_regs_struct, name))
#define setreg(name, val) \
	setreg_off(offsetof(struct user_regs_struct, name), (val))

static u32 getreg_off(size_t offset)
{
	u32 r;
	unsigned long args[] = { LHREQ_GETREG, offset };

	if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
		err(1, "Getting register %u", offset);
	if (pread(lguest_fd, &r, sizeof(r), cpu_id) != sizeof(r))
		err(1, "Reading register %u", offset);

	return r;
}

static void setreg_off(size_t offset, u32 val)
{
	unsigned long args[] = { LHREQ_SETREG, offset, val };

	if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
		err(1, "Setting register %u", offset);
}

/* Get register by instruction encoding */
static u32 getreg_num(unsigned regnum, u32 mask)
{
	/* 8 bit ops use regnums 4-7 for high parts of word */
	if (mask == 0xFF && (regnum & 0x4))
		return getreg_num(regnum & 0x3, 0xFFFF) >> 8;

	switch (regnum) {
	case 0: return getreg(eax) & mask;
	case 1: return getreg(ecx) & mask;
	case 2: return getreg(edx) & mask;
	case 3: return getreg(ebx) & mask;
	case 4: return getreg(esp) & mask;
	case 5: return getreg(ebp) & mask;
	case 6: return getreg(esi) & mask;
	case 7: return getreg(edi) & mask;
	}
	abort();
}

/* Set register by instruction encoding */
static void setreg_num(unsigned regnum, u32 val, u32 mask)
{
	/* Don't try to set bits out of range */
	assert(!(val & ~mask));

	/* 8 bit ops use regnums 4-7 for high parts of word */
	if (mask == 0xFF && (regnum & 0x4)) {
		/* Construct the 16 bits we want. */
		val = (val << 8) | getreg_num(regnum & 0x3, 0xFF);
		setreg_num(regnum & 0x3, val, 0xFFFF);
		return;
	}

	switch (regnum) {
	case 0: setreg(eax, val | (getreg(eax) & ~mask)); return;
	case 1: setreg(ecx, val | (getreg(ecx) & ~mask)); return;
	case 2: setreg(edx, val | (getreg(edx) & ~mask)); return;
	case 3: setreg(ebx, val | (getreg(ebx) & ~mask)); return;
	case 4: setreg(esp, val | (getreg(esp) & ~mask)); return;
	case 5: setreg(ebp, val | (getreg(ebp) & ~mask)); return;
	case 6: setreg(esi, val | (getreg(esi) & ~mask)); return;
	case 7: setreg(edi, val | (getreg(edi) & ~mask)); return;
	}
	abort();
}

/* Get bytes of displacement appended to instruction, from r/m encoding */
static u32 insn_displacement_len(u8 mod_reg_rm)
{
	/* Switch on the mod bits */
	switch (mod_reg_rm >> 6) {
	case 0:
		/* If mod == 0 and r/m == 101, a 32-bit displacement follows */
		if ((mod_reg_rm & 0x7) == 0x5)
			return 4;
		/* Normally, mod == 0 means no literal displacement */
		return 0;
	case 1:
		/* One byte displacement */
		return 1;
	case 2:
		/* Four byte displacement */
		return 4;
	case 3:
		/* Register mode */
		return 0;
	}
	abort();
}

static void emulate_insn(const u8 insn[])
{
	unsigned long args[] = { LHREQ_TRAP, 13 };
	unsigned int insnlen = 0, in = 0, small_operand = 0, byte_access;
	unsigned int eax, port, mask;
	/*
	 * Default is to return all-ones on IO port reads, which traditionally
	 * means "there's nothing there".
	 */
	u32 val = 0xFFFFFFFF;

	/*
	 * This must be the Guest kernel trying to do something, not userspace!
	 * The bottom two bits of the CS segment register are the privilege
	 * level.
	 */
	if ((getreg(xcs) & 3) != 0x1)
		goto no_emulate;

	/* Decoding x86 instructions is icky. */

	/*
	 * Around 2.6.33, the kernel started using an emulation for the
	 * cmpxchg8b instruction in early boot on many configurations.  This
	 * code isn't paravirtualized, and it tries to disable interrupts.
	 * Ignore it, which will Mostly Work.
	 */
	if (insn[insnlen] == 0xfa) {
		/* "cli", or Clear Interrupt Enable instruction.  Skip it. */
		insnlen = 1;
		goto skip_insn;
	}

	/*
	 * 0x66 is an "operand prefix".
It means a 16, not 32 bit in/out. */ if (insn[insnlen] == 0x66) { small_operand = 1; /* The instruction is 1 byte so far, read the next byte. */ insnlen = 1; } /* If the lower bit isn't set, it's a single byte access */ byte_access = !(insn[insnlen] & 1); /* * Now we can ignore the lower bit and decode the 4 opcodes * we need to emulate. */ switch (insn[insnlen] & 0xFE) { case 0xE4: /* in <next byte>,%al */ port = insn[insnlen+1]; insnlen += 2; in = 1; break; case 0xEC: /* in (%dx),%al */ port = getreg(edx) & 0xFFFF; insnlen += 1; in = 1; break; case 0xE6: /* out %al,<next byte> */ port = insn[insnlen+1]; insnlen += 2; break; case 0xEE: /* out %al,(%dx) */ port = getreg(edx) & 0xFFFF; insnlen += 1; break; default: /* OK, we don't know what this is, can't emulate. */ goto no_emulate; } /* Set a mask of the 1, 2 or 4 bytes, depending on size of IO */ if (byte_access) mask = 0xFF; else if (small_operand) mask = 0xFFFF; else mask = 0xFFFFFFFF; /* * If it was an "IN" instruction, they expect the result to be read * into %eax, so we change %eax. */ eax = getreg(eax); if (in) { /* This is the PS/2 keyboard status; 1 means ready for output */ if (port == 0x64) val = 1; else if (is_pci_addr_port(port)) pci_addr_ioread(port, mask, &val); else if (is_pci_data_port(port)) pci_data_ioread(port, mask, &val); /* Clear the bits we're about to read */ eax &= ~mask; /* Copy bits in from val. */ eax |= val & mask; /* Now update the register. */ setreg(eax, eax); } else { if (is_pci_addr_port(port)) { if (!pci_addr_iowrite(port, mask, eax)) goto bad_io; } else if (is_pci_data_port(port)) { if (!pci_data_iowrite(port, mask, eax)) goto bad_io; } /* There are many other ports, eg. CMOS clock, serial * and parallel ports, so we ignore them all. */ } verbose("IO %s of %x to %u: %#08x\n", in ? "IN" : "OUT", mask, port, eax); skip_insn: /* Finally, we've "done" the instruction, so move past it. */ setreg(eip, getreg(eip) + insnlen); return; bad_io: warnx("Attempt to %s port %u (%#x mask)", in ? "read from" : "write to", port, mask); no_emulate: /* Inject trap into Guest. */ if (write(lguest_fd, args, sizeof(args)) < 0) err(1, "Reinjecting trap 13 for fault at %#x", getreg(eip)); } static struct device *find_mmio_region(unsigned long paddr, u32 *off) { unsigned int i; for (i = 1; i < MAX_PCI_DEVICES; i++) { struct device *d = devices.pci[i]; if (!d) continue; if (paddr < d->mmio_addr) continue; if (paddr >= d->mmio_addr + d->mmio_size) continue; *off = paddr - d->mmio_addr; return d; } return NULL; } /* FIXME: Use vq array. */ static struct virtqueue *vq_by_num(struct device *d, u32 num) { struct virtqueue *vq = d->vq; while (num-- && vq) vq = vq->next; return vq; } static void save_vq_config(const struct virtio_pci_common_cfg *cfg, struct virtqueue *vq) { vq->pci_config = *cfg; } static void restore_vq_config(struct virtio_pci_common_cfg *cfg, struct virtqueue *vq) { /* Only restore the per-vq part */ size_t off = offsetof(struct virtio_pci_common_cfg, queue_size); memcpy((void *)cfg + off, (void *)&vq->pci_config + off, sizeof(*cfg) - off); } /* * 4.1.4.3.2: * * The driver MUST configure the other virtqueue fields before * enabling the virtqueue with queue_enable. * * When they enable the virtqueue, we check that their setup is valid. 
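 *
 * What "valid" means for the ring layout (sizes follow from the virtio 1.0
 * structures, for a queue of num entries):
 *
 *	desc:   num * 16 bytes,     16-byte aligned
 *	avail:  4 + num * 2 bytes,   2-byte aligned
 *	used:   4 + num * 8 bytes,   4-byte aligned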
 */
static void check_virtqueue(struct device *d, struct virtqueue *vq)
{
	/* Because lguest is 32 bit, all the descriptor high bits must be 0 */
	if (vq->pci_config.queue_desc_hi
	    || vq->pci_config.queue_avail_hi
	    || vq->pci_config.queue_used_hi)
		bad_driver_vq(vq, "invalid 64-bit queue address");

	/*
	 * 2.4.1:
	 *
	 *  The driver MUST ensure that the physical address of the first byte
	 *  of each virtqueue part is a multiple of the specified alignment
	 *  value in the above table.
	 */
	if (vq->pci_config.queue_desc_lo % 16
	    || vq->pci_config.queue_avail_lo % 2
	    || vq->pci_config.queue_used_lo % 4)
		bad_driver_vq(vq, "invalid alignment in queue addresses");

	/* Initialize the virtqueue and check they're all in range. */
	vq->vring.num = vq->pci_config.queue_size;
	vq->vring.desc = check_pointer(vq->dev,
				       vq->pci_config.queue_desc_lo,
				       sizeof(*vq->vring.desc) * vq->vring.num);
	vq->vring.avail = check_pointer(vq->dev,
					vq->pci_config.queue_avail_lo,
					sizeof(*vq->vring.avail)
					+ (sizeof(vq->vring.avail->ring[0])
					   * vq->vring.num));
	vq->vring.used = check_pointer(vq->dev,
				       vq->pci_config.queue_used_lo,
				       sizeof(*vq->vring.used)
				       + (sizeof(vq->vring.used->ring[0])
					  * vq->vring.num));

	/*
	 * 2.4.9.1:
	 *
	 *   The driver MUST initialize flags in the used ring to 0
	 *   when allocating the used ring.
	 */
	if (vq->vring.used->flags != 0)
		bad_driver_vq(vq, "invalid initial used.flags %#x",
			      vq->vring.used->flags);
}

static void start_virtqueue(struct virtqueue *vq)
{
	/*
	 * Create stack for thread.  Since the stack grows downwards, we point
	 * the stack pointer to the end of this region.
	 */
	char *stack = malloc(32768);

	/* Create a zero-initialized eventfd. */
	vq->eventfd = eventfd(0, 0);
	if (vq->eventfd < 0)
		err(1, "Creating eventfd");

	/*
	 * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
	 * we get a signal if it dies.
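	 *
	 * A minimal sketch of the same idiom, with a hypothetical worker
	 * (not part of this file):
	 *
	 *	static int worker(void *arg) { for (;;) pause(); }
	 *	char *stk = malloc(32768);
	 *	pid_t pid = clone(worker, stk + 32768, CLONE_VM | SIGCHLD, NULL);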
	 */
	vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
	if (vq->thread == (pid_t)-1)
		err(1, "Creating clone");
}

static void start_virtqueues(struct device *d)
{
	struct virtqueue *vq;

	for (vq = d->vq; vq; vq = vq->next) {
		if (vq->pci_config.queue_enable)
			start_virtqueue(vq);
	}
}

static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
{
	struct virtqueue *vq;

	switch (off) {
	case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
		/*
		 * 4.1.4.3.1:
		 *
		 *   The device MUST present the feature bits it is offering in
		 *   device_feature, starting at bit device_feature_select ∗ 32
		 *   for any device_feature_select written by the driver
		 */
		if (val == 0)
			d->mmio->cfg.device_feature = d->features;
		else if (val == 1)
			d->mmio->cfg.device_feature = (d->features >> 32);
		else
			d->mmio->cfg.device_feature = 0;
		goto feature_write_through32;
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
		if (val > 1)
			bad_driver(d, "Unexpected driver select %u", val);
		goto feature_write_through32;
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
		if (d->mmio->cfg.guest_feature_select == 0) {
			d->features_accepted &= ~((u64)0xFFFFFFFF);
			d->features_accepted |= val;
		} else {
			assert(d->mmio->cfg.guest_feature_select == 1);
			d->features_accepted &= 0xFFFFFFFF;
			d->features_accepted |= ((u64)val) << 32;
		}
		/*
		 * 2.2.1:
		 *
		 *   The driver MUST NOT accept a feature which the device did
		 *   not offer
		 */
		if (d->features_accepted & ~d->features)
			bad_driver(d, "over-accepted features %#llx of %#llx",
				   d->features_accepted, d->features);
		goto feature_write_through32;
	case offsetof(struct virtio_pci_mmio, cfg.device_status): {
		u8 prev;

		verbose("%s: device status -> %#x\n", d->name, val);

		/*
		 * 4.1.4.3.1:
		 *
		 *  The device MUST reset when 0 is written to device_status,
		 *  and present a 0 in device_status once that is done.
		 */
		if (val == 0) {
			reset_device(d);
			goto write_through8;
		}

		/* 2.1.1: The driver MUST NOT clear a device status bit. */
		if (d->mmio->cfg.device_status & ~val)
			bad_driver(d, "unset of device status bit %#x -> %#x",
				   d->mmio->cfg.device_status, val);

		/*
		 * 2.1.2:
		 *
		 *  The device MUST NOT consume buffers or notify the driver
		 *  before DRIVER_OK.
		 */
		if (val & VIRTIO_CONFIG_S_DRIVER_OK
		    && !(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK))
			start_virtqueues(d);

		/*
		 * 3.1.1:
		 *
		 *   The driver MUST follow this sequence to initialize a device:
		 *   - Reset the device.
		 *   - Set the ACKNOWLEDGE status bit: the guest OS has
		 *     noticed the device.
		 *   - Set the DRIVER status bit: the guest OS knows how
		 *     to drive the device.
		 *   - Read device feature bits, and write the subset
		 *     of feature bits understood by the OS and driver
		 *     to the device.  During this step the driver MAY
		 *     read (but MUST NOT write) the device-specific
		 *     configuration fields to check that it can
		 *     support the device before accepting it.
		 *   - Set the FEATURES_OK status bit.  The driver
		 *     MUST NOT accept new feature bits after this
		 *     step.
		 *   - Re-read device status to ensure the FEATURES_OK
		 *     bit is still set: otherwise, the device does
		 *     not support our subset of features and the
		 *     device is unusable.
		 *   - Perform device-specific setup, including
		 *     discovery of virtqueues for the device,
		 *     optional per-bus setup, reading and possibly
		 *     writing the device’s virtio configuration
		 *     space, and population of virtqueues.
		 *   - Set the DRIVER_OK status bit.  At this point the
		 *     device is “live”.
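		 *
		 * The fall-through switch below encodes that ordering: for
		 * the bit being set, it accumulates the bits which MUST
		 * already be present.  e.g. writing FEATURES_OK is only legal
		 * when ACKNOWLEDGE and DRIVER are both already in
		 * device_status.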
*/ prev = 0; switch (val & ~d->mmio->cfg.device_status) { case VIRTIO_CONFIG_S_DRIVER_OK: prev |= VIRTIO_CONFIG_S_FEATURES_OK; /* fall thru */ case VIRTIO_CONFIG_S_FEATURES_OK: prev |= VIRTIO_CONFIG_S_DRIVER; /* fall thru */ case VIRTIO_CONFIG_S_DRIVER: prev |= VIRTIO_CONFIG_S_ACKNOWLEDGE; /* fall thru */ case VIRTIO_CONFIG_S_ACKNOWLEDGE: break; default: bad_driver(d, "unknown device status bit %#x -> %#x", d->mmio->cfg.device_status, val); } if (d->mmio->cfg.device_status != prev) bad_driver(d, "unexpected status transition %#x -> %#x", d->mmio->cfg.device_status, val); /* If they just wrote FEATURES_OK, we make sure they read */ switch (val & ~d->mmio->cfg.device_status) { case VIRTIO_CONFIG_S_FEATURES_OK: d->wrote_features_ok = true; break; case VIRTIO_CONFIG_S_DRIVER_OK: if (d->wrote_features_ok) bad_driver(d, "did not re-read FEATURES_OK"); break; } goto write_through8; } case offsetof(struct virtio_pci_mmio, cfg.queue_select): vq = vq_by_num(d, val); /* * 4.1.4.3.1: * * The device MUST present a 0 in queue_size if the virtqueue * corresponding to the current queue_select is unavailable. */ if (!vq) { d->mmio->cfg.queue_size = 0; goto write_through16; } /* Save registers for old vq, if it was a valid vq */ if (d->mmio->cfg.queue_size) save_vq_config(&d->mmio->cfg, vq_by_num(d, d->mmio->cfg.queue_select)); /* Restore the registers for the queue they asked for */ restore_vq_config(&d->mmio->cfg, vq); goto write_through16; case offsetof(struct virtio_pci_mmio, cfg.queue_size): /* * 4.1.4.3.2: * * The driver MUST NOT write a value which is not a power of 2 * to queue_size. */ if (val & (val-1)) bad_driver(d, "invalid queue size %u", val); if (d->mmio->cfg.queue_enable) bad_driver(d, "changing queue size on live device"); goto write_through16; case offsetof(struct virtio_pci_mmio, cfg.queue_msix_vector): bad_driver(d, "attempt to set MSIX vector to %u", val); case offsetof(struct virtio_pci_mmio, cfg.queue_enable): { struct virtqueue *vq = vq_by_num(d, d->mmio->cfg.queue_select); /* * 4.1.4.3.2: * * The driver MUST NOT write a 0 to queue_enable. */ if (val != 1) bad_driver(d, "setting queue_enable to %u", val); /* * 3.1.1: * * 7. Perform device-specific setup, including discovery of * virtqueues for the device, optional per-bus setup, * reading and possibly writing the device’s virtio * configuration space, and population of virtqueues. * 8. Set the DRIVER_OK status bit. * * All our devices require all virtqueues to be enabled, so * they should have done that before setting DRIVER_OK. */ if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK) bad_driver(d, "enabling vq after DRIVER_OK"); d->mmio->cfg.queue_enable = val; save_vq_config(&d->mmio->cfg, vq); check_virtqueue(d, vq); goto write_through16; } case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off): bad_driver(d, "attempt to write to queue_notify_off"); case offsetof(struct virtio_pci_mmio, cfg.queue_desc_lo): case offsetof(struct virtio_pci_mmio, cfg.queue_desc_hi): case offsetof(struct virtio_pci_mmio, cfg.queue_avail_lo): case offsetof(struct virtio_pci_mmio, cfg.queue_avail_hi): case offsetof(struct virtio_pci_mmio, cfg.queue_used_lo): case offsetof(struct virtio_pci_mmio, cfg.queue_used_hi): /* * 4.1.4.3.2: * * The driver MUST configure the other virtqueue fields before * enabling the virtqueue with queue_enable. */ if (d->mmio->cfg.queue_enable) bad_driver(d, "changing queue on live device"); /* * 3.1.1: * * The driver MUST follow this sequence to initialize a device: *... * 5. Set the FEATURES_OK status bit. 
The driver MUST not * accept new feature bits after this step. */ if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK)) bad_driver(d, "setting up vq before FEATURES_OK"); /* * 6. Re-read device status to ensure the FEATURES_OK bit is * still set... */ if (d->wrote_features_ok) bad_driver(d, "didn't re-read FEATURES_OK before setup"); goto write_through32; case offsetof(struct virtio_pci_mmio, notify): vq = vq_by_num(d, val); if (!vq) bad_driver(d, "Invalid vq notification on %u", val); /* Notify the process handling this vq by adding 1 to eventfd */ write(vq->eventfd, "\1\0\0\0\0\0\0\0", 8); goto write_through16; case offsetof(struct virtio_pci_mmio, isr): bad_driver(d, "Unexpected write to isr"); /* Weird corner case: write to emerg_wr of console */ case sizeof(struct virtio_pci_mmio) + offsetof(struct virtio_console_config, emerg_wr): if (strcmp(d->name, "console") == 0) { char c = val; write(STDOUT_FILENO, &c, 1); goto write_through32; } /* Fall through... */ default: /* * 4.1.4.3.2: * * The driver MUST NOT write to device_feature, num_queues, * config_generation or queue_notify_off. */ bad_driver(d, "Unexpected write to offset %u", off); } feature_write_through32: /* * 3.1.1: * * The driver MUST follow this sequence to initialize a device: *... * - Set the DRIVER status bit: the guest OS knows how * to drive the device. * - Read device feature bits, and write the subset * of feature bits understood by the OS and driver * to the device. *... * - Set the FEATURES_OK status bit. The driver MUST not * accept new feature bits after this step. */ if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER)) bad_driver(d, "feature write before VIRTIO_CONFIG_S_DRIVER"); if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK) bad_driver(d, "feature write after VIRTIO_CONFIG_S_FEATURES_OK"); /* * 4.1.3.1: * * The driver MUST access each field using the “natural” access * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses for * 16-bit fields and 8-bit accesses for 8-bit fields. */ write_through32: if (mask != 0xFFFFFFFF) { bad_driver(d, "non-32-bit write to offset %u (%#x)", off, getreg(eip)); return; } memcpy((char *)d->mmio + off, &val, 4); return; write_through16: if (mask != 0xFFFF) bad_driver(d, "non-16-bit write to offset %u (%#x)", off, getreg(eip)); memcpy((char *)d->mmio + off, &val, 2); return; write_through8: if (mask != 0xFF) bad_driver(d, "non-8-bit write to offset %u (%#x)", off, getreg(eip)); memcpy((char *)d->mmio + off, &val, 1); return; } static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) { u8 isr; u32 val = 0; switch (off) { case offsetof(struct virtio_pci_mmio, cfg.device_feature_select): case offsetof(struct virtio_pci_mmio, cfg.device_feature): case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select): case offsetof(struct virtio_pci_mmio, cfg.guest_feature): /* * 3.1.1: * * The driver MUST follow this sequence to initialize a device: *... * - Set the DRIVER status bit: the guest OS knows how * to drive the device. * - Read device feature bits, and write the subset * of feature bits understood by the OS and driver * to the device. 
*/ if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER)) bad_driver(d, "feature read before VIRTIO_CONFIG_S_DRIVER"); goto read_through32; case offsetof(struct virtio_pci_mmio, cfg.msix_config): bad_driver(d, "read of msix_config"); case offsetof(struct virtio_pci_mmio, cfg.num_queues): goto read_through16; case offsetof(struct virtio_pci_mmio, cfg.device_status): /* As they did read, any write of FEATURES_OK is now fine. */ d->wrote_features_ok = false; goto read_through8; case offsetof(struct virtio_pci_mmio, cfg.config_generation): /* * 4.1.4.3.1: * * The device MUST present a changed config_generation after * the driver has read a device-specific configuration value * which has changed since any part of the device-specific * configuration was last read. * * This is simple: none of our devices change config, so this * is always 0. */ goto read_through8; case offsetof(struct virtio_pci_mmio, notify): /* * 3.1.1: * * The driver MUST NOT notify the device before setting * DRIVER_OK. */ if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK)) bad_driver(d, "notify before VIRTIO_CONFIG_S_DRIVER_OK"); goto read_through16; case offsetof(struct virtio_pci_mmio, isr): if (mask != 0xFF) bad_driver(d, "non-8-bit read from offset %u (%#x)", off, getreg(eip)); isr = d->mmio->isr; /* * 4.1.4.5.1: * * The device MUST reset ISR status to 0 on driver read. */ d->mmio->isr = 0; return isr; case offsetof(struct virtio_pci_mmio, padding): bad_driver(d, "read from padding (%#x)", getreg(eip)); default: /* Read from device config space, beware unaligned overflow */ if (off > d->mmio_size - 4) bad_driver(d, "read past end (%#x)", getreg(eip)); /* * 3.1.1: * The driver MUST follow this sequence to initialize a device: *... * 3. Set the DRIVER status bit: the guest OS knows how to * drive the device. * 4. Read device feature bits, and write the subset of * feature bits understood by the OS and driver to the * device. During this step the driver MAY read (but MUST NOT * write) the device-specific configuration fields to check * that it can support the device before accepting it. */ if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER)) bad_driver(d, "config read before VIRTIO_CONFIG_S_DRIVER"); if (mask == 0xFFFFFFFF) goto read_through32; else if (mask == 0xFFFF) goto read_through16; else goto read_through8; } /* * 4.1.3.1: * * The driver MUST access each field using the “natural” access * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses for * 16-bit fields and 8-bit accesses for 8-bit fields. */ read_through32: if (mask != 0xFFFFFFFF) bad_driver(d, "non-32-bit read to offset %u (%#x)", off, getreg(eip)); memcpy(&val, (char *)d->mmio + off, 4); return val; read_through16: if (mask != 0xFFFF) bad_driver(d, "non-16-bit read to offset %u (%#x)", off, getreg(eip)); memcpy(&val, (char *)d->mmio + off, 2); return val; read_through8: if (mask != 0xFF) bad_driver(d, "non-8-bit read to offset %u (%#x)", off, getreg(eip)); memcpy(&val, (char *)d->mmio + off, 1); return val; } static void emulate_mmio(unsigned long paddr, const u8 *insn) { u32 val, off, mask = 0xFFFFFFFF, insnlen = 0; struct device *d = find_mmio_region(paddr, &off); unsigned long args[] = { LHREQ_TRAP, 14 }; if (!d) { warnx("MMIO touching %#08lx (not a device)", paddr); goto reinject; } /* Prefix makes it a 16 bit op */ if (insn[0] == 0x66) { mask = 0xFFFF; insnlen++; } /* iowrite */ if (insn[insnlen] == 0x89) { /* Next byte is r/m byte: bits 3-5 are register. 
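		 *
		 * (e.g. for "mov %ecx,(%eax)" the ModRM byte is 0x08:
		 * mod = 00, reg = 001 = %ecx, r/m = 000 = (%eax).)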
		 */
		val = getreg_num((insn[insnlen+1] >> 3) & 0x7, mask);
		emulate_mmio_write(d, off, val, mask);
		insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
	} else if (insn[insnlen] == 0x8b) { /* ioread */
		/* Next byte is r/m byte: bits 3-5 are register. */
		val = emulate_mmio_read(d, off, mask);
		setreg_num((insn[insnlen+1] >> 3) & 0x7, val, mask);
		insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
	} else if (insn[0] == 0x88) { /* 8-bit iowrite */
		mask = 0xff;
		/* Next byte is r/m byte: bits 3-5 are register. */
		val = getreg_num((insn[1] >> 3) & 0x7, mask);
		emulate_mmio_write(d, off, val, mask);
		insnlen = 2 + insn_displacement_len(insn[1]);
	} else if (insn[0] == 0x8a) { /* 8-bit ioread */
		mask = 0xff;
		val = emulate_mmio_read(d, off, mask);
		setreg_num((insn[1] >> 3) & 0x7, val, mask);
		insnlen = 2 + insn_displacement_len(insn[1]);
	} else {
		warnx("Unknown MMIO instruction touching %#08lx:"
		      " %02x %02x %02x %02x at %u",
		      paddr, insn[0], insn[1], insn[2], insn[3], getreg(eip));
	reinject:
		/* Inject trap into Guest. */
		if (write(lguest_fd, args, sizeof(args)) < 0)
			err(1, "Reinjecting trap 14 for fault at %#x",
			    getreg(eip));
		return;
	}

	/* Finally, we've "done" the instruction, so move past it. */
	setreg(eip, getreg(eip) + insnlen);
}

/*L:190
 * Device Setup
 *
 * All devices need a descriptor so the Guest knows it exists, and a "struct
 * device" so the Launcher can keep track of it.  We have common helper
 * routines to allocate and manage them.
 */
static void add_pci_virtqueue(struct device *dev,
			      void (*service)(struct virtqueue *),
			      const char *name)
{
	struct virtqueue **i, *vq = malloc(sizeof(*vq));

	/* Initialize the virtqueue */
	vq->next = NULL;
	vq->last_avail_idx = 0;
	vq->dev = dev;
	vq->name = name;

	/*
	 * This is the routine the service thread will run, and its Process ID
	 * once it's running.
	 */
	vq->service = service;
	vq->thread = (pid_t)-1;

	/* Initialize the configuration. */
	reset_vq_pci_config(vq);
	vq->pci_config.queue_notify_off = 0;

	/* Add one to the number of queues */
	vq->dev->mmio->cfg.num_queues++;

	/*
	 * Add to tail of list, so dev->vq is first vq, dev->vq->next is
	 * second.
	 */
	for (i = &dev->vq; *i; i = &(*i)->next);
	*i = vq;
}

/* The Guest accesses the feature bits via the PCI common config MMIO region */
static void add_pci_feature(struct device *dev, unsigned bit)
{
	dev->features |= (1ULL << bit);
}

/* For devices with no config. */
static void no_device_config(struct device *dev)
{
	dev->mmio_addr = get_mmio_region(dev->mmio_size);

	dev->config.bar[0] = dev->mmio_addr;
	/* Bottom 4 bits must be zero */
	assert(!(dev->config.bar[0] & 0xF));
}

/* This puts the device config into BAR0 */
static void set_device_config(struct device *dev, const void *conf,
			      size_t len)
{
	/* Set up BAR 0 */
	dev->mmio_size += len;
	dev->mmio = realloc(dev->mmio, dev->mmio_size);
	memcpy(dev->mmio + 1, conf, len);

	/*
	 * 4.1.4.6:
	 *
	 *  The device MUST present at least one VIRTIO_PCI_CAP_DEVICE_CFG
	 *  capability for any device type which has a device-specific
	 *  configuration.
	 */
	/* Hook up device cfg */
	dev->config.cfg_access.cap.cap_next
		= offsetof(struct pci_config, device);

	/*
	 * 4.1.4.6.1:
	 *
	 *  The offset for the device-specific configuration MUST be 4-byte
	 *  aligned.
	 */
	assert(dev->config.cfg_access.cap.cap_next % 4 == 0);

	/* Fix up device cfg field length.
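	 *
	 * (Layout reminder: BAR0 begins with the struct virtio_pci_mmio we
	 * calloc'ed in new_pci_device(); the realloc above grows it so the
	 * device-specific config lands immediately after it, hence the copy
	 * to "dev->mmio + 1".)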
	 */
	dev->config.device.length = len;

	/* The rest is the same as the no-config case */
	no_device_config(dev);
}

static void init_cap(struct virtio_pci_cap *cap, size_t caplen, int type,
		     size_t bar_offset, size_t bar_bytes, u8 next)
{
	cap->cap_vndr = PCI_CAP_ID_VNDR;
	cap->cap_next = next;
	cap->cap_len = caplen;
	cap->cfg_type = type;
	cap->bar = 0;
	memset(cap->padding, 0, sizeof(cap->padding));
	cap->offset = bar_offset;
	cap->length = bar_bytes;
}

/*
 * This sets up the pci_config structure, as defined in the virtio 1.0
 * standard (and PCI standard).
 */
static void init_pci_config(struct pci_config *pci, u16 type,
			    u8 class, u8 subclass)
{
	size_t bar_offset, bar_len;

	/*
	 * 4.1.4.4.1:
	 *
	 *  The device MUST either present notify_off_multiplier as an even
	 *  power of 2, or present notify_off_multiplier as 0.
	 *
	 * 2.1.2:
	 *
	 *   The device MUST initialize device status to 0 upon reset.
	 */
	memset(pci, 0, sizeof(*pci));

	/* 4.1.2.1: Devices MUST have the PCI Vendor ID 0x1AF4 */
	pci->vendor_id = 0x1AF4;
	/* 4.1.2.1: ... PCI Device ID calculated by adding 0x1040 ... */
	pci->device_id = 0x1040 + type;

	/*
	 * PCI has specific class codes for different types of devices.
	 * Linux doesn't care, but it's a good clue for people looking
	 * at the device.
	 */
	pci->class = class;
	pci->subclass = subclass;

	/*
	 * 4.1.2.1:
	 *
	 *  Non-transitional devices SHOULD have a PCI Revision ID of 1 or
	 *  higher
	 */
	pci->revid = 1;

	/*
	 * 4.1.2.1:
	 *
	 *  Non-transitional devices SHOULD have a PCI Subsystem Device ID of
	 *  0x40 or higher.
	 */
	pci->subsystem_device_id = 0x40;

	/* We use our dummy interrupt controller, and irq_line is the irq */
	pci->irq_line = devices.next_irq++;
	pci->irq_pin = 0;

	/* Support for extended capabilities. */
	pci->status = (1 << 4);

	/* Link them in. */
	/*
	 * 4.1.4.3.1:
	 *
	 *  The device MUST present at least one common configuration
	 *  capability.
	 */
	pci->capabilities = offsetof(struct pci_config, common);

	/* 4.1.4.3.1 ... offset MUST be 4-byte aligned. */
	assert(pci->capabilities % 4 == 0);

	bar_offset = offsetof(struct virtio_pci_mmio, cfg);
	bar_len = sizeof(((struct virtio_pci_mmio *)0)->cfg);
	init_cap(&pci->common, sizeof(pci->common), VIRTIO_PCI_CAP_COMMON_CFG,
		 bar_offset, bar_len,
		 offsetof(struct pci_config, notify));

	/*
	 * 4.1.4.4.1:
	 *
	 *  The device MUST present at least one notification capability.
	 */
	bar_offset += bar_len;
	bar_len = sizeof(((struct virtio_pci_mmio *)0)->notify);

	/*
	 * 4.1.4.4.1:
	 *
	 *  The cap.offset MUST be 2-byte aligned.
	 */
	assert(pci->common.cap_next % 2 == 0);

	/* FIXME: Use a non-zero notify_off, for per-queue notification? */
	/*
	 * 4.1.4.4.1:
	 *
	 *  The value cap.length presented by the device MUST be at least 2 and
	 *  MUST be large enough to support queue notification offsets for all
	 *  supported queues in all possible configurations.
	 */
	assert(bar_len >= 2);

	init_cap(&pci->notify.cap, sizeof(pci->notify),
		 VIRTIO_PCI_CAP_NOTIFY_CFG,
		 bar_offset, bar_len,
		 offsetof(struct pci_config, isr));

	bar_offset += bar_len;
	bar_len = sizeof(((struct virtio_pci_mmio *)0)->isr);

	/*
	 * 4.1.4.5.1:
	 *
	 *  The device MUST present at least one VIRTIO_PCI_CAP_ISR_CFG
	 *  capability.
	 */
	init_cap(&pci->isr, sizeof(pci->isr),
		 VIRTIO_PCI_CAP_ISR_CFG,
		 bar_offset, bar_len,
		 offsetof(struct pci_config, cfg_access));

	/*
	 * 4.1.4.7.1:
	 *
	 *  The device MUST present at least one VIRTIO_PCI_CAP_PCI_CFG
	 *  capability.
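	 *
	 * The finished capability chain therefore reads:
	 *
	 *	common -> notify -> isr -> cfg_access -> device (if any)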
	 */
	/* This doesn't have any presence in the BAR */
	init_cap(&pci->cfg_access.cap, sizeof(pci->cfg_access),
		 VIRTIO_PCI_CAP_PCI_CFG,
		 0, 0, 0);

	bar_offset += bar_len + sizeof(((struct virtio_pci_mmio *)0)->padding);
	assert(bar_offset == sizeof(struct virtio_pci_mmio));

	/*
	 * This gets sewn in and length set in set_device_config().
	 * Some devices don't have a device configuration interface, so
	 * we never expose this if we don't call set_device_config().
	 */
	init_cap(&pci->device, sizeof(pci->device), VIRTIO_PCI_CAP_DEVICE_CFG,
		 bar_offset, 0, 0);
}

/*
 * This routine does all the creation and setup of a new device, but we don't
 * actually place the MMIO region until we know the size (if any) of the
 * device-specific config.  And we don't actually start the service threads
 * until later.
 *
 * See what I mean about userspace being boring?
 */
static struct device *new_pci_device(const char *name, u16 type,
				     u8 class, u8 subclass)
{
	struct device *dev = malloc(sizeof(*dev));

	/* Now we populate the fields one at a time. */
	dev->name = name;
	dev->vq = NULL;
	dev->running = false;
	dev->wrote_features_ok = false;
	dev->mmio_size = sizeof(struct virtio_pci_mmio);
	dev->mmio = calloc(1, dev->mmio_size);
	dev->features = (u64)1 << VIRTIO_F_VERSION_1;
	dev->features_accepted = 0;

	if (devices.device_num + 1 >= MAX_PCI_DEVICES)
		errx(1, "Can only handle 31 PCI devices");

	init_pci_config(&dev->config, type, class, subclass);
	assert(!devices.pci[devices.device_num+1]);
	devices.pci[++devices.device_num] = dev;

	return dev;
}

/*
 * Our first setup routine is the console.  It's a fairly simple device, but
 * UNIX tty handling makes it uglier than it could be.
 */
static void setup_console(void)
{
	struct device *dev;
	struct virtio_console_config conf;

	/* If we can save the initial standard input settings... */
	if (tcgetattr(STDIN_FILENO, &orig_term) == 0) {
		struct termios term = orig_term;
		/*
		 * Then we turn off echo, line buffering and ^C etc: We want a
		 * raw input stream to the Guest.
		 */
		term.c_lflag &= ~(ISIG|ICANON|ECHO);
		tcsetattr(STDIN_FILENO, TCSANOW, &term);
	}

	dev = new_pci_device("console", VIRTIO_ID_CONSOLE, 0x07, 0x00);

	/* We store the console state in dev->priv, and initialize it. */
	dev->priv = malloc(sizeof(struct console_abort));
	((struct console_abort *)dev->priv)->count = 0;

	/*
	 * The console needs two virtqueues: the input then the output.  When
	 * they put something in the input queue, we make sure we're listening
	 * to stdin.  When they put something in the output queue, we write it
	 * to stdout.
	 */
	add_pci_virtqueue(dev, console_input, "input");
	add_pci_virtqueue(dev, console_output, "output");

	/* We need a configuration area for the emerg_wr early writes. */
	add_pci_feature(dev, VIRTIO_CONSOLE_F_EMERG_WRITE);
	set_device_config(dev, &conf, sizeof(conf));

	verbose("device %u: console\n", devices.device_num);
}
/*:*/

/*M:010
 * Inter-guest networking is an interesting area.  Simplest is to have a
 * --sharenet=<name> option which opens or creates a named pipe.  This can be
 * used to send packets to another guest in a 1:1 manner.
 *
 * More sophisticated is to use one of the tools developed for projects like
 * UML to do networking.
 *
 * Faster is to do virtio bonding in kernel.  Doing this 1:1 would be
 * completely generic ("here's my vring, attach to your vring") and would work
 * for any traffic.  Of course, namespace and permissions issues need to be
 * dealt with.
 * A more sophisticated "multi-channel" virtio_net.c could hide
 * multiple inter-guest channels behind one interface, although it would
 * require some manner of hotplugging new virtio channels.
 *
 * Finally, we could use a virtio network switch in the kernel, ie. vhost.
:*/

static u32 str2ip(const char *ipaddr)
{
	unsigned int b[4];

	if (sscanf(ipaddr, "%u.%u.%u.%u", &b[0], &b[1], &b[2], &b[3]) != 4)
		errx(1, "Failed to parse IP address '%s'", ipaddr);
	return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
}

static void str2mac(const char *macaddr, unsigned char mac[6])
{
	unsigned int m[6];
	if (sscanf(macaddr, "%02x:%02x:%02x:%02x:%02x:%02x",
		   &m[0], &m[1], &m[2], &m[3], &m[4], &m[5]) != 6)
		errx(1, "Failed to parse mac address '%s'", macaddr);
	mac[0] = m[0];
	mac[1] = m[1];
	mac[2] = m[2];
	mac[3] = m[3];
	mac[4] = m[4];
	mac[5] = m[5];
}

/*
 * This code is "adapted" from libbridge: it attaches the Host end of the
 * network device to the bridge device specified by the command line.
 *
 * This is yet another James Morris contribution (I'm an IP-level guy, so I
 * dislike bridging), and I just try not to break it.
 */
static void add_to_bridge(int fd, const char *if_name, const char *br_name)
{
	int ifidx;
	struct ifreq ifr;

	if (!*br_name)
		errx(1, "must specify bridge name");

	ifidx = if_nametoindex(if_name);
	if (!ifidx)
		errx(1, "interface %s does not exist!", if_name);

	strncpy(ifr.ifr_name, br_name, IFNAMSIZ);
	ifr.ifr_name[IFNAMSIZ-1] = '\0';
	ifr.ifr_ifindex = ifidx;
	if (ioctl(fd, SIOCBRADDIF, &ifr) < 0)
		err(1, "can't add %s to bridge %s", if_name, br_name);
}

/*
 * This sets up the Host end of the network device with an IP address, and
 * brings it up so packets will flow.
 */
static void configure_device(int fd, const char *tapif, u32 ipaddr)
{
	struct ifreq ifr;
	struct sockaddr_in sin;

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, tapif);

	/* Don't read these incantations.  Just cut & paste them like I did! */
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(ipaddr);
	memcpy(&ifr.ifr_addr, &sin, sizeof(sin));
	if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
		err(1, "Setting %s interface address", tapif);
	ifr.ifr_flags = IFF_UP;
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0)
		err(1, "Bringing interface %s up", tapif);
}

static int get_tun_device(char tapif[IFNAMSIZ])
{
	struct ifreq ifr;
	int vnet_hdr_sz;
	int netfd;

	/* Start with this zeroed.  Messy but sure. */
	memset(&ifr, 0, sizeof(ifr));

	/*
	 * We open the /dev/net/tun device and tell it we want a tap device.  A
	 * tap device is like a tun device, only somehow different.  To tell
	 * the truth, I completely blundered my way through this code, but it
	 * works now!
	 */
	netfd = open_or_die("/dev/net/tun", O_RDWR);
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	strcpy(ifr.ifr_name, "tap%d");
	if (ioctl(netfd, TUNSETIFF, &ifr) != 0)
		err(1, "configuring /dev/net/tun");

	if (ioctl(netfd, TUNSETOFFLOAD,
		  TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0)
		err(1, "Could not set features for tun device");

	/*
	 * We don't need checksums calculated for packets coming in this
	 * device: trust us!
	 */
	ioctl(netfd, TUNSETNOCSUM, 1);

	/*
	 * In virtio before 1.0 (aka legacy virtio), we added a 16-bit
	 * field at the end of the network header iff
	 * VIRTIO_NET_F_MRG_RXBUF was negotiated.  For virtio 1.0,
	 * that became the norm, but we need to tell the tun device
	 * about our expanded header (which is called
	 * virtio_net_hdr_mrg_rxbuf in the legacy system).
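	 *
	 * (Concretely: the legacy struct virtio_net_hdr is 10 bytes; the
	 * extra num_buffers field brings struct virtio_net_hdr_v1 to 12
	 * bytes, which is the size handed to TUNSETVNETHDRSZ below.)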
*/ vnet_hdr_sz = sizeof(struct virtio_net_hdr_v1); if (ioctl(netfd, TUNSETVNETHDRSZ, &vnet_hdr_sz) != 0) err(1, "Setting tun header size to %u", vnet_hdr_sz); memcpy(tapif, ifr.ifr_name, IFNAMSIZ); return netfd; } /*L:195 * Our network is a Host<->Guest network. This can either use bridging or * routing, but the principle is the same: it uses the "tun" device to inject * packets into the Host as if they came in from a normal network card. We * just shunt packets between the Guest and the tun device. */ static void setup_tun_net(char *arg) { struct device *dev; struct net_info *net_info = malloc(sizeof(*net_info)); int ipfd; u32 ip = INADDR_ANY; bool bridging = false; char tapif[IFNAMSIZ], *p; struct virtio_net_config conf; net_info->tunfd = get_tun_device(tapif); /* First we create a new network device. */ dev = new_pci_device("net", VIRTIO_ID_NET, 0x02, 0x00); dev->priv = net_info; /* Network devices need a recv and a send queue, just like console. */ add_pci_virtqueue(dev, net_input, "rx"); add_pci_virtqueue(dev, net_output, "tx"); /* * We need a socket to perform the magic network ioctls to bring up the * tap interface, connect to the bridge etc. Any socket will do! */ ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); if (ipfd < 0) err(1, "opening IP socket"); /* If the command line was --tunnet=bridge:<name> do bridging. */ if (!strncmp(BRIDGE_PFX, arg, strlen(BRIDGE_PFX))) { arg += strlen(BRIDGE_PFX); bridging = true; } /* A mac address may follow the bridge name or IP address */ p = strchr(arg, ':'); if (p) { str2mac(p+1, conf.mac); add_pci_feature(dev, VIRTIO_NET_F_MAC); *p = '\0'; } /* arg is now either an IP address or a bridge name */ if (bridging) add_to_bridge(ipfd, tapif, arg); else ip = str2ip(arg); /* Set up the tun device. */ configure_device(ipfd, tapif, ip); /* Expect Guest to handle everything except UFO */ add_pci_feature(dev, VIRTIO_NET_F_CSUM); add_pci_feature(dev, VIRTIO_NET_F_GUEST_CSUM); add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO4); add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO6); add_pci_feature(dev, VIRTIO_NET_F_GUEST_ECN); add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO4); add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO6); add_pci_feature(dev, VIRTIO_NET_F_HOST_ECN); /* We handle indirect ring entries */ add_pci_feature(dev, VIRTIO_RING_F_INDIRECT_DESC); set_device_config(dev, &conf, sizeof(conf)); /* We don't need the socket any more; setup is done. */ close(ipfd); if (bridging) verbose("device %u: tun %s attached to bridge: %s\n", devices.device_num, tapif, arg); else verbose("device %u: tun %s: %s\n", devices.device_num, tapif, arg); } /*:*/ /* This hangs off device->priv. */ struct vblk_info { /* The size of the file. */ off64_t len; /* The file descriptor for the file. */ int fd; }; /*L:210 * The Disk * * The disk only has one virtqueue, so it only has one thread. It is really * simple: the Guest asks for a block number and we read or write that position * in the file. * * Before we serviced each virtqueue in a separate thread, that was unacceptably * slow: the Guest waits until the read is finished before running anything * else, even if it could have been doing useful work. * * We could have used async I/O, except it's reputed to suck so hard that * characters actually go missing from your code when you try to use it. 
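 *
 * For reference, each request the Guest queues has roughly this shape (a
 * sketch from the virtio block spec, not a struct we define here):
 *
 *	struct virtio_blk_outhdr { u32 type; u32 ioprio; u64 sector; };
 *	... the data buffers to be read or written ...
 *	u8 status;   <-- we write VIRTIO_BLK_S_OK or VIRTIO_BLK_S_IOERR here
 *
 * blk_request() below peels the header off the front and the status byte
 * off the end, and whatever is left in the middle is the data.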
*/ static void blk_request(struct virtqueue *vq) { struct vblk_info *vblk = vq->dev->priv; unsigned int head, out_num, in_num, wlen; int ret, i; u8 *in; struct virtio_blk_outhdr out; struct iovec iov[vq->vring.num]; off64_t off; /* * Get the next request, where we normally wait. It triggers the * interrupt to acknowledge previously serviced requests (if any). */ head = wait_for_vq_desc(vq, iov, &out_num, &in_num); /* Copy the output header from the front of the iov (adjusts iov) */ iov_consume(vq->dev, iov, out_num, &out, sizeof(out)); /* Find and trim end of iov input array, for our status byte. */ in = NULL; for (i = out_num + in_num - 1; i >= out_num; i--) { if (iov[i].iov_len > 0) { in = iov[i].iov_base + iov[i].iov_len - 1; iov[i].iov_len--; break; } } if (!in) bad_driver_vq(vq, "Bad virtblk cmd with no room for status"); /* * For historical reasons, block operations are expressed in 512 byte * "sectors". */ off = out.sector * 512; if (out.type & VIRTIO_BLK_T_OUT) { /* * Write * * Move to the right location in the block file. This can fail * if they try to write past end. */ if (lseek64(vblk->fd, off, SEEK_SET) != off) err(1, "Bad seek to sector %llu", out.sector); ret = writev(vblk->fd, iov, out_num); verbose("WRITE to sector %llu: %i\n", out.sector, ret); /* * Grr... Now we know how long the descriptor they sent was, we * make sure they didn't try to write over the end of the block * file (possibly extending it). */ if (ret > 0 && off + ret > vblk->len) { /* Trim it back to the correct length */ ftruncate64(vblk->fd, vblk->len); /* Die, bad Guest, die. */ bad_driver_vq(vq, "Write past end %llu+%u", off, ret); } wlen = sizeof(*in); *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR); } else if (out.type & VIRTIO_BLK_T_FLUSH) { /* Flush */ ret = fdatasync(vblk->fd); verbose("FLUSH fdatasync: %i\n", ret); wlen = sizeof(*in); *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR); } else { /* * Read * * Move to the right location in the block file. This can fail * if they try to read past end. */ if (lseek64(vblk->fd, off, SEEK_SET) != off) err(1, "Bad seek to sector %llu", out.sector); ret = readv(vblk->fd, iov + out_num, in_num); if (ret >= 0) { wlen = sizeof(*in) + ret; *in = VIRTIO_BLK_S_OK; } else { wlen = sizeof(*in); *in = VIRTIO_BLK_S_IOERR; } } /* Finished that request. */ add_used(vq, head, wlen); } /*L:198 This actually sets up a virtual block device. */ static void setup_block_file(const char *filename) { struct device *dev; struct vblk_info *vblk; struct virtio_blk_config conf; /* Create the device. */ dev = new_pci_device("block", VIRTIO_ID_BLOCK, 0x01, 0x80); /* The device has one virtqueue, where the Guest places requests. */ add_pci_virtqueue(dev, blk_request, "request"); /* Allocate the room for our own bookkeeping */ vblk = dev->priv = malloc(sizeof(*vblk)); /* First we open the file and store the length. */ vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE); vblk->len = lseek64(vblk->fd, 0, SEEK_END); /* Tell Guest how many sectors this device has. */ conf.capacity = cpu_to_le64(vblk->len / 512); /* * Tell Guest not to put in too many descriptors at once: two are used * for the in and out elements. */ add_pci_feature(dev, VIRTIO_BLK_F_SEG_MAX); conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2); set_device_config(dev, &conf, sizeof(struct virtio_blk_config)); verbose("device %u: virtblock %llu sectors\n", devices.device_num, le64_to_cpu(conf.capacity)); } /*L:211 * Our random number generator device reads from /dev/urandom into the Guest's * input buffers. 
 * The usual case is that the Guest doesn't want random numbers and so has no
 * buffers although /dev/urandom is still readable, whereas console is the
 * reverse.
 *
 * The same logic applies, however.
 */
struct rng_info {
	int rfd;
};

static void rng_input(struct virtqueue *vq)
{
	int len;
	unsigned int head, in_num, out_num, totlen = 0;
	struct rng_info *rng_info = vq->dev->priv;
	struct iovec iov[vq->vring.num];

	/* First we need a buffer from the Guest's virtqueue. */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
	if (out_num)
		bad_driver_vq(vq, "Output buffers in rng?");

	/*
	 * Just like the console write, we loop to cover the whole iovec.
	 * In this case, short reads actually happen quite a bit.
	 */
	while (!iov_empty(iov, in_num)) {
		len = readv(rng_info->rfd, iov, in_num);
		if (len <= 0)
			err(1, "Read from /dev/urandom gave %i", len);
		iov_consume(vq->dev, iov, in_num, NULL, len);
		totlen += len;
	}

	/* Tell the Guest about the new input. */
	add_used(vq, head, totlen);
}

/*L:199
 * This creates a "hardware" random number device for the Guest.
 */
static void setup_rng(void)
{
	struct device *dev;
	struct rng_info *rng_info = malloc(sizeof(*rng_info));

	/* Our device's private info simply contains the /dev/urandom fd. */
	rng_info->rfd = open_or_die("/dev/urandom", O_RDONLY);

	/* Create the new device. */
	dev = new_pci_device("rng", VIRTIO_ID_RNG, 0xff, 0);
	dev->priv = rng_info;

	/* The device has one virtqueue, where the Guest places inbufs. */
	add_pci_virtqueue(dev, rng_input, "input");

	/* We don't have any configuration space */
	no_device_config(dev);

	verbose("device %u: rng\n", devices.device_num);
}
/* That's the end of device setup. */

/*L:230 Reboot is pretty easy: clean up and exec() the Launcher afresh. */
static void __attribute__((noreturn)) restart_guest(void)
{
	unsigned int i;

	/*
	 * Since we don't track all open fds, we simply close everything beyond
	 * stderr.
	 */
	for (i = 3; i < FD_SETSIZE; i++)
		close(i);

	/* Reset all the devices (kills all threads). */
	cleanup_devices();

	execv(main_args[0], main_args);
	err(1, "Could not exec %s", main_args[0]);
}

/*L:220
 * Finally we reach the core of the Launcher which runs the Guest, serves
 * its input and output, and finally, lays it to rest.
 */
static void __attribute__((noreturn)) run_guest(void)
{
	for (;;) {
		struct lguest_pending notify;
		int readval;

		/* We read from the /dev/lguest device to run the Guest. */
		readval = pread(lguest_fd, &notify, sizeof(notify), cpu_id);
		if (readval == sizeof(notify)) {
			if (notify.trap == 13) {
				verbose("Emulating instruction at %#x\n",
					getreg(eip));
				emulate_insn(notify.insn);
			} else if (notify.trap == 14) {
				verbose("Emulating MMIO at %#x\n",
					getreg(eip));
				emulate_mmio(notify.addr, notify.insn);
			} else
				errx(1, "Unknown trap %i addr %#08x\n",
				     notify.trap, notify.addr);
		/* ENOENT means the Guest died.  Reading tells us why. */
		} else if (errno == ENOENT) {
			char reason[1024] = { 0 };

			pread(lguest_fd, reason, sizeof(reason)-1, cpu_id);
			errx(1, "%s", reason);
		/* ERESTART means that we need to reboot the guest */
		} else if (errno == ERESTART) {
			restart_guest();
		/* Anything else means a bug or incompatible change. */
		} else
			err(1, "Running guest failed");
	}
}

/*L:240
 * This is the end of the Launcher.  The good news: we are over halfway
 * through!  The bad news: the most fiendish part of the code still lies ahead
 * of us.
 *
 * Are you ready?  Take a deep breath and join me in the core of the Host, in
 * "make Host".
:*/ static struct option opts[] = { { "verbose", 0, NULL, 'v' }, { "tunnet", 1, NULL, 't' }, { "block", 1, NULL, 'b' }, { "rng", 0, NULL, 'r' }, { "initrd", 1, NULL, 'i' }, { "username", 1, NULL, 'u' }, { "chroot", 1, NULL, 'c' }, { NULL }, }; static void usage(void) { errx(1, "Usage: lguest [--verbose] " "[--tunnet=(<ipaddr>:<macaddr>|bridge:<bridgename>:<macaddr>)\n" "|--block=<filename>|--initrd=<filename>]...\n" "<mem-in-mb> vmlinux [args...]"); } /*L:105 The main routine is where the real work begins: */ int main(int argc, char *argv[]) { /* Memory, code startpoint and size of the (optional) initrd. */ unsigned long mem = 0, start, initrd_size = 0; /* Two temporaries. */ int i, c; /* The boot information for the Guest. */ struct boot_params *boot; /* If they specify an initrd file to load. */ const char *initrd_name = NULL; /* Password structure for initgroups/setres[gu]id */ struct passwd *user_details = NULL; /* Directory to chroot to */ char *chroot_path = NULL; /* Save the args: we "reboot" by execing ourselves again. */ main_args = argv; /* * First we initialize the device list. We remember next interrupt * number to use for devices (1: remember that 0 is used by the timer). */ devices.next_irq = 1; /* We're CPU 0. In fact, that's the only CPU possible right now. */ cpu_id = 0; /* * We need to know how much memory so we can set up the device * descriptor and memory pages for the devices as we parse the command * line. So we quickly look through the arguments to find the amount * of memory now. */ for (i = 1; i < argc; i++) { if (argv[i][0] != '-') { mem = atoi(argv[i]) * 1024 * 1024; /* * We start by mapping anonymous pages over all of * guest-physical memory range. This fills it with 0, * and ensures that the Guest won't be killed when it * tries to access it. */ guest_base = map_zeroed_pages(mem / getpagesize() + DEVICE_PAGES); guest_limit = mem; guest_max = guest_mmio = mem + DEVICE_PAGES*getpagesize(); break; } } /* We always have a console device, and it's always device 1. */ setup_console(); /* The options are fairly straight-forward */ while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) { switch (c) { case 'v': verbose = true; break; case 't': setup_tun_net(optarg); break; case 'b': setup_block_file(optarg); break; case 'r': setup_rng(); break; case 'i': initrd_name = optarg; break; case 'u': user_details = getpwnam(optarg); if (!user_details) err(1, "getpwnam failed, incorrect username?"); break; case 'c': chroot_path = optarg; break; default: warnx("Unknown argument %s", argv[optind]); usage(); } } /* * After the other arguments we expect memory and kernel image name, * followed by command line arguments for the kernel. */ if (optind + 2 > argc) usage(); verbose("Guest base is at %p\n", guest_base); /* Initialize the (fake) PCI host bridge device. */ init_pci_host_bridge(); /* Now we load the kernel */ start = load_kernel(open_or_die(argv[optind+1], O_RDONLY)); /* Boot information is stashed at physical address 0 */ boot = from_guest_phys(0); /* Map the initrd image if requested (at top of physical memory) */ if (initrd_name) { initrd_size = load_initrd(initrd_name, mem); /* * These are the location in the Linux boot header where the * start and size of the initrd are expected to be found. */ boot->hdr.ramdisk_image = mem - initrd_size; boot->hdr.ramdisk_size = initrd_size; /* The bootloader type 0xFF means "unknown"; that's OK. */ boot->hdr.type_of_loader = 0xFF; } /* * The Linux boot header contains an "E820" memory map: ours is a * simple, single region. 
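 *
 * (For the curious: an e820 entry is just { u64 addr; u64 size; u32 type; },
 * and E820_RAM means "ordinary RAM".  So the single entry below says "mem
 * bytes of RAM, starting at address 0" -- exactly the region we mapped for
 * the Guest at the top of main().)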
*/ boot->e820_entries = 1; boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM }); /* * The boot header contains a command line pointer: we put the command * line after the boot header. */ boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1); /* We use a simple helper to copy the arguments separated by spaces. */ concat((char *)(boot + 1), argv+optind+2); /* Set kernel alignment to 16M (CONFIG_PHYSICAL_ALIGN) */ boot->hdr.kernel_alignment = 0x1000000; /* Boot protocol version: 2.07 supports the fields for lguest. */ boot->hdr.version = 0x207; /* The hardware_subarch value of "1" tells the Guest it's an lguest. */ boot->hdr.hardware_subarch = 1; /* Tell the entry path not to try to reload segment registers. */ boot->hdr.loadflags |= KEEP_SEGMENTS; /* We tell the kernel to initialize the Guest. */ tell_kernel(start); /* Ensure that we terminate if a device-servicing child dies. */ signal(SIGCHLD, kill_launcher); /* If we exit via err(), this kills all the threads, restores tty. */ atexit(cleanup_devices); /* If requested, chroot to a directory */ if (chroot_path) { if (chroot(chroot_path) != 0) err(1, "chroot(\"%s\") failed", chroot_path); if (chdir("/") != 0) err(1, "chdir(\"/\") failed"); verbose("chroot done\n"); } /* If requested, drop privileges */ if (user_details) { uid_t u; gid_t g; u = user_details->pw_uid; g = user_details->pw_gid; if (initgroups(user_details->pw_name, g) != 0) err(1, "initgroups failed"); if (setresgid(g, g, g) != 0) err(1, "setresgid failed"); if (setresuid(u, u, u) != 0) err(1, "setresuid failed"); verbose("Dropping privileges completed\n"); } /* Finally, run the Guest. This doesn't return. */ run_guest(); } /*:*/ /*M:999 * Mastery is done: you now know everything I do. * * But surely you have seen code, features and bugs in your wanderings which * you now yearn to attack? That is the real game, and I look forward to you * patching and forking lguest into the Your-Name-Here-visor. * * Farewell, and good coding! * Rusty Russell. */
gpl-2.0
jderrick/linux-torvalds
drivers/android/binder.c
524
103880
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool,
		   S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int
binder_set_stop_on_user_error(const char *val, struct kernel_param *kp) { int ret; ret = param_set_int(val, kp); if (binder_stop_on_user_error < 2) wake_up(&binder_user_error_wait); return ret; } module_param_call(stop_on_user_error, binder_set_stop_on_user_error, param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); #define binder_debug(mask, x...) \ do { \ if (binder_debug_mask & mask) \ pr_info(x); \ } while (0) #define binder_user_error(x...) \ do { \ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ pr_info(x); \ if (binder_stop_on_user_error) \ binder_stop_on_user_error = 2; \ } while (0) enum binder_stat_types { BINDER_STAT_PROC, BINDER_STAT_THREAD, BINDER_STAT_NODE, BINDER_STAT_REF, BINDER_STAT_DEATH, BINDER_STAT_TRANSACTION, BINDER_STAT_TRANSACTION_COMPLETE, BINDER_STAT_COUNT }; struct binder_stats { int br[_IOC_NR(BR_FAILED_REPLY) + 1]; int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; int obj_created[BINDER_STAT_COUNT]; int obj_deleted[BINDER_STAT_COUNT]; }; static struct binder_stats binder_stats; static inline void binder_stats_deleted(enum binder_stat_types type) { binder_stats.obj_deleted[type]++; } static inline void binder_stats_created(enum binder_stat_types type) { binder_stats.obj_created[type]++; } struct binder_transaction_log_entry { int debug_id; int call_type; int from_proc; int from_thread; int target_handle; int to_proc; int to_thread; int to_node; int data_size; int offsets_size; }; struct binder_transaction_log { int next; int full; struct binder_transaction_log_entry entry[32]; }; static struct binder_transaction_log binder_transaction_log; static struct binder_transaction_log binder_transaction_log_failed; static struct binder_transaction_log_entry *binder_transaction_log_add( struct binder_transaction_log *log) { struct binder_transaction_log_entry *e; e = &log->entry[log->next]; memset(e, 0, sizeof(*e)); log->next++; if (log->next == ARRAY_SIZE(log->entry)) { log->next = 0; log->full = 1; } return e; } struct binder_work { struct list_head entry; enum { BINDER_WORK_TRANSACTION = 1, BINDER_WORK_TRANSACTION_COMPLETE, BINDER_WORK_NODE, BINDER_WORK_DEAD_BINDER, BINDER_WORK_DEAD_BINDER_AND_CLEAR, BINDER_WORK_CLEAR_DEATH_NOTIFICATION, } type; }; struct binder_node { int debug_id; struct binder_work work; union { struct rb_node rb_node; struct hlist_node dead_node; }; struct binder_proc *proc; struct hlist_head refs; int internal_strong_refs; int local_weak_refs; int local_strong_refs; binder_uintptr_t ptr; binder_uintptr_t cookie; unsigned has_strong_ref:1; unsigned pending_strong_ref:1; unsigned has_weak_ref:1; unsigned pending_weak_ref:1; unsigned has_async_transaction:1; unsigned accept_fds:1; unsigned min_priority:8; struct list_head async_todo; }; struct binder_ref_death { struct binder_work work; binder_uintptr_t cookie; }; struct binder_ref { /* Lookups needed: */ /* node + proc => ref (transaction) */ /* desc + proc => ref (transaction, inc/dec ref) */ /* node => refs + procs (proc exit) */ int debug_id; struct rb_node rb_node_desc; struct rb_node rb_node_node; struct hlist_node node_entry; struct binder_proc *proc; struct binder_node *node; uint32_t desc; int strong; int weak; struct binder_ref_death *death; }; struct binder_buffer { struct list_head entry; /* free and allocated entries by address */ struct rb_node rb_node; /* free entry by size or allocated entry */ /* by address */ unsigned free:1; unsigned allow_user_free:1; unsigned async_transaction:1; unsigned debug_id:29; struct binder_transaction *transaction; struct binder_node *target_node; 
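	/* sizes of the payload and its offsets array, both stored in data[] */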
size_t data_size; size_t offsets_size; uint8_t data[0]; }; enum binder_deferred_state { BINDER_DEFERRED_PUT_FILES = 0x01, BINDER_DEFERRED_FLUSH = 0x02, BINDER_DEFERRED_RELEASE = 0x04, }; struct binder_proc { struct hlist_node proc_node; struct rb_root threads; struct rb_root nodes; struct rb_root refs_by_desc; struct rb_root refs_by_node; int pid; struct vm_area_struct *vma; struct mm_struct *vma_vm_mm; struct task_struct *tsk; struct files_struct *files; struct hlist_node deferred_work_node; int deferred_work; void *buffer; ptrdiff_t user_buffer_offset; struct list_head buffers; struct rb_root free_buffers; struct rb_root allocated_buffers; size_t free_async_space; struct page **pages; size_t buffer_size; uint32_t buffer_free; struct list_head todo; wait_queue_head_t wait; struct binder_stats stats; struct list_head delivered_death; int max_threads; int requested_threads; int requested_threads_started; int ready_threads; long default_priority; struct dentry *debugfs_entry; }; enum { BINDER_LOOPER_STATE_REGISTERED = 0x01, BINDER_LOOPER_STATE_ENTERED = 0x02, BINDER_LOOPER_STATE_EXITED = 0x04, BINDER_LOOPER_STATE_INVALID = 0x08, BINDER_LOOPER_STATE_WAITING = 0x10, BINDER_LOOPER_STATE_NEED_RETURN = 0x20 }; struct binder_thread { struct binder_proc *proc; struct rb_node rb_node; int pid; int looper; struct binder_transaction *transaction_stack; struct list_head todo; uint32_t return_error; /* Write failed, return error code in read buf */ uint32_t return_error2; /* Write failed, return error code in read */ /* buffer. Used when sending a reply to a dead process that */ /* we are also waiting on */ wait_queue_head_t wait; struct binder_stats stats; }; struct binder_transaction { int debug_id; struct binder_work work; struct binder_thread *from; struct binder_transaction *from_parent; struct binder_proc *to_proc; struct binder_thread *to_thread; struct binder_transaction *to_parent; unsigned need_reply:1; /* unsigned is_dead:1; */ /* not used at the moment */ struct binder_buffer *buffer; unsigned int code; unsigned int flags; long priority; long saved_priority; kuid_t sender_euid; }; static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) { struct files_struct *files = proc->files; unsigned long rlim_cur; unsigned long irqs; if (files == NULL) return -ESRCH; if (!lock_task_sighand(proc->tsk, &irqs)) return -EMFILE; rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); unlock_task_sighand(proc->tsk, &irqs); return __alloc_fd(files, 0, rlim_cur, flags); } /* * copied from fd_install */ static void task_fd_install( struct binder_proc *proc, unsigned int fd, struct file *file) { if (proc->files) __fd_install(proc->files, fd, file); } /* * copied from sys_close */ static long task_close_fd(struct binder_proc *proc, unsigned int fd) { int retval; if (proc->files == NULL) return -ESRCH; retval = __close_fd(proc->files, fd); /* can't restart close syscall because file table entry was cleared */ if (unlikely(retval == -ERESTARTSYS || retval == -ERESTARTNOINTR || retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK)) retval = -EINTR; return retval; } static inline void binder_lock(const char *tag) { trace_binder_lock(tag); mutex_lock(&binder_main_lock); trace_binder_locked(tag); } static inline void binder_unlock(const char *tag) { trace_binder_unlock(tag); mutex_unlock(&binder_main_lock); } static void binder_set_nice(long nice) { long min_nice; if (can_nice(current, nice)) { 
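		/*
		 * We're allowed this nice value: either it is within the
		 * task's RLIMIT_NICE, or we have CAP_SYS_NICE.  Just set it.
		 */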
set_user_nice(current, nice); return; } min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur); binder_debug(BINDER_DEBUG_PRIORITY_CAP, "%d: nice value %ld not allowed use %ld instead\n", current->pid, nice, min_nice); set_user_nice(current, min_nice); if (min_nice <= MAX_NICE) return; binder_user_error("%d RLIMIT_NICE not set\n", current->pid); } static size_t binder_buffer_size(struct binder_proc *proc, struct binder_buffer *buffer) { if (list_is_last(&buffer->entry, &proc->buffers)) return proc->buffer + proc->buffer_size - (void *)buffer->data; return (size_t)list_entry(buffer->entry.next, struct binder_buffer, entry) - (size_t)buffer->data; } static void binder_insert_free_buffer(struct binder_proc *proc, struct binder_buffer *new_buffer) { struct rb_node **p = &proc->free_buffers.rb_node; struct rb_node *parent = NULL; struct binder_buffer *buffer; size_t buffer_size; size_t new_buffer_size; BUG_ON(!new_buffer->free); new_buffer_size = binder_buffer_size(proc, new_buffer); binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: add free buffer, size %zd, at %p\n", proc->pid, new_buffer_size, new_buffer); while (*p) { parent = *p; buffer = rb_entry(parent, struct binder_buffer, rb_node); BUG_ON(!buffer->free); buffer_size = binder_buffer_size(proc, buffer); if (new_buffer_size < buffer_size) p = &parent->rb_left; else p = &parent->rb_right; } rb_link_node(&new_buffer->rb_node, parent, p); rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); } static void binder_insert_allocated_buffer(struct binder_proc *proc, struct binder_buffer *new_buffer) { struct rb_node **p = &proc->allocated_buffers.rb_node; struct rb_node *parent = NULL; struct binder_buffer *buffer; BUG_ON(new_buffer->free); while (*p) { parent = *p; buffer = rb_entry(parent, struct binder_buffer, rb_node); BUG_ON(buffer->free); if (new_buffer < buffer) p = &parent->rb_left; else if (new_buffer > buffer) p = &parent->rb_right; else BUG(); } rb_link_node(&new_buffer->rb_node, parent, p); rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); } static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, uintptr_t user_ptr) { struct rb_node *n = proc->allocated_buffers.rb_node; struct binder_buffer *buffer; struct binder_buffer *kern_ptr; kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset - offsetof(struct binder_buffer, data)); while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(buffer->free); if (kern_ptr < buffer) n = n->rb_left; else if (kern_ptr > buffer) n = n->rb_right; else return buffer; } return NULL; } static int binder_update_page_range(struct binder_proc *proc, int allocate, void *start, void *end, struct vm_area_struct *vma) { void *page_addr; unsigned long user_page_addr; struct page **page; struct mm_struct *mm; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: %s pages %p-%p\n", proc->pid, allocate ? 
"allocate" : "free", start, end); if (end <= start) return 0; trace_binder_update_page_range(proc, allocate, start, end); if (vma) mm = NULL; else mm = get_task_mm(proc->tsk); if (mm) { down_write(&mm->mmap_sem); vma = proc->vma; if (vma && mm != proc->vma_vm_mm) { pr_err("%d: vma mm and task mm mismatch\n", proc->pid); vma = NULL; } } if (allocate == 0) goto free_range; if (vma == NULL) { pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", proc->pid); goto err_no_vma; } for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { int ret; page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; BUG_ON(*page); *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); if (*page == NULL) { pr_err("%d: binder_alloc_buf failed for page at %p\n", proc->pid, page_addr); goto err_alloc_page_failed; } ret = map_kernel_range_noflush((unsigned long)page_addr, PAGE_SIZE, PAGE_KERNEL, page); flush_cache_vmap((unsigned long)page_addr, (unsigned long)page_addr + PAGE_SIZE); if (ret != 1) { pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", proc->pid, page_addr); goto err_map_kernel_failed; } user_page_addr = (uintptr_t)page_addr + proc->user_buffer_offset; ret = vm_insert_page(vma, user_page_addr, page[0]); if (ret) { pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", proc->pid, user_page_addr); goto err_vm_insert_page_failed; } /* vm_insert_page does not seem to increment the refcount */ } if (mm) { up_write(&mm->mmap_sem); mmput(mm); } return 0; free_range: for (page_addr = end - PAGE_SIZE; page_addr >= start; page_addr -= PAGE_SIZE) { page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; if (vma) zap_page_range(vma, (uintptr_t)page_addr + proc->user_buffer_offset, PAGE_SIZE, NULL); err_vm_insert_page_failed: unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); err_map_kernel_failed: __free_page(*page); *page = NULL; err_alloc_page_failed: ; } err_no_vma: if (mm) { up_write(&mm->mmap_sem); mmput(mm); } return -ENOMEM; } static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, size_t data_size, size_t offsets_size, int is_async) { struct rb_node *n = proc->free_buffers.rb_node; struct binder_buffer *buffer; size_t buffer_size; struct rb_node *best_fit = NULL; void *has_page_addr; void *end_page_addr; size_t size; if (proc->vma == NULL) { pr_err("%d: binder_alloc_buf, no vma\n", proc->pid); return NULL; } size = ALIGN(data_size, sizeof(void *)) + ALIGN(offsets_size, sizeof(void *)); if (size < data_size || size < offsets_size) { binder_user_error("%d: got transaction with invalid size %zd-%zd\n", proc->pid, data_size, offsets_size); return NULL; } if (is_async && proc->free_async_space < size + sizeof(struct binder_buffer)) { binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd failed, no async space left\n", proc->pid, size); return NULL; } while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(!buffer->free); buffer_size = binder_buffer_size(proc, buffer); if (size < buffer_size) { best_fit = n; n = n->rb_left; } else if (size > buffer_size) n = n->rb_right; else { best_fit = n; break; } } if (best_fit == NULL) { pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", proc->pid, size); return NULL; } if (n == NULL) { buffer = rb_entry(best_fit, struct binder_buffer, rb_node); buffer_size = binder_buffer_size(proc, buffer); } binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got buffer %p size %zd\n", proc->pid, size, buffer, buffer_size); 
has_page_addr = (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); if (n == NULL) { if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) buffer_size = size; /* no room for other buffers */ else buffer_size = size + sizeof(struct binder_buffer); } end_page_addr = (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); if (end_page_addr > has_page_addr) end_page_addr = has_page_addr; if (binder_update_page_range(proc, 1, (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) return NULL; rb_erase(best_fit, &proc->free_buffers); buffer->free = 0; binder_insert_allocated_buffer(proc, buffer); if (buffer_size != size) { struct binder_buffer *new_buffer = (void *)buffer->data + size; list_add(&new_buffer->entry, &buffer->entry); new_buffer->free = 1; binder_insert_free_buffer(proc, new_buffer); } binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got %p\n", proc->pid, size, buffer); buffer->data_size = data_size; buffer->offsets_size = offsets_size; buffer->async_transaction = is_async; if (is_async) { proc->free_async_space -= size + sizeof(struct binder_buffer); binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_alloc_buf size %zd async free %zd\n", proc->pid, size, proc->free_async_space); } return buffer; } static void *buffer_start_page(struct binder_buffer *buffer) { return (void *)((uintptr_t)buffer & PAGE_MASK); } static void *buffer_end_page(struct binder_buffer *buffer) { return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); } static void binder_delete_free_buffer(struct binder_proc *proc, struct binder_buffer *buffer) { struct binder_buffer *prev, *next = NULL; int free_page_end = 1; int free_page_start = 1; BUG_ON(proc->buffers.next == &buffer->entry); prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); BUG_ON(!prev->free); if (buffer_end_page(prev) == buffer_start_page(buffer)) { free_page_start = 0; if (buffer_end_page(prev) == buffer_end_page(buffer)) free_page_end = 0; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer %p share page with %p\n", proc->pid, buffer, prev); } if (!list_is_last(&buffer->entry, &proc->buffers)) { next = list_entry(buffer->entry.next, struct binder_buffer, entry); if (buffer_start_page(next) == buffer_end_page(buffer)) { free_page_end = 0; if (buffer_start_page(next) == buffer_start_page(buffer)) free_page_start = 0; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer %p share page with %p\n", proc->pid, buffer, prev); } } list_del(&buffer->entry); if (free_page_start || free_page_end) { binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer %p do not share page%s%s with %p or %p\n", proc->pid, buffer, free_page_start ? "" : " end", free_page_end ? "" : " start", prev, next); binder_update_page_range(proc, 0, free_page_start ? buffer_start_page(buffer) : buffer_end_page(buffer), (free_page_end ? 
buffer_end_page(buffer) : buffer_start_page(buffer)) + PAGE_SIZE, NULL); } } static void binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) { size_t size, buffer_size; buffer_size = binder_buffer_size(proc, buffer); size = ALIGN(buffer->data_size, sizeof(void *)) + ALIGN(buffer->offsets_size, sizeof(void *)); binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_free_buf %p size %zd buffer_size %zd\n", proc->pid, buffer, size, buffer_size); BUG_ON(buffer->free); BUG_ON(size > buffer_size); BUG_ON(buffer->transaction != NULL); BUG_ON((void *)buffer < proc->buffer); BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); if (buffer->async_transaction) { proc->free_async_space += size + sizeof(struct binder_buffer); binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_free_buf size %zd async free %zd\n", proc->pid, size, proc->free_async_space); } binder_update_page_range(proc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), NULL); rb_erase(&buffer->rb_node, &proc->allocated_buffers); buffer->free = 1; if (!list_is_last(&buffer->entry, &proc->buffers)) { struct binder_buffer *next = list_entry(buffer->entry.next, struct binder_buffer, entry); if (next->free) { rb_erase(&next->rb_node, &proc->free_buffers); binder_delete_free_buffer(proc, next); } } if (proc->buffers.next != &buffer->entry) { struct binder_buffer *prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); if (prev->free) { binder_delete_free_buffer(proc, buffer); rb_erase(&prev->rb_node, &proc->free_buffers); buffer = prev; } } binder_insert_free_buffer(proc, buffer); } static struct binder_node *binder_get_node(struct binder_proc *proc, binder_uintptr_t ptr) { struct rb_node *n = proc->nodes.rb_node; struct binder_node *node; while (n) { node = rb_entry(n, struct binder_node, rb_node); if (ptr < node->ptr) n = n->rb_left; else if (ptr > node->ptr) n = n->rb_right; else return node; } return NULL; } static struct binder_node *binder_new_node(struct binder_proc *proc, binder_uintptr_t ptr, binder_uintptr_t cookie) { struct rb_node **p = &proc->nodes.rb_node; struct rb_node *parent = NULL; struct binder_node *node; while (*p) { parent = *p; node = rb_entry(parent, struct binder_node, rb_node); if (ptr < node->ptr) p = &(*p)->rb_left; else if (ptr > node->ptr) p = &(*p)->rb_right; else return NULL; } node = kzalloc(sizeof(*node), GFP_KERNEL); if (node == NULL) return NULL; binder_stats_created(BINDER_STAT_NODE); rb_link_node(&node->rb_node, parent, p); rb_insert_color(&node->rb_node, &proc->nodes); node->debug_id = ++binder_last_id; node->proc = proc; node->ptr = ptr; node->cookie = cookie; node->work.type = BINDER_WORK_NODE; INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d:%d node %d u%016llx c%016llx created\n", proc->pid, current->pid, node->debug_id, (u64)node->ptr, (u64)node->cookie); return node; } static int binder_inc_node(struct binder_node *node, int strong, int internal, struct list_head *target_list) { if (strong) { if (internal) { if (target_list == NULL && node->internal_strong_refs == 0 && !(node == binder_context_mgr_node && node->has_strong_ref)) { pr_err("invalid inc strong node for %d\n", node->debug_id); return -EINVAL; } node->internal_strong_refs++; } else node->local_strong_refs++; if (!node->has_strong_ref && target_list) { list_del_init(&node->work.entry); list_add_tail(&node->work.entry, target_list); } } else { if (!internal) 
node->local_weak_refs++; if (!node->has_weak_ref && list_empty(&node->work.entry)) { if (target_list == NULL) { pr_err("invalid inc weak node for %d\n", node->debug_id); return -EINVAL; } list_add_tail(&node->work.entry, target_list); } } return 0; } static int binder_dec_node(struct binder_node *node, int strong, int internal) { if (strong) { if (internal) node->internal_strong_refs--; else node->local_strong_refs--; if (node->local_strong_refs || node->internal_strong_refs) return 0; } else { if (!internal) node->local_weak_refs--; if (node->local_weak_refs || !hlist_empty(&node->refs)) return 0; } if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { if (list_empty(&node->work.entry)) { list_add_tail(&node->work.entry, &node->proc->todo); wake_up_interruptible(&node->proc->wait); } } else { if (hlist_empty(&node->refs) && !node->local_strong_refs && !node->local_weak_refs) { list_del_init(&node->work.entry); if (node->proc) { rb_erase(&node->rb_node, &node->proc->nodes); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "refless node %d deleted\n", node->debug_id); } else { hlist_del(&node->dead_node); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "dead node %d deleted\n", node->debug_id); } kfree(node); binder_stats_deleted(BINDER_STAT_NODE); } } return 0; } static struct binder_ref *binder_get_ref(struct binder_proc *proc, uint32_t desc) { struct rb_node *n = proc->refs_by_desc.rb_node; struct binder_ref *ref; while (n) { ref = rb_entry(n, struct binder_ref, rb_node_desc); if (desc < ref->desc) n = n->rb_left; else if (desc > ref->desc) n = n->rb_right; else return ref; } return NULL; } static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, struct binder_node *node) { struct rb_node *n; struct rb_node **p = &proc->refs_by_node.rb_node; struct rb_node *parent = NULL; struct binder_ref *ref, *new_ref; while (*p) { parent = *p; ref = rb_entry(parent, struct binder_ref, rb_node_node); if (node < ref->node) p = &(*p)->rb_left; else if (node > ref->node) p = &(*p)->rb_right; else return ref; } new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); if (new_ref == NULL) return NULL; binder_stats_created(BINDER_STAT_REF); new_ref->debug_id = ++binder_last_id; new_ref->proc = proc; new_ref->node = node; rb_link_node(&new_ref->rb_node_node, parent, p); rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); new_ref->desc = (node == binder_context_mgr_node) ? 
0 : 1; for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { ref = rb_entry(n, struct binder_ref, rb_node_desc); if (ref->desc > new_ref->desc) break; new_ref->desc = ref->desc + 1; } p = &proc->refs_by_desc.rb_node; while (*p) { parent = *p; ref = rb_entry(parent, struct binder_ref, rb_node_desc); if (new_ref->desc < ref->desc) p = &(*p)->rb_left; else if (new_ref->desc > ref->desc) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new_ref->rb_node_desc, parent, p); rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); if (node) { hlist_add_head(&new_ref->node_entry, &node->refs); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d new ref %d desc %d for node %d\n", proc->pid, new_ref->debug_id, new_ref->desc, node->debug_id); } else { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d new ref %d desc %d for dead node\n", proc->pid, new_ref->debug_id, new_ref->desc); } return new_ref; } static void binder_delete_ref(struct binder_ref *ref) { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d delete ref %d desc %d for node %d\n", ref->proc->pid, ref->debug_id, ref->desc, ref->node->debug_id); rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); if (ref->strong) binder_dec_node(ref->node, 1, 1); hlist_del(&ref->node_entry); binder_dec_node(ref->node, 0, 1); if (ref->death) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "%d delete ref %d desc %d has death notification\n", ref->proc->pid, ref->debug_id, ref->desc); list_del(&ref->death->work.entry); kfree(ref->death); binder_stats_deleted(BINDER_STAT_DEATH); } kfree(ref); binder_stats_deleted(BINDER_STAT_REF); } static int binder_inc_ref(struct binder_ref *ref, int strong, struct list_head *target_list) { int ret; if (strong) { if (ref->strong == 0) { ret = binder_inc_node(ref->node, 1, 1, target_list); if (ret) return ret; } ref->strong++; } else { if (ref->weak == 0) { ret = binder_inc_node(ref->node, 0, 1, target_list); if (ret) return ret; } ref->weak++; } return 0; } static int binder_dec_ref(struct binder_ref *ref, int strong) { if (strong) { if (ref->strong == 0) { binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", ref->proc->pid, ref->debug_id, ref->desc, ref->strong, ref->weak); return -EINVAL; } ref->strong--; if (ref->strong == 0) { int ret; ret = binder_dec_node(ref->node, strong, 1); if (ret) return ret; } } else { if (ref->weak == 0) { binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", ref->proc->pid, ref->debug_id, ref->desc, ref->strong, ref->weak); return -EINVAL; } ref->weak--; } if (ref->strong == 0 && ref->weak == 0) binder_delete_ref(ref); return 0; } static void binder_pop_transaction(struct binder_thread *target_thread, struct binder_transaction *t) { if (target_thread) { BUG_ON(target_thread->transaction_stack != t); BUG_ON(target_thread->transaction_stack->from != target_thread); target_thread->transaction_stack = target_thread->transaction_stack->from_parent; t->from = NULL; } t->need_reply = 0; if (t->buffer) t->buffer->transaction = NULL; kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } static void binder_send_failed_reply(struct binder_transaction *t, uint32_t error_code) { struct binder_thread *target_thread; struct binder_transaction *next; BUG_ON(t->flags & TF_ONE_WAY); while (1) { target_thread = t->from; if (target_thread) { if (target_thread->return_error != BR_OK && target_thread->return_error2 == BR_OK) { target_thread->return_error2 = target_thread->return_error; target_thread->return_error = BR_OK; } 
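			/*
			 * Only deliver our error if the target thread isn't
			 * already carrying one it hasn't read yet; otherwise
			 * we'd overwrite that earlier error code.
			 */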
if (target_thread->return_error == BR_OK) { binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "send failed reply for transaction %d to %d:%d\n", t->debug_id, target_thread->proc->pid, target_thread->pid); binder_pop_transaction(target_thread, t); target_thread->return_error = error_code; wake_up_interruptible(&target_thread->wait); } else { pr_err("reply failed, target thread, %d:%d, has error code %d already\n", target_thread->proc->pid, target_thread->pid, target_thread->return_error); } return; } next = t->from_parent; binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "send failed reply for transaction %d, target dead\n", t->debug_id); binder_pop_transaction(target_thread, t); if (next == NULL) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "reply failed, no target thread at root\n"); return; } t = next; binder_debug(BINDER_DEBUG_DEAD_BINDER, "reply failed, no target thread -- retry %d\n", t->debug_id); } } static void binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer *buffer, binder_size_t *failed_at) { binder_size_t *offp, *off_end; int debug_id = buffer->debug_id; binder_debug(BINDER_DEBUG_TRANSACTION, "%d buffer release %d, size %zd-%zd, failed at %p\n", proc->pid, buffer->debug_id, buffer->data_size, buffer->offsets_size, failed_at); if (buffer->target_node) binder_dec_node(buffer->target_node, 1, 0); offp = (binder_size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); if (failed_at) off_end = failed_at; else off_end = (void *)offp + buffer->offsets_size; for (; offp < off_end; offp++) { struct flat_binder_object *fp; if (*offp > buffer->data_size - sizeof(*fp) || buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(u32))) { pr_err("transaction release %d bad offset %lld, size %zd\n", debug_id, (u64)*offp, buffer->data_size); continue; } fp = (struct flat_binder_object *)(buffer->data + *offp); switch (fp->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { struct binder_node *node = binder_get_node(proc, fp->binder); if (node == NULL) { pr_err("transaction release %d bad node %016llx\n", debug_id, (u64)fp->binder); break; } binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%016llx\n", node->debug_id, (u64)node->ptr); binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { struct binder_ref *ref = binder_get_ref(proc, fp->handle); if (ref == NULL) { pr_err("transaction release %d bad handle %d\n", debug_id, fp->handle); break; } binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d (node %d)\n", ref->debug_id, ref->desc, ref->node->debug_id); binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); } break; case BINDER_TYPE_FD: binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d\n", fp->handle); if (failed_at) task_close_fd(proc, fp->handle); break; default: pr_err("transaction release %d bad object type %x\n", debug_id, fp->type); break; } } } static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply) { struct binder_transaction *t; struct binder_work *tcomplete; binder_size_t *offp, *off_end; struct binder_proc *target_proc; struct binder_thread *target_thread = NULL; struct binder_node *target_node = NULL; struct list_head *target_list; wait_queue_head_t *target_wait; struct binder_transaction *in_reply_to = NULL; struct binder_transaction_log_entry *e; uint32_t return_error; e = binder_transaction_log_add(&binder_transaction_log); e->call_type = reply ? 
2 : !!(tr->flags & TF_ONE_WAY); e->from_proc = proc->pid; e->from_thread = thread->pid; e->target_handle = tr->target.handle; e->data_size = tr->data_size; e->offsets_size = tr->offsets_size; if (reply) { in_reply_to = thread->transaction_stack; if (in_reply_to == NULL) { binder_user_error("%d:%d got reply transaction with no transaction stack\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_empty_call_stack; } binder_set_nice(in_reply_to->saved_priority); if (in_reply_to->to_thread != thread) { binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", proc->pid, thread->pid, in_reply_to->debug_id, in_reply_to->to_proc ? in_reply_to->to_proc->pid : 0, in_reply_to->to_thread ? in_reply_to->to_thread->pid : 0); return_error = BR_FAILED_REPLY; in_reply_to = NULL; goto err_bad_call_stack; } thread->transaction_stack = in_reply_to->to_parent; target_thread = in_reply_to->from; if (target_thread == NULL) { return_error = BR_DEAD_REPLY; goto err_dead_binder; } if (target_thread->transaction_stack != in_reply_to) { binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", proc->pid, thread->pid, target_thread->transaction_stack ? target_thread->transaction_stack->debug_id : 0, in_reply_to->debug_id); return_error = BR_FAILED_REPLY; in_reply_to = NULL; target_thread = NULL; goto err_dead_binder; } target_proc = target_thread->proc; } else { if (tr->target.handle) { struct binder_ref *ref; ref = binder_get_ref(proc, tr->target.handle); if (ref == NULL) { binder_user_error("%d:%d got transaction to invalid handle\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_invalid_target_handle; } target_node = ref->node; } else { target_node = binder_context_mgr_node; if (target_node == NULL) { return_error = BR_DEAD_REPLY; goto err_no_context_mgr_node; } } e->to_node = target_node->debug_id; target_proc = target_node->proc; if (target_proc == NULL) { return_error = BR_DEAD_REPLY; goto err_dead_binder; } if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) { return_error = BR_FAILED_REPLY; goto err_invalid_target_handle; } if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { struct binder_transaction *tmp; tmp = thread->transaction_stack; if (tmp->to_thread != thread) { binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", proc->pid, thread->pid, tmp->debug_id, tmp->to_proc ? tmp->to_proc->pid : 0, tmp->to_thread ? 
tmp->to_thread->pid : 0); return_error = BR_FAILED_REPLY; goto err_bad_call_stack; } while (tmp) { if (tmp->from && tmp->from->proc == target_proc) target_thread = tmp->from; tmp = tmp->from_parent; } } } if (target_thread) { e->to_thread = target_thread->pid; target_list = &target_thread->todo; target_wait = &target_thread->wait; } else { target_list = &target_proc->todo; target_wait = &target_proc->wait; } e->to_proc = target_proc->pid; /* TODO: reuse incoming transaction for reply */ t = kzalloc(sizeof(*t), GFP_KERNEL); if (t == NULL) { return_error = BR_FAILED_REPLY; goto err_alloc_t_failed; } binder_stats_created(BINDER_STAT_TRANSACTION); tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); if (tcomplete == NULL) { return_error = BR_FAILED_REPLY; goto err_alloc_tcomplete_failed; } binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); t->debug_id = ++binder_last_id; e->debug_id = t->debug_id; if (reply) binder_debug(BINDER_DEBUG_TRANSACTION, "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n", proc->pid, thread->pid, t->debug_id, target_proc->pid, target_thread->pid, (u64)tr->data.ptr.buffer, (u64)tr->data.ptr.offsets, (u64)tr->data_size, (u64)tr->offsets_size); else binder_debug(BINDER_DEBUG_TRANSACTION, "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n", proc->pid, thread->pid, t->debug_id, target_proc->pid, target_node->debug_id, (u64)tr->data.ptr.buffer, (u64)tr->data.ptr.offsets, (u64)tr->data_size, (u64)tr->offsets_size); if (!reply && !(tr->flags & TF_ONE_WAY)) t->from = thread; else t->from = NULL; t->sender_euid = task_euid(proc->tsk); t->to_proc = target_proc; t->to_thread = target_thread; t->code = tr->code; t->flags = tr->flags; t->priority = task_nice(current); trace_binder_transaction(reply, t, target_node); t->buffer = binder_alloc_buf(target_proc, tr->data_size, tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); if (t->buffer == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_alloc_buf_failed; } t->buffer->allow_user_free = 0; t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; trace_binder_transaction_alloc_buf(t->buffer); if (target_node) binder_inc_node(target_node, 1, 0, NULL); offp = (binder_size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) tr->data.ptr.buffer, tr->data_size)) { binder_user_error("%d:%d got transaction with invalid data ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_copy_data_failed; } if (copy_from_user(offp, (const void __user *)(uintptr_t) tr->data.ptr.offsets, tr->offsets_size)) { binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_copy_data_failed; } if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", proc->pid, thread->pid, (u64)tr->offsets_size); return_error = BR_FAILED_REPLY; goto err_bad_offset; } off_end = (void *)offp + tr->offsets_size; for (; offp < off_end; offp++) { struct flat_binder_object *fp; if (*offp > t->buffer->data_size - sizeof(*fp) || t->buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(u32))) { binder_user_error("%d:%d got transaction with invalid offset, %lld\n", proc->pid, thread->pid, (u64)*offp); return_error = BR_FAILED_REPLY; goto err_bad_offset; } fp = (struct flat_binder_object *)(t->buffer->data + *offp); switch 
(fp->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { struct binder_ref *ref; struct binder_node *node = binder_get_node(proc, fp->binder); if (node == NULL) { node = binder_new_node(proc, fp->binder, fp->cookie); if (node == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_new_node_failed; } node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); } if (fp->cookie != node->cookie) { binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", proc->pid, thread->pid, (u64)fp->binder, node->debug_id, (u64)fp->cookie, (u64)node->cookie); return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } ref = binder_get_ref_for_node(target_proc, node); if (ref == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } if (fp->type == BINDER_TYPE_BINDER) fp->type = BINDER_TYPE_HANDLE; else fp->type = BINDER_TYPE_WEAK_HANDLE; fp->handle = ref->desc; binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo); trace_binder_transaction_node_to_ref(t, node, ref); binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%016llx -> ref %d desc %d\n", node->debug_id, (u64)node->ptr, ref->debug_id, ref->desc); } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { struct binder_ref *ref = binder_get_ref(proc, fp->handle); if (ref == NULL) { binder_user_error("%d:%d got transaction with invalid handle, %d\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_binder_get_ref_failed; } if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_failed; } if (ref->node->proc == target_proc) { if (fp->type == BINDER_TYPE_HANDLE) fp->type = BINDER_TYPE_BINDER; else fp->type = BINDER_TYPE_WEAK_BINDER; fp->binder = ref->node->ptr; fp->cookie = ref->node->cookie; binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); trace_binder_transaction_ref_to_node(t, ref); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> node %d u%016llx\n", ref->debug_id, ref->desc, ref->node->debug_id, (u64)ref->node->ptr); } else { struct binder_ref *new_ref; new_ref = binder_get_ref_for_node(target_proc, ref->node); if (new_ref == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } fp->handle = new_ref->desc; binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); trace_binder_transaction_ref_to_ref(t, ref, new_ref); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> ref %d desc %d (node %d)\n", ref->debug_id, ref->desc, new_ref->debug_id, new_ref->desc, ref->node->debug_id); } } break; case BINDER_TYPE_FD: { int target_fd; struct file *file; if (reply) { if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fd_not_allowed; } } else if (!target_node->accept_fds) { binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fd_not_allowed; } file = fget(fp->handle); if (file == NULL) { binder_user_error("%d:%d got transaction with invalid fd, %d\n", proc->pid, thread->pid, fp->handle); return_error = 
BR_FAILED_REPLY; goto err_fget_failed; } if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) { fput(file); return_error = BR_FAILED_REPLY; goto err_get_unused_fd_failed; } target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); if (target_fd < 0) { fput(file); return_error = BR_FAILED_REPLY; goto err_get_unused_fd_failed; } task_fd_install(target_proc, target_fd, file); trace_binder_transaction_fd(t, fp->handle, target_fd); binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", fp->handle, target_fd); /* TODO: fput? */ fp->handle = target_fd; } break; default: binder_user_error("%d:%d got transaction with invalid object type, %x\n", proc->pid, thread->pid, fp->type); return_error = BR_FAILED_REPLY; goto err_bad_object_type; } } if (reply) { BUG_ON(t->buffer->async_transaction != 0); binder_pop_transaction(target_thread, in_reply_to); } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; } else { BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); if (target_node->has_async_transaction) { target_list = &target_node->async_todo; target_wait = NULL; } else target_node->has_async_transaction = 1; } t->work.type = BINDER_WORK_TRANSACTION; list_add_tail(&t->work.entry, target_list); tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; list_add_tail(&tcomplete->entry, &thread->todo); if (target_wait) wake_up_interruptible(target_wait); return; err_get_unused_fd_failed: err_fget_failed: err_fd_not_allowed: err_binder_get_ref_for_node_failed: err_binder_get_ref_failed: err_binder_new_node_failed: err_bad_object_type: err_bad_offset: err_copy_data_failed: trace_binder_transaction_failed_buffer_release(t->buffer); binder_transaction_buffer_release(target_proc, t->buffer, offp); t->buffer->transaction = NULL; binder_free_buf(target_proc, t->buffer); err_binder_alloc_buf_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); err_alloc_tcomplete_failed: kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); err_alloc_t_failed: err_bad_call_stack: err_empty_call_stack: err_dead_binder: err_invalid_target_handle: err_no_context_mgr_node: binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "%d:%d transaction failed %d, size %lld-%lld\n", proc->pid, thread->pid, return_error, (u64)tr->data_size, (u64)tr->offsets_size); { struct binder_transaction_log_entry *fe; fe = binder_transaction_log_add(&binder_transaction_log_failed); *fe = *e; } BUG_ON(thread->return_error != BR_OK); if (in_reply_to) { thread->return_error = BR_TRANSACTION_COMPLETE; binder_send_failed_reply(in_reply_to, return_error); } else thread->return_error = return_error; } static int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed) { uint32_t cmd; void __user *buffer = (void __user *)(uintptr_t)binder_buffer; void __user *ptr = buffer + *consumed; void __user *end = buffer + size; while (ptr < end && thread->return_error == BR_OK) { if (get_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); trace_binder_command(cmd); if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { binder_stats.bc[_IOC_NR(cmd)]++; proc->stats.bc[_IOC_NR(cmd)]++; thread->stats.bc[_IOC_NR(cmd)]++; } switch (cmd) { case BC_INCREFS: case BC_ACQUIRE: case BC_RELEASE: case BC_DECREFS: { uint32_t target; struct binder_ref *ref; const char *debug_string; if 
(get_user(target, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (target == 0 && binder_context_mgr_node && (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { ref = binder_get_ref_for_node(proc, binder_context_mgr_node); if (ref->desc != target) { binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", proc->pid, thread->pid, ref->desc); } } else ref = binder_get_ref(proc, target); if (ref == NULL) { binder_user_error("%d:%d refcount change on invalid ref %d\n", proc->pid, thread->pid, target); break; } switch (cmd) { case BC_INCREFS: debug_string = "IncRefs"; binder_inc_ref(ref, 0, NULL); break; case BC_ACQUIRE: debug_string = "Acquire"; binder_inc_ref(ref, 1, NULL); break; case BC_RELEASE: debug_string = "Release"; binder_dec_ref(ref, 1); break; case BC_DECREFS: default: debug_string = "DecRefs"; binder_dec_ref(ref, 0); break; } binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s ref %d desc %d s %d w %d for node %d\n", proc->pid, thread->pid, debug_string, ref->debug_id, ref->desc, ref->strong, ref->weak, ref->node->debug_id); break; } case BC_INCREFS_DONE: case BC_ACQUIRE_DONE: { binder_uintptr_t node_ptr; binder_uintptr_t cookie; struct binder_node *node; if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); node = binder_get_node(proc, node_ptr); if (node == NULL) { binder_user_error("%d:%d %s u%016llx no match\n", proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", (u64)node_ptr); break; } if (cookie != node->cookie) { binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", (u64)node_ptr, node->debug_id, (u64)cookie, (u64)node->cookie); break; } if (cmd == BC_ACQUIRE_DONE) { if (node->pending_strong_ref == 0) { binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", proc->pid, thread->pid, node->debug_id); break; } node->pending_strong_ref = 0; } else { if (node->pending_weak_ref == 0) { binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", proc->pid, thread->pid, node->debug_id); break; } node->pending_weak_ref = 0; } binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s node %d ls %d lw %d\n", proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", node->debug_id, node->local_strong_refs, node->local_weak_refs); break; } case BC_ATTEMPT_ACQUIRE: pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); return -EINVAL; case BC_ACQUIRE_RESULT: pr_err("BC_ACQUIRE_RESULT not supported\n"); return -EINVAL; case BC_FREE_BUFFER: { binder_uintptr_t data_ptr; struct binder_buffer *buffer; if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); buffer = binder_buffer_lookup(proc, data_ptr); if (buffer == NULL) { binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", proc->pid, thread->pid, (u64)data_ptr); break; } if (!buffer->allow_user_free) { binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", proc->pid, thread->pid, (u64)data_ptr); break; } binder_debug(BINDER_DEBUG_FREE_BUFFER, "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", proc->pid, thread->pid, (u64)data_ptr, buffer->debug_id, buffer->transaction ? 
"active" : "finished"); if (buffer->transaction) { buffer->transaction->buffer = NULL; buffer->transaction = NULL; } if (buffer->async_transaction && buffer->target_node) { BUG_ON(!buffer->target_node->has_async_transaction); if (list_empty(&buffer->target_node->async_todo)) buffer->target_node->has_async_transaction = 0; else list_move_tail(buffer->target_node->async_todo.next, &thread->todo); } trace_binder_transaction_buffer_release(buffer); binder_transaction_buffer_release(proc, buffer, NULL); binder_free_buf(proc, buffer); break; } case BC_TRANSACTION: case BC_REPLY: { struct binder_transaction_data tr; if (copy_from_user(&tr, ptr, sizeof(tr))) return -EFAULT; ptr += sizeof(tr); binder_transaction(proc, thread, &tr, cmd == BC_REPLY); break; } case BC_REGISTER_LOOPER: binder_debug(BINDER_DEBUG_THREADS, "%d:%d BC_REGISTER_LOOPER\n", proc->pid, thread->pid); if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", proc->pid, thread->pid); } else if (proc->requested_threads == 0) { thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", proc->pid, thread->pid); } else { proc->requested_threads--; proc->requested_threads_started++; } thread->looper |= BINDER_LOOPER_STATE_REGISTERED; break; case BC_ENTER_LOOPER: binder_debug(BINDER_DEBUG_THREADS, "%d:%d BC_ENTER_LOOPER\n", proc->pid, thread->pid); if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", proc->pid, thread->pid); } thread->looper |= BINDER_LOOPER_STATE_ENTERED; break; case BC_EXIT_LOOPER: binder_debug(BINDER_DEBUG_THREADS, "%d:%d BC_EXIT_LOOPER\n", proc->pid, thread->pid); thread->looper |= BINDER_LOOPER_STATE_EXITED; break; case BC_REQUEST_DEATH_NOTIFICATION: case BC_CLEAR_DEATH_NOTIFICATION: { uint32_t target; binder_uintptr_t cookie; struct binder_ref *ref; struct binder_ref_death *death; if (get_user(target, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); ref = binder_get_ref(proc, target); if (ref == NULL) { binder_user_error("%d:%d %s invalid ref %d\n", proc->pid, thread->pid, cmd == BC_REQUEST_DEATH_NOTIFICATION ? "BC_REQUEST_DEATH_NOTIFICATION" : "BC_CLEAR_DEATH_NOTIFICATION", target); break; } binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", proc->pid, thread->pid, cmd == BC_REQUEST_DEATH_NOTIFICATION ? 
"BC_REQUEST_DEATH_NOTIFICATION" : "BC_CLEAR_DEATH_NOTIFICATION", (u64)cookie, ref->debug_id, ref->desc, ref->strong, ref->weak, ref->node->debug_id); if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { if (ref->death) { binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", proc->pid, thread->pid); break; } death = kzalloc(sizeof(*death), GFP_KERNEL); if (death == NULL) { thread->return_error = BR_ERROR; binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", proc->pid, thread->pid); break; } binder_stats_created(BINDER_STAT_DEATH); INIT_LIST_HEAD(&death->work.entry); death->cookie = cookie; ref->death = death; if (ref->node->proc == NULL) { ref->death->work.type = BINDER_WORK_DEAD_BINDER; if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { list_add_tail(&ref->death->work.entry, &thread->todo); } else { list_add_tail(&ref->death->work.entry, &proc->todo); wake_up_interruptible(&proc->wait); } } } else { if (ref->death == NULL) { binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", proc->pid, thread->pid); break; } death = ref->death; if (death->cookie != cookie) { binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", proc->pid, thread->pid, (u64)death->cookie, (u64)cookie); break; } ref->death = NULL; if (list_empty(&death->work.entry)) { death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { list_add_tail(&death->work.entry, &thread->todo); } else { list_add_tail(&death->work.entry, &proc->todo); wake_up_interruptible(&proc->wait); } } else { BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; } } } break; case BC_DEAD_BINDER_DONE: { struct binder_work *w; binder_uintptr_t cookie; struct binder_ref_death *death = NULL; if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(void *); list_for_each_entry(w, &proc->delivered_death, entry) { struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); if (tmp_death->cookie == cookie) { death = tmp_death; break; } } binder_debug(BINDER_DEBUG_DEAD_BINDER, "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", proc->pid, thread->pid, (u64)cookie, death); if (death == NULL) { binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", proc->pid, thread->pid, (u64)cookie); break; } list_del_init(&death->work.entry); if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { list_add_tail(&death->work.entry, &thread->todo); } else { list_add_tail(&death->work.entry, &proc->todo); wake_up_interruptible(&proc->wait); } } } break; default: pr_err("%d:%d unknown command %d\n", proc->pid, thread->pid, cmd); return -EINVAL; } *consumed = ptr - buffer; } return 0; } static void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, uint32_t cmd) { trace_binder_return(cmd); if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { binder_stats.br[_IOC_NR(cmd)]++; proc->stats.br[_IOC_NR(cmd)]++; thread->stats.br[_IOC_NR(cmd)]++; } } static int binder_has_proc_work(struct binder_proc *proc, struct binder_thread *thread) { return !list_empty(&proc->todo) || (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); } static int 
binder_has_thread_work(struct binder_thread *thread) { return !list_empty(&thread->todo) || thread->return_error != BR_OK || (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); } static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed, int non_block) { void __user *buffer = (void __user *)(uintptr_t)binder_buffer; void __user *ptr = buffer + *consumed; void __user *end = buffer + size; int ret = 0; int wait_for_proc_work; if (*consumed == 0) { if (put_user(BR_NOOP, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); } retry: wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo); if (thread->return_error != BR_OK && ptr < end) { if (thread->return_error2 != BR_OK) { if (put_user(thread->return_error2, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, thread->return_error2); if (ptr == end) goto done; thread->return_error2 = BR_OK; } if (put_user(thread->return_error, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, thread->return_error); thread->return_error = BR_OK; goto done; } thread->looper |= BINDER_LOOPER_STATE_WAITING; if (wait_for_proc_work) proc->ready_threads++; binder_unlock(__func__); trace_binder_wait_for_work(wait_for_proc_work, !!thread->transaction_stack, !list_empty(&thread->todo)); if (wait_for_proc_work) { if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) { binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", proc->pid, thread->pid, thread->looper); wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); } binder_set_nice(proc->default_priority); if (non_block) { if (!binder_has_proc_work(proc, thread)) ret = -EAGAIN; } else ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); } else { if (non_block) { if (!binder_has_thread_work(thread)) ret = -EAGAIN; } else ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); } binder_lock(__func__); if (wait_for_proc_work) proc->ready_threads--; thread->looper &= ~BINDER_LOOPER_STATE_WAITING; if (ret) return ret; while (1) { uint32_t cmd; struct binder_transaction_data tr; struct binder_work *w; struct binder_transaction *t = NULL; if (!list_empty(&thread->todo)) { w = list_first_entry(&thread->todo, struct binder_work, entry); } else if (!list_empty(&proc->todo) && wait_for_proc_work) { w = list_first_entry(&proc->todo, struct binder_work, entry); } else { /* no data added */ if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) goto retry; break; } if (end - ptr < sizeof(tr) + 4) break; switch (w->type) { case BINDER_WORK_TRANSACTION: { t = container_of(w, struct binder_transaction, work); } break; case BINDER_WORK_TRANSACTION_COMPLETE: { cmd = BR_TRANSACTION_COMPLETE; if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, "%d:%d BR_TRANSACTION_COMPLETE\n", proc->pid, thread->pid); list_del(&w->entry); kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_NODE: { struct binder_node *node = container_of(w, struct binder_node, work); uint32_t cmd = BR_NOOP; const char *cmd_name; int strong = node->internal_strong_refs || 
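		/* the node stays strong while internal or local strong refs remain */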
node->local_strong_refs; int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; if (weak && !node->has_weak_ref) { cmd = BR_INCREFS; cmd_name = "BR_INCREFS"; node->has_weak_ref = 1; node->pending_weak_ref = 1; node->local_weak_refs++; } else if (strong && !node->has_strong_ref) { cmd = BR_ACQUIRE; cmd_name = "BR_ACQUIRE"; node->has_strong_ref = 1; node->pending_strong_ref = 1; node->local_strong_refs++; } else if (!strong && node->has_strong_ref) { cmd = BR_RELEASE; cmd_name = "BR_RELEASE"; node->has_strong_ref = 0; } else if (!weak && node->has_weak_ref) { cmd = BR_DECREFS; cmd_name = "BR_DECREFS"; node->has_weak_ref = 0; } if (cmd != BR_NOOP) { if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (put_user(node->ptr, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); if (put_user(node->cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", proc->pid, thread->pid, cmd_name, node->debug_id, (u64)node->ptr, (u64)node->cookie); } else { list_del_init(&w->entry); if (!weak && !strong) { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d:%d node %d u%016llx c%016llx deleted\n", proc->pid, thread->pid, node->debug_id, (u64)node->ptr, (u64)node->cookie); rb_erase(&node->rb_node, &proc->nodes); kfree(node); binder_stats_deleted(BINDER_STAT_NODE); } else { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d:%d node %d u%016llx c%016llx state unchanged\n", proc->pid, thread->pid, node->debug_id, (u64)node->ptr, (u64)node->cookie); } } } break; case BINDER_WORK_DEAD_BINDER: case BINDER_WORK_DEAD_BINDER_AND_CLEAR: case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { struct binder_ref_death *death; uint32_t cmd; death = container_of(w, struct binder_ref_death, work); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; else cmd = BR_DEAD_BINDER; if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (put_user(death->cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, "%d:%d %s %016llx\n", proc->pid, thread->pid, cmd == BR_DEAD_BINDER ? 
"BR_DEAD_BINDER" : "BR_CLEAR_DEATH_NOTIFICATION_DONE", (u64)death->cookie); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { list_del(&w->entry); kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); } else list_move(&w->entry, &proc->delivered_death); if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; } if (!t) continue; BUG_ON(t->buffer == NULL); if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; tr.target.ptr = target_node->ptr; tr.cookie = target_node->cookie; t->saved_priority = task_nice(current); if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY)) binder_set_nice(t->priority); else if (!(t->flags & TF_ONE_WAY) || t->saved_priority > target_node->min_priority) binder_set_nice(target_node->min_priority); cmd = BR_TRANSACTION; } else { tr.target.ptr = 0; tr.cookie = 0; cmd = BR_REPLY; } tr.code = t->code; tr.flags = t->flags; tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); if (t->from) { struct task_struct *sender = t->from->proc->tsk; tr.sender_pid = task_tgid_nr_ns(sender, task_active_pid_ns(current)); } else { tr.sender_pid = 0; } tr.data_size = t->buffer->data_size; tr.offsets_size = t->buffer->offsets_size; tr.data.ptr.buffer = (binder_uintptr_t)( (uintptr_t)t->buffer->data + proc->user_buffer_offset); tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (copy_to_user(ptr, &tr, sizeof(tr))) return -EFAULT; ptr += sizeof(tr); trace_binder_transaction_received(t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION, "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY", t->debug_id, t->from ? t->from->proc->pid : 0, t->from ? 
t->from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); list_del(&t->work.entry); t->buffer->allow_user_free = 1; if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { t->to_parent = thread->transaction_stack; t->to_thread = thread; thread->transaction_stack = t; } else { t->buffer->transaction = NULL; kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } break; } done: *consumed = ptr - buffer; if (proc->requested_threads + proc->ready_threads == 0 && proc->requested_threads_started < proc->max_threads && (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ /*spawn a new thread if we leave this out */) { proc->requested_threads++; binder_debug(BINDER_DEBUG_THREADS, "%d:%d BR_SPAWN_LOOPER\n", proc->pid, thread->pid); if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) return -EFAULT; binder_stat_br(proc, thread, BR_SPAWN_LOOPER); } return 0; } static void binder_release_work(struct list_head *list) { struct binder_work *w; while (!list_empty(list)) { w = list_first_entry(list, struct binder_work, entry); list_del_init(&w->entry); switch (w->type) { case BINDER_WORK_TRANSACTION: { struct binder_transaction *t; t = container_of(w, struct binder_transaction, work); if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { binder_send_failed_reply(t, BR_DEAD_REPLY); } else { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered transaction %d\n", t->debug_id); t->buffer->transaction = NULL; kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } } break; case BINDER_WORK_TRANSACTION_COMPLETE: { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered TRANSACTION_COMPLETE\n"); kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_DEAD_BINDER_AND_CLEAR: case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { struct binder_ref_death *death; death = container_of(w, struct binder_ref_death, work); binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered death notification, %016llx\n", (u64)death->cookie); kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); } break; default: pr_err("unexpected work type, %d, not freed\n", w->type); break; } } } static struct binder_thread *binder_get_thread(struct binder_proc *proc) { struct binder_thread *thread = NULL; struct rb_node *parent = NULL; struct rb_node **p = &proc->threads.rb_node; while (*p) { parent = *p; thread = rb_entry(parent, struct binder_thread, rb_node); if (current->pid < thread->pid) p = &(*p)->rb_left; else if (current->pid > thread->pid) p = &(*p)->rb_right; else break; } if (*p == NULL) { thread = kzalloc(sizeof(*thread), GFP_KERNEL); if (thread == NULL) return NULL; binder_stats_created(BINDER_STAT_THREAD); thread->proc = proc; thread->pid = current->pid; init_waitqueue_head(&thread->wait); INIT_LIST_HEAD(&thread->todo); rb_link_node(&thread->rb_node, parent, p); rb_insert_color(&thread->rb_node, &proc->threads); thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; thread->return_error = BR_OK; thread->return_error2 = BR_OK; } return thread; } static int binder_free_thread(struct binder_proc *proc, struct binder_thread *thread) { struct binder_transaction *t; struct binder_transaction *send_reply = NULL; int active_transactions = 0; rb_erase(&thread->rb_node, &proc->threads); t = thread->transaction_stack; if (t && t->to_thread == thread) send_reply = t; while (t) { active_transactions++; binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "release %d:%d transaction %d 
%s, still active\n", proc->pid, thread->pid, t->debug_id, (t->to_thread == thread) ? "in" : "out"); if (t->to_thread == thread) { t->to_proc = NULL; t->to_thread = NULL; if (t->buffer) { t->buffer->transaction = NULL; t->buffer = NULL; } t = t->to_parent; } else if (t->from == thread) { t->from = NULL; t = t->from_parent; } else BUG(); } if (send_reply) binder_send_failed_reply(send_reply, BR_DEAD_REPLY); binder_release_work(&thread->todo); kfree(thread); binder_stats_deleted(BINDER_STAT_THREAD); return active_transactions; } static unsigned int binder_poll(struct file *filp, struct poll_table_struct *wait) { struct binder_proc *proc = filp->private_data; struct binder_thread *thread = NULL; int wait_for_proc_work; binder_lock(__func__); thread = binder_get_thread(proc); wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo) && thread->return_error == BR_OK; binder_unlock(__func__); if (wait_for_proc_work) { if (binder_has_proc_work(proc, thread)) return POLLIN; poll_wait(filp, &proc->wait, wait); if (binder_has_proc_work(proc, thread)) return POLLIN; } else { if (binder_has_thread_work(thread)) return POLLIN; poll_wait(filp, &thread->wait, wait); if (binder_has_thread_work(thread)) return POLLIN; } return 0; } static int binder_ioctl_write_read(struct file *filp, unsigned int cmd, unsigned long arg, struct binder_thread *thread) { int ret = 0; struct binder_proc *proc = filp->private_data; unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; struct binder_write_read bwr; if (size != sizeof(struct binder_write_read)) { ret = -EINVAL; goto out; } if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { ret = -EFAULT; goto out; } binder_debug(BINDER_DEBUG_READ_WRITE, "%d:%d write %lld at %016llx, read %lld at %016llx\n", proc->pid, thread->pid, (u64)bwr.write_size, (u64)bwr.write_buffer, (u64)bwr.read_size, (u64)bwr.read_buffer); if (bwr.write_size > 0) { ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed); trace_binder_write_done(ret); if (ret < 0) { bwr.read_consumed = 0; if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto out; } } if (bwr.read_size > 0) { ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); trace_binder_read_done(ret); if (!list_empty(&proc->todo)) wake_up_interruptible(&proc->wait); if (ret < 0) { if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto out; } } binder_debug(BINDER_DEBUG_READ_WRITE, "%d:%d wrote %lld of %lld, read return %lld of %lld\n", proc->pid, thread->pid, (u64)bwr.write_consumed, (u64)bwr.write_size, (u64)bwr.read_consumed, (u64)bwr.read_size); if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { ret = -EFAULT; goto out; } out: return ret; } static int binder_ioctl_set_ctx_mgr(struct file *filp) { int ret = 0; struct binder_proc *proc = filp->private_data; kuid_t curr_euid = current_euid(); if (binder_context_mgr_node != NULL) { pr_err("BINDER_SET_CONTEXT_MGR already set\n"); ret = -EBUSY; goto out; } ret = security_binder_set_context_mgr(proc->tsk); if (ret < 0) goto out; if (uid_valid(binder_context_mgr_uid)) { if (!uid_eq(binder_context_mgr_uid, curr_euid)) { pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", from_kuid(&init_user_ns, curr_euid), from_kuid(&init_user_ns, binder_context_mgr_uid)); ret = -EPERM; goto out; } } else { binder_context_mgr_uid = curr_euid; } binder_context_mgr_node = binder_new_node(proc, 0, 0); if (binder_context_mgr_node == NULL) { ret = -ENOMEM; goto out; } 
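	/*
	 * Pin the new context manager node: take explicit strong and weak
	 * references and mark them as already delivered, so no
	 * BR_INCREFS/BR_ACQUIRE round trip to user space is required.
	 */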
binder_context_mgr_node->local_weak_refs++; binder_context_mgr_node->local_strong_refs++; binder_context_mgr_node->has_strong_ref = 1; binder_context_mgr_node->has_weak_ref = 1; out: return ret; } static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int ret; struct binder_proc *proc = filp->private_data; struct binder_thread *thread; unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ trace_binder_ioctl(cmd, arg); ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret) goto err_unlocked; binder_lock(__func__); thread = binder_get_thread(proc); if (thread == NULL) { ret = -ENOMEM; goto err; } switch (cmd) { case BINDER_WRITE_READ: ret = binder_ioctl_write_read(filp, cmd, arg, thread); if (ret) goto err; break; case BINDER_SET_MAX_THREADS: if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { ret = -EINVAL; goto err; } break; case BINDER_SET_CONTEXT_MGR: ret = binder_ioctl_set_ctx_mgr(filp); if (ret) goto err; break; case BINDER_THREAD_EXIT: binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", proc->pid, thread->pid); binder_free_thread(proc, thread); thread = NULL; break; case BINDER_VERSION: { struct binder_version __user *ver = ubuf; if (size != sizeof(struct binder_version)) { ret = -EINVAL; goto err; } if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) { ret = -EINVAL; goto err; } break; } default: ret = -EINVAL; goto err; } ret = 0; err: if (thread) thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; binder_unlock(__func__); wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret && ret != -ERESTARTSYS) pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); err_unlocked: trace_binder_ioctl_done(ret); return ret; } static void binder_vma_open(struct vm_area_struct *vma) { struct binder_proc *proc = vma->vm_private_data; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); } static void binder_vma_close(struct vm_area_struct *vma) { struct binder_proc *proc = vma->vm_private_data; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); proc->vma = NULL; proc->vma_vm_mm = NULL; binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); } static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } static struct vm_operations_struct binder_vm_ops = { .open = binder_vma_open, .close = binder_vma_close, .fault = binder_vm_fault, }; static int binder_mmap(struct file *filp, struct vm_area_struct *vma) { int ret; struct vm_struct *area; struct binder_proc *proc = filp->private_data; const char *failure_string; struct binder_buffer *buffer; if (proc->tsk != current) return -EINVAL; if ((vma->vm_end - vma->vm_start) > SZ_4M) vma->vm_end = vma->vm_start + SZ_4M; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { ret 
= -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n",
				proc->pid, vma->vm_start, vma->vm_end,
				proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	proc->pages = kzalloc(sizeof(proc->pages[0]) *
				((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			      GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	if (binder_update_page_range(proc, 1, proc->buffer,
				     proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;

	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc, proc,
			&binder_proc_fops);
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n,
				struct binder_thread, rb_node);

		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
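			/* the thread is parked in binder_thread_read();
			 * wake it so it sees BINDER_LOOPER_STATE_NEED_RETURN
			 * and returns to user space */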
wake_up_interruptible(&thread->wait); wake_count++; } } wake_up_interruptible_all(&proc->wait); binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_flush: %d woke %d threads\n", proc->pid, wake_count); } static int binder_release(struct inode *nodp, struct file *filp) { struct binder_proc *proc = filp->private_data; debugfs_remove(proc->debugfs_entry); binder_defer_work(proc, BINDER_DEFERRED_RELEASE); return 0; } static int binder_node_release(struct binder_node *node, int refs) { struct binder_ref *ref; int death = 0; list_del_init(&node->work.entry); binder_release_work(&node->async_todo); if (hlist_empty(&node->refs)) { kfree(node); binder_stats_deleted(BINDER_STAT_NODE); return refs; } node->proc = NULL; node->local_strong_refs = 0; node->local_weak_refs = 0; hlist_add_head(&node->dead_node, &binder_dead_nodes); hlist_for_each_entry(ref, &node->refs, node_entry) { refs++; if (!ref->death) continue; death++; if (list_empty(&ref->death->work.entry)) { ref->death->work.type = BINDER_WORK_DEAD_BINDER; list_add_tail(&ref->death->work.entry, &ref->proc->todo); wake_up_interruptible(&ref->proc->wait); } else BUG(); } binder_debug(BINDER_DEBUG_DEAD_BINDER, "node %d now dead, refs %d, death %d\n", node->debug_id, refs, death); return refs; } static void binder_deferred_release(struct binder_proc *proc) { struct binder_transaction *t; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; BUG_ON(proc->vma); BUG_ON(proc->files); hlist_del(&proc->proc_node); if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "%s: %d context_mgr_node gone\n", __func__, proc->pid); binder_context_mgr_node = NULL; } threads = 0; active_transactions = 0; while ((n = rb_first(&proc->threads))) { struct binder_thread *thread; thread = rb_entry(n, struct binder_thread, rb_node); threads++; active_transactions += binder_free_thread(proc, thread); } nodes = 0; incoming_refs = 0; while ((n = rb_first(&proc->nodes))) { struct binder_node *node; node = rb_entry(n, struct binder_node, rb_node); nodes++; rb_erase(&node->rb_node, &proc->nodes); incoming_refs = binder_node_release(node, incoming_refs); } outgoing_refs = 0; while ((n = rb_first(&proc->refs_by_desc))) { struct binder_ref *ref; ref = rb_entry(n, struct binder_ref, rb_node_desc); outgoing_refs++; binder_delete_ref(ref); } binder_release_work(&proc->todo); binder_release_work(&proc->delivered_death); buffers = 0; while ((n = rb_first(&proc->allocated_buffers))) { struct binder_buffer *buffer; buffer = rb_entry(n, struct binder_buffer, rb_node); t = buffer->transaction; if (t) { t->buffer = NULL; buffer->transaction = NULL; pr_err("release proc %d, transaction %d, not freed\n", proc->pid, t->debug_id); /*BUG();*/ } binder_free_buf(proc, buffer); buffers++; } binder_stats_deleted(BINDER_STAT_PROC); page_count = 0; if (proc->pages) { int i; for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { void *page_addr; if (!proc->pages[i]) continue; page_addr = proc->buffer + i * PAGE_SIZE; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "%s: %d: page %d at %p not freed\n", __func__, proc->pid, i, page_addr); unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); __free_page(proc->pages[i]); page_count++; } kfree(proc->pages); vfree(proc->buffer); } put_task_struct(proc->tsk); binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", __func__, proc->pid, threads, nodes, incoming_refs, outgoing_refs, 
active_transactions, buffers, page_count); kfree(proc); } static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; struct files_struct *files; int defer; do { binder_lock(__func__); mutex_lock(&binder_deferred_lock); if (!hlist_empty(&binder_deferred_list)) { proc = hlist_entry(binder_deferred_list.first, struct binder_proc, deferred_work_node); hlist_del_init(&proc->deferred_work_node); defer = proc->deferred_work; proc->deferred_work = 0; } else { proc = NULL; defer = 0; } mutex_unlock(&binder_deferred_lock); files = NULL; if (defer & BINDER_DEFERRED_PUT_FILES) { files = proc->files; if (files) proc->files = NULL; } if (defer & BINDER_DEFERRED_FLUSH) binder_deferred_flush(proc); if (defer & BINDER_DEFERRED_RELEASE) binder_deferred_release(proc); /* frees proc */ binder_unlock(__func__); if (files) put_files_struct(files); } while (proc); } static DECLARE_WORK(binder_deferred_work, binder_deferred_func); static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) { mutex_lock(&binder_deferred_lock); proc->deferred_work |= defer; if (hlist_unhashed(&proc->deferred_work_node)) { hlist_add_head(&proc->deferred_work_node, &binder_deferred_list); queue_work(binder_deferred_workqueue, &binder_deferred_work); } mutex_unlock(&binder_deferred_lock); } static void print_binder_transaction(struct seq_file *m, const char *prefix, struct binder_transaction *t) { seq_printf(m, "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, t->to_proc ? t->to_proc->pid : 0, t->to_thread ? t->to_thread->pid : 0, t->code, t->flags, t->priority, t->need_reply); if (t->buffer == NULL) { seq_puts(m, " buffer free\n"); return; } if (t->buffer->target_node) seq_printf(m, " node %d", t->buffer->target_node->debug_id); seq_printf(m, " size %zd:%zd data %p\n", t->buffer->data_size, t->buffer->offsets_size, t->buffer->data); } static void print_binder_buffer(struct seq_file *m, const char *prefix, struct binder_buffer *buffer) { seq_printf(m, "%s %d: %p size %zd:%zd %s\n", prefix, buffer->debug_id, buffer->data, buffer->data_size, buffer->offsets_size, buffer->transaction ? 
"active" : "delivered"); } static void print_binder_work(struct seq_file *m, const char *prefix, const char *transaction_prefix, struct binder_work *w) { struct binder_node *node; struct binder_transaction *t; switch (w->type) { case BINDER_WORK_TRANSACTION: t = container_of(w, struct binder_transaction, work); print_binder_transaction(m, transaction_prefix, t); break; case BINDER_WORK_TRANSACTION_COMPLETE: seq_printf(m, "%stransaction complete\n", prefix); break; case BINDER_WORK_NODE: node = container_of(w, struct binder_node, work); seq_printf(m, "%snode work %d: u%016llx c%016llx\n", prefix, node->debug_id, (u64)node->ptr, (u64)node->cookie); break; case BINDER_WORK_DEAD_BINDER: seq_printf(m, "%shas dead binder\n", prefix); break; case BINDER_WORK_DEAD_BINDER_AND_CLEAR: seq_printf(m, "%shas cleared dead binder\n", prefix); break; case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: seq_printf(m, "%shas cleared death notification\n", prefix); break; default: seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); break; } } static void print_binder_thread(struct seq_file *m, struct binder_thread *thread, int print_always) { struct binder_transaction *t; struct binder_work *w; size_t start_pos = m->count; size_t header_pos; seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); header_pos = m->count; t = thread->transaction_stack; while (t) { if (t->from == thread) { print_binder_transaction(m, " outgoing transaction", t); t = t->from_parent; } else if (t->to_thread == thread) { print_binder_transaction(m, " incoming transaction", t); t = t->to_parent; } else { print_binder_transaction(m, " bad transaction", t); t = NULL; } } list_for_each_entry(w, &thread->todo, entry) { print_binder_work(m, " ", " pending transaction", w); } if (!print_always && m->count == header_pos) m->count = start_pos; } static void print_binder_node(struct seq_file *m, struct binder_node *node) { struct binder_ref *ref; struct binder_work *w; int count; count = 0; hlist_for_each_entry(ref, &node->refs, node_entry) count++; seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d", node->debug_id, (u64)node->ptr, (u64)node->cookie, node->has_strong_ref, node->has_weak_ref, node->local_strong_refs, node->local_weak_refs, node->internal_strong_refs, count); if (count) { seq_puts(m, " proc"); hlist_for_each_entry(ref, &node->refs, node_entry) seq_printf(m, " %d", ref->proc->pid); } seq_puts(m, "\n"); list_for_each_entry(w, &node->async_todo, entry) print_binder_work(m, " ", " pending async transaction", w); } static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) { seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", ref->debug_id, ref->desc, ref->node->proc ? 
"" : "dead ", ref->node->debug_id, ref->strong, ref->weak, ref->death); } static void print_binder_proc(struct seq_file *m, struct binder_proc *proc, int print_all) { struct binder_work *w; struct rb_node *n; size_t start_pos = m->count; size_t header_pos; seq_printf(m, "proc %d\n", proc->pid); header_pos = m->count; for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) print_binder_thread(m, rb_entry(n, struct binder_thread, rb_node), print_all); for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); if (print_all || node->has_async_transaction) print_binder_node(m, node); } if (print_all) { for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) print_binder_ref(m, rb_entry(n, struct binder_ref, rb_node_desc)); } for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) print_binder_buffer(m, " buffer", rb_entry(n, struct binder_buffer, rb_node)); list_for_each_entry(w, &proc->todo, entry) print_binder_work(m, " ", " pending transaction", w); list_for_each_entry(w, &proc->delivered_death, entry) { seq_puts(m, " has delivered dead binder\n"); break; } if (!print_all && m->count == header_pos) m->count = start_pos; } static const char * const binder_return_strings[] = { "BR_ERROR", "BR_OK", "BR_TRANSACTION", "BR_REPLY", "BR_ACQUIRE_RESULT", "BR_DEAD_REPLY", "BR_TRANSACTION_COMPLETE", "BR_INCREFS", "BR_ACQUIRE", "BR_RELEASE", "BR_DECREFS", "BR_ATTEMPT_ACQUIRE", "BR_NOOP", "BR_SPAWN_LOOPER", "BR_FINISHED", "BR_DEAD_BINDER", "BR_CLEAR_DEATH_NOTIFICATION_DONE", "BR_FAILED_REPLY" }; static const char * const binder_command_strings[] = { "BC_TRANSACTION", "BC_REPLY", "BC_ACQUIRE_RESULT", "BC_FREE_BUFFER", "BC_INCREFS", "BC_ACQUIRE", "BC_RELEASE", "BC_DECREFS", "BC_INCREFS_DONE", "BC_ACQUIRE_DONE", "BC_ATTEMPT_ACQUIRE", "BC_REGISTER_LOOPER", "BC_ENTER_LOOPER", "BC_EXIT_LOOPER", "BC_REQUEST_DEATH_NOTIFICATION", "BC_CLEAR_DEATH_NOTIFICATION", "BC_DEAD_BINDER_DONE" }; static const char * const binder_objstat_strings[] = { "proc", "thread", "node", "ref", "death", "transaction", "transaction_complete" }; static void print_binder_stats(struct seq_file *m, const char *prefix, struct binder_stats *stats) { int i; BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != ARRAY_SIZE(binder_command_strings)); for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { if (stats->bc[i]) seq_printf(m, "%s%s: %d\n", prefix, binder_command_strings[i], stats->bc[i]); } BUILD_BUG_ON(ARRAY_SIZE(stats->br) != ARRAY_SIZE(binder_return_strings)); for (i = 0; i < ARRAY_SIZE(stats->br); i++) { if (stats->br[i]) seq_printf(m, "%s%s: %d\n", prefix, binder_return_strings[i], stats->br[i]); } BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(binder_objstat_strings)); BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(stats->obj_deleted)); for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { if (stats->obj_created[i] || stats->obj_deleted[i]) seq_printf(m, "%s%s: active %d total %d\n", prefix, binder_objstat_strings[i], stats->obj_created[i] - stats->obj_deleted[i], stats->obj_created[i]); } } static void print_binder_proc_stats(struct seq_file *m, struct binder_proc *proc) { struct binder_work *w; struct rb_node *n; int count, strong, weak; seq_printf(m, "proc %d\n", proc->pid); count = 0; for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) count++; seq_printf(m, " threads: %d\n", count); seq_printf(m, " requested threads: %d+%d/%d\n" " ready threads %d\n" " free async space %zd\n", proc->requested_threads, 
proc->requested_threads_started, proc->max_threads, proc->ready_threads, proc->free_async_space); count = 0; for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) count++; seq_printf(m, " nodes: %d\n", count); count = 0; strong = 0; weak = 0; for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc); count++; strong += ref->strong; weak += ref->weak; } seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); count = 0; for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) count++; seq_printf(m, " buffers: %d\n", count); count = 0; list_for_each_entry(w, &proc->todo, entry) { switch (w->type) { case BINDER_WORK_TRANSACTION: count++; break; default: break; } } seq_printf(m, " pending transactions: %d\n", count); print_binder_stats(m, " ", &proc->stats); } static int binder_state_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct binder_node *node; int do_lock = !binder_debug_no_lock; if (do_lock) binder_lock(__func__); seq_puts(m, "binder state:\n"); if (!hlist_empty(&binder_dead_nodes)) seq_puts(m, "dead nodes:\n"); hlist_for_each_entry(node, &binder_dead_nodes, dead_node) print_binder_node(m, node); hlist_for_each_entry(proc, &binder_procs, proc_node) print_binder_proc(m, proc, 1); if (do_lock) binder_unlock(__func__); return 0; } static int binder_stats_show(struct seq_file *m, void *unused) { struct binder_proc *proc; int do_lock = !binder_debug_no_lock; if (do_lock) binder_lock(__func__); seq_puts(m, "binder stats:\n"); print_binder_stats(m, "", &binder_stats); hlist_for_each_entry(proc, &binder_procs, proc_node) print_binder_proc_stats(m, proc); if (do_lock) binder_unlock(__func__); return 0; } static int binder_transactions_show(struct seq_file *m, void *unused) { struct binder_proc *proc; int do_lock = !binder_debug_no_lock; if (do_lock) binder_lock(__func__); seq_puts(m, "binder transactions:\n"); hlist_for_each_entry(proc, &binder_procs, proc_node) print_binder_proc(m, proc, 0); if (do_lock) binder_unlock(__func__); return 0; } static int binder_proc_show(struct seq_file *m, void *unused) { struct binder_proc *proc = m->private; int do_lock = !binder_debug_no_lock; if (do_lock) binder_lock(__func__); seq_puts(m, "binder proc state:\n"); print_binder_proc(m, proc, 1); if (do_lock) binder_unlock(__func__); return 0; } static void print_binder_transaction_log_entry(struct seq_file *m, struct binder_transaction_log_entry *e) { seq_printf(m, "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", e->debug_id, (e->call_type == 2) ? "reply" : ((e->call_type == 1) ? 
"async" : "call "), e->from_proc, e->from_thread, e->to_proc, e->to_thread, e->to_node, e->target_handle, e->data_size, e->offsets_size); } static int binder_transaction_log_show(struct seq_file *m, void *unused) { struct binder_transaction_log *log = m->private; int i; if (log->full) { for (i = log->next; i < ARRAY_SIZE(log->entry); i++) print_binder_transaction_log_entry(m, &log->entry[i]); } for (i = 0; i < log->next; i++) print_binder_transaction_log_entry(m, &log->entry[i]); return 0; } static const struct file_operations binder_fops = { .owner = THIS_MODULE, .poll = binder_poll, .unlocked_ioctl = binder_ioctl, .compat_ioctl = binder_ioctl, .mmap = binder_mmap, .open = binder_open, .flush = binder_flush, .release = binder_release, }; static struct miscdevice binder_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "binder", .fops = &binder_fops }; BINDER_DEBUG_ENTRY(state); BINDER_DEBUG_ENTRY(stats); BINDER_DEBUG_ENTRY(transactions); BINDER_DEBUG_ENTRY(transaction_log); static int __init binder_init(void) { int ret; binder_deferred_workqueue = create_singlethread_workqueue("binder"); if (!binder_deferred_workqueue) return -ENOMEM; binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); if (binder_debugfs_dir_entry_root) binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", binder_debugfs_dir_entry_root); ret = misc_register(&binder_miscdev); if (binder_debugfs_dir_entry_root) { debugfs_create_file("state", S_IRUGO, binder_debugfs_dir_entry_root, NULL, &binder_state_fops); debugfs_create_file("stats", S_IRUGO, binder_debugfs_dir_entry_root, NULL, &binder_stats_fops); debugfs_create_file("transactions", S_IRUGO, binder_debugfs_dir_entry_root, NULL, &binder_transactions_fops); debugfs_create_file("transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, &binder_transaction_log, &binder_transaction_log_fops); debugfs_create_file("failed_transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, &binder_transaction_log_failed, &binder_transaction_log_fops); } return ret; } device_initcall(binder_init); #define CREATE_TRACE_POINTS #include "binder_trace.h" MODULE_LICENSE("GPL v2");
gpl-2.0
Valchovski/tpr
dep/acelite/ace/POSIX_Proactor.cpp
524
60782
// $Id: POSIX_Proactor.cpp 92069 2010-09-28 11:38:59Z johnnyw $

#include "ace/POSIX_Proactor.h"

#if defined (ACE_HAS_AIO_CALLS)

#if !defined (__ACE_INLINE__)
#include "ace/POSIX_Proactor.inl"
#endif /* __ACE_INLINE__ */

# if defined (ACE_HAS_SYS_SYSTEMINFO_H)
# include /**/ <sys/systeminfo.h>
# endif /* ACE_HAS_SYS_SYSTEMINFO_H */

#include "ace/ACE.h"
#include "ace/Flag_Manip.h"
#include "ace/Task_T.h"
#include "ace/Log_Msg.h"
#include "ace/Object_Manager.h"
#include "ace/OS_NS_sys_socket.h"
#include "ace/OS_NS_signal.h"
#include "ace/OS_NS_unistd.h"

#if defined (sun)
# include "ace/OS_NS_strings.h"
#endif /* sun */

// *********************************************************************

ACE_BEGIN_VERSIONED_NAMESPACE_DECL

/**
 * @class ACE_POSIX_Wakeup_Completion
 *
 * This result object is used by the <end_event_loop> of the
 * ACE_Proactor interface to wake up all the threads blocking
 * for completions.
 */
class ACE_POSIX_Wakeup_Completion : public ACE_POSIX_Asynch_Result
{
public:
  /// Constructor.
  ACE_POSIX_Wakeup_Completion (const ACE_Handler::Proxy_Ptr &handler_proxy,
                               const void *act = 0,
                               ACE_HANDLE event = ACE_INVALID_HANDLE,
                               int priority = 0,
                               int signal_number = ACE_SIGRTMIN);

  /// Destructor.
  virtual ~ACE_POSIX_Wakeup_Completion (void);

  /// This method calls the <handler>'s <handle_wakeup> method.
  virtual void complete (size_t bytes_transferred = 0,
                         int success = 1,
                         const void *completion_key = 0,
                         u_long error = 0);
};

// *********************************************************************

ACE_POSIX_Proactor::ACE_POSIX_Proactor (void)
  : os_id_ (ACE_OS_UNDEFINED)
{
#if defined(sun)
  os_id_ = ACE_OS_SUN; // set family

  char Buf [32];
  ::memset(Buf,0,sizeof(Buf));
  ACE_OS::sysinfo (SI_RELEASE , Buf, sizeof(Buf)-1);

  if (ACE_OS::strcasecmp (Buf , "5.6") == 0)
    os_id_ = ACE_OS_SUN_56;
  else if (ACE_OS::strcasecmp (Buf , "5.7") == 0)
    os_id_ = ACE_OS_SUN_57;
  else if (ACE_OS::strcasecmp (Buf , "5.8") == 0)
    os_id_ = ACE_OS_SUN_58;
#elif defined(HPUX)
  os_id_ = ACE_OS_HPUX; // set family
#elif defined(__OpenBSD)
  os_id_ = ACE_OS_OPENBSD; // set family
  // do the same
//#else defined (LINUX, __FreeBSD__ ...)
  //setup here os_id_
#endif
}

ACE_POSIX_Proactor::~ACE_POSIX_Proactor (void)
{
  this->close ();
}

int
ACE_POSIX_Proactor::close (void)
{
  return 0;
}

int
ACE_POSIX_Proactor::register_handle (ACE_HANDLE handle,
                                     const void *completion_key)
{
  ACE_UNUSED_ARG (handle);
  ACE_UNUSED_ARG (completion_key);
  return 0;
}

int
ACE_POSIX_Proactor::wake_up_dispatch_threads (void)
{
  return 0;
}

int
ACE_POSIX_Proactor::close_dispatch_threads (int)
{
  return 0;
}

size_t
ACE_POSIX_Proactor::number_of_threads (void) const
{
  // @@ Implement it.
  ACE_NOTSUP_RETURN (0);
}

void
ACE_POSIX_Proactor::number_of_threads (size_t threads)
{
  // @@ Implement it.
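  // Thread-count control is not supported at this level: the getter
  // above reports ACE_NOTSUP_RETURN (0) and this setter ignores its
  // argument.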
ACE_UNUSED_ARG (threads); } ACE_HANDLE ACE_POSIX_Proactor::get_handle (void) const { return ACE_INVALID_HANDLE; } ACE_Asynch_Read_Stream_Impl * ACE_POSIX_Proactor::create_asynch_read_stream (void) { ACE_Asynch_Read_Stream_Impl *implementation = 0; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Read_Stream (this), 0); return implementation; } ACE_Asynch_Read_Stream_Result_Impl * ACE_POSIX_Proactor::create_asynch_read_stream_result (const ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE handle, ACE_Message_Block &message_block, size_t bytes_to_read, const void* act, ACE_HANDLE event, int priority, int signal_number) { ACE_Asynch_Read_Stream_Result_Impl *implementation; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Read_Stream_Result (handler_proxy, handle, message_block, bytes_to_read, act, event, priority, signal_number), 0); return implementation; } ACE_Asynch_Write_Stream_Impl * ACE_POSIX_Proactor::create_asynch_write_stream (void) { ACE_Asynch_Write_Stream_Impl *implementation = 0; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Write_Stream (this), 0); return implementation; } ACE_Asynch_Write_Stream_Result_Impl * ACE_POSIX_Proactor::create_asynch_write_stream_result (const ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE handle, ACE_Message_Block &message_block, size_t bytes_to_write, const void* act, ACE_HANDLE event, int priority, int signal_number) { ACE_Asynch_Write_Stream_Result_Impl *implementation; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Write_Stream_Result (handler_proxy, handle, message_block, bytes_to_write, act, event, priority, signal_number), 0); return implementation; } ACE_Asynch_Read_File_Impl * ACE_POSIX_Proactor::create_asynch_read_file (void) { ACE_Asynch_Read_File_Impl *implementation = 0; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Read_File (this), 0); return implementation; } ACE_Asynch_Read_File_Result_Impl * ACE_POSIX_Proactor::create_asynch_read_file_result (const ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE handle, ACE_Message_Block &message_block, size_t bytes_to_read, const void* act, u_long offset, u_long offset_high, ACE_HANDLE event, int priority, int signal_number) { ACE_Asynch_Read_File_Result_Impl *implementation; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Read_File_Result (handler_proxy, handle, message_block, bytes_to_read, act, offset, offset_high, event, priority, signal_number), 0); return implementation; } ACE_Asynch_Write_File_Impl * ACE_POSIX_Proactor::create_asynch_write_file (void) { ACE_Asynch_Write_File_Impl *implementation = 0; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Write_File (this), 0); return implementation; } ACE_Asynch_Write_File_Result_Impl * ACE_POSIX_Proactor::create_asynch_write_file_result (const ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE handle, ACE_Message_Block &message_block, size_t bytes_to_write, const void* act, u_long offset, u_long offset_high, ACE_HANDLE event, int priority, int signal_number) { ACE_Asynch_Write_File_Result_Impl *implementation; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Write_File_Result (handler_proxy, handle, message_block, bytes_to_write, act, offset, offset_high, event, priority, signal_number), 0); return implementation; } ACE_Asynch_Read_Dgram_Impl * ACE_POSIX_Proactor::create_asynch_read_dgram (void) { ACE_Asynch_Read_Dgram_Impl *implementation = 0; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Read_Dgram (this), 0); return implementation; } ACE_Asynch_Read_Dgram_Result_Impl * ACE_POSIX_Proactor::create_asynch_read_dgram_result (const 
ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE handle, ACE_Message_Block *message_block, size_t bytes_to_read, int flags, int protocol_family, const void* act, ACE_HANDLE event , int priority , int signal_number) { ACE_Asynch_Read_Dgram_Result_Impl *implementation=0; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Read_Dgram_Result(handler_proxy, handle, message_block, bytes_to_read, flags, protocol_family, act, event, priority, signal_number), 0); return implementation; } ACE_Asynch_Write_Dgram_Impl * ACE_POSIX_Proactor::create_asynch_write_dgram (void) { ACE_Asynch_Write_Dgram_Impl *implementation = 0; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Write_Dgram (this), 0); return implementation; } ACE_Asynch_Write_Dgram_Result_Impl * ACE_POSIX_Proactor::create_asynch_write_dgram_result (const ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE handle, ACE_Message_Block *message_block, size_t bytes_to_write, int flags, const void* act, ACE_HANDLE event, int priority , int signal_number) { ACE_Asynch_Write_Dgram_Result_Impl *implementation=0; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Write_Dgram_Result(handler_proxy, handle, message_block, bytes_to_write, flags, act, event, priority, signal_number), 0); return implementation; } ACE_Asynch_Accept_Impl * ACE_POSIX_Proactor::create_asynch_accept (void) { ACE_Asynch_Accept_Impl *implementation = 0; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Accept (this), 0); return implementation; } ACE_Asynch_Accept_Result_Impl * ACE_POSIX_Proactor::create_asynch_accept_result (const ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE listen_handle, ACE_HANDLE accept_handle, ACE_Message_Block &message_block, size_t bytes_to_read, const void* act, ACE_HANDLE event, int priority, int signal_number) { ACE_Asynch_Accept_Result_Impl *implementation; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Accept_Result (handler_proxy, listen_handle, accept_handle, message_block, bytes_to_read, act, event, priority, signal_number), 0); return implementation; } ACE_Asynch_Connect_Impl * ACE_POSIX_Proactor::create_asynch_connect (void) { ACE_Asynch_Connect_Impl *implementation = 0; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Connect (this), 0); return implementation; } ACE_Asynch_Connect_Result_Impl * ACE_POSIX_Proactor::create_asynch_connect_result (const ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE connect_handle, const void* act, ACE_HANDLE event, int priority, int signal_number) { ACE_Asynch_Connect_Result_Impl *implementation; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Connect_Result (handler_proxy, connect_handle, act, event, priority, signal_number), 0); return implementation; } ACE_Asynch_Transmit_File_Impl * ACE_POSIX_Proactor::create_asynch_transmit_file (void) { ACE_Asynch_Transmit_File_Impl *implementation = 0; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Transmit_File (this), 0); return implementation; } ACE_Asynch_Transmit_File_Result_Impl * ACE_POSIX_Proactor::create_asynch_transmit_file_result (const ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE socket, ACE_HANDLE file, ACE_Asynch_Transmit_File::Header_And_Trailer *header_and_trailer, size_t bytes_to_write, u_long offset, u_long offset_high, size_t bytes_per_send, u_long flags, const void *act, ACE_HANDLE event, int priority, int signal_number) { ACE_Asynch_Transmit_File_Result_Impl *implementation; ACE_NEW_RETURN (implementation, ACE_POSIX_Asynch_Transmit_File_Result (handler_proxy, socket, file, header_and_trailer, bytes_to_write, offset, offset_high, bytes_per_send, 
flags,
                    act,
                    event,
                    priority,
                    signal_number),
                  0);
  return implementation;
}

ACE_Asynch_Result_Impl *
ACE_POSIX_Proactor::create_asynch_timer (const ACE_Handler::Proxy_Ptr &handler_proxy,
                                         const void *act,
                                         const ACE_Time_Value &tv,
                                         ACE_HANDLE event,
                                         int priority,
                                         int signal_number)
{
  ACE_POSIX_Asynch_Timer *implementation;
  ACE_NEW_RETURN (implementation,
                  ACE_POSIX_Asynch_Timer (handler_proxy,
                                          act,
                                          tv,
                                          event,
                                          priority,
                                          signal_number),
                  0);
  return implementation;
}

void
ACE_POSIX_Proactor::application_specific_code (ACE_POSIX_Asynch_Result *asynch_result,
                                               size_t bytes_transferred,
                                               const void */* completion_key*/,
                                               u_long error)
{
  ACE_SEH_TRY
    {
      // Call the completion hook.
      asynch_result->complete (bytes_transferred,
                               error ? 0 : 1,
                               0, // No completion key.
                               error);
    }
  ACE_SEH_FINALLY
    {
      // This is crucial to prevent memory leaks.
      delete asynch_result;
    }
}

int
ACE_POSIX_Proactor::post_wakeup_completions (int how_many)
{
  ACE_POSIX_Wakeup_Completion *wakeup_completion = 0;

  for (int ci = 0; ci < how_many; ci++)
    {
      ACE_NEW_RETURN (wakeup_completion,
                      ACE_POSIX_Wakeup_Completion (this->wakeup_handler_.proxy ()),
                      -1);

      if (this->post_completion (wakeup_completion) == -1)
        return -1;
    }

  return 0;
}

ACE_POSIX_Proactor::Proactor_Type
ACE_POSIX_Proactor::get_impl_type (void)
{
  return PROACTOR_POSIX;
}

/**
 * @class ACE_AIOCB_Notify_Pipe_Manager
 *
 * @brief This class manages the notify pipe of the AIOCB Proactor.
 *
 * This class acts as the Handler for the <Asynch_Read> operations
 * issued on the notify pipe.  It is used to implement the
 * <Asynch_Accept> operation class for the <AIOCB_Proactor>, and is
 * also useful for implementing <post_completion> for the
 * <AIOCB_Proactor>.  The <AIOCB_Proactor> class issues an
 * <Asynch_Read> on the pipe, using this class as the Handler.
 * <POSIX_Asynch_Result *>'s are sent through the notify pipe.  When
 * <POSIX_Asynch_Result *>'s show up on the notify pipe, the
 * <POSIX_AIOCB_Proactor> dispatches the completion of the
 * <Asynch_Read_Stream> and calls the <handle_read_stream> of this
 * class.  This class calls <complete> on the <POSIX_Asynch_Result *>
 * and thus calls the application handler.
 * Handling the MessageBlock:
 * We use this message block to read the result pointer through the
 * notify pipe.  We expect to read 4 bytes from the notify pipe for
 * each <accept> call.  Before giving this message block to another
 * <accept>, we put <wr_ptr> back in its initial position.
 */
class ACE_AIOCB_Notify_Pipe_Manager : public ACE_Handler
{
public:
  /// Constructor.  You need the POSIX proactor because you need to call
  /// <application_specific_code>.
  ACE_AIOCB_Notify_Pipe_Manager (ACE_POSIX_AIOCB_Proactor *posix_aiocb_proactor);

  /// Destructor.
  virtual ~ACE_AIOCB_Notify_Pipe_Manager (void);

  /// Send the result pointer through the notification pipe.
  int notify ();

  /// This is the callback method invoked when the <Asynch_Read> from
  /// the pipe is complete.
  virtual void handle_read_stream (const ACE_Asynch_Read_Stream::Result &result);

private:
  /// The implementation proactor class.
  ACE_POSIX_AIOCB_Proactor *posix_aiocb_proactor_;

  /// Message block to get the ACE_POSIX_Asynch_Result pointer from the pipe.
  ACE_Message_Block message_block_;

  /// Pipe for the communication between the Proactor and the
  /// Asynch_Accept/Asynch_Connect and other post_completions.
  ACE_Pipe pipe_;

  /// To do asynch_read on the pipe.
  ACE_POSIX_Asynch_Read_Stream read_stream_;

  /// Default constructor.  Shouldn't be called.
  ACE_AIOCB_Notify_Pipe_Manager (void);
};

ACE_AIOCB_Notify_Pipe_Manager::ACE_AIOCB_Notify_Pipe_Manager (ACE_POSIX_AIOCB_Proactor *posix_aiocb_proactor)
  : posix_aiocb_proactor_ (posix_aiocb_proactor),
    message_block_ (sizeof (2)),
    read_stream_ (posix_aiocb_proactor)
{
  // Open the pipe.
  this->pipe_.open ();

  // Set the write side in NONBLOCK mode.
  ACE::set_flags (this->pipe_.write_handle (), ACE_NONBLOCK);

  // Set the read side in BLOCK mode.
  ACE::clr_flags (this->pipe_.read_handle (), ACE_NONBLOCK);

  // Let the AIOCB_Proactor know about our handle.
  posix_aiocb_proactor_->set_notify_handle (this->pipe_.read_handle ());

  // Open the read stream.
  if (this->read_stream_.open (this->proxy (),
                               this->pipe_.read_handle (),
                               0, // Completion Key
                               0) // Proactor
      == -1)
    ACE_ERROR ((LM_ERROR,
                ACE_TEXT("%N:%l:%p\n"),
                ACE_TEXT("ACE_AIOCB_Notify_Pipe_Manager::ACE_AIOCB_Notify_Pipe_Manager:")
                ACE_TEXT("Open on Read Stream failed")));

  // Issue an asynch_read on the read_stream of the notify pipe.
  if (this->read_stream_.read (this->message_block_,
                               1, // enough to read 1 byte
                               0, // ACT
                               0) // Priority
      == -1)
    ACE_ERROR ((LM_ERROR,
                ACE_TEXT("%N:%l:%p\n"),
                ACE_TEXT("ACE_AIOCB_Notify_Pipe_Manager::ACE_AIOCB_Notify_Pipe_Manager:")
                ACE_TEXT("Read from pipe failed")));
}

ACE_AIOCB_Notify_Pipe_Manager::~ACE_AIOCB_Notify_Pipe_Manager (void)
{
  // 1. Try to cancel the pending aio.
  this->read_stream_.cancel ();

  // 2. Close both handles.
  // The destructor of ACE_Pipe does not close the handles, and we
  // cannot use ACE_Pipe::close() because it closes read_handle and
  // then write_handle.  On some systems close() may wait for
  // completion of all pending asynchronous requests, so we close
  // write_handle first to force read completion (if 1. does not help)
  // and only then close read_handle, not vice versa.
  ACE_HANDLE h = this->pipe_.write_handle ();
  if (h != ACE_INVALID_HANDLE)
    ACE_OS::closesocket (h);

  h = this->pipe_.read_handle ();
  if (h != ACE_INVALID_HANDLE)
    ACE_OS::closesocket (h);
}

int
ACE_AIOCB_Notify_Pipe_Manager::notify ()
{
  // Send the result pointer through the pipe.
  char char_send = 0;
  ssize_t ret_val = ACE::send (this->pipe_.write_handle (),
                               &char_send,
                               sizeof (char_send));
  if (ret_val < 0)
    {
      if (errno != EWOULDBLOCK)
#if 0
        ACE_ERROR ((LM_ERROR,
                    ACE_TEXT ("(%P %t):%p\n"),
                    ACE_TEXT ("ACE_AIOCB_Notify_Pipe_Manager::notify")
                    ACE_TEXT ("Error:Writing on to notify pipe failed")));
#endif /* 0 */
      return -1;
    }
  return 0;
}

void
ACE_AIOCB_Notify_Pipe_Manager::handle_read_stream (const ACE_Asynch_Read_Stream::Result & /*result*/)
{
  // 1. Start a new read to avoid pipe overflow.

  // Set the message block properly.  Put the <wr_ptr> back in the
  // initial position.
  if (this->message_block_.length () > 0)
    this->message_block_.wr_ptr (this->message_block_.rd_ptr ());

  // One accept has completed.  Issue a read to handle any
  // <post_completion>s in the future.
  if (-1 == this->read_stream_.read (this->message_block_,
                                     1, // enough to read 1 byte
                                     0, // ACT
                                     0)) // Priority
    ACE_ERROR ((LM_ERROR,
                ACE_TEXT ("%N:%l:(%P | %t):%p\n"),
                ACE_TEXT ("ACE_AIOCB_Notify_Pipe_Manager::handle_read_stream:")
                ACE_TEXT ("Read from pipe failed")));

  // 2. Do the upcalls.
  // this->posix_aiocb_proactor_->process_result_queue ();
}

// Public constructor for common use.
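// (The protected constructor that follows it is reserved for derived
// proactors such as ACE_SUN_Proactor, which are expected to create
// their own notify manager -- see the @@ note below.)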
ACE_POSIX_AIOCB_Proactor::ACE_POSIX_AIOCB_Proactor (size_t max_aio_operations)
  : aiocb_notify_pipe_manager_ (0),
    aiocb_list_ (0),
    result_list_ (0),
    aiocb_list_max_size_ (max_aio_operations),
    aiocb_list_cur_size_ (0),
    notify_pipe_read_handle_ (ACE_INVALID_HANDLE),
    num_deferred_aiocb_ (0),
    num_started_aio_ (0)
{
  // Check for a correct value of max_aio_operations.
  check_max_aio_num ();

  this->create_result_aiocb_list ();

  this->create_notify_manager ();

  // Start the pseudo-asynchronous accept task,
  // one for all future acceptors.
  this->get_asynch_pseudo_task().start ();
}

// Special protected constructor for ACE_SUN_Proactor.
ACE_POSIX_AIOCB_Proactor::ACE_POSIX_AIOCB_Proactor (size_t max_aio_operations,
                                                    ACE_POSIX_Proactor::Proactor_Type)
  : aiocb_notify_pipe_manager_ (0),
    aiocb_list_ (0),
    result_list_ (0),
    aiocb_list_max_size_ (max_aio_operations),
    aiocb_list_cur_size_ (0),
    notify_pipe_read_handle_ (ACE_INVALID_HANDLE),
    num_deferred_aiocb_ (0),
    num_started_aio_ (0)
{
  // Check for a correct value of max_aio_operations.
  this->check_max_aio_num ();

  this->create_result_aiocb_list ();

  // @@ We should create the Notify_Pipe_Manager in the derived class to
  //    provide correct calls for virtual functions !!!
}

// Destructor.
ACE_POSIX_AIOCB_Proactor::~ACE_POSIX_AIOCB_Proactor (void)
{
  this->close();
}

ACE_POSIX_Proactor::Proactor_Type
ACE_POSIX_AIOCB_Proactor::get_impl_type (void)
{
  return PROACTOR_AIOCB;
}

int
ACE_POSIX_AIOCB_Proactor::close (void)
{
  // Stop the asynch accept task.
  this->get_asynch_pseudo_task().stop ();

  this->delete_notify_manager ();

  this->clear_result_queue ();

  return this->delete_result_aiocb_list ();
}

void
ACE_POSIX_AIOCB_Proactor::set_notify_handle (ACE_HANDLE h)
{
  notify_pipe_read_handle_ = h;
}

int
ACE_POSIX_AIOCB_Proactor::create_result_aiocb_list (void)
{
  if (aiocb_list_ != 0)
    return 0;

  ACE_NEW_RETURN (aiocb_list_, aiocb *[aiocb_list_max_size_], -1);

  ACE_NEW_RETURN (result_list_,
                  ACE_POSIX_Asynch_Result *[aiocb_list_max_size_],
                  -1);

  // Initialize the arrays.
  for (size_t ai = 0; ai < this->aiocb_list_max_size_; ai++)
    {
      aiocb_list_[ai] = 0;
      result_list_[ai] = 0;
    }

  return 0;
}

int
ACE_POSIX_AIOCB_Proactor::delete_result_aiocb_list (void)
{
  if (aiocb_list_ == 0) // already deleted
    return 0;

  size_t ai;

  // Try to cancel all uncompleted operations; POSIX systems may have
  // hidden system threads that still can work with our aiocbs!
  for (ai = 0; ai < aiocb_list_max_size_; ai++)
    if (this->aiocb_list_[ai] != 0) // active operation
      this->cancel_aiocb (result_list_[ai]);

  int num_pending = 0;

  for (ai = 0; ai < aiocb_list_max_size_; ai++)
    {
      if (this->aiocb_list_[ai] == 0) // not an active operation
        continue;

      // Get the error and return status of the aio_ operation.
      int error_status = 0;
      size_t transfer_count = 0;
      int flg_completed = this->get_result_status (result_list_[ai],
                                                   error_status,
                                                   transfer_count);

      // Don't delete uncompleted AIOCBs.
      if (flg_completed == 0) // not completed !!!
        {
          num_pending++;
#if 0
          char * errtxt = ACE_OS::strerror (error_status);
          if (errtxt == 0)
            errtxt = "?????????";
          char * op = (aiocb_list_[ai]->aio_lio_opcode == LIO_WRITE) ? "WRITE" : "READ";
          ACE_ERROR ((LM_ERROR,
                      ACE_TEXT("slot=%d op=%s status=%d xfercnt=%d %s\n"),
                      ai,
                      op,
                      error_status,
                      transfer_count,
                      errtxt));
#endif /* 0 */
        }
      else // completed, OK
        {
          delete this->result_list_[ai];
          this->result_list_[ai] = 0;
          this->aiocb_list_[ai] = 0;
        }
    }

  // If it was not possible to cancel some operations (num_pending > 0),
  // the only thing we can do is report it and complain about the POSIX
  // implementation.  We know that we have memory leaks, but it is
  // better than a segmentation fault!
  ACE_DEBUG ((LM_DEBUG,
              ACE_TEXT("ACE_POSIX_AIOCB_Proactor::delete_result_aiocb_list\n")
              ACE_TEXT(" number pending AIO=%d\n"),
              num_pending));

  delete [] this->aiocb_list_;
  this->aiocb_list_ = 0;

  delete [] this->result_list_;
  this->result_list_ = 0;

  return (num_pending == 0 ? 0 : -1); // ?? or just always return 0;
}

void
ACE_POSIX_AIOCB_Proactor::check_max_aio_num ()
{
  long max_os_aio_num = ACE_OS::sysconf (_SC_AIO_MAX);

  // Define the max AIO limit for the concrete OS.
  // -1 means that there is no limit, but that is not always true
  // (for example, SunOS 5.6).
  if (max_os_aio_num > 0 &&
      aiocb_list_max_size_ > (unsigned long) max_os_aio_num)
    aiocb_list_max_size_ = max_os_aio_num;

#if defined (HPUX) || defined (__FreeBSD__)
  // Although HP-UX 11.00 allows 2048 AIOs to be started for all
  // processes in the system, it has a limit of 256 elements for
  // aio_suspend ().  It is a pity, but ...
  long max_os_listio_num = ACE_OS::sysconf (_SC_AIO_LISTIO_MAX);
  if (max_os_listio_num > 0 &&
      aiocb_list_max_size_ > (unsigned long) max_os_listio_num)
    aiocb_list_max_size_ = max_os_listio_num;
#endif /* HPUX || __FreeBSD__ */

  // Check for the user-defined value,
  // ACE_AIO_MAX_SIZE, if defined in POSIX_Proactor.h.
  if (aiocb_list_max_size_ <= 0 ||
      aiocb_list_max_size_ > ACE_AIO_MAX_SIZE)
    aiocb_list_max_size_ = ACE_AIO_MAX_SIZE;

  // Check the max number of files to open.
  int max_num_files = ACE::max_handles ();

  if (max_num_files > 0 &&
      aiocb_list_max_size_ > (unsigned long) max_num_files)
    {
      ACE::set_handle_limit (aiocb_list_max_size_);

      max_num_files = ACE::max_handles ();
    }

  if (max_num_files > 0 &&
      aiocb_list_max_size_ > (unsigned long) max_num_files)
    aiocb_list_max_size_ = (unsigned long) max_num_files;

  ACE_DEBUG ((LM_DEBUG,
              "(%P | %t) ACE_POSIX_AIOCB_Proactor::Max Number of AIOs=%d\n",
              aiocb_list_max_size_));

#if defined(__sgi)
  ACE_DEBUG((LM_DEBUG,
             ACE_TEXT( "SGI IRIX specific: aio_init!\n")));

  //typedef struct aioinit {
  //  int aio_threads;    /* The number of aio threads to start (5) */
  //  int aio_locks;      /* Initial number of preallocated locks (3) */
  //  int aio_num;        /* estimated total simultaneous aiobc structs (1000) */
  //  int aio_usedba;     /* Try to use DBA for raw I/O in lio_listio (0) */
  //  int aio_debug;      /* turn on debugging (0) */
  //  int aio_numusers;   /* max number of user sprocs making aio_* calls (5) */
  //  int aio_reserved[3];
  //} aioinit_t;
  aioinit_t aioinit;

  aioinit.aio_threads = 10;   /* The number of aio threads to start (5) */
  aioinit.aio_locks = 20;     /* Initial number of preallocated locks (3) */
  /* estimated total simultaneous aiobc structs (1000) */
  aioinit.aio_num = aiocb_list_max_size_;
  aioinit.aio_usedba = 0;     /* Try to use DBA for raw IO in lio_listio (0) */
  aioinit.aio_debug = 0;      /* turn on debugging (0) */
  aioinit.aio_numusers = 100; /* max number of user sprocs making aio_* calls (5) */
  aioinit.aio_reserved[0] = 0;
  aioinit.aio_reserved[1] = 0;
  aioinit.aio_reserved[2] = 0;

  aio_sgi_init (&aioinit);
#endif

  return;
}

void
ACE_POSIX_AIOCB_Proactor::create_notify_manager (void)
{
  // Remember!  This issues an Asynch_Read
  // on the notify pipe for doing the Asynch_Accept/Connect.
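  // To make the notification round trip concrete, here is an
  // illustrative sketch (not part of ACE; "my_proactor" and
  // "my_result" are hypothetical):
#if 0
  ACE_POSIX_AIOCB_Proactor *my_proactor = /* obtained elsewhere */ 0;
  ACE_POSIX_Asynch_Result *my_result = /* allocated elsewhere */ 0;

  // post_completion() -> putq_result() enqueues the result, and
  // notify() writes one byte into the pipe's write end.
  my_proactor->post_completion (my_result);

  // The byte completes the Asynch_Read armed by the manager, the event
  // loop dispatches ACE_AIOCB_Notify_Pipe_Manager::handle_read_stream(),
  // which re-arms the read, and process_result_queue() then runs the
  // result's completion hook via application_specific_code().
  my_proactor->handle_events ();
#endif /* 0 */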
  if (aiocb_notify_pipe_manager_ == 0)
    ACE_NEW (aiocb_notify_pipe_manager_,
             ACE_AIOCB_Notify_Pipe_Manager (this));
}

void
ACE_POSIX_AIOCB_Proactor::delete_notify_manager (void)
{
  // We are responsible for the delete.  Since all pointers are set to
  // 0 after the delete, it is safe to call this method twice.
  delete aiocb_notify_pipe_manager_;
  aiocb_notify_pipe_manager_ = 0;
}

int
ACE_POSIX_AIOCB_Proactor::handle_events (ACE_Time_Value &wait_time)
{
  // Decrement <wait_time> with the amount of time spent in the method.
  ACE_Countdown_Time countdown (&wait_time);
  return this->handle_events_i (wait_time.msec ());
}

int
ACE_POSIX_AIOCB_Proactor::handle_events (void)
{
  return this->handle_events_i (ACE_INFINITE);
}

int
ACE_POSIX_AIOCB_Proactor::notify_completion(int sig_num)
{
  ACE_UNUSED_ARG (sig_num);
  return this->aiocb_notify_pipe_manager_->notify ();
}

int
ACE_POSIX_AIOCB_Proactor::post_completion (ACE_POSIX_Asynch_Result *result)
{
  ACE_MT (ACE_GUARD_RETURN (ACE_SYNCH_MUTEX, ace_mon, this->mutex_, -1));

  int ret_val = this->putq_result (result);

  return ret_val;
}

int
ACE_POSIX_AIOCB_Proactor::putq_result (ACE_POSIX_Asynch_Result *result)
{
  // This protected method should be called with the mutex_ locked.
  // We can't use a GUARD here as the Proactor uses a non-recursive mutex.

  if (!result)
    return -1;

  int sig_num = result->signal_number ();
  int ret_val = this->result_queue_.enqueue_tail (result);

  if (ret_val == -1)
    ACE_ERROR_RETURN ((LM_ERROR,
                       "%N:%l:ACE_POSIX_AIOCB_Proactor::putq_result failed\n"),
                      -1);

  this->notify_completion (sig_num);

  return 0;
}

ACE_POSIX_Asynch_Result *
ACE_POSIX_AIOCB_Proactor::getq_result (void)
{
  ACE_MT (ACE_GUARD_RETURN (ACE_SYNCH_MUTEX, ace_mon, this->mutex_, 0));

  ACE_POSIX_Asynch_Result* result = 0;

  if (this->result_queue_.dequeue_head (result) != 0)
    return 0;

  // Don't waste time if the queue is empty - that is normal;
  // alternatively, check the queue size before dequeue_head.
  // ACE_ERROR_RETURN ((LM_ERROR,
  //                    ACE_TEXT("%N:%l:(%P | %t):%p\n"),
  //                    ACE_TEXT("ACE_POSIX_AIOCB_Proactor::getq_result failed")),
  //                   0);

  return result;
}

int
ACE_POSIX_AIOCB_Proactor::clear_result_queue (void)
{
  int ret_val = 0;

  ACE_POSIX_Asynch_Result* result = 0;

  while ((result = this->getq_result ()) != 0)
    {
      delete result;
      ret_val++;
    }

  return ret_val;
}

int
ACE_POSIX_AIOCB_Proactor::process_result_queue (void)
{
  int ret_val = 0;

  ACE_POSIX_Asynch_Result* result = 0;

  while ((result = this->getq_result ()) != 0)
    {
      this->application_specific_code (result,
                                       result->bytes_transferred(), // 0, No bytes transferred.
                                       0,   // No completion key.
                                       result->error()); // 0, No error.
      ret_val++;
    }

  return ret_val;
}

int
ACE_POSIX_AIOCB_Proactor::handle_events_i (u_long milli_seconds)
{
  int result_suspend = 0;
  int retval = 0;

  if (milli_seconds == ACE_INFINITE) // Indefinite blocking.
result_suspend = aio_suspend (aiocb_list_, aiocb_list_max_size_, 0); else { // Block on <aio_suspend> for <milli_seconds> timespec timeout; timeout.tv_sec = milli_seconds / 1000; timeout.tv_nsec = (milli_seconds - (timeout.tv_sec * 1000)) * 1000000; result_suspend = aio_suspend (aiocb_list_, aiocb_list_max_size_, &timeout); } // Check for errors if (result_suspend == -1) { if (errno != EAGAIN && // Timeout errno != EINTR ) // Interrupted call ACE_ERROR ((LM_ERROR, ACE_TEXT ("%N:%l:(%P|%t)::%p\n"), ACE_TEXT ("handle_events: aio_suspend failed"))); // let continue work // we should check "post_completed" queue } else { size_t index = 0; size_t count = aiocb_list_max_size_; // max number to iterate int error_status = 0; size_t transfer_count = 0; for (;; retval++) { ACE_POSIX_Asynch_Result *asynch_result = find_completed_aio (error_status, transfer_count, index, count); if (asynch_result == 0) break; // Call the application code. this->application_specific_code (asynch_result, transfer_count, 0, // No completion key. error_status); } } // process post_completed results retval += this->process_result_queue (); return retval > 0 ? 1 : 0; } int ACE_POSIX_AIOCB_Proactor::get_result_status (ACE_POSIX_Asynch_Result *asynch_result, int &error_status, size_t &transfer_count) { transfer_count = 0; // Get the error status of the aio_ operation. // The following aio_ptr anathema is required to work around a bug in an over-aggressive // optimizer in GCC 4.1.2. aiocb *aio_ptr (asynch_result); error_status = aio_error (aio_ptr); if (error_status == EINPROGRESS) return 0; // not completed ssize_t op_return = aio_return (aio_ptr); if (op_return > 0) transfer_count = static_cast<size_t> (op_return); // else transfer_count is already 0, error_status reports the error. return 1; // completed } ACE_POSIX_Asynch_Result * ACE_POSIX_AIOCB_Proactor::find_completed_aio (int &error_status, size_t &transfer_count, size_t &index, size_t &count) { // parameter index defines initial slot to scan // parameter count tells us how many slots should we scan ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->mutex_, 0)); ACE_POSIX_Asynch_Result *asynch_result = 0; if (num_started_aio_ == 0) // save time return 0; for (; count > 0; index++ , count--) { if (index >= aiocb_list_max_size_) // like a wheel index = 0; if (aiocb_list_[index] == 0) // Dont process null blocks. continue; if (0 != this->get_result_status (result_list_[index], error_status, transfer_count)) // completed break; } // end for if (count == 0) // all processed , nothing found return 0; asynch_result = result_list_[index]; aiocb_list_[index] = 0; result_list_[index] = 0; aiocb_list_cur_size_--; num_started_aio_--; // decrement count active aios index++; // for next iteration count--; // for next iteration this->start_deferred_aio (); //make attempt to start deferred AIO //It is safe as we are protected by mutex_ return asynch_result; } int ACE_POSIX_AIOCB_Proactor::start_aio (ACE_POSIX_Asynch_Result *result, ACE_POSIX_Proactor::Opcode op) { ACE_TRACE ("ACE_POSIX_AIOCB_Proactor::start_aio"); ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->mutex_, -1)); int ret_val = (aiocb_list_cur_size_ >= aiocb_list_max_size_) ? 
-1 : 0; if (result == 0) // Just check the status of the list return ret_val; // Save operation code in the aiocb switch (op) { case ACE_POSIX_Proactor::ACE_OPCODE_READ: result->aio_lio_opcode = LIO_READ; break; case ACE_POSIX_Proactor::ACE_OPCODE_WRITE: result->aio_lio_opcode = LIO_WRITE; break; default: ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%N:%l:(%P|%t)::") ACE_TEXT ("start_aio: Invalid op code %d\n"), op), -1); } if (ret_val != 0) // No free slot { errno = EAGAIN; return -1; } // Find a free slot and store. ssize_t slot = allocate_aio_slot (result); if (slot < 0) return -1; size_t index = static_cast<size_t> (slot); result_list_[index] = result; //Store result ptr anyway aiocb_list_cur_size_++; ret_val = start_aio_i (result); switch (ret_val) { case 0: // started OK aiocb_list_[index] = result; return 0; case 1: // OS AIO queue overflow num_deferred_aiocb_ ++; return 0; default: // Invalid request, there is no point break; // to start it later } result_list_[index] = 0; aiocb_list_cur_size_--; return -1; } ssize_t ACE_POSIX_AIOCB_Proactor::allocate_aio_slot (ACE_POSIX_Asynch_Result *result) { size_t i = 0; // we reserve zero slot for ACE_AIOCB_Notify_Pipe_Manager // so make check for ACE_AIOCB_Notify_Pipe_Manager request if (notify_pipe_read_handle_ == result->aio_fildes) // Notify_Pipe ? { // should be free, if (result_list_[i] != 0) // only 1 request { // is allowed errno = EAGAIN; ACE_ERROR_RETURN ((LM_ERROR, "%N:%l:(%P | %t)::\n" "ACE_POSIX_AIOCB_Proactor::allocate_aio_slot:" "internal Proactor error 0\n"), -1); } } else //try to find free slot as usual, but starting from 1 { for (i= 1; i < this->aiocb_list_max_size_; i++) if (result_list_[i] == 0) break; } if (i >= this->aiocb_list_max_size_) ACE_ERROR_RETURN ((LM_ERROR, "%N:%l:(%P | %t)::\n" "ACE_POSIX_AIOCB_Proactor::allocate_aio_slot:" "internal Proactor error 1\n"), -1); //setup OS notification methods for this aio result->aio_sigevent.sigev_notify = SIGEV_NONE; return static_cast<ssize_t> (i); } // start_aio_i has new return codes // 0 AIO was started successfully // 1 AIO was not started, OS AIO queue overflow // -1 AIO was not started, other errors int ACE_POSIX_AIOCB_Proactor::start_aio_i (ACE_POSIX_Asynch_Result *result) { ACE_TRACE ("ACE_POSIX_AIOCB_Proactor::start_aio_i"); int ret_val; const ACE_TCHAR *ptype = 0; // Start IO // The following aio_ptr anathema is required to work around a bug in // the optimizer for GCC 4.1.2 aiocb * aio_ptr (result); switch (result->aio_lio_opcode ) { case LIO_READ : ptype = ACE_TEXT ("read "); ret_val = aio_read (aio_ptr); break; case LIO_WRITE : ptype = ACE_TEXT ("write"); ret_val = aio_write (aio_ptr); break; default: ptype = ACE_TEXT ("?????"); ret_val = -1; break; } if (ret_val == 0) { ++this->num_started_aio_; } else // if (ret_val == -1) { if (errno == EAGAIN || errno == ENOMEM) //Ok, it will be deferred AIO ret_val = 1; else ACE_ERROR ((LM_ERROR, ACE_TEXT ("%N:%l:(%P | %t)::start_aio_i: aio_%s %p\n"), ptype, ACE_TEXT ("queueing failed"))); } return ret_val; } int ACE_POSIX_AIOCB_Proactor::start_deferred_aio () { ACE_TRACE ("ACE_POSIX_AIOCB_Proactor::start_deferred_aio"); // This protected method is called from // find_completed_aio after any AIO completion // We should call this method always with locked // ACE_POSIX_AIOCB_Proactor::mutex_ // // It tries to start the first deferred AIO // if such exists if (num_deferred_aiocb_ == 0) return 0; // nothing to do size_t i = 0; for (i= 0; i < this->aiocb_list_max_size_; i++) if (result_list_[i] !=0 // check for && aiocb_list_[i] 
==0) // deferred AIO break; if (i >= this->aiocb_list_max_size_) ACE_ERROR_RETURN ((LM_ERROR, "%N:%l:(%P | %t)::\n" "start_deferred_aio:" "internal Proactor error 3\n"), -1); ACE_POSIX_Asynch_Result *result = result_list_[i]; int ret_val = start_aio_i (result); switch (ret_val) { case 0 : //started OK , decrement count of deferred AIOs aiocb_list_[i] = result; num_deferred_aiocb_ --; return 0; case 1 : return 0; //try again later default : // Invalid Parameters , should never be break; } //AL notify user result_list_[i] = 0; --aiocb_list_cur_size_; --num_deferred_aiocb_; result->set_error (errno); result->set_bytes_transferred (0); this->putq_result (result); // we are with locked mutex_ here ! return -1; } int ACE_POSIX_AIOCB_Proactor::cancel_aio (ACE_HANDLE handle) { // This new method should be called from // ACE_POSIX_Asynch_Operation instead of usual ::aio_cancel // It scans the result_list_ and defines all AIO requests // that were issued for handle "handle" // // For all deferred AIO requests with handle "handle" // it removes its from the lists and notifies user // // For all running AIO requests with handle "handle" // it calls ::aio_cancel. According to the POSIX standards // we will receive ECANCELED for all ::aio_canceled AIO requests // later on return from ::aio_suspend ACE_TRACE ("ACE_POSIX_AIOCB_Proactor::cancel_aio"); int num_total = 0; int num_cancelled = 0; { ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->mutex_, -1)); size_t ai = 0; for (ai = 0; ai < this->aiocb_list_max_size_; ai++) { if (this->result_list_[ai] == 0) // Skip empty slot continue; if (this->result_list_[ai]->aio_fildes != handle) // Not ours continue; ++num_total; ACE_POSIX_Asynch_Result *asynch_result = this->result_list_[ai]; if (this->aiocb_list_[ai] == 0) // Canceling a deferred operation { num_cancelled++; this->num_deferred_aiocb_--; this->aiocb_list_[ai] = 0; this->result_list_[ai] = 0; this->aiocb_list_cur_size_--; asynch_result->set_error (ECANCELED); asynch_result->set_bytes_transferred (0); this->putq_result (asynch_result); // we are with locked mutex_ here ! } else // Cancel started aio { int rc_cancel = this->cancel_aiocb (asynch_result); if (rc_cancel == 0) //notification in the future num_cancelled++; //it is OS responsiblity } } } // release mutex_ if (num_total == 0) return 1; // ALLDONE if (num_cancelled == num_total) return 0; // CANCELLED return 2; // NOT CANCELLED } int ACE_POSIX_AIOCB_Proactor::cancel_aiocb (ACE_POSIX_Asynch_Result * result) { // This method is called from cancel_aio // to cancel a previously submitted AIO request int rc = ::aio_cancel (0, result); // Check the return value and return 0/1/2 appropriately. if (rc == AIO_CANCELED) return 0; else if (rc == AIO_ALLDONE) return 1; else // (rc == AIO_NOTCANCELED) return 2; } // ********************************************************************* #if defined(ACE_HAS_POSIX_REALTIME_SIGNALS) ACE_POSIX_SIG_Proactor::ACE_POSIX_SIG_Proactor (size_t max_aio_operations) : ACE_POSIX_AIOCB_Proactor (max_aio_operations, ACE_POSIX_Proactor::PROACTOR_SIG) { // = Set up the mask we'll use to block waiting for SIGRTMIN. Use that // to add it to the signal mask for this thread, and also set the process // signal action to pass signal information when we want it. // Clear the signal set. ACE_OS::sigemptyset (&this->RT_completion_signals_); // Add the signal number to the signal set. 
if (ACE_OS::sigaddset (&this->RT_completion_signals_, ACE_SIGRTMIN) == -1) ACE_ERROR ((LM_ERROR, ACE_TEXT ("ACE_POSIX_SIG_Proactor: %p\n"), ACE_TEXT ("sigaddset"))); this->block_signals (); // Set up the signal action for SIGRTMIN. this->setup_signal_handler (ACE_SIGRTMIN); // we do not have to create notify manager // but we should start pseudo-asynchronous accept task // one per all future acceptors this->get_asynch_pseudo_task().start (); return; } ACE_POSIX_SIG_Proactor::ACE_POSIX_SIG_Proactor (const sigset_t signal_set, size_t max_aio_operations) : ACE_POSIX_AIOCB_Proactor (max_aio_operations, ACE_POSIX_Proactor::PROACTOR_SIG) { // = Keep <Signal_set> with the Proactor, mask all the signals and // setup signal actions for the signals in the <signal_set>. // = Keep <signal_set> with the Proactor. // Empty the signal set first. if (sigemptyset (&this->RT_completion_signals_) == -1) ACE_ERROR ((LM_ERROR, ACE_TEXT("Error:(%P | %t):%p\n"), ACE_TEXT("sigemptyset failed"))); // For each signal number present in the <signal_set>, add it to // the signal set we use, and also set up its process signal action // to allow signal info to be passed into sigwait/sigtimedwait. int member = 0; for (int si = ACE_SIGRTMIN; si <= ACE_SIGRTMAX; si++) { member = sigismember (&signal_set, si); if (member == -1) ACE_ERROR ((LM_ERROR, ACE_TEXT("%N:%l:(%P | %t)::%p\n"), ACE_TEXT("ACE_POSIX_SIG_Proactor::ACE_POSIX_SIG_Proactor:") ACE_TEXT("sigismember failed"))); else if (member == 1) { sigaddset (&this->RT_completion_signals_, si); this->setup_signal_handler (si); } } // Mask all the signals. this->block_signals (); // we do not have to create notify manager // but we should start pseudo-asynchronous accept task // one per all future acceptors this->get_asynch_pseudo_task().start (); return; } ACE_POSIX_SIG_Proactor::~ACE_POSIX_SIG_Proactor (void) { this->close (); // @@ Enable the masked signals again. } ACE_POSIX_Proactor::Proactor_Type ACE_POSIX_SIG_Proactor::get_impl_type (void) { return PROACTOR_SIG; } int ACE_POSIX_SIG_Proactor::handle_events (ACE_Time_Value &wait_time) { // Decrement <wait_time> with the amount of time spent in the method ACE_Countdown_Time countdown (&wait_time); return this->handle_events_i (&wait_time); } int ACE_POSIX_SIG_Proactor::handle_events (void) { return this->handle_events_i (0); } int ACE_POSIX_SIG_Proactor::notify_completion (int sig_num) { // Get this process id. pid_t const pid = ACE_OS::getpid (); if (pid == (pid_t) -1) ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT("Error:%N:%l(%P | %t):%p"), ACE_TEXT("<getpid> failed")), -1); // Set the signal information. sigval value; #if defined (ACE_HAS_SIGVAL_SIGVAL_INT) value.sigval_int = -1; #else value.sival_int = -1; #endif /* ACE_HAS_SIGVAL_SIGVAL_INT */ // Queue the signal. if (sigqueue (pid, sig_num, value) == 0) return 0; if (errno != EAGAIN) ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT("Error:%N:%l:(%P | %t):%p\n"), ACE_TEXT("<sigqueue> failed")), -1); return -1; } ACE_Asynch_Result_Impl * ACE_POSIX_SIG_Proactor::create_asynch_timer (const ACE_Handler::Proxy_Ptr &handler_proxy, const void *act, const ACE_Time_Value &tv, ACE_HANDLE event, int priority, int signal_number) { int is_member = 0; // Fix the signal number. 
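  // If no signal number was supplied (-1), the loop below picks the
  // highest member of RT_completion_signals_, scanning down from
  // ACE_SIGRTMAX.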
  if (signal_number == -1)
    {
      int si;
      for (si = ACE_SIGRTMAX;
           (is_member == 0) && (si >= ACE_SIGRTMIN);
           si--)
        {
          is_member = sigismember (&this->RT_completion_signals_, si);
          if (is_member == -1)
            ACE_ERROR_RETURN ((LM_ERROR,
                               "%N:%l:(%P | %t)::%s\n",
                               "ACE_POSIX_SIG_Proactor::create_asynch_timer:"
                               "sigismember failed"),
                              0);
        }

      if (is_member == 0)
        ACE_ERROR_RETURN ((LM_ERROR,
                           "Error:%N:%l:(%P | %t)::%s\n",
                           "ACE_POSIX_SIG_Proactor::ACE_POSIX_SIG_Proactor:"
                           "Signal mask set empty"),
                          0);
      else
        // + 1 to compensate for the final loop decrement.
        signal_number = si + 1;
    }

  ACE_Asynch_Result_Impl *implementation;
  ACE_NEW_RETURN (implementation,
                  ACE_POSIX_Asynch_Timer (handler_proxy,
                                          act,
                                          tv,
                                          event,
                                          priority,
                                          signal_number),
                  0);
  return implementation;
}

#if 0
static void
sig_handler (int sig_num, siginfo_t *, ucontext_t *)
{
  // Should never be called.
  ACE_DEBUG ((LM_DEBUG,
              "%N:%l:(%P | %t)::sig_handler received signal: %d\n",
              sig_num));
}
#endif /*if 0*/

int
ACE_POSIX_SIG_Proactor::setup_signal_handler (int signal_number) const
{
  // Set up the specified signal so that signal information will be
  // passed to sigwaitinfo/sigtimedwait.  Don't change the default
  // signal handler - having a handler and waiting for the signal can
  // produce undefined behavior.
  // But we cannot use SIG_DFL either:
  // with SIG_DFL, after delivering the first signal
  // the SIG_DFL handler resets the SA_SIGINFO flag
  // and we would lose all siginfo information.
  // At least all SunOS versions behave this way.
#if 0
  struct sigaction reaction;
  sigemptyset (&reaction.sa_mask);   // Nothing else to mask.
  reaction.sa_flags = SA_SIGINFO;    // Realtime flag.
  reaction.sa_sigaction = ACE_SIGNAL_C_FUNC (sig_handler); // (SIG_DFL);
  int sigaction_return = ACE_OS::sigaction (signal_number,
                                            &reaction,
                                            0);
  if (sigaction_return == -1)
    ACE_ERROR_RETURN ((LM_ERROR,
                       ACE_TEXT("Error:%p\n"),
                       ACE_TEXT("Proactor couldn't do sigaction for the RT SIGNAL")),
                      -1);
#else
  ACE_UNUSED_ARG(signal_number);
#endif

  return 0;
}

int
ACE_POSIX_SIG_Proactor::block_signals (void) const
{
  return ACE_OS::pthread_sigmask (SIG_BLOCK, &this->RT_completion_signals_, 0);
}

ssize_t
ACE_POSIX_SIG_Proactor::allocate_aio_slot (ACE_POSIX_Asynch_Result *result)
{
  size_t i = 0;

  // Try to find a free slot as usual, starting from 0.
  for (i = 0; i < this->aiocb_list_max_size_; i++)
    if (result_list_[i] == 0)
      break;

  if (i >= this->aiocb_list_max_size_)
    ACE_ERROR_RETURN ((LM_ERROR,
                       "%N:%l:(%P | %t)::\n"
                       "ACE_POSIX_SIG_Proactor::allocate_aio_slot "
                       "internal Proactor error 1\n"),
                      -1);

  // Set up the OS notification methods for this aio.
  // Store the index!!, not a pointer, in the signal info.
  result->aio_sigevent.sigev_notify = SIGEV_SIGNAL;
  result->aio_sigevent.sigev_signo = result->signal_number ();
#if defined (ACE_HAS_SIGVAL_SIGVAL_INT)
  result->aio_sigevent.sigev_value.sigval_int = static_cast<int> (i);
#else
  result->aio_sigevent.sigev_value.sival_int = static_cast<int> (i);
#endif /* ACE_HAS_SIGVAL_SIGVAL_INT */

  return static_cast<ssize_t> (i);
}

int
ACE_POSIX_SIG_Proactor::handle_events_i (const ACE_Time_Value *timeout)
{
  int result_sigwait = 0;
  siginfo_t sig_info;

  do
    {
      // Wait for the signals.
      if (timeout == 0)
        {
          result_sigwait = ACE_OS::sigwaitinfo (&this->RT_completion_signals_,
                                                &sig_info);
        }
      else
        {
          result_sigwait = ACE_OS::sigtimedwait (&this->RT_completion_signals_,
                                                 &sig_info,
                                                 timeout);
          if (result_sigwait == -1 && errno == EAGAIN)
            return 0;
        }
    }
  while (result_sigwait == -1 && errno == EINTR);

  if (result_sigwait == -1) // Not a timeout, not EINTR: tell caller of error
    return -1;

  // Decide what to do.  We always check the completion queue since
  // it's an easy, quick check.  What is decided here is whether to
  // check for I/O completions and, if so, how completely to scan.
  int flg_aio = 0;          // 1 if AIO Completion possible
  size_t index = 0;         // start index to scan aiocb list
  size_t count = 1;         // max number of aiocbs to scan
  int error_status = 0;
  size_t transfer_count = 0;

  if (sig_info.si_code == SI_ASYNCIO || this->os_id_ == ACE_OS_SUN_56)
    {
      flg_aio = 1; // AIO signal received.

      // Define the index to start at;
      // nothing bad will happen if it contains garbage.
#if defined (ACE_HAS_SIGVAL_SIGVAL_INT)
      index = static_cast<size_t> (sig_info.si_value.sigval_int);
#else
      index = static_cast<size_t> (sig_info.si_value.sival_int);
#endif /* ACE_HAS_SIGVAL_SIGVAL_INT */

      // Assume we have a correctly-functioning implementation, and that
      // there is one I/O to process, and it's correctly specified in the
      // siginfo received.  There are, however, some special situations
      // where this isn't true...
      if (os_id_ == ACE_OS_SUN_56) // Solaris 6
        {
          // 1. Solaris 6 always loses any RT signal
          //    if it has more than SIGQUEMAX=32 pending signals,
          //    so we should scan the whole aiocb list.
          // 2. Moreover, it has one more bad habit:
          //    it notifies aio completion
          //    with the SI_QUEUE code instead of SI_ASYNCIO, hence the
          //    OS_SUN_56 addition to the si_code check, above.
          count = aiocb_list_max_size_;
        }
    }
  else if (sig_info.si_code != SI_QUEUE)
    {
      // Unknown signal code.
      // Some third-party library may have sent it,
      // or a message queue may have generated it,
      // so print a message and check our completions anyway.
      ACE_ERROR ((LM_DEBUG,
                  ACE_TEXT ("%N:%l:(%P | %t): ")
                  ACE_TEXT ("ACE_POSIX_SIG_Proactor::handle_events: ")
                  ACE_TEXT ("Unexpected signal code (%d) returned ")
                  ACE_TEXT ("from sigwait; expecting %d\n"),
                  result_sigwait,
                  sig_info.si_code));
      flg_aio = 1;
    }

  int ret_aio = 0;
  int ret_que = 0;

  if (flg_aio)
    for (;; ret_aio++)
      {
        ACE_POSIX_Asynch_Result *asynch_result =
          find_completed_aio (error_status,
                              transfer_count,
                              index,
                              count);

        if (asynch_result == 0)
          break;

        // Call the application code.
        this->application_specific_code (asynch_result,
                                         transfer_count,
                                         0, // No completion key.
                                         error_status); // Error
      }

  // Process post_completed results.
  ret_que = this->process_result_queue ();

  // Uncomment this if you want to test
  // and research the behavior of your system.
#if 0
  ACE_DEBUG ((LM_DEBUG,
              "(%t) NumAIO=%d NumQueue=%d\n",
              ret_aio,
              ret_que));
#endif

  return ret_aio + ret_que > 0 ?
1 : 0; } #endif /* ACE_HAS_POSIX_REALTIME_SIGNALS */ // ********************************************************************* ACE_POSIX_Asynch_Timer::ACE_POSIX_Asynch_Timer (const ACE_Handler::Proxy_Ptr &handler_proxy, const void *act, const ACE_Time_Value &tv, ACE_HANDLE event, int priority, int signal_number) : ACE_POSIX_Asynch_Result (handler_proxy, act, event, 0, 0, priority, signal_number), time_ (tv) { } void ACE_POSIX_Asynch_Timer::complete (size_t /* bytes_transferred */, int /* success */, const void * /* completion_key */, u_long /* error */) { ACE_Handler *handler = this->handler_proxy_.get ()->handler (); if (handler != 0) handler->handle_time_out (this->time_, this->act ()); } // ********************************************************************* ACE_POSIX_Wakeup_Completion::ACE_POSIX_Wakeup_Completion (const ACE_Handler::Proxy_Ptr &handler_proxy, const void *act, ACE_HANDLE event, int priority, int signal_number) : ACE_Asynch_Result_Impl (), ACE_POSIX_Asynch_Result (handler_proxy, act, event, 0, 0, priority, signal_number) { } ACE_POSIX_Wakeup_Completion::~ACE_POSIX_Wakeup_Completion (void) { } void ACE_POSIX_Wakeup_Completion::complete (size_t /* bytes_transferred */, int /* success */, const void * /* completion_key */, u_long /* error */) { ACE_Handler *handler = this->handler_proxy_.get ()->handler (); if (handler != 0) handler->handle_wakeup (); } ACE_END_VERSIONED_NAMESPACE_DECL #endif /* ACE_HAS_AIO_CALLS */
gpl-2.0
zarboz/Monarudo_GPU_M7
drivers/mmc/host/sdhci-pci.c
1548
35885
/* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface * * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * Thanks to the following companies for their support: * * - JMicron (hardware and technical support) */ #include <linux/delay.h> #include <linux/highmem.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/mmc/host.h> #include <linux/scatterlist.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/pm_runtime.h> #include <linux/mmc/sdhci-pci-data.h> #include "sdhci.h" /* * PCI device IDs */ #define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 #define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a /* * PCI registers */ #define PCI_SDHCI_IFPIO 0x00 #define PCI_SDHCI_IFDMA 0x01 #define PCI_SDHCI_IFVENDOR 0x02 #define PCI_SLOT_INFO 0x40 /* 8 bits */ #define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) #define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 #define MAX_SLOTS 8 struct sdhci_pci_chip; struct sdhci_pci_slot; struct sdhci_pci_fixes { unsigned int quirks; unsigned int quirks2; bool allow_runtime_pm; int (*probe) (struct sdhci_pci_chip *); int (*probe_slot) (struct sdhci_pci_slot *); void (*remove_slot) (struct sdhci_pci_slot *, int); int (*suspend) (struct sdhci_pci_chip *); int (*resume) (struct sdhci_pci_chip *); }; struct sdhci_pci_slot { struct sdhci_pci_chip *chip; struct sdhci_host *host; struct sdhci_pci_data *data; int pci_bar; int rst_n_gpio; int cd_gpio; int cd_irq; }; struct sdhci_pci_chip { struct pci_dev *pdev; unsigned int quirks; unsigned int quirks2; bool allow_runtime_pm; const struct sdhci_pci_fixes *fixes; int num_slots; /* Slots on controller */ struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */ }; /*****************************************************************************\ * * * Hardware specific quirk handling * * * \*****************************************************************************/ static int ricoh_probe(struct sdhci_pci_chip *chip) { if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG || chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY) chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET; return 0; } static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->caps = ((0x21 << SDHCI_TIMEOUT_CLK_SHIFT) & SDHCI_TIMEOUT_CLK_MASK) | ((0x21 << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_BASE_MASK) | SDHCI_TIMEOUT_CLK_UNIT | SDHCI_CAN_VDD_330 | SDHCI_CAN_DO_SDMA; return 0; } static int ricoh_mmc_resume(struct sdhci_pci_chip *chip) { /* Apply a delay to allow controller to settle */ /* Otherwise it becomes confused if card state changed during suspend */ msleep(500); return 0; } static const struct sdhci_pci_fixes sdhci_ricoh = { .probe = ricoh_probe, .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_FORCE_DMA | SDHCI_QUIRK_CLOCK_BEFORE_RESET, }; static const struct sdhci_pci_fixes sdhci_ricoh_mmc = { .probe_slot = ricoh_mmc_probe_slot, .resume = ricoh_mmc_resume, .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_CLOCK_BEFORE_RESET | SDHCI_QUIRK_NO_CARD_NO_RESET | SDHCI_QUIRK_MISSING_CAPS }; static const struct sdhci_pci_fixes sdhci_ene_712 = { .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_BROKEN_DMA, }; static const struct sdhci_pci_fixes sdhci_ene_714 = { .quirks = 
SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | SDHCI_QUIRK_BROKEN_DMA, }; static const struct sdhci_pci_fixes sdhci_cafe = { .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | SDHCI_QUIRK_NO_BUSY_IRQ | SDHCI_QUIRK_BROKEN_CARD_DETECTION | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, }; static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; return 0; } /* * ADMA operation is disabled for Moorestown platform due to * hardware bugs. */ static int mrst_hc_probe(struct sdhci_pci_chip *chip) { /* * slots number is fixed here for MRST as SDIO3/5 are never used and * have hardware bugs. */ chip->num_slots = 1; return 0; } static int pch_hc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; return 0; } #ifdef CONFIG_PM_RUNTIME static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id) { struct sdhci_pci_slot *slot = dev_id; struct sdhci_host *host = slot->host; mmc_detect_change(host->mmc, msecs_to_jiffies(200)); return IRQ_HANDLED; } static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot) { int err, irq, gpio = slot->cd_gpio; slot->cd_gpio = -EINVAL; slot->cd_irq = -EINVAL; if (!gpio_is_valid(gpio)) return; err = gpio_request(gpio, "sd_cd"); if (err < 0) goto out; err = gpio_direction_input(gpio); if (err < 0) goto out_free; irq = gpio_to_irq(gpio); if (irq < 0) goto out_free; err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "sd_cd", slot); if (err) goto out_free; slot->cd_gpio = gpio; slot->cd_irq = irq; return; out_free: gpio_free(gpio); out: dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n"); } static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) { if (slot->cd_irq >= 0) free_irq(slot->cd_irq, slot); if (gpio_is_valid(slot->cd_gpio)) gpio_free(slot->cd_gpio); } #else static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot) { } static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) { } #endif static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_HC_ERASE_SZ; return 0; } static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; return 0; } static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, .probe_slot = mrst_hc_probe_slot, }; static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, .probe = mrst_hc_probe, }; static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .allow_runtime_pm = true, }; static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, .allow_runtime_pm = true, .probe_slot = mfd_sdio_probe_slot, }; static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .allow_runtime_pm = true, .probe_slot = mfd_emmc_probe_slot, }; static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { .quirks = SDHCI_QUIRK_BROKEN_ADMA, .probe_slot = pch_hc_probe_slot, }; /* O2Micro extra registers */ #define O2_SD_LOCK_WP 0xD3 #define O2_SD_MULTI_VCC3V 0xEE #define O2_SD_CLKREQ 0xEC #define O2_SD_CAPS 0xE0 #define O2_SD_ADMA1 0xE2 #define O2_SD_ADMA2 0xE7 #define O2_SD_INF_MOD 0xF1 
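/*
 * o2_probe() below applies the O2Micro workaround sequence for the
 * 8220/8221/8320/8321 parts using the vendor registers defined above:
 * unlock the write-protected register area (O2_SD_LOCK_WP), select
 * VCC3V# via O2_SD_MULTI_VCC3V, disable CLK_REQ# support after media
 * detect, rewrite the capabilities register (unlock with bit 0, then
 * program 0x73 to advertise SDMA), disable the ADMA1/ADMA2 engines and
 * the infinite transfer mode, and finally re-lock the register area.
 */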
static int o2_probe(struct sdhci_pci_chip *chip) { int ret; u8 scratch; switch (chip->pdev->device) { case PCI_DEVICE_ID_O2_8220: case PCI_DEVICE_ID_O2_8221: case PCI_DEVICE_ID_O2_8320: case PCI_DEVICE_ID_O2_8321: /* This extra setup is required due to broken ADMA. */ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); if (ret) return ret; scratch &= 0x7f; pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); /* Set Multi 3 to VCC3V# */ pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08); /* Disable CLK_REQ# support after media DET */ ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch); if (ret) return ret; scratch |= 0x20; pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch); /* Choose capabilities, enable SDMA. We have to write 0x01 * to the capabilities register first to unlock it. */ ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch); if (ret) return ret; scratch |= 0x01; pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch); pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73); /* Disable ADMA1/2 */ pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39); pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08); /* Disable the infinite transfer mode */ ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch); if (ret) return ret; scratch |= 0x08; pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch); /* Lock WP */ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); if (ret) return ret; scratch |= 0x80; pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); } return 0; } static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) { u8 scratch; int ret; ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch); if (ret) return ret; /* * Turn PMOS on [bit 0], set over current detection to 2.4 V * [bit 1:2] and enable over current debouncing [bit 6]. */ if (on) scratch |= 0x47; else scratch &= ~0x47; ret = pci_write_config_byte(chip->pdev, 0xAE, scratch); if (ret) return ret; return 0; } static int jmicron_probe(struct sdhci_pci_chip *chip) { int ret; u16 mmcdev = 0; if (chip->pdev->revision == 0) { chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE | SDHCI_QUIRK_32BIT_ADMA_SIZE | SDHCI_QUIRK_RESET_AFTER_REQUEST | SDHCI_QUIRK_BROKEN_SMALL_PIO; } /* * JMicron chips can have two interfaces to the same hardware * in order to work around limitations in Microsoft's driver. * We need to make sure we only bind to one of them. * * This code assumes two things: * * 1. The PCI code adds subfunctions in order. * * 2. The MMC interface has a lower subfunction number * than the SD interface. */ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC; else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD) mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD; if (mmcdev) { struct pci_dev *sd_dev; sd_dev = NULL; while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON, mmcdev, sd_dev)) != NULL) { if ((PCI_SLOT(chip->pdev->devfn) == PCI_SLOT(sd_dev->devfn)) && (chip->pdev->bus == sd_dev->bus)) break; } if (sd_dev) { pci_dev_put(sd_dev); dev_info(&chip->pdev->dev, "Refusing to bind to " "secondary interface.\n"); return -ENODEV; } } /* * JMicron chips need a bit of a nudge to enable the power * output pins. 
 */
	ret = jmicron_pmos(chip, 1);
	if (ret) {
		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
		return ret;
	}

	/* Quirk for unstable RO detection on JM388 chips */
	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
		chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;

	return 0;
}

static void jmicron_enable_mmc(struct sdhci_host *host, int on)
{
	u8 scratch;

	scratch = readb(host->ioaddr + 0xC0);

	if (on)
		scratch |= 0x01;
	else
		scratch &= ~0x01;

	writeb(scratch, host->ioaddr + 0xC0);
}

static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
{
	if (slot->chip->pdev->revision == 0) {
		u16 version;

		version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
		version = (version & SDHCI_VENDOR_VER_MASK) >>
			SDHCI_VENDOR_VER_SHIFT;

		/*
		 * Older versions of the chip have lots of nasty glitches
		 * in the ADMA engine. It's best just to avoid it
		 * completely.
		 */
		if (version < 0xAC)
			slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
	}

	/* JM388 MMC doesn't support 1.8V while SD supports it */
	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
			MMC_VDD_29_30 | MMC_VDD_30_31 |
			MMC_VDD_165_195; /* allow 1.8V */
		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
	}

	/*
	 * The secondary interface requires a bit set to get the
	 * interrupts.
	 */
	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
		jmicron_enable_mmc(slot->host, 1);

	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;

	return 0;
}

static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
{
	if (dead)
		return;

	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
		jmicron_enable_mmc(slot->host, 0);
}

static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
	int i;

	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
		for (i = 0; i < chip->num_slots; i++)
			jmicron_enable_mmc(chip->slots[i]->host, 0);
	}

	return 0;
}

static int jmicron_resume(struct sdhci_pci_chip *chip)
{
	int ret, i;

	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
		for (i = 0; i < chip->num_slots; i++)
			jmicron_enable_mmc(chip->slots[i]->host, 1);
	}

	ret = jmicron_pmos(chip, 1);
	if (ret) {
		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
		return ret;
	}

	return 0;
}

static const struct sdhci_pci_fixes sdhci_o2 = {
	.probe = o2_probe,
};

static const struct sdhci_pci_fixes sdhci_jmicron = {
	.probe = jmicron_probe,

	.probe_slot = jmicron_probe_slot,
	.remove_slot = jmicron_remove_slot,

	.suspend = jmicron_suspend,
	.resume = jmicron_resume,
};

/* SysKonnect CardBus2SDIO extra registers */
#define SYSKT_CTRL		0x200
#define SYSKT_RDFIFO_STAT	0x204
#define SYSKT_WRFIFO_STAT	0x208
#define SYSKT_POWER_DATA	0x20c
#define   SYSKT_POWER_330	0xef
#define   SYSKT_POWER_300	0xf8
#define   SYSKT_POWER_184	0xcc
#define SYSKT_POWER_CMD		0x20d
#define   SYSKT_POWER_START	(1 << 7)
#define SYSKT_POWER_STATUS	0x20e
#define   SYSKT_POWER_STATUS_OK	(1 << 0)
#define SYSKT_BOARD_REV		0x210
#define SYSKT_CHIP_REV		0x211
#define SYSKT_CONF_DATA		0x212
#define   SYSKT_CONF_DATA_1V8	(1 << 2)
#define   SYSKT_CONF_DATA_2V5	(1 << 1)
#define   SYSKT_CONF_DATA_3V3	(1 << 0)

static int syskt_probe(struct sdhci_pci_chip *chip)
{
	if ((chip->pdev->class & 0x0000FF) ==
PCI_SDHCI_IFVENDOR) { chip->pdev->class &= ~0x0000FF; chip->pdev->class |= PCI_SDHCI_IFDMA; } return 0; } static int syskt_probe_slot(struct sdhci_pci_slot *slot) { int tm, ps; u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV); u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV); dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, " "board rev %d.%d, chip rev %d.%d\n", board_rev >> 4, board_rev & 0xf, chip_rev >> 4, chip_rev & 0xf); if (chip_rev >= 0x20) slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA; writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA); writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD); udelay(50); tm = 10; /* Wait max 1 ms */ do { ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS); if (ps & SYSKT_POWER_STATUS_OK) break; udelay(100); } while (--tm); if (!tm) { dev_err(&slot->chip->pdev->dev, "power regulator never stabilized"); writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD); return -ENODEV; } return 0; } static const struct sdhci_pci_fixes sdhci_syskt = { .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER, .probe = syskt_probe, .probe_slot = syskt_probe_slot, }; static int via_probe(struct sdhci_pci_chip *chip) { if (chip->pdev->revision == 0x10) chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER; return 0; } static const struct sdhci_pci_fixes sdhci_via = { .probe = via_probe, }; static const struct pci_device_id pci_ids[] __devinitdata = { { .vendor = PCI_VENDOR_ID_RICOH, .device = PCI_DEVICE_ID_RICOH_R5C822, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ricoh, }, { .vendor = PCI_VENDOR_ID_RICOH, .device = 0x843, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc, }, { .vendor = PCI_VENDOR_ID_RICOH, .device = 0xe822, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc, }, { .vendor = PCI_VENDOR_ID_RICOH, .device = 0xe823, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc, }, { .vendor = PCI_VENDOR_ID_ENE, .device = PCI_DEVICE_ID_ENE_CB712_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ene_712, }, { .vendor = PCI_VENDOR_ID_ENE, .device = PCI_DEVICE_ID_ENE_CB712_SD_2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ene_712, }, { .vendor = PCI_VENDOR_ID_ENE, .device = PCI_DEVICE_ID_ENE_CB714_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ene_714, }, { .vendor = PCI_VENDOR_ID_ENE, .device = PCI_DEVICE_ID_ENE_CB714_SD_2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ene_714, }, { .vendor = PCI_VENDOR_ID_MARVELL, .device = PCI_DEVICE_ID_MARVELL_88ALP01_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_cafe, }, { .vendor = PCI_VENDOR_ID_JMICRON, .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_jmicron, }, { .vendor = PCI_VENDOR_ID_JMICRON, .device = PCI_DEVICE_ID_JMICRON_JMB38X_MMC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_jmicron, }, { .vendor = PCI_VENDOR_ID_JMICRON, .device = PCI_DEVICE_ID_JMICRON_JMB388_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_jmicron, }, { .vendor = PCI_VENDOR_ID_JMICRON, .device = PCI_DEVICE_ID_JMICRON_JMB388_ESD, .subvendor = PCI_ANY_ID, .subdevice = 
PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_jmicron, }, { .vendor = PCI_VENDOR_ID_SYSKONNECT, .device = 0x8000, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_syskt, }, { .vendor = PCI_VENDOR_ID_VIA, .device = 0x95d0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_via, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MRST_SD0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc0, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MRST_SD1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MRST_SD2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MFD_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MFD_SDIO1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MFD_SDIO2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MFD_EMMC0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MFD_EMMC1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_PCH_SDIO0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_PCH_SDIO1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio, }, { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8120, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_o2, }, { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8220, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_o2, }, { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8221, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_o2, }, { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8320, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_o2, }, { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8321, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_o2, }, { /* Generic SD host controller */ PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) }, { /* end: all zeroes */ }, }; MODULE_DEVICE_TABLE(pci, pci_ids); /*****************************************************************************\ * * * SDHCI core callbacks * * * \*****************************************************************************/ static int sdhci_pci_enable_dma(struct sdhci_host *host) { struct sdhci_pci_slot *slot; struct pci_dev *pdev; int ret; slot = sdhci_priv(host); pdev = slot->chip->pdev; if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) && 
((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && (host->flags & SDHCI_USE_SDMA)) { dev_warn(&pdev->dev, "Will use DMA mode even though HW " "doesn't fully claim to support it.\n"); } ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) return ret; pci_set_master(pdev); return 0; } static int sdhci_pci_8bit_width(struct sdhci_host *host, int width) { u8 ctrl; ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); switch (width) { case MMC_BUS_WIDTH_8: ctrl |= SDHCI_CTRL_8BITBUS; ctrl &= ~SDHCI_CTRL_4BITBUS; break; case MMC_BUS_WIDTH_4: ctrl |= SDHCI_CTRL_4BITBUS; ctrl &= ~SDHCI_CTRL_8BITBUS; break; default: ctrl &= ~(SDHCI_CTRL_8BITBUS | SDHCI_CTRL_4BITBUS); break; } sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); return 0; } static void sdhci_pci_hw_reset(struct sdhci_host *host) { struct sdhci_pci_slot *slot = sdhci_priv(host); int rst_n_gpio = slot->rst_n_gpio; if (!gpio_is_valid(rst_n_gpio)) return; gpio_set_value_cansleep(rst_n_gpio, 0); /* For eMMC, minimum is 1us but give it 10us for good measure */ udelay(10); gpio_set_value_cansleep(rst_n_gpio, 1); /* For eMMC, minimum is 200us but give it 300us for good measure */ usleep_range(300, 1000); } static struct sdhci_ops sdhci_pci_ops = { .enable_dma = sdhci_pci_enable_dma, .platform_8bit_width = sdhci_pci_8bit_width, .hw_reset = sdhci_pci_hw_reset, }; /*****************************************************************************\ * * * Suspend/resume * * * \*****************************************************************************/ #ifdef CONFIG_PM static int sdhci_pci_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; mmc_pm_flag_t slot_pm_flags; mmc_pm_flag_t pm_flags = 0; int i, ret; chip = pci_get_drvdata(pdev); if (!chip) return 0; for (i = 0; i < chip->num_slots; i++) { slot = chip->slots[i]; if (!slot) continue; ret = sdhci_suspend_host(slot->host); if (ret) goto err_pci_suspend; slot_pm_flags = slot->host->mmc->pm_flags; if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ) sdhci_enable_irq_wakeups(slot->host); pm_flags |= slot_pm_flags; } if (chip->fixes && chip->fixes->suspend) { ret = chip->fixes->suspend(chip); if (ret) goto err_pci_suspend; } pci_save_state(pdev); if (pm_flags & MMC_PM_KEEP_POWER) { if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) { pci_pme_active(pdev, true); pci_enable_wake(pdev, PCI_D3hot, 1); } pci_set_power_state(pdev, PCI_D3hot); } else { pci_enable_wake(pdev, PCI_D3hot, 0); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); } return 0; err_pci_suspend: while (--i >= 0) sdhci_resume_host(chip->slots[i]->host); return ret; } static int sdhci_pci_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; int i, ret; chip = pci_get_drvdata(pdev); if (!chip) return 0; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) return ret; if (chip->fixes && chip->fixes->resume) { ret = chip->fixes->resume(chip); if (ret) return ret; } for (i = 0; i < chip->num_slots; i++) { slot = chip->slots[i]; if (!slot) continue; ret = sdhci_resume_host(slot->host); if (ret) return ret; } return 0; } #else /* CONFIG_PM */ #define sdhci_pci_suspend NULL #define sdhci_pci_resume NULL #endif /* CONFIG_PM */ #ifdef CONFIG_PM_RUNTIME static int sdhci_pci_runtime_suspend(struct device *dev) { struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; int i, ret; chip = pci_get_drvdata(pdev); if 
(!chip) return 0; for (i = 0; i < chip->num_slots; i++) { slot = chip->slots[i]; if (!slot) continue; ret = sdhci_runtime_suspend_host(slot->host); if (ret) goto err_pci_runtime_suspend; } if (chip->fixes && chip->fixes->suspend) { ret = chip->fixes->suspend(chip); if (ret) goto err_pci_runtime_suspend; } return 0; err_pci_runtime_suspend: while (--i >= 0) sdhci_runtime_resume_host(chip->slots[i]->host); return ret; } static int sdhci_pci_runtime_resume(struct device *dev) { struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; int i, ret; chip = pci_get_drvdata(pdev); if (!chip) return 0; if (chip->fixes && chip->fixes->resume) { ret = chip->fixes->resume(chip); if (ret) return ret; } for (i = 0; i < chip->num_slots; i++) { slot = chip->slots[i]; if (!slot) continue; ret = sdhci_runtime_resume_host(slot->host); if (ret) return ret; } return 0; } static int sdhci_pci_runtime_idle(struct device *dev) { return 0; } #else #define sdhci_pci_runtime_suspend NULL #define sdhci_pci_runtime_resume NULL #define sdhci_pci_runtime_idle NULL #endif static const struct dev_pm_ops sdhci_pci_pm_ops = { .suspend = sdhci_pci_suspend, .resume = sdhci_pci_resume, .runtime_suspend = sdhci_pci_runtime_suspend, .runtime_resume = sdhci_pci_runtime_resume, .runtime_idle = sdhci_pci_runtime_idle, }; /*****************************************************************************\ * * * Device probing/removal * * * \*****************************************************************************/ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar, int slotno) { struct sdhci_pci_slot *slot; struct sdhci_host *host; int ret, bar = first_bar + slotno; if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); return ERR_PTR(-ENODEV); } if (pci_resource_len(pdev, bar) != 0x100) { dev_err(&pdev->dev, "Invalid iomem size. You may " "experience problems.\n"); } if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n"); return ERR_PTR(-ENODEV); } if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) { dev_err(&pdev->dev, "Unknown interface. 
Aborting.\n"); return ERR_PTR(-ENODEV); } host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); if (IS_ERR(host)) { dev_err(&pdev->dev, "cannot allocate host\n"); return ERR_CAST(host); } slot = sdhci_priv(host); slot->chip = chip; slot->host = host; slot->pci_bar = bar; slot->rst_n_gpio = -EINVAL; slot->cd_gpio = -EINVAL; /* Retrieve platform data if there is any */ if (*sdhci_pci_get_data) slot->data = sdhci_pci_get_data(pdev, slotno); if (slot->data) { if (slot->data->setup) { ret = slot->data->setup(slot->data); if (ret) { dev_err(&pdev->dev, "platform setup failed\n"); goto free; } } slot->rst_n_gpio = slot->data->rst_n_gpio; slot->cd_gpio = slot->data->cd_gpio; } host->hw_name = "PCI"; host->ops = &sdhci_pci_ops; host->quirks = chip->quirks; host->quirks2 = chip->quirks2; host->irq = pdev->irq; ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc)); if (ret) { dev_err(&pdev->dev, "cannot request region\n"); goto cleanup; } host->ioaddr = pci_ioremap_bar(pdev, bar); if (!host->ioaddr) { dev_err(&pdev->dev, "failed to remap registers\n"); ret = -ENOMEM; goto release; } if (chip->fixes && chip->fixes->probe_slot) { ret = chip->fixes->probe_slot(slot); if (ret) goto unmap; } if (gpio_is_valid(slot->rst_n_gpio)) { if (!gpio_request(slot->rst_n_gpio, "eMMC_reset")) { gpio_direction_output(slot->rst_n_gpio, 1); slot->host->mmc->caps |= MMC_CAP_HW_RESET; } else { dev_warn(&pdev->dev, "failed to request rst_n_gpio\n"); slot->rst_n_gpio = -EINVAL; } } host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; ret = sdhci_add_host(host); if (ret) goto remove; sdhci_pci_add_own_cd(slot); return slot; remove: if (gpio_is_valid(slot->rst_n_gpio)) gpio_free(slot->rst_n_gpio); if (chip->fixes && chip->fixes->remove_slot) chip->fixes->remove_slot(slot, 0); unmap: iounmap(host->ioaddr); release: pci_release_region(pdev, bar); cleanup: if (slot->data && slot->data->cleanup) slot->data->cleanup(slot->data); free: sdhci_free_host(host); return ERR_PTR(ret); } static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot) { int dead; u32 scratch; sdhci_pci_remove_own_cd(slot); dead = 0; scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS); if (scratch == (u32)-1) dead = 1; sdhci_remove_host(slot->host, dead); if (gpio_is_valid(slot->rst_n_gpio)) gpio_free(slot->rst_n_gpio); if (slot->chip->fixes && slot->chip->fixes->remove_slot) slot->chip->fixes->remove_slot(slot, dead); if (slot->data && slot->data->cleanup) slot->data->cleanup(slot->data); pci_release_region(slot->chip->pdev, slot->pci_bar); sdhci_free_host(slot->host); } static void __devinit sdhci_pci_runtime_pm_allow(struct device *dev) { pm_runtime_put_noidle(dev); pm_runtime_allow(dev); pm_runtime_set_autosuspend_delay(dev, 50); pm_runtime_use_autosuspend(dev); pm_suspend_ignore_children(dev, 1); } static void __devexit sdhci_pci_runtime_pm_forbid(struct device *dev) { pm_runtime_forbid(dev); pm_runtime_get_noresume(dev); } static int __devinit sdhci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; u8 slots, first_bar; int ret, i; BUG_ON(pdev == NULL); BUG_ON(ent == NULL); dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); if (ret) return ret; slots = PCI_SLOT_INFO_SLOTS(slots) + 1; dev_dbg(&pdev->dev, "found %d slot(s)\n", slots); if (slots == 0) return -ENODEV; BUG_ON(slots > MAX_SLOTS); ret = 
pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar); if (ret) return ret; first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK; if (first_bar > 5) { dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n"); return -ENODEV; } ret = pci_enable_device(pdev); if (ret) return ret; chip = kzalloc(sizeof(struct sdhci_pci_chip), GFP_KERNEL); if (!chip) { ret = -ENOMEM; goto err; } chip->pdev = pdev; chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; if (chip->fixes) { chip->quirks = chip->fixes->quirks; chip->quirks2 = chip->fixes->quirks2; chip->allow_runtime_pm = chip->fixes->allow_runtime_pm; } chip->num_slots = slots; pci_set_drvdata(pdev, chip); if (chip->fixes && chip->fixes->probe) { ret = chip->fixes->probe(chip); if (ret) goto free; } slots = chip->num_slots; /* Quirk may have changed this */ for (i = 0; i < slots; i++) { slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i); if (IS_ERR(slot)) { for (i--; i >= 0; i--) sdhci_pci_remove_slot(chip->slots[i]); ret = PTR_ERR(slot); goto free; } chip->slots[i] = slot; } if (chip->allow_runtime_pm) sdhci_pci_runtime_pm_allow(&pdev->dev); return 0; free: pci_set_drvdata(pdev, NULL); kfree(chip); err: pci_disable_device(pdev); return ret; } static void __devexit sdhci_pci_remove(struct pci_dev *pdev) { int i; struct sdhci_pci_chip *chip; chip = pci_get_drvdata(pdev); if (chip) { if (chip->allow_runtime_pm) sdhci_pci_runtime_pm_forbid(&pdev->dev); for (i = 0; i < chip->num_slots; i++) sdhci_pci_remove_slot(chip->slots[i]); pci_set_drvdata(pdev, NULL); kfree(chip); } pci_disable_device(pdev); } static struct pci_driver sdhci_driver = { .name = "sdhci-pci", .id_table = pci_ids, .probe = sdhci_pci_probe, .remove = __devexit_p(sdhci_pci_remove), .driver = { .pm = &sdhci_pci_pm_ops }, }; /*****************************************************************************\ * * * Driver init/exit * * * \*****************************************************************************/ static int __init sdhci_drv_init(void) { return pci_register_driver(&sdhci_driver); } static void __exit sdhci_drv_exit(void) { pci_unregister_driver(&sdhci_driver); } module_init(sdhci_drv_init); module_exit(sdhci_drv_exit); MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); MODULE_LICENSE("GPL");
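/*
 * Example (illustrative sketch, not part of this driver): supporting a
 * new quirky controller follows the same pattern as sdhci_via above --
 * define a fixes table and point a pci_ids[] entry at it:
 *
 *	static const struct sdhci_pci_fixes sdhci_example = {
 *		.quirks = SDHCI_QUIRK_DELAY_AFTER_POWER,
 *	};
 *
 *	{
 *		.vendor		= 0x1234,
 *		.device		= 0x5678,
 *		.subvendor	= PCI_ANY_ID,
 *		.subdevice	= PCI_ANY_ID,
 *		.driver_data	= (kernel_ulong_t)&sdhci_example,
 *	},
 *
 * The vendor/device IDs above are hypothetical. sdhci_pci_probe() picks
 * the fixes table up via ent->driver_data and copies its quirks into the
 * chip before probing each slot.
 */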
gpl-2.0
yangjoo/kernel_samsung_smdk4412
drivers/scsi/lpfc/lpfc_attr.c
2316
152846
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/ctype.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/aer.h> #include <linux/gfp.h> #include <linux/kernel.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_version.h" #include "lpfc_compat.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #define LPFC_DEF_DEVLOSS_TMO 30 #define LPFC_MIN_DEVLOSS_TMO 1 #define LPFC_MAX_DEVLOSS_TMO 255 /** * lpfc_jedec_to_ascii - Hex to ascii converter according to JEDEC rules * @incr: integer to convert. * @hdw: ascii string holding converted integer plus a string terminator. * * Description: * JEDEC Joint Electron Device Engineering Council. * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii * character string. The string is then terminated with a NULL in byte 9. * Hex 0-9 becomes ascii '0' to '9'. * Hex a-f becomes ascii 'a' to 'f'. * * Notes: * Coded for 32 bit integers only. **/ static void lpfc_jedec_to_ascii(int incr, char hdw[]) { int i, j; for (i = 0; i < 8; i++) { j = (incr & 0xf); if (j <= 9) hdw[7 - i] = 0x30 + j; else hdw[7 - i] = 0x61 + j - 10; incr = (incr >> 4); } hdw[8] = 0; return; } /** * lpfc_drvr_version_show - Return the Emulex driver string with version number * @dev: class unused variable. * @attr: device attribute, not used. * @buf: on return contains the module description text. * * Returns: size of formatted string. **/ static ssize_t lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); } /** * lpfc_enable_fip_show - Return the fip mode of the HBA * @dev: class unused variable. * @attr: device attribute, not used. * @buf: on return contains the fip mode, "1" or "0". * * Returns: size of formatted string. 
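* * Example output (illustrative): "1" when the HBA_FIP_SUPPORT flag is set, "0" otherwise.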
**/ static ssize_t lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->hba_flag & HBA_FIP_SUPPORT) return snprintf(buf, PAGE_SIZE, "1\n"); else return snprintf(buf, PAGE_SIZE, "0\n"); } static ssize_t lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->cfg_enable_bg) if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n"); else return snprintf(buf, PAGE_SIZE, "BlockGuard Not Supported\n"); else return snprintf(buf, PAGE_SIZE, "BlockGuard Disabled\n"); } static ssize_t lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)phba->bg_guard_err_cnt); } static ssize_t lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)phba->bg_apptag_err_cnt); } static ssize_t lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)phba->bg_reftag_err_cnt); } /** * lpfc_info_show - Return some pci info about the host in ascii * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted text from lpfc_info(). * * Returns: size of formatted string. **/ static ssize_t lpfc_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host)); } /** * lpfc_serialnum_show - Return the hba serial number in ascii * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted text serial number. * * Returns: size of formatted string. **/ static ssize_t lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber); } /** * lpfc_temp_sensor_show - Return the temperature sensor level * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted support level. * * Description: * Returns a number indicating the temperature sensor level currently * supported, zero or one in ascii. * * Returns: size of formatted string. 
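* * Example output (illustrative): "1" on adapters with a temperature sensor, "0" otherwise.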
**/ static ssize_t lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support); } /** * lpfc_modeldesc_show - Return the model description of the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the scsi vpd model description. * * Returns: size of formatted string. **/ static ssize_t lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc); } /** * lpfc_modelname_show - Return the model name of the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the scsi vpd model name. * * Returns: size of formatted string. **/ static ssize_t lpfc_modelname_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName); } /** * lpfc_programtype_show - Return the program type of the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the scsi vpd program type. * * Returns: size of formatted string. **/ static ssize_t lpfc_programtype_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType); } /** * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the Menlo Maintenance sli flag. * * Returns: size of formatted string. **/ static ssize_t lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%d\n", (phba->sli.sli_flag & LPFC_MENLO_MAINT)); } /** * lpfc_vportnum_show - Return the port number in ascii of the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the port number in ascii. * * Returns: size of formatted string. **/ static ssize_t lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port); } /** * lpfc_fwrev_show - Return the firmware rev running in the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the firmware revision string. * * Returns: size of formatted string. 
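* * Example output (illustrative, values vary by adapter): "2.82A4, sli-3" - the firmware revision followed by the active SLI revision.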
**/ static ssize_t lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; char fwrev[32]; lpfc_decode_firmware_rev(phba, fwrev, 1); return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev); } /** * lpfc_hdw_show - Return the jedec information about the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the jedec hardware revision string. * * Returns: size of formatted string. **/ static ssize_t lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) { char hdw[9]; struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; lpfc_vpd_t *vp = &phba->vpd; lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); return snprintf(buf, PAGE_SIZE, "%s\n", hdw); } /** * lpfc_option_rom_version_show - Return the adapter ROM FCode version * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the ROM and FCode ascii strings. * * Returns: size of formatted string. **/ static ssize_t lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); } /** * lpfc_link_state_show - Return the link state of the port * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains text describing the state of the link. * * Notes: * The switch statement has no default so zero will be returned. * * Returns: size of formatted string. 
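* * Example output (illustrative): "Link Up - Ready" on the first line followed by the topology, e.g. " Fabric".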
**/ static ssize_t lpfc_link_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int len = 0; switch (phba->link_state) { case LPFC_LINK_UNKNOWN: case LPFC_WARM_START: case LPFC_INIT_START: case LPFC_INIT_MBX_CMDS: case LPFC_LINK_DOWN: case LPFC_HBA_ERROR: if (phba->hba_flag & LINK_DISABLED) len += snprintf(buf + len, PAGE_SIZE-len, "Link Down - User disabled\n"); else len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n"); break; case LPFC_LINK_UP: case LPFC_CLEAR_LA: case LPFC_HBA_READY: len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - "); switch (vport->port_state) { case LPFC_LOCAL_CFG_LINK: len += snprintf(buf + len, PAGE_SIZE-len, "Configuring Link\n"); break; case LPFC_FDISC: case LPFC_FLOGI: case LPFC_FABRIC_CFG_LINK: case LPFC_NS_REG: case LPFC_NS_QRY: case LPFC_BUILD_DISC_LIST: case LPFC_DISC_AUTH: len += snprintf(buf + len, PAGE_SIZE - len, "Discovery\n"); break; case LPFC_VPORT_READY: len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n"); break; case LPFC_VPORT_FAILED: len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n"); break; case LPFC_VPORT_UNKNOWN: len += snprintf(buf + len, PAGE_SIZE - len, "Unknown\n"); break; } if (phba->sli.sli_flag & LPFC_MENLO_MAINT) len += snprintf(buf + len, PAGE_SIZE-len, " Menlo Maint Mode\n"); else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { if (vport->fc_flag & FC_PUBLIC_LOOP) len += snprintf(buf + len, PAGE_SIZE-len, " Public Loop\n"); else len += snprintf(buf + len, PAGE_SIZE-len, " Private Loop\n"); } else { if (vport->fc_flag & FC_FABRIC) len += snprintf(buf + len, PAGE_SIZE-len, " Fabric\n"); else len += snprintf(buf + len, PAGE_SIZE-len, " Point-2-Point\n"); } } return len; } /** * lpfc_link_state_store - Transition the link_state on an HBA port * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing the string "up" or "down". * @count: not used. * * Returns: * -EINVAL if the buffer is not "up" or "down" * return from link state change function if non-zero * length of the buf on success **/ static ssize_t lpfc_link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int status = -EINVAL; if ((strncmp(buf, "up", sizeof("up") - 1) == 0) && (phba->link_state == LPFC_LINK_DOWN)) status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) && (phba->link_state >= LPFC_LINK_UP)) status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT); if (status == 0) return strlen(buf); else return status; } /** * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the sum of fc mapped and unmapped. * * Description: * Returns the ascii text number of the sum of the fc mapped and unmapped * vport counts. * * Returns: size of formatted string. 
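* * Example output (illustrative): "3" when two targets are mapped and one is unmapped.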
**/ static ssize_t lpfc_num_discovered_ports_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; return snprintf(buf, PAGE_SIZE, "%d\n", vport->fc_map_cnt + vport->fc_unmap_cnt); } /** * lpfc_issue_lip - Misnomer, name carried over from long ago * @shost: Scsi_Host pointer. * * Description: * Bring the link down gracefully then re-init the link. The firmware will * re-init the Fibre Channel interface as required. Does not issue a LIP. * * Returns: * -EPERM port offline or management commands are being blocked * -ENOMEM cannot allocate memory for the mailbox command * -EIO error sending the mailbox command * zero for success **/ static int lpfc_issue_lip(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *pmboxq; int mbxstatus = MBXERR_ERROR; if ((vport->fc_flag & FC_OFFLINE_MODE) || (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) return -EPERM; pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); if (!pmboxq) return -ENOMEM; memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; pmboxq->u.mb.mbxOwner = OWN_HOST; mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0 || pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) { memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed); mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "2859 SLI authentication is required " "for INIT_LINK but has not been done yet\n"); } lpfc_set_loopback_flag(phba); if (mbxstatus != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); if (mbxstatus == MBXERR_ERROR) return -EIO; return 0; } /** * lpfc_do_offline - Posts events to bring the port offline * @phba: lpfc_hba pointer. * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL. * * Notes: * Assumes any error from lpfc_do_offline() will be negative. * Can wait up to 5 seconds for the port ring buffers count * to reach zero, prints a warning if it is not zero and continues. * lpfc_workq_post_event() returns a non-zero return code if call fails. * * Returns: * -EIO error posting the event * zero for success **/ static int lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) { struct completion online_compl; struct lpfc_sli_ring *pring; struct lpfc_sli *psli; int status = 0; int cnt = 0; int i; int rc; init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_OFFLINE_PREP); if (rc == 0) return -ENOMEM; wait_for_completion(&online_compl); if (status != 0) return -EIO; psli = &phba->sli; /* Wait a little for things to settle down, but not * long enough for dev loss timeout to expire. 
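* (The loop below polls each ring's txcmplq_cnt every 10 ms and gives up after a cumulative 500 iterations, which is the 5 second bound noted in the kernel-doc above.)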
*/ for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; while (pring->txcmplq_cnt) { msleep(10); if (cnt++ > 500) { /* 5 secs */ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0466 Outstanding IO when " "bringing Adapter offline\n"); break; } } } init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, type); if (rc == 0) return -ENOMEM; wait_for_completion(&online_compl); if (status != 0) return -EIO; return 0; } /** * lpfc_selective_reset - Offlines then onlines the port * @phba: lpfc_hba pointer. * * Description: * If the port is configured to allow a reset then the hba is brought * offline then online. * * Notes: * Assumes any error from lpfc_do_offline() will be negative. * Do not make this function static. * * Returns: * lpfc_do_offline() return code if not zero * -EIO reset not configured or error posting the event * zero for success **/ int lpfc_selective_reset(struct lpfc_hba *phba) { struct completion online_compl; int status = 0; int rc; if (!phba->cfg_enable_hba_reset) return -EIO; status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (status != 0) return status; init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); if (rc == 0) return -ENOMEM; wait_for_completion(&online_compl); if (status != 0) return -EIO; return 0; } /** * lpfc_issue_reset - Selectively resets an adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing the string "selective". * @count: unused variable. * * Description: * If the buf contains the string "selective" then lpfc_selective_reset() * is called to perform the reset. * * Notes: * Assumes any error from lpfc_selective_reset() will be negative. * If lpfc_selective_reset() returns zero then the length of the buffer * is returned which indicates success * * Returns: * -EINVAL if the buffer does not contain the string "selective" * length of buf if lpfc_selective_reset() succeeds * return value of lpfc_selective_reset() if the call fails **/ static ssize_t lpfc_issue_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int status = -EINVAL; if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) status = phba->lpfc_selective_reset(phba); if (status == 0) return strlen(buf); else return status; } /** * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register readiness * @phba: lpfc_hba pointer. * * Description: * Wait on the SLI4 interface type-2 device's sliport status register for * readiness after performing a firmware reset. 
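* The wait is bounded by LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT polls of 10 ms each.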
* * Returns: * zero for success * -EIO if the port did not become ready in time **/ static int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba) { struct lpfc_register portstat_reg; int i; lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &portstat_reg.word0); /* wait for the SLI port firmware ready after firmware reset */ for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) { msleep(10); lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &portstat_reg.word0); if (!bf_get(lpfc_sliport_status_err, &portstat_reg)) continue; if (!bf_get(lpfc_sliport_status_rn, &portstat_reg)) continue; if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg)) continue; break; } if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT) return 0; else return -EIO; } /** * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register access * @phba: lpfc_hba pointer. * @opcode: LPFC_FW_DUMP, LPFC_FW_RESET or LPFC_DV_RESET. * * Description: * Request SLI4 interface type-2 device to perform a physical register set * access. * * Returns: * zero for success * negative error value on failure **/ static ssize_t lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) { struct completion online_compl; uint32_t reg_val; int status = 0; int rc; if (!phba->cfg_enable_hba_reset) return -EIO; if ((phba->sli_rev < LPFC_SLI_REV4) || (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_2)) return -EPERM; status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (status != 0) return status; /* wait for the device to be quiesced before firmware reset */ msleep(100); reg_val = readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); if (opcode == LPFC_FW_DUMP) reg_val |= LPFC_FW_DUMP_REQUEST; else if (opcode == LPFC_FW_RESET) reg_val |= LPFC_CTL_PDEV_CTL_FRST; else if (opcode == LPFC_DV_RESET) reg_val |= LPFC_CTL_PDEV_CTL_DRST; writel(reg_val, phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); /* flush */ readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); /* delay driver action following IF_TYPE_2 reset */ rc = lpfc_sli4_pdev_status_reg_wait(phba); if (rc) return -EIO; init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); if (rc == 0) return -ENOMEM; wait_for_completion(&online_compl); if (status != 0) return -EIO; return 0; } /** * lpfc_nport_evt_cnt_show - Return the number of nport events * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the ascii number of nport events. * * Returns: size of formatted string. **/ static ssize_t lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); } /** * lpfc_board_mode_show - Return the state of the board * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the state of the adapter. * * Returns: size of formatted string. 
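* * Example output (illustrative): one of "error", "warm start", "offline" or "online".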
**/ static ssize_t lpfc_board_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; char * state; if (phba->link_state == LPFC_HBA_ERROR) state = "error"; else if (phba->link_state == LPFC_WARM_START) state = "warm start"; else if (phba->link_state == LPFC_INIT_START) state = "offline"; else state = "online"; return snprintf(buf, PAGE_SIZE, "%s\n", state); } /** * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing one of the strings "online", "offline", "warm", "error", "dump", "fw_reset" or "dv_reset". * @count: unused variable. * * Returns: * -EACCES if enable hba reset not enabled * -EINVAL if the buffer does not contain a valid string (see above) * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails * buf length greater than zero indicates success **/ static ssize_t lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct completion online_compl; int status=0; int rc; if (!phba->cfg_enable_hba_reset) return -EACCES; lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3050 lpfc_board_mode set to %s\n", buf); init_completion(&online_compl); if(strncmp(buf, "online", sizeof("online") - 1) == 0) { rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); if (rc == 0) return -ENOMEM; wait_for_completion(&online_compl); } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) if (phba->sli_rev == LPFC_SLI_REV4) return -EINVAL; else status = lpfc_do_offline(phba, LPFC_EVT_WARM_START); else if (strncmp(buf, "error", sizeof("error") - 1) == 0) if (phba->sli_rev == LPFC_SLI_REV4) return -EINVAL; else status = lpfc_do_offline(phba, LPFC_EVT_KILL); else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0) status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP); else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0) status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET); else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0) status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET); else return -EINVAL; if (!status) return strlen(buf); else return -EIO; } /** * lpfc_get_hba_info - Return various bits of information about the adapter * @phba: pointer to the adapter structure. * @mxri: max xri count. * @axri: available xri count. * @mrpi: max rpi count. * @arpi: available rpi count. * @mvpi: max vpi count. * @avpi: available vpi count. * * Description: * If an integer pointer for a count is not null then the value for the * count is returned. * * Returns: * zero on error * one for success **/ static int lpfc_get_hba_info(struct lpfc_hba *phba, uint32_t *mxri, uint32_t *axri, uint32_t *mrpi, uint32_t *arpi, uint32_t *mvpi, uint32_t *avpi) { struct lpfc_mbx_read_config *rd_config; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; int rc = 0; uint32_t max_vpi; /* * prevent udev from issuing mailbox commands until the port is * configured. 
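* (The guards below return 0 while the link state is still below LPFC_LINK_DOWN, the mailbox pool is absent, or the SLI layer is inactive or blocked for management I/O.)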
*/ if (phba->link_state < LPFC_LINK_DOWN || !phba->mbox_mem_pool || (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) return 0; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) return 0; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) return 0; memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); pmb = &pmboxq->u.mb; pmb->mbxCommand = MBX_READ_CONFIG; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; if (phba->pport->fc_flag & FC_OFFLINE_MODE) rc = MBX_NOT_FINISHED; else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (rc != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); return 0; } if (phba->sli_rev == LPFC_SLI_REV4) { rd_config = &pmboxq->u.mqe.un.rd_config; if (mrpi) *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); if (arpi) *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) - phba->sli4_hba.max_cfg_param.rpi_used; if (mxri) *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); if (axri) *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) - phba->sli4_hba.max_cfg_param.xri_used; /* Account for differences with SLI-3. Get vpi count from * mailbox data and subtract one for max vpi value. */ max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ? (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0; if (mvpi) *mvpi = max_vpi; if (avpi) *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used; } else { if (mrpi) *mrpi = pmb->un.varRdConfig.max_rpi; if (arpi) *arpi = pmb->un.varRdConfig.avail_rpi; if (mxri) *mxri = pmb->un.varRdConfig.max_xri; if (axri) *axri = pmb->un.varRdConfig.avail_xri; if (mvpi) *mvpi = pmb->un.varRdConfig.max_vpi; if (avpi) *avpi = pmb->un.varRdConfig.avail_vpi; } mempool_free(pmboxq, phba->mbox_mem_pool); return 1; } /** * lpfc_max_rpi_show - Return maximum rpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the maximum rpi count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mrpi count. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt; if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL)) return snprintf(buf, PAGE_SIZE, "%d\n", cnt); return snprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_used_rpi_show - Return maximum rpi minus available rpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing the used rpi count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. 
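* * Example output (illustrative): "5" when the maximum rpi count is 64 and 59 are still available.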
**/ static ssize_t lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt, acnt; if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL)) return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); return snprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_max_xri_show - Return maximum xri * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the maximum xri count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mxri count. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_max_xri_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt; if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL)) return snprintf(buf, PAGE_SIZE, "%d\n", cnt); return snprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_used_xri_show - Return maximum xri minus the available xri * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the used xri count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mxri and axri counts. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_used_xri_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt, acnt; if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL)) return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); return snprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_max_vpi_show - Return maximum vpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the maximum vpi count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mvpi count. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt; if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL)) return snprintf(buf, PAGE_SIZE, "%d\n", cnt); return snprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_used_vpi_show - Return maximum vpi minus the available vpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. 
* @buf: on return contains the used vpi count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt, acnt; if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt)) return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); return snprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_npiv_info_show - Return text about NPIV support for the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: text that must be interpreted to determine if npiv is supported. * * Description: * Buffer will contain text indicating npiv is not supported on the port, * the port is an NPIV physical port, or it is an npiv virtual port with * the id of the vport. * * Returns: size of formatted string. **/ static ssize_t lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (!(phba->max_vpi)) return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n"); if (vport->port_type == LPFC_PHYSICAL_PORT) return snprintf(buf, PAGE_SIZE, "NPIV Physical\n"); return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); } /** * lpfc_poll_show - Return text about poll support for the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the cfg_poll in hex. * * Notes: * cfg_poll should be a lpfc_polling_flags type. * * Returns: size of formatted string. **/ static ssize_t lpfc_poll_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); } /** * lpfc_poll_store - Set the value of cfg_poll for the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: one or more lpfc_polling_flags values. * @count: not used. * * Notes: * buf contents converted to integer and checked for a valid value. 
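* Only bits 0-1 (ENABLE_FCP_RING_POLLING and DISABLE_FCP_RING_INT) may be set; any other value is rejected with -EINVAL.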
* * Returns: * -EINVAL if the buffer cannot be converted or is out of range * length of the buf on success **/ static ssize_t lpfc_poll_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t creg_val; uint32_t old_val; int val=0; if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &val) != 1) return -EINVAL; if ((val & 0x3) != val) return -EINVAL; if (phba->sli_rev == LPFC_SLI_REV4) val = 0; lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3051 lpfc_poll changed from %d to %d\n", phba->cfg_poll, val); spin_lock_irq(&phba->hbalock); old_val = phba->cfg_poll; if (val & ENABLE_FCP_RING_POLLING) { if ((val & DISABLE_FCP_RING_INT) && !(old_val & DISABLE_FCP_RING_INT)) { if (lpfc_readl(phba->HCregaddr, &creg_val)) { spin_unlock_irq(&phba->hbalock); return -EINVAL; } creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ lpfc_poll_start_timer(phba); } } else if (val != 0x0) { spin_unlock_irq(&phba->hbalock); return -EINVAL; } if (!(val & DISABLE_FCP_RING_INT) && (old_val & DISABLE_FCP_RING_INT)) { spin_unlock_irq(&phba->hbalock); del_timer(&phba->fcp_poll_timer); spin_lock_irq(&phba->hbalock); if (lpfc_readl(phba->HCregaddr, &creg_val)) { spin_unlock_irq(&phba->hbalock); return -EINVAL; } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } phba->cfg_poll = val; spin_unlock_irq(&phba->hbalock); return strlen(buf); } /** * lpfc_fips_level_show - Return the current FIPS level for the HBA * @dev: class unused variable. * @attr: device attribute, not used. * @buf: on return contains the FIPS level. * * Returns: size of formatted string. **/ static ssize_t lpfc_fips_level_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level); } /** * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA * @dev: class unused variable. * @attr: device attribute, not used. * @buf: on return contains the FIPS Spec revision. * * Returns: size of formatted string. **/ static ssize_t lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev); } /** * lpfc_dss_show - Return the current state of dss and the configured state * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted text. * * Returns: size of formatted string. **/ static ssize_t lpfc_dss_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n", (phba->cfg_enable_dss) ? "Enabled" : "Disabled", (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ? "" : "Not "); } /** * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions * @dev: class converted to a Scsi_host structure. 
* @attr: device attribute, not used. * @buf: on return contains the formatted support level. * * Description: * Returns the maximum number of virtual functions a physical function can * support; 0 will be returned if called on a virtual function. * * Returns: size of formatted string. **/ static ssize_t lpfc_sriov_hw_max_virtfn_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint16_t max_nr_virtfn; max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba); return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); } /** * lpfc_param_show - Return a cfg attribute value in decimal * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_show. * * lpfc_##attr##_show: Return the decimal value of an adapter's cfg_xxx field. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the attribute value in decimal. * * Returns: size of formatted string. **/ #define lpfc_param_show(attr) \ static ssize_t \ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ struct lpfc_hba *phba = vport->phba;\ uint val = 0;\ val = phba->cfg_##attr;\ return snprintf(buf, PAGE_SIZE, "%d\n",\ phba->cfg_##attr);\ } /** * lpfc_param_hex_show - Return a cfg attribute value in hex * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_show * * lpfc_##attr##_show: Return the hex value of an adapter's cfg_xxx field. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the attribute value in hexadecimal. * * Returns: size of formatted string. **/ #define lpfc_param_hex_show(attr) \ static ssize_t \ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ struct lpfc_hba *phba = vport->phba;\ uint val = 0;\ val = phba->cfg_##attr;\ return snprintf(buf, PAGE_SIZE, "%#x\n",\ phba->cfg_##attr);\ } /** * lpfc_param_init - Initializes a cfg attribute * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_init. The macro also * takes a default argument, a minimum and maximum argument. * * lpfc_##attr##_init: Initializes an attribute. * @phba: pointer to the adapter structure. * @val: integer attribute value. * * Validates the min and max values then sets the adapter config field * accordingly, or uses the default if out of range and prints an error message. * * Returns: * zero on success * -EINVAL if default used **/ #define lpfc_param_init(attr, default, minval, maxval) \ static int \ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ { \ if (val >= minval && val <= maxval) {\ phba->cfg_##attr = val;\ return 0;\ }\ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ "0449 lpfc_"#attr" attribute cannot be set to %d, "\ "allowed range is ["#minval", "#maxval"]\n", val); \ phba->cfg_##attr = default;\ return -EINVAL;\ } /** * lpfc_param_set - Set a cfg attribute value * * Description: * Macro that given an attr e.g. 
hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_set
 *
 * lpfc_##attr##_set: Sets an attribute value.
 * @phba: pointer to the adapter structure.
 * @val: integer attribute value.
 *
 * Description:
 * Validates the min and max values then sets the
 * adapter config field if in the valid range. Prints an error message
 * and does not set the parameter if invalid.
 *
 * Returns:
 * zero on success
 * -EINVAL if val is invalid
 **/
#define lpfc_param_set(attr, default, minval, maxval)	\
static int \
lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
{ \
	if (val >= minval && val <= maxval) {\
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
			"3052 lpfc_" #attr " changed from %d to %d\n", \
			phba->cfg_##attr, val); \
		phba->cfg_##attr = val;\
		return 0;\
	}\
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
		"0450 lpfc_"#attr" attribute cannot be set to %d, "\
		"allowed range is ["#minval", "#maxval"]\n", val); \
	return -EINVAL;\
}

/**
 * lpfc_param_store - Set a vport attribute value
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_store.
 *
 * lpfc_##attr##_store: Set an attribute value.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: contains the attribute value in ascii.
 * @count: not used.
 *
 * Description:
 * Convert the ascii text number to an integer, then
 * use the lpfc_##attr##_set function to set the value.
 *
 * Returns:
 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
 * length of buffer upon success.
 **/
#define lpfc_param_store(attr)	\
static ssize_t \
lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
		    const char *buf, size_t count) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
	struct lpfc_hba *phba = vport->phba;\
	uint val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &val) != 1)\
		return -EINVAL;\
	if (lpfc_##attr##_set(phba, val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

/**
 * lpfc_vport_param_show - Return decimal formatted cfg attribute value
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_show
 *
 * lpfc_##attr##_show: prints the attribute value in decimal.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the attribute value in decimal.
 *
 * Returns: length of formatted string.
 **/
#define lpfc_vport_param_show(attr)	\
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
		   char *buf) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
	uint val = 0;\
	val = vport->cfg_##attr;\
	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}

/**
 * lpfc_vport_param_hex_show - Return hex formatted attribute value
 *
 * Description:
 * Macro that given an attr e.g.
 * hba_queue_depth expands into a function with the name
 * lpfc_hba_queue_depth_show
 *
 * lpfc_##attr##_show: prints the attribute value in hexadecimal.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the attribute value in hexadecimal.
 *
 * Returns: length of formatted string.
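 *
 * For illustration only (a sketch of the expansion, not code from this
 * file): lpfc_vport_param_hex_show(log_verbose) would generate a
 * function roughly equivalent to
 *
 *   static ssize_t
 *   lpfc_log_verbose_show(struct device *dev,
 *                         struct device_attribute *attr, char *buf)
 *   {
 *           struct lpfc_vport *vport =
 *                   (struct lpfc_vport *) class_to_shost(dev)->hostdata;
 *           return snprintf(buf, PAGE_SIZE, "%#x\n",
 *                           vport->cfg_log_verbose);
 *   }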
**/ #define lpfc_vport_param_hex_show(attr) \ static ssize_t \ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ uint val = 0;\ val = vport->cfg_##attr;\ return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ } /** * lpfc_vport_param_init - Initialize a vport cfg attribute * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_init. The macro also * takes a default argument, a minimum and maximum argument. * * lpfc_##attr##_init: validates the min and max values then sets the * adapter config field accordingly, or uses the default if out of range * and prints an error message. * @phba: pointer the the adapter structure. * @val: integer attribute value. * * Returns: * zero on success * -EINVAL if default used **/ #define lpfc_vport_param_init(attr, default, minval, maxval) \ static int \ lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \ { \ if (val >= minval && val <= maxval) {\ vport->cfg_##attr = val;\ return 0;\ }\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ "0423 lpfc_"#attr" attribute cannot be set to %d, "\ "allowed range is ["#minval", "#maxval"]\n", val); \ vport->cfg_##attr = default;\ return -EINVAL;\ } /** * lpfc_vport_param_set - Set a vport cfg attribute * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_set * * lpfc_##attr##_set: validates the min and max values then sets the * adapter config field if in the valid range. prints error message * and does not set the parameter if invalid. * @phba: pointer the the adapter structure. * @val: integer attribute value. * * Returns: * zero on success * -EINVAL if val is invalid **/ #define lpfc_vport_param_set(attr, default, minval, maxval) \ static int \ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ { \ if (val >= minval && val <= maxval) {\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ "3053 lpfc_" #attr " changed from %d to %d\n", \ vport->cfg_##attr, val); \ vport->cfg_##attr = val;\ return 0;\ }\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ "0424 lpfc_"#attr" attribute cannot be set to %d, "\ "allowed range is ["#minval", "#maxval"]\n", val); \ return -EINVAL;\ } /** * lpfc_vport_param_store - Set a vport attribute * * Description: * Macro that given an attr e.g. hba_queue_depth * expands into a function with the name lpfc_hba_queue_depth_store * * lpfc_##attr##_store: convert the ascii text number to an integer, then * use the lpfc_##attr##_set function to set the value. * @cdev: class device that is converted into a Scsi_host. * @buf: contains the attribute value in decimal. * @count: not used. * * Returns: * -EINVAL if val is invalid or lpfc_##attr##_set() fails * length of buffer upon success. 
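 *
 * Illustrative flow (the sysfs path is an example, not taken from this
 * file): a write such as
 *
 *   echo 60 > /sys/class/scsi_host/host0/lpfc_nodev_tmo
 *
 * lands in the generated lpfc_nodev_tmo_store(), which parses the text
 * with sscanf "%i" and hands the value to lpfc_nodev_tmo_set().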
**/ #define lpfc_vport_param_store(attr) \ static ssize_t \ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ uint val = 0;\ if (!isdigit(buf[0]))\ return -EINVAL;\ if (sscanf(buf, "%i", &val) != 1)\ return -EINVAL;\ if (lpfc_##attr##_set(vport, val) == 0) \ return strlen(buf);\ else \ return -EINVAL;\ } #define LPFC_ATTR(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_init(name, defval, minval, maxval) #define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ lpfc_param_set(name, defval, minval, maxval)\ lpfc_param_store(name)\ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ lpfc_##name##_show, lpfc_##name##_store) #define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_hex_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_hex_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ lpfc_param_set(name, defval, minval, maxval)\ lpfc_param_store(name)\ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ lpfc_##name##_show, lpfc_##name##_store) #define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_init(name, defval, minval, maxval) #define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ lpfc_vport_param_set(name, defval, minval, maxval)\ lpfc_vport_param_store(name)\ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ lpfc_##name##_show, lpfc_##name##_store) #define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_hex_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, 
desc) \ static uint lpfc_##name = defval;\ module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_hex_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ lpfc_vport_param_set(name, defval, minval, maxval)\ lpfc_vport_param_store(name)\ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ lpfc_##name##_show, lpfc_##name##_store) static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL); static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL); static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL); static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL); static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL); static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show, lpfc_link_state_store); static DEVICE_ATTR(option_rom_version, S_IRUGO, lpfc_option_rom_version_show, NULL); static DEVICE_ATTR(num_discovered_ports, S_IRUGO, lpfc_num_discovered_ports_show, NULL); static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL); static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL); static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL); static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, lpfc_board_mode_show, lpfc_board_mode_store); static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL); static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL); static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL); static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL); static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL); static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL); static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL); static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL); static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL); static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO, lpfc_sriov_hw_max_virtfn_show, NULL); static char *lpfc_soft_wwn_key = "C99G71SL8032A"; /** * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing the string lpfc_soft_wwn_key. * @count: must be size of lpfc_soft_wwn_key. * * Returns: * -EINVAL if the buffer does not contain lpfc_soft_wwn_key * length of buf indicates success **/ static ssize_t lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; unsigned int cnt = count; /* * We're doing a simple sanity check for soft_wwpn setting. 
* We require that the user write a specific key to enable * the soft_wwpn attribute to be settable. Once the attribute * is written, the enable key resets. If further updates are * desired, the key must be written again to re-enable the * attribute. * * The "key" is not secret - it is a hardcoded string shown * here. The intent is to protect against the random user or * application that is just writing attributes. */ /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; if ((cnt != strlen(lpfc_soft_wwn_key)) || (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0)) return -EINVAL; phba->soft_wwn_enable = 1; return count; } static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL, lpfc_soft_wwn_enable_store); /** * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the wwpn in hexadecimal. * * Returns: size of formatted string. **/ static ssize_t lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)phba->cfg_soft_wwpn); } /** * lpfc_soft_wwpn_store - Set the ww port name of the adapter * @dev class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: contains the wwpn in hexadecimal. * @count: number of wwpn bytes in buf * * Returns: * -EACCES hba reset not enabled, adapter over temp * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid * -EIO error taking adapter offline or online * value of count on success **/ static ssize_t lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct completion online_compl; int stat1=0, stat2=0; unsigned int i, j, cnt=count; u8 wwpn[8]; int rc; if (!phba->cfg_enable_hba_reset) return -EACCES; spin_lock_irq(&phba->hbalock); if (phba->over_temp_state == HBA_OVER_TEMP) { spin_unlock_irq(&phba->hbalock); return -EACCES; } spin_unlock_irq(&phba->hbalock); /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) || ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) return -EINVAL; phba->soft_wwn_enable = 0; memset(wwpn, 0, sizeof(wwpn)); /* Validate and store the new name */ for (i=0, j=0; i < 16; i++) { int value; value = hex_to_bin(*buf++); if (value >= 0) j = (j << 4) | value; else return -EINVAL; if (i % 2) { wwpn[i/2] = j & 0xff; j = 0; } } phba->cfg_soft_wwpn = wwn_to_u64(wwpn); fc_host_port_name(shost) = phba->cfg_soft_wwpn; if (phba->cfg_soft_wwnn) fc_host_node_name(shost) = phba->cfg_soft_wwnn; dev_printk(KERN_NOTICE, &phba->pcidev->dev, "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (stat1) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0463 lpfc_soft_wwpn attribute set failed to " "reinit adapter - %d\n", stat1); init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE); if (rc == 0) return -ENOMEM; wait_for_completion(&online_compl); if (stat2) 
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0464 lpfc_soft_wwpn attribute set failed to " "reinit adapter - %d\n", stat2); return (stat1 || stat2) ? -EIO : count; } static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); /** * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the wwnn in hexadecimal. * * Returns: size of formatted string. **/ static ssize_t lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; return snprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)phba->cfg_soft_wwnn); } /** * lpfc_soft_wwnn_store - sets the ww node name of the adapter * @cdev: class device that is converted into a Scsi_host. * @buf: contains the ww node name in hexadecimal. * @count: number of wwnn bytes in buf. * * Returns: * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid * value of count on success **/ static ssize_t lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; unsigned int i, j, cnt=count; u8 wwnn[8]; /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) || ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) return -EINVAL; /* * Allow wwnn to be set many times, as long as the enable is set. * However, once the wwpn is set, everything locks. */ memset(wwnn, 0, sizeof(wwnn)); /* Validate and store the new name */ for (i=0, j=0; i < 16; i++) { int value; value = hex_to_bin(*buf++); if (value >= 0) j = (j << 4) | value; else return -EINVAL; if (i % 2) { wwnn[i/2] = j & 0xff; j = 0; } } phba->cfg_soft_wwnn = wwn_to_u64(wwnn); dev_printk(KERN_NOTICE, &phba->pcidev->dev, "lpfc%d: soft_wwnn set. 
Value will take effect upon " "setting of the soft_wwpn\n", phba->brd_no); return count; } static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\ lpfc_soft_wwnn_show, lpfc_soft_wwnn_store); static int lpfc_poll = 0; module_param(lpfc_poll, int, S_IRUGO); MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" " 0 - none," " 1 - poll with interrupts enabled" " 3 - poll and disable FCP ring interrupts"); static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, lpfc_poll_show, lpfc_poll_store); int lpfc_sli_mode = 0; module_param(lpfc_sli_mode, int, S_IRUGO); MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:" " 0 - auto (SLI-3 if supported)," " 2 - select SLI-2 even on SLI-3 capable HBAs," " 3 - select SLI-3"); int lpfc_enable_npiv = 1; module_param(lpfc_enable_npiv, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality"); lpfc_param_show(enable_npiv); lpfc_param_init(enable_npiv, 1, 0, 1); static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); int lpfc_enable_rrq; module_param(lpfc_enable_rrq, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); lpfc_param_show(enable_rrq); lpfc_param_init(enable_rrq, 0, 0, 1); static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL); /* # lpfc_suppress_link_up: Bring link up at initialization # 0x0 = bring link up (issue MBX_INIT_LINK) # 0x1 = do NOT bring link up at initialization(MBX_INIT_LINK) # 0x2 = never bring up link # Default value is 0. */ LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, LPFC_DELAY_INIT_LINK_INDEFINITELY, "Suppress Link Up at initialization"); /* # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS # 1 - (1024) # 2 - (2048) # 3 - (3072) # 4 - (4096) # 5 - (5120) */ static ssize_t lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max); } static DEVICE_ATTR(iocb_hw, S_IRUGO, lpfc_iocb_hw_show, NULL); static ssize_t lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; return snprintf(buf, PAGE_SIZE, "%d\n", phba->sli.ring[LPFC_ELS_RING].txq_max); } static DEVICE_ATTR(txq_hw, S_IRUGO, lpfc_txq_hw_show, NULL); static ssize_t lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; return snprintf(buf, PAGE_SIZE, "%d\n", phba->sli.ring[LPFC_ELS_RING].txcmplq_max); } static DEVICE_ATTR(txcmplq_hw, S_IRUGO, lpfc_txcmplq_hw_show, NULL); int lpfc_iocb_cnt = 2; module_param(lpfc_iocb_cnt, int, S_IRUGO); MODULE_PARM_DESC(lpfc_iocb_cnt, "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs"); lpfc_param_show(iocb_cnt); lpfc_param_init(iocb_cnt, 2, 1, 5); static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO, lpfc_iocb_cnt_show, NULL); /* # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear # until the timer expires. Value range is [0,255]. Default value is 30. 
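#
# For example (an illustrative invocation, not from this file):
#     modprobe lpfc lpfc_nodev_tmo=60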
*/ static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO; module_param(lpfc_nodev_tmo, int, 0); MODULE_PARM_DESC(lpfc_nodev_tmo, "Seconds driver will hold I/O waiting " "for a device to come back"); /** * lpfc_nodev_tmo_show - Return the hba dev loss timeout value * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the dev loss timeout in decimal. * * Returns: size of formatted string. **/ static ssize_t lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); } /** * lpfc_nodev_tmo_init - Set the hba nodev timeout value * @vport: lpfc vport structure pointer. * @val: contains the nodev timeout value. * * Description: * If the devloss tmo is already set then nodev tmo is set to devloss tmo, * a kernel error message is printed and zero is returned. * Else if val is in range then nodev tmo and devloss tmo are set to val. * Otherwise nodev tmo is set to the default value. * * Returns: * zero if already set or if val is in range * -EINVAL val out of range **/ static int lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) { if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) { vport->cfg_nodev_tmo = vport->cfg_devloss_tmo; if (val != LPFC_DEF_DEVLOSS_TMO) lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0407 Ignoring nodev_tmo module " "parameter because devloss_tmo is " "set.\n"); return 0; } if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { vport->cfg_nodev_tmo = val; vport->cfg_devloss_tmo = val; return 0; } lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0400 lpfc_nodev_tmo attribute cannot be set to" " %d, allowed range is [%d, %d]\n", val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; return -EINVAL; } /** * lpfc_update_rport_devloss_tmo - Update dev loss tmo value * @vport: lpfc vport structure pointer. * * Description: * Update all the ndlp's dev loss tmo with the vport devloss tmo value. **/ static void lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) { struct Scsi_Host *shost; struct lpfc_nodelist *ndlp; shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport) ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; spin_unlock_irq(shost->host_lock); } /** * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values * @vport: lpfc vport structure pointer. * @val: contains the tmo value. * * Description: * If the devloss tmo is already set or the vport dev loss tmo has changed * then a kernel error message is printed and zero is returned. * Else if val is in range then nodev tmo and devloss tmo are set to val. * Otherwise nodev tmo is set to the default value. 
 *
 * Returns:
 * zero if already set or if val is in range
 * -EINVAL val out of range
 **/
static int
lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
{
	if (vport->dev_loss_tmo_changed ||
	    (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
				 "0401 Ignoring change to nodev_tmo "
				 "because devloss_tmo is set.\n");
		return 0;
	}
	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
		vport->cfg_nodev_tmo = val;
		vport->cfg_devloss_tmo = val;
		/*
		 * For compat: set the fc_host dev loss so new rports
		 * will get the value.
		 */
		fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
		lpfc_update_rport_devloss_tmo(vport);
		return 0;
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
			 "0403 lpfc_nodev_tmo attribute cannot be set to "
			 "%d, allowed range is [%d, %d]\n",
			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
	return -EINVAL;
}

lpfc_vport_param_store(nodev_tmo)
static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
		   lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);

/*
# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
# disappear until the timer expires. Value range is [0,255]. Default
# value is 30.
*/
module_param(lpfc_devloss_tmo, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_devloss_tmo,
		 "Seconds driver will hold I/O waiting "
		 "for a device to come back");
lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
		      LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
lpfc_vport_param_show(devloss_tmo)

/**
 * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
 * @vport: lpfc vport structure pointer.
 * @val: contains the tmo value.
 *
 * Description:
 * If val is in a valid range then set the vport nodev tmo,
 * devloss tmo, also set the vport dev loss tmo changed flag.
 * Else a kernel error message is printed.
 *
 * Returns:
 * zero if val is in range
 * -EINVAL val out of range
 **/
static int
lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
{
	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
		vport->cfg_nodev_tmo = val;
		vport->cfg_devloss_tmo = val;
		vport->dev_loss_tmo_changed = 1;
		fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
		lpfc_update_rport_devloss_tmo(vport);
		return 0;
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
			 "0404 lpfc_devloss_tmo attribute cannot be set to "
			 "%d, allowed range is [%d, %d]\n",
			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
	return -EINVAL;
}

lpfc_vport_param_store(devloss_tmo)
static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
		   lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);

/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
# deluged with LOTS of information.
# You can set a bit mask to record specific types of verbose messages:
# See lpfc_logmsg.h for definitions.
*/
LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
		       "Verbose logging bit-mask");

/*
# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
# objects that have been registered with the nameserver after login.
*/
LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1,
		  "Deregister nameserver objects before LOGO");

/*
# lun_queue_depth:  This parameter is used to limit the number of outstanding
# commands per FCP LUN. Value range is [1,128]. Default value is 30.
*/
LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
		  "Max number of FCP commands we can queue to a specific LUN");

/*
# tgt_queue_depth:  This parameter is used to limit the number of outstanding
# commands per target port. Value range is [10,65535]. Default value is 65535.
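#
# For example (an illustrative invocation, not from this file):
#     modprobe lpfc lpfc_tgt_queue_depth=512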
*/ LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535, "Max number of FCP commands we can queue to a specific target port"); /* # hba_queue_depth: This parameter is used to limit the number of outstanding # commands per lpfc HBA. Value range is [32,8192]. If this parameter # value is greater than the maximum number of exchanges supported by the HBA, # then maximum number of exchanges supported by the HBA is used to determine # the hba_queue_depth. */ LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192, "Max number of FCP commands we can queue to a lpfc HBA"); /* # peer_port_login: This parameter allows/prevents logins # between peer ports hosted on the same physical port. # When this parameter is set 0 peer ports of same physical port # are not allowed to login to each other. # When this parameter is set 1 peer ports of same physical port # are allowed to login to each other. # Default value of this parameter is 0. */ LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1, "Allow peer ports on the same physical port to login to each " "other."); /* # restrict_login: This parameter allows/prevents logins # between Virtual Ports and remote initiators. # When this parameter is not set (0) Virtual Ports will accept PLOGIs from # other initiators and will attempt to PLOGI all remote ports. # When this parameter is set (1) Virtual Ports will reject PLOGIs from # remote ports and will not attempt to PLOGI to other initiators. # This parameter does not restrict to the physical port. # This parameter does not restrict logins to Fabric resident remote ports. # Default value of this parameter is 1. */ static int lpfc_restrict_login = 1; module_param(lpfc_restrict_login, int, S_IRUGO); MODULE_PARM_DESC(lpfc_restrict_login, "Restrict virtual ports login to remote initiators."); lpfc_vport_param_show(restrict_login); /** * lpfc_restrict_login_init - Set the vport restrict login flag * @vport: lpfc vport structure pointer. * @val: contains the restrict login value. * * Description: * If val is not in a valid range then log a kernel error message and set * the vport restrict login to one. * If the port type is physical clear the restrict login flag and return. * Else set the restrict login flag to val. * * Returns: * zero if val is in range * -EINVAL val out of range **/ static int lpfc_restrict_login_init(struct lpfc_vport *vport, int val) { if (val < 0 || val > 1) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0422 lpfc_restrict_login attribute cannot " "be set to %d, allowed range is [0, 1]\n", val); vport->cfg_restrict_login = 1; return -EINVAL; } if (vport->port_type == LPFC_PHYSICAL_PORT) { vport->cfg_restrict_login = 0; return 0; } vport->cfg_restrict_login = val; return 0; } /** * lpfc_restrict_login_set - Set the vport restrict login flag * @vport: lpfc vport structure pointer. * @val: contains the restrict login value. * * Description: * If val is not in a valid range then log a kernel error message and set * the vport restrict login to one. * If the port type is physical and the val is not zero log a kernel * error message, clear the restrict login flag and return zero. * Else set the restrict login flag to val. 
* * Returns: * zero if val is in range * -EINVAL val out of range **/ static int lpfc_restrict_login_set(struct lpfc_vport *vport, int val) { if (val < 0 || val > 1) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0425 lpfc_restrict_login attribute cannot " "be set to %d, allowed range is [0, 1]\n", val); vport->cfg_restrict_login = 1; return -EINVAL; } if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0468 lpfc_restrict_login must be 0 for " "Physical ports.\n"); vport->cfg_restrict_login = 0; return 0; } vport->cfg_restrict_login = val; return 0; } lpfc_vport_param_store(restrict_login); static DEVICE_ATTR(lpfc_restrict_login, S_IRUGO | S_IWUSR, lpfc_restrict_login_show, lpfc_restrict_login_store); /* # Some disk devices have a "select ID" or "select Target" capability. # From a protocol standpoint "select ID" usually means select the # Fibre channel "ALPA". In the FC-AL Profile there is an "informative # annex" which contains a table that maps a "select ID" (a number # between 0 and 7F) to an ALPA. By default, for compatibility with # older drivers, the lpfc driver scans this table from low ALPA to high # ALPA. # # Turning on the scan-down variable (on = 1, off = 0) will # cause the lpfc driver to use an inverted table, effectively # scanning ALPAs from high to low. Value range is [0,1]. Default value is 1. # # (Note: This "select ID" functionality is a LOOP ONLY characteristic # and will not work across a fabric. Also this parameter will take # effect only in the case when ALPA map is not available.) */ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1, "Start scanning for devices from highest ALPA to lowest"); /* # lpfc_topology: link topology for init link # 0x0 = attempt loop mode then point-to-point # 0x01 = internal loopback mode # 0x02 = attempt point-to-point mode only # 0x04 = attempt loop mode only # 0x06 = attempt point-to-point mode then loop # Set point-to-point mode if you want to run as an N_Port. # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. # Default value is 0. */ /** * lpfc_topology_set - Set the adapters topology field * @phba: lpfc_hba pointer. * @val: topology value. * * Description: * If val is in a valid range then set the adapter's topology field and * issue a lip; if the lip fails reset the topology to the old value. * * If the value is not in range log a kernel error message and return an error. 
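 *
 * For example (the sysfs path is illustrative), a "nolip " prefix, as
 * parsed by the store function below, changes the topology without
 * issuing a lip:
 *
 *   echo "nolip 2" > /sys/class/scsi_host/host0/lpfc_topology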
 *
 * Returns:
 * zero if val is in range and lip okay
 * non-zero return value from lpfc_issue_lip()
 * -EINVAL val out of range
 **/
static ssize_t
lpfc_topology_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int val = 0;
	int nolip = 0;
	const char *val_buf = buf;
	int err;
	uint32_t prev_val;

	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
		nolip = 1;
		val_buf = &buf[strlen("nolip ")];
	}

	if (!isdigit(val_buf[0]))
		return -EINVAL;
	if (sscanf(val_buf, "%i", &val) != 1)
		return -EINVAL;

	if (val >= 0 && val <= 6) {
		prev_val = phba->cfg_topology;
		phba->cfg_topology = val;
		if (nolip)
			return strlen(buf);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
			"3054 lpfc_topology changed from %d to %d\n",
			prev_val, val);
		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
		if (err) {
			phba->cfg_topology = prev_val;
			return -EINVAL;
		} else
			return strlen(buf);
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"%d:0467 lpfc_topology attribute cannot be set to %d, "
		"allowed range is [0, 6]\n",
		phba->brd_no, val);
	return -EINVAL;
}

static int lpfc_topology = 0;
module_param(lpfc_topology, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
lpfc_param_show(topology)
lpfc_param_init(topology, 0, 0, 6)
static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
		lpfc_topology_show, lpfc_topology_store);

/**
 * lpfc_static_vport_show - Read callback function for
 *   lpfc_static_vport sysfs file.
 * @dev: Pointer to class device object.
 * @attr: device attribute structure.
 * @buf: Data buffer.
 *
 * This function is the read call back function for
 * lpfc_static_vport sysfs file. The lpfc_static_vport
 * sysfs file reports the manageability of the vport.
 **/
static ssize_t
lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	if (vport->vport_flag & STATIC_VPORT)
		sprintf(buf, "1\n");
	else
		sprintf(buf, "0\n");

	return strlen(buf);
}

/*
 * Sysfs attribute reporting whether this vport is a static vport.
 */
static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
		   lpfc_static_vport_show, NULL);

/**
 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
 * @dev: Pointer to class device.
 * @attr: device attribute structure.
 * @buf: Data buffer.
 * @count: Size of the data buffer.
 *
 * This function gets called when a user writes to the lpfc_stat_data_ctrl
 * sysfs file. It parses the command written to the sysfs file
 * and takes the appropriate action. These commands are used for
 * controlling driver statistical data collection.
 * Following are the commands this function handles.
 *
 * setbucket <bucket_type> <base> <step>
 *                          = Set the latency buckets.
 * destroybucket            = destroy all the buckets.
 * start                    = start data collection
 * stop                     = stop data collection
 * reset                    = reset the collected data
 **/
static ssize_t
lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
#define LPFC_MAX_DATA_CTRL_LEN 1024
	static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
	unsigned long i;
	char *str_ptr, *token;
	struct lpfc_vport **vports;
	struct Scsi_Host *v_shost;
	char *bucket_type_str, *base_str, *step_str;
	unsigned long base, step, bucket_type;

	if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
		if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
			return -EINVAL;

		strcpy(bucket_data, buf);
		str_ptr = &bucket_data[0];
		/* Ignore this token - this is command token */
		token = strsep(&str_ptr, "\t ");
		if (!token)
			return -EINVAL;

		bucket_type_str = strsep(&str_ptr, "\t ");
		if (!bucket_type_str)
			return -EINVAL;

		if (!strncmp(bucket_type_str, "linear", strlen("linear")))
			bucket_type = LPFC_LINEAR_BUCKET;
		else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
			bucket_type = LPFC_POWER2_BUCKET;
		else
			return -EINVAL;

		base_str = strsep(&str_ptr, "\t ");
		if (!base_str)
			return -EINVAL;
		base = simple_strtoul(base_str, NULL, 0);

		step_str = strsep(&str_ptr, "\t ");
		if (!step_str)
			return -EINVAL;
		step = simple_strtoul(step_str, NULL, 0);
		if (!step)
			return -EINVAL;

		/* Block the data collection for every vport */
		vports = lpfc_create_vport_work_array(phba);
		if (vports == NULL)
			return -ENOMEM;

		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			v_shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(v_shost->host_lock);
			/* Block and reset data collection */
			vports[i]->stat_data_blocked = 1;
			if (vports[i]->stat_data_enabled)
				lpfc_vport_reset_stat_data(vports[i]);
			spin_unlock_irq(v_shost->host_lock);
		}

		/* Set the bucket attributes */
		phba->bucket_type = bucket_type;
		phba->bucket_base = base;
		phba->bucket_step = step;

		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			v_shost = lpfc_shost_from_vport(vports[i]);

			/* Unblock data collection */
			spin_lock_irq(v_shost->host_lock);
			vports[i]->stat_data_blocked = 0;
			spin_unlock_irq(v_shost->host_lock);
		}
		lpfc_destroy_vport_work_array(phba, vports);
		return strlen(buf);
	}

	if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
		vports = lpfc_create_vport_work_array(phba);
		if (vports == NULL)
			return -ENOMEM;

		/* Free and disable the buckets of every vport, under each
		 * vport's own host lock.
		 */
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			v_shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(v_shost->host_lock);
			vports[i]->stat_data_blocked = 1;
			lpfc_free_bucket(vports[i]);
			vports[i]->stat_data_enabled = 0;
			vports[i]->stat_data_blocked = 0;
			spin_unlock_irq(v_shost->host_lock);
		}
		lpfc_destroy_vport_work_array(phba, vports);
		phba->bucket_type = LPFC_NO_BUCKET;
		phba->bucket_base = 0;
		phba->bucket_step = 0;
		return strlen(buf);
	}

	if (!strncmp(buf, "start", strlen("start"))) {
		/* If no buckets configured return error */
		if (phba->bucket_type == LPFC_NO_BUCKET)
			return -EINVAL;
		spin_lock_irq(shost->host_lock);
		if (vport->stat_data_enabled) {
			spin_unlock_irq(shost->host_lock);
			return strlen(buf);
		}
		lpfc_alloc_bucket(vport);
		vport->stat_data_enabled = 1;
		spin_unlock_irq(shost->host_lock);
		return strlen(buf);
	}

	if (!strncmp(buf, "stop", strlen("stop"))) {
		spin_lock_irq(shost->host_lock);
		if (vport->stat_data_enabled == 0) {
			spin_unlock_irq(shost->host_lock);
			return strlen(buf);
		}
		lpfc_free_bucket(vport);
		vport->stat_data_enabled = 0;
		spin_unlock_irq(shost->host_lock);
		return strlen(buf);
	}

	if (!strncmp(buf, "reset", strlen("reset"))) {
		if ((phba->bucket_type == LPFC_NO_BUCKET)
			|| !vport->stat_data_enabled)
			return strlen(buf);
		spin_lock_irq(shost->host_lock);
		vport->stat_data_blocked = 1;
		lpfc_vport_reset_stat_data(vport);
		vport->stat_data_blocked = 0;
		spin_unlock_irq(shost->host_lock);
		return strlen(buf);
	}
	return -EINVAL;
}

/**
 * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
 * @dev: Pointer to class device object.
 * @attr: device attribute structure.
 * @buf: Data buffer.
 *
 * This function is the read call back function for
 * lpfc_stat_data_ctrl sysfs file. This function reports the
 * current statistical data collection state.
 **/
static ssize_t
lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int index = 0;
	int i;
	char *bucket_type;
	unsigned long bucket_value;

	switch (phba->bucket_type) {
	case LPFC_LINEAR_BUCKET:
		bucket_type = "linear";
		break;
	case LPFC_POWER2_BUCKET:
		bucket_type = "power2";
		break;
	default:
		bucket_type = "No Bucket";
		break;
	}

	sprintf(&buf[index], "Statistical Data enabled :%d, "
		"blocked :%d, Bucket type :%s, Bucket base :%d,"
		" Bucket step :%d\nLatency Ranges :",
		vport->stat_data_enabled, vport->stat_data_blocked,
		bucket_type, phba->bucket_base, phba->bucket_step);
	index = strlen(buf);
	if (phba->bucket_type != LPFC_NO_BUCKET) {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
			if (phba->bucket_type == LPFC_LINEAR_BUCKET)
				bucket_value = phba->bucket_base +
					phba->bucket_step * i;
			else
				bucket_value = phba->bucket_base +
					(1 << i) * phba->bucket_step;

			if (index + 10 > PAGE_SIZE)
				break;
			sprintf(&buf[index], "%08lu ", bucket_value);
			index = strlen(buf);
		}
	}
	sprintf(&buf[index], "\n");
	return strlen(buf);
}

/*
 * Sysfs attribute to control the statistical data collection.
 */
static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
		   lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store);

/*
 * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
 */

/*
 * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
 * for each target.
 */
#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
#define MAX_STAT_DATA_SIZE_PER_TARGET \
	STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)

/**
 * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
 * @filp: sysfs file
 * @kobj: Pointer to the kernel object
 * @bin_attr: Attribute object
 * @buff: Buffer pointer
 * @off: File offset
 * @count: Buffer size
 *
 * This function is the read call back function for lpfc_drvr_stat_data
 * sysfs file. This function exports the statistical data to user
 * applications.
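 *
 * Output format (as produced by the sprintf calls below): one line per
 * discovered target, i.e. a 16-hex-digit wwpn, a colon, then one
 * zero-padded cmd_count per latency bucket, each followed by a comma:
 *
 *   <wwpn>:<count>,<count>,...,<count>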
**/ static ssize_t sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int i = 0, index = 0; unsigned long nport_index; struct lpfc_nodelist *ndlp = NULL; nport_index = (unsigned long)off / MAX_STAT_DATA_SIZE_PER_TARGET; if (!vport->stat_data_enabled || vport->stat_data_blocked || (phba->bucket_type == LPFC_NO_BUCKET)) return 0; spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data) continue; if (nport_index > 0) { nport_index--; continue; } if ((index + MAX_STAT_DATA_SIZE_PER_TARGET) > count) break; if (!ndlp->lat_data) continue; /* Print the WWN */ sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:", ndlp->nlp_portname.u.wwn[0], ndlp->nlp_portname.u.wwn[1], ndlp->nlp_portname.u.wwn[2], ndlp->nlp_portname.u.wwn[3], ndlp->nlp_portname.u.wwn[4], ndlp->nlp_portname.u.wwn[5], ndlp->nlp_portname.u.wwn[6], ndlp->nlp_portname.u.wwn[7]); index = strlen(buf); for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) { sprintf(&buf[index], "%010u,", ndlp->lat_data[i].cmd_count); index = strlen(buf); } sprintf(&buf[index], "\n"); index = strlen(buf); } spin_unlock_irq(shost->host_lock); return index; } static struct bin_attribute sysfs_drvr_stat_data_attr = { .attr = { .name = "lpfc_drvr_stat_data", .mode = S_IRUSR, }, .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET, .read = sysfs_drvr_stat_data_read, .write = NULL, }; /* # lpfc_link_speed: Link speed selection for initializing the Fibre Channel # connection. # Value range is [0,16]. Default value is 0. */ /** * lpfc_link_speed_set - Set the adapters link speed * @phba: lpfc_hba pointer. * @val: link speed value. * * Description: * If val is in a valid range then set the adapter's link speed field and * issue a lip; if the lip fails reset the link speed to the old value. * * Notes: * If the value is not in range log a kernel error message and return an error. * * Returns: * zero if val is in range and lip okay. * non-zero return value from lpfc_issue_lip() * -EINVAL val out of range **/ static ssize_t lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int val = LPFC_USER_LINK_SPEED_AUTO; int nolip = 0; const char *val_buf = buf; int err; uint32_t prev_val; if (!strncmp(buf, "nolip ", strlen("nolip "))) { nolip = 1; val_buf = &buf[strlen("nolip ")]; } if (!isdigit(val_buf[0])) return -EINVAL; if (sscanf(val_buf, "%i", &val) != 1) return -EINVAL; lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3055 lpfc_link_speed changed from %d to %d %s\n", phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)"); if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) || ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2879 lpfc_link_speed attribute cannot be set " "to %d. 
Speed is not supported by this port.\n", val); return -EINVAL; } if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) && (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) { prev_val = phba->cfg_link_speed; phba->cfg_link_speed = val; if (nolip) return strlen(buf); err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); if (err) { phba->cfg_link_speed = prev_val; return -EINVAL; } else return strlen(buf); } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0469 lpfc_link_speed attribute cannot be set to %d, " "allowed values are ["LPFC_LINK_SPEED_STRING"]\n", val); return -EINVAL; } static int lpfc_link_speed = 0; module_param(lpfc_link_speed, int, S_IRUGO); MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); lpfc_param_show(link_speed) /** * lpfc_link_speed_init - Set the adapters link speed * @phba: lpfc_hba pointer. * @val: link speed value. * * Description: * If val is in a valid range then set the adapter's link speed field. * * Notes: * If the value is not in range log a kernel error message, clear the link * speed and return an error. * * Returns: * zero if val saved. * -EINVAL val out of range **/ static int lpfc_link_speed_init(struct lpfc_hba *phba, int val) { if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) && (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) { phba->cfg_link_speed = val; return 0; } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0405 lpfc_link_speed attribute cannot " "be set to %d, allowed values are " "["LPFC_LINK_SPEED_STRING"]\n", val); phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; return -EINVAL; } static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR, lpfc_link_speed_show, lpfc_link_speed_store); /* # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER) # 0 = aer disabled or not supported # 1 = aer supported and enabled (default) # Value range is [0,1]. Default value is 1. */ /** * lpfc_aer_support_store - Set the adapter for aer support * * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing enable or disable aer flag. * @count: unused variable. * * Description: * If the val is 1 and currently the device's AER capability was not * enabled, invoke the kernel's enable AER helper routine, trying to * enable the device's AER capability. If the helper routine enabling * AER returns success, update the device's cfg_aer_support flag to * indicate AER is supported by the device; otherwise, if the device * AER capability is already enabled to support AER, then do nothing. * * If the val is 0 and currently the device's AER support was enabled, * invoke the kernel's disable AER helper routine. After that, update * the device's cfg_aer_support flag to indicate AER is not supported * by the device; otherwise, if the device AER capability is already * disabled from supporting AER, then do nothing. * * Returns: * length of the buf on success if val is in range the intended mode * is supported. * -EINVAL if val out of range or intended mode is not supported. 
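 *
 * For example (the sysfs path is illustrative):
 *
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_aer_support
 *
 * attempts to enable AER via pci_enable_pcie_error_reporting().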
**/ static ssize_t lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; int val = 0, rc = -EINVAL; if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &val) != 1) return -EINVAL; switch (val) { case 0: if (phba->hba_flag & HBA_AER_ENABLED) { rc = pci_disable_pcie_error_reporting(phba->pcidev); if (!rc) { spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~HBA_AER_ENABLED; spin_unlock_irq(&phba->hbalock); phba->cfg_aer_support = 0; rc = strlen(buf); } else rc = -EPERM; } else { phba->cfg_aer_support = 0; rc = strlen(buf); } break; case 1: if (!(phba->hba_flag & HBA_AER_ENABLED)) { rc = pci_enable_pcie_error_reporting(phba->pcidev); if (!rc) { spin_lock_irq(&phba->hbalock); phba->hba_flag |= HBA_AER_ENABLED; spin_unlock_irq(&phba->hbalock); phba->cfg_aer_support = 1; rc = strlen(buf); } else rc = -EPERM; } else { phba->cfg_aer_support = 1; rc = strlen(buf); } break; default: rc = -EINVAL; break; } return rc; } static int lpfc_aer_support = 1; module_param(lpfc_aer_support, int, S_IRUGO); MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support"); lpfc_param_show(aer_support) /** * lpfc_aer_support_init - Set the initial adapters aer support flag * @phba: lpfc_hba pointer. * @val: enable aer or disable aer flag. * * Description: * If val is in a valid range [0,1], then set the adapter's initial * cfg_aer_support field. It will be up to the driver's probe_one * routine to determine whether the device's AER support can be set * or not. * * Notes: * If the value is not in range log a kernel error message, and * choose the default value of setting AER support and return. * * Returns: * zero if val saved. * -EINVAL val out of range **/ static int lpfc_aer_support_init(struct lpfc_hba *phba, int val) { if (val == 0 || val == 1) { phba->cfg_aer_support = val; return 0; } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2712 lpfc_aer_support attribute value %d out " "of range, allowed values are 0|1, setting it " "to default value of 1\n", val); /* By default, try to enable AER on a device */ phba->cfg_aer_support = 1; return -EINVAL; } static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR, lpfc_aer_support_show, lpfc_aer_support_store); /** * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing flag 1 for aer cleanup state. * @count: unused variable. * * Description: * If the @buf contains 1 and the device currently has the AER support * enabled, then invokes the kernel AER helper routine * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable * error status register. * * Notes: * * Returns: * -EINVAL if the buf does not contain the 1 or the device is not currently * enabled with the AER support. 
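 *
 * For example (the sysfs path is illustrative):
 *
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_aer_state_cleanup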
 **/
static ssize_t
lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int val, rc = -1;

	if (!isdigit(buf[0]))
		return -EINVAL;
	if (sscanf(buf, "%i", &val) != 1)
		return -EINVAL;
	if (val != 1)
		return -EINVAL;

	if (phba->hba_flag & HBA_AER_ENABLED)
		rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);

	if (rc == 0)
		return strlen(buf);
	else
		return -EPERM;
}

static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
		   lpfc_aer_cleanup_state);

/**
 * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
 *
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: containing the string of the number of vfs to be enabled.
 * @count: unused variable.
 *
 * Description:
 * When this api is called through user sysfs, the driver shall
 * try to enable or disable SR-IOV virtual functions according to the
 * following:
 *
 * If zero virtual function has been enabled to the physical function,
 * the driver shall invoke the pci enable virtual function api trying
 * to enable the virtual functions. If the nr_vfn provided is greater
 * than the maximum supported, the maximum virtual function number will
 * be used for invoking the api; otherwise, the nr_vfn provided shall
 * be used for invoking the api. If the api call returned success, the
 * actual number of virtual functions enabled will be set to the driver
 * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
 * cfg_sriov_nr_virtfn remains zero.
 *
 * If non-zero virtual functions have already been enabled to the
 * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
 * -EEXIST will be returned and the driver does nothing;
 *
 * If the nr_vfn provided is zero and non-zero virtual functions have
 * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
 * disabling virtual function api shall be invoked to disable all the
 * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
 * zero. Otherwise, if zero virtual function has been enabled, do
 * nothing.
 *
 * Returns:
 * length of the buf on success if val is in range and the intended
 * mode is supported.
 * -EINVAL if val out of range or intended mode is not supported.
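 *
 * For example (the sysfs path is illustrative), to request four
 * virtual functions:
 *
 *   echo 4 > /sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn
 *
 * while "echo 0" disables any enabled virtual functions through
 * pci_disable_sriov().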
**/ static ssize_t lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; struct pci_dev *pdev = phba->pcidev; int val = 0, rc = -EINVAL; /* Sanity check on user data */ if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &val) != 1) return -EINVAL; if (val < 0) return -EINVAL; /* Request disabling virtual functions */ if (val == 0) { if (phba->cfg_sriov_nr_virtfn > 0) { pci_disable_sriov(pdev); phba->cfg_sriov_nr_virtfn = 0; } return strlen(buf); } /* Request enabling virtual functions */ if (phba->cfg_sriov_nr_virtfn > 0) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3018 There are %d virtual functions " "enabled on physical function.\n", phba->cfg_sriov_nr_virtfn); return -EEXIST; } if (val <= LPFC_MAX_VFN_PER_PFN) phba->cfg_sriov_nr_virtfn = val; else { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3019 Enabling %d virtual functions is not " "allowed.\n", val); return -EINVAL; } rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn); if (rc) { phba->cfg_sriov_nr_virtfn = 0; rc = -EPERM; } else rc = strlen(buf); return rc; } static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN; module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn"); lpfc_param_show(sriov_nr_virtfn) /** * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable * @phba: lpfc_hba pointer. * @val: link speed value. * * Description: * If val is in a valid range [0,255], then set the adapter's initial * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum * number shall be used instead. It will be up to the driver's probe_one * routine to determine whether the device's SR-IOV is supported or not. * * Returns: * zero if val saved. * -EINVAL val out of range **/ static int lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val) { if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) { phba->cfg_sriov_nr_virtfn = val; return 0; } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3017 Enabling %d virtual functions is not " "allowed.\n", val); return -EINVAL; } static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR, lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store); /* # lpfc_fcp_class: Determines FC class to use for the FCP protocol. # Value range is [2,3]. Default value is 3. */ LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3, "Select Fibre Channel class of service for FCP sequences"); /* # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range # is [0,1]. Default value is 0. */ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1, "Use ADISC on rediscovery to authenticate FCP devices"); /* # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue # depth. Default value is 0. When the value of this parameter is zero the # SCSI command completion time is not used for controlling I/O queue depth. When # the parameter is set to a non-zero value, the I/O queue depth is controlled # to limit the I/O completion time to the parameter value. # The value is set in milliseconds. 
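#
# For example (an illustrative invocation, not from this file):
#     modprobe lpfc lpfc_max_scsicmpl_time=100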
*/ static int lpfc_max_scsicmpl_time; module_param(lpfc_max_scsicmpl_time, int, S_IRUGO); MODULE_PARM_DESC(lpfc_max_scsicmpl_time, "Use command completion time to control queue depth"); lpfc_vport_param_show(max_scsicmpl_time); lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000); static int lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp, *next_ndlp; if (val == vport->cfg_max_scsicmpl_time) return 0; if ((val < 0) || (val > 60000)) return -EINVAL; vport->cfg_max_scsicmpl_time = val; spin_lock_irq(shost->host_lock); list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; } spin_unlock_irq(shost->host_lock); return 0; } lpfc_vport_param_store(max_scsicmpl_time); static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR, lpfc_max_scsicmpl_time_show, lpfc_max_scsicmpl_time_store); /* # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value # range is [0,1]. Default value is 0. */ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); /* # lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing # cr_delay (msec) or cr_count outstanding commands. cr_delay can take # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay # is 0. Default value of cr_count is 1. The cr_count feature is disabled if # cr_delay is set to 0. */ LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an " "interrupt response is generated"); LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an " "interrupt response is generated"); /* # lpfc_multi_ring_support: Determines how many rings to spread available # cmd/rsp IOCB entries across. # Value range is [1,2]. Default value is 1. */ LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary " "SLI rings to spread IOCB entries across"); /* # lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this # identifies what rctl value to configure the additional ring for. # Value range is [1,0xff]. Default value is 4 (Unsolicited Data). */ LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1, 255, "Identifies RCTL for additional ring configuration"); /* # lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this # identifies what type value to configure the additional ring for. # Value range is [1,0xff]. Default value is 5 (LLC/SNAP). */ LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1, 255, "Identifies TYPE for additional ring configuration"); /* # lpfc_fdmi_on: controls FDMI support. # 0 = no FDMI support # 1 = support FDMI without attribute of hostname # 2 = support FDMI with attribute of hostname # Value range [0,2]. Default value is 0. */ LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support"); /* # Specifies the maximum number of ELS cmds we can have outstanding (for # discovery). Value range is [1,64]. Default value = 32. */ LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands " "during discovery"); /* # lpfc_max_luns: maximum allowed LUN. # Value range is [0,65535]. Default value is 255. # NOTE: The SCSI layer might probe all allowed LUNs on some old targets. */ LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN"); /* # lpfc_poll_tmo: Milliseconds driver will wait between polling FCP ring. # Value range is [1,255], default value is 10.
*/ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, "Milliseconds driver will wait between polling FCP ring"); /* # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that # support this feature # 0 = MSI disabled # 1 = MSI enabled # 2 = MSI-X enabled (default) # Value range is [0,2]. Default value is 2. */ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); /* # lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second # # Value range is [636,651042]. Default value is 10000. */ LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST, "Set the maximum number of fast-path FCP interrupts per second"); /* # lpfc_fcp_wq_count: Set the number of fast-path FCP work queues # # Value range is [1,31]. Default value is 4. */ LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX, "Set the number of fast-path FCP work queues, if possible"); /* # lpfc_fcp_eq_count: Set the number of fast-path FCP event queues # # Value range is [1,7]. Default value is 1. */ LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, "Set the number of fast-path FCP event queues, if possible"); /* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. # 0 = HBA resets disabled # 1 = HBA resets enabled (default) # Value range is [0,1]. Default value is 1. */ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver."); /* # lpfc_enable_hba_heartbeat: Enable the HBA heartbeat timer. # 0 = HBA Heartbeat disabled (default) # 1 = HBA Heartbeat enabled # Value range is [0,1]. Default value is 0. */ LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat."); /* # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) # 0 = BlockGuard disabled (default) # 1 = BlockGuard enabled # Value range is [0,1]. Default value is 0. */ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); /* # lpfc_prot_mask: # - Bit mask of host protection capabilities used to register with the # SCSI mid-layer # - Only meaningful if BG is turned on (lpfc_enable_bg=1). # - Allows you to ultimately specify which profiles to use # - Default will result in registering capabilities for all profiles. # */ unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION | SHOST_DIX_TYPE1_PROTECTION; module_param(lpfc_prot_mask, uint, S_IRUGO); MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); /* # lpfc_prot_guard: # - Bit mask of protection guard types to register with the SCSI mid-layer # - Guard types are currently either 1) IP checksum 2) T10-DIF CRC # - Allows you to ultimately specify which profiles to use # - Default will result in registering capabilities for all guard types # */ unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP; module_param(lpfc_prot_guard, byte, S_IRUGO); MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type"); /* * Delay initial NPort discovery when Clean Address bit is cleared in * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed. * This parameter can have value 0 or 1. * When this parameter is set to 0, no delay is added to the initial * discovery. * When this parameter is set to a non-zero value, initial NPort discovery is * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC * accept and FCID/Fabric name/Fabric portname is changed.
* The driver always delays NPort discovery for subsequent FLOGI/FDISC completions * when Clean Address bit is cleared in FLOGI/FDISC * accept and FCID/Fabric name/Fabric portname is changed. * Default value is 0. */ int lpfc_delay_discovery; module_param(lpfc_delay_discovery, int, S_IRUGO); MODULE_PARM_DESC(lpfc_delay_discovery, "Delay NPort discovery when Clean Address bit is cleared. " "Allowed values: 0,1."); /* * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count * This value can be set to values between 64 and 256. The default value is * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE). */ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT, "Max Protection Scatter Gather Segment Count"); struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_bg_info, &dev_attr_bg_guard_err, &dev_attr_bg_apptag_err, &dev_attr_bg_reftag_err, &dev_attr_info, &dev_attr_serialnum, &dev_attr_modeldesc, &dev_attr_modelname, &dev_attr_programtype, &dev_attr_portnum, &dev_attr_fwrev, &dev_attr_hdw, &dev_attr_option_rom_version, &dev_attr_link_state, &dev_attr_num_discovered_ports, &dev_attr_menlo_mgmt_mode, &dev_attr_lpfc_drvr_version, &dev_attr_lpfc_enable_fip, &dev_attr_lpfc_temp_sensor, &dev_attr_lpfc_log_verbose, &dev_attr_lpfc_lun_queue_depth, &dev_attr_lpfc_tgt_queue_depth, &dev_attr_lpfc_hba_queue_depth, &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, &dev_attr_lpfc_ack0, &dev_attr_lpfc_topology, &dev_attr_lpfc_scan_down, &dev_attr_lpfc_link_speed, &dev_attr_lpfc_cr_delay, &dev_attr_lpfc_cr_count, &dev_attr_lpfc_multi_ring_support, &dev_attr_lpfc_multi_ring_rctl, &dev_attr_lpfc_multi_ring_type, &dev_attr_lpfc_fdmi_on, &dev_attr_lpfc_max_luns, &dev_attr_lpfc_enable_npiv, &dev_attr_lpfc_enable_rrq, &dev_attr_nport_evt_cnt, &dev_attr_board_mode, &dev_attr_max_vpi, &dev_attr_used_vpi, &dev_attr_max_rpi, &dev_attr_used_rpi, &dev_attr_max_xri, &dev_attr_used_xri, &dev_attr_npiv_info, &dev_attr_issue_reset, &dev_attr_lpfc_poll, &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_use_msi, &dev_attr_lpfc_fcp_imax, &dev_attr_lpfc_fcp_wq_count, &dev_attr_lpfc_fcp_eq_count, &dev_attr_lpfc_enable_bg, &dev_attr_lpfc_soft_wwnn, &dev_attr_lpfc_soft_wwpn, &dev_attr_lpfc_soft_wwn_enable, &dev_attr_lpfc_enable_hba_reset, &dev_attr_lpfc_enable_hba_heartbeat, &dev_attr_lpfc_sg_seg_cnt, &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, &dev_attr_lpfc_prot_sg_seg_cnt, &dev_attr_lpfc_aer_support, &dev_attr_lpfc_aer_state_cleanup, &dev_attr_lpfc_sriov_nr_virtfn, &dev_attr_lpfc_suppress_link_up, &dev_attr_lpfc_iocb_cnt, &dev_attr_iocb_hw, &dev_attr_txq_hw, &dev_attr_txcmplq_hw, &dev_attr_lpfc_fips_level, &dev_attr_lpfc_fips_rev, &dev_attr_lpfc_dss, &dev_attr_lpfc_sriov_hw_max_virtfn, NULL, }; struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_info, &dev_attr_link_state, &dev_attr_num_discovered_ports, &dev_attr_lpfc_drvr_version, &dev_attr_lpfc_log_verbose, &dev_attr_lpfc_lun_queue_depth, &dev_attr_lpfc_tgt_queue_depth, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, &dev_attr_lpfc_hba_queue_depth, &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_restrict_login, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, &dev_attr_lpfc_fdmi_on,
&dev_attr_lpfc_max_luns, &dev_attr_nport_evt_cnt, &dev_attr_npiv_info, &dev_attr_lpfc_enable_da_id, &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, &dev_attr_lpfc_static_vport, &dev_attr_lpfc_fips_level, &dev_attr_lpfc_fips_rev, NULL, }; /** * sysfs_ctlreg_write - Write method for writing to ctlreg * @filp: open sysfs file * @kobj: kernel kobject that contains the kernel class device. * @bin_attr: kernel attributes passed to us. * @buf: contains the data to be written to the adapter IOREG space. * @off: offset into buffer to beginning of data. * @count: bytes to transfer. * * Description: * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. * Uses the adapter io control registers to send buf contents to the adapter. * * Returns: * -ERANGE off and count combo out of range * -EINVAL off, count or buff address invalid * -EPERM adapter is offline * value of count, buf contents written **/ static ssize_t sysfs_ctlreg_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { size_t buf_off; struct device *dev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->sli_rev >= LPFC_SLI_REV4) return -EPERM; if ((off + count) > FF_REG_AREA_SIZE) return -ERANGE; if (count == 0) return 0; if (off % 4 || count % 4 || (unsigned long)buf % 4) return -EINVAL; if (!(vport->fc_flag & FC_OFFLINE_MODE)) { return -EPERM; } spin_lock_irq(&phba->hbalock); for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) writel(*((uint32_t *)(buf + buf_off)), phba->ctrl_regs_memmap_p + off + buf_off); spin_unlock_irq(&phba->hbalock); return count; } /** * sysfs_ctlreg_read - Read method for reading from ctlreg * @filp: open sysfs file * @kobj: kernel kobject that contains the kernel class device. * @bin_attr: kernel attributes passed to us. * @buf: if successful contains the data from the adapter IOREG space. * @off: offset into buffer to beginning of data. * @count: bytes to transfer. * * Description: * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. * Uses the adapter io control registers to read data into buf. 
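* Both @off and @count must be multiples of four; an @off beyond FF_REG_AREA_SIZE returns -ERANGE and overlong requests are clamped to the register window, mirroring the 32-bit checks in sysfs_ctlreg_write().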
* * Returns: * -ERANGE off and count combo out of range * -EINVAL off, count or buff address invalid * value of count, buf contents read **/ static ssize_t sysfs_ctlreg_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { size_t buf_off; uint32_t * tmp_ptr; struct device *dev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->sli_rev >= LPFC_SLI_REV4) return -EPERM; if (off > FF_REG_AREA_SIZE) return -ERANGE; if ((off + count) > FF_REG_AREA_SIZE) count = FF_REG_AREA_SIZE - off; if (count == 0) return 0; if (off % 4 || count % 4 || (unsigned long)buf % 4) return -EINVAL; spin_lock_irq(&phba->hbalock); for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) { tmp_ptr = (uint32_t *)(buf + buf_off); *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off); } spin_unlock_irq(&phba->hbalock); return count; } static struct bin_attribute sysfs_ctlreg_attr = { .attr = { .name = "ctlreg", .mode = S_IRUSR | S_IWUSR, }, .size = 256, .read = sysfs_ctlreg_read, .write = sysfs_ctlreg_write, }; /** * sysfs_mbox_idle - frees the sysfs mailbox * @phba: lpfc_hba pointer **/ static void sysfs_mbox_idle(struct lpfc_hba *phba) { phba->sysfs_mbox.state = SMBOX_IDLE; phba->sysfs_mbox.offset = 0; if (phba->sysfs_mbox.mbox) { mempool_free(phba->sysfs_mbox.mbox, phba->mbox_mem_pool); phba->sysfs_mbox.mbox = NULL; } } /** * sysfs_mbox_write - Write method for writing information via mbox * @filp: open sysfs file * @kobj: kernel kobject that contains the kernel class device. * @bin_attr: kernel attributes passed to us. * @buf: contains the data to be written to sysfs mbox. * @off: offset into buffer to beginning of data. * @count: bytes to transfer. * * Description: * Accessed via /sys/class/scsi_host/hostxxx/mbox. * Uses the sysfs mbox to send buf contents to the adapter. 
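* A write at offset zero allocates a fresh mailbox and moves the state machine to SMBOX_WRITING; subsequent writes must continue at the recorded offset or the transfer is abandoned with -EAGAIN.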
* * Returns: * -ERANGE off and count combo out of range * -EINVAL off, count or buff address invalid * zero if count is zero * -EPERM adapter is offline * -ENOMEM failed to allocate memory for the mail box * -EAGAIN offset, state or mbox is NULL * count number of bytes transferred **/ static ssize_t sysfs_mbox_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfcMboxq *mbox = NULL; if ((count + off) > MAILBOX_CMD_SIZE) return -ERANGE; if (off % 4 || count % 4 || (unsigned long)buf % 4) return -EINVAL; if (count == 0) return 0; if (off == 0) { mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; memset(mbox, 0, sizeof (LPFC_MBOXQ_t)); } spin_lock_irq(&phba->hbalock); if (off == 0) { if (phba->sysfs_mbox.mbox) mempool_free(mbox, phba->mbox_mem_pool); else phba->sysfs_mbox.mbox = mbox; phba->sysfs_mbox.state = SMBOX_WRITING; } else { if (phba->sysfs_mbox.state != SMBOX_WRITING || phba->sysfs_mbox.offset != off || phba->sysfs_mbox.mbox == NULL) { sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EAGAIN; } } memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off, buf, count); phba->sysfs_mbox.offset = off + count; spin_unlock_irq(&phba->hbalock); return count; } /** * sysfs_mbox_read - Read method for reading information via mbox * @filp: open sysfs file * @kobj: kernel kobject that contains the kernel class device. * @bin_attr: kernel attributes passed to us. * @buf: contains the data to be read from sysfs mbox. * @off: offset into buffer to beginning of data. * @count: bytes to transfer. * * Description: * Accessed via /sys/class/scsi_host/hostxxx/mbox. * Uses the sysfs mbox to receive data from the adapter.
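* The first read at offset zero validates the queued command against the allowed-command list, issues it to the firmware and moves the state machine to SMBOX_READING; later reads drain the completed mailbox until MAILBOX_CMD_SIZE bytes have been returned.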
* * Returns: * -ERANGE off greater than mailbox command size * -EINVAL off, count or buff address invalid * zero if off and count are zero * -EACCES adapter over temp * -EPERM garbage can value to catch a multitude of errors * -EAGAIN management IO not permitted, state or off error * -ETIME mailbox timeout * -ENODEV mailbox error * count number of bytes transferred **/ static ssize_t sysfs_mbox_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int rc; MAILBOX_t *pmb; if (off > MAILBOX_CMD_SIZE) return -ERANGE; if ((count + off) > MAILBOX_CMD_SIZE) count = MAILBOX_CMD_SIZE - off; if (off % 4 || count % 4 || (unsigned long)buf % 4) return -EINVAL; if (off && count == 0) return 0; spin_lock_irq(&phba->hbalock); if (phba->over_temp_state == HBA_OVER_TEMP) { sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EACCES; } if (off == 0 && phba->sysfs_mbox.state == SMBOX_WRITING && phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { pmb = &phba->sysfs_mbox.mbox->u.mb; switch (pmb->mbxCommand) { /* Offline only */ case MBX_INIT_LINK: case MBX_DOWN_LINK: case MBX_CONFIG_LINK: case MBX_CONFIG_RING: case MBX_RESET_RING: case MBX_UNREG_LOGIN: case MBX_CLEAR_LA: case MBX_DUMP_CONTEXT: case MBX_RUN_DIAGS: case MBX_RESTART: case MBX_SET_MASK: case MBX_SET_DEBUG: if (!(vport->fc_flag & FC_OFFLINE_MODE)) { printk(KERN_WARNING "mbox_read:Command 0x%x " "is illegal in on-line state\n", pmb->mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; } case MBX_WRITE_NV: case MBX_WRITE_VPARMS: case MBX_LOAD_SM: case MBX_READ_NV: case MBX_READ_CONFIG: case MBX_READ_RCONFIG: case MBX_READ_STATUS: case MBX_READ_XRI: case MBX_READ_REV: case MBX_READ_LNK_STAT: case MBX_DUMP_MEMORY: case MBX_DOWN_LOAD: case MBX_UPDATE_CFG: case MBX_KILL_BOARD: case MBX_LOAD_AREA: case MBX_LOAD_EXP_ROM: case MBX_BEACON: case MBX_DEL_LD_ENTRY: case MBX_SET_VARIABLE: case MBX_WRITE_WWN: case MBX_PORT_CAPABILITIES: case MBX_PORT_IOV_CONTROL: break; case MBX_SECURITY_MGMT: case MBX_AUTH_PORT: if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { printk(KERN_WARNING "mbox_read:Command 0x%x " "is not permitted\n", pmb->mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; } break; case MBX_READ_SPARM64: case MBX_READ_TOPOLOGY: case MBX_REG_LOGIN: case MBX_REG_LOGIN64: case MBX_CONFIG_PORT: case MBX_RUN_BIU_DIAG: printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", pmb->mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; default: printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", pmb->mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; } /* If HBA encountered an error attention, allow only DUMP * or RESTART mailbox commands until the HBA is restarted. 
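* (MBX_WRITE_VPARMS and MBX_WRITE_WWN are tolerated as well, and the check below only logs a warning rather than rejecting the command.)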
*/ if (phba->pport->stopped && pmb->mbxCommand != MBX_DUMP_MEMORY && pmb->mbxCommand != MBX_RESTART && pmb->mbxCommand != MBX_WRITE_VPARMS && pmb->mbxCommand != MBX_WRITE_WWN) lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "1259 mbox: Issued mailbox cmd " "0x%x while in stopped state.\n", pmb->mbxCommand); phba->sysfs_mbox.mbox->vport = vport; /* Don't allow mailbox commands to be sent when blocked * or when in the middle of discovery */ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EAGAIN; } if ((vport->fc_flag & FC_OFFLINE_MODE) || (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox (phba, phba->sysfs_mbox.mbox, MBX_POLL); spin_lock_irq(&phba->hbalock); } else { spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox_wait (phba, phba->sysfs_mbox.mbox, lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ); spin_lock_irq(&phba->hbalock); } if (rc != MBX_SUCCESS) { if (rc == MBX_TIMEOUT) { phba->sysfs_mbox.mbox = NULL; } sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; } phba->sysfs_mbox.state = SMBOX_READING; } else if (phba->sysfs_mbox.offset != off || phba->sysfs_mbox.state != SMBOX_READING) { printk(KERN_WARNING "mbox_read: Bad State\n"); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EAGAIN; } /* Copy from the saved mailbox; the local pmb pointer is only set when * the command is issued at off == 0, so it must not be used here. */ memcpy(buf, (uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off, count); phba->sysfs_mbox.offset = off + count; if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE) sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return count; } static struct bin_attribute sysfs_mbox_attr = { .attr = { .name = "mbox", .mode = S_IRUSR | S_IWUSR, }, .size = MAILBOX_SYSFS_MAX, .read = sysfs_mbox_read, .write = sysfs_mbox_write, }; /** * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries * @vport: address of lpfc vport structure. * * Return codes: * zero on success * error return code from sysfs_create_bin_file() **/ int lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); int error; error = sysfs_create_bin_file(&shost->shost_dev.kobj, &sysfs_drvr_stat_data_attr); /* Virtual ports do not need ctrl_reg and mbox */ if (error || vport->port_type == LPFC_NPIV_PORT) goto out; error = sysfs_create_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); if (error) goto out_remove_stat_attr; error = sysfs_create_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); if (error) goto out_remove_ctlreg_attr; return 0; out_remove_ctlreg_attr: sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); out_remove_stat_attr: sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_drvr_stat_data_attr); out: return error; } /** * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries * @vport: address of lpfc vport structure. **/ void lpfc_free_sysfs_attr(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_drvr_stat_data_attr); /* Virtual ports do not need ctrl_reg and mbox */ if (vport->port_type == LPFC_NPIV_PORT) return; sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); } /* * Dynamic FC Host Attributes Support */ /** * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id * @shost: kernel scsi host pointer.
**/ static void lpfc_get_host_port_id(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; /* note: fc_myDID already in cpu endianness */ fc_host_port_id(shost) = vport->fc_myDID; } /** * lpfc_get_host_port_type - Set the value of the scsi host port type * @shost: kernel scsi host pointer. **/ static void lpfc_get_host_port_type(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; spin_lock_irq(shost->host_lock); if (vport->port_type == LPFC_NPIV_PORT) { fc_host_port_type(shost) = FC_PORTTYPE_NPIV; } else if (lpfc_is_link_up(phba)) { if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { if (vport->fc_flag & FC_PUBLIC_LOOP) fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; else fc_host_port_type(shost) = FC_PORTTYPE_LPORT; } else { if (vport->fc_flag & FC_FABRIC) fc_host_port_type(shost) = FC_PORTTYPE_NPORT; else fc_host_port_type(shost) = FC_PORTTYPE_PTP; } } else fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; spin_unlock_irq(shost->host_lock); } /** * lpfc_get_host_port_state - Set the value of the scsi host port state * @shost: kernel scsi host pointer. **/ static void lpfc_get_host_port_state(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; spin_lock_irq(shost->host_lock); if (vport->fc_flag & FC_OFFLINE_MODE) fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; else { switch (phba->link_state) { case LPFC_LINK_UNKNOWN: case LPFC_LINK_DOWN: fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; break; case LPFC_LINK_UP: case LPFC_CLEAR_LA: case LPFC_HBA_READY: /* Links up, beyond this port_type reports state */ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; break; case LPFC_HBA_ERROR: fc_host_port_state(shost) = FC_PORTSTATE_ERROR; break; default: fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; break; } } spin_unlock_irq(shost->host_lock); } /** * lpfc_get_host_speed - Set the value of the scsi host speed * @shost: kernel scsi host pointer. **/ static void lpfc_get_host_speed(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; spin_lock_irq(shost->host_lock); if (lpfc_is_link_up(phba)) { switch(phba->fc_linkspeed) { case LPFC_LINK_SPEED_1GHZ: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; break; case LPFC_LINK_SPEED_2GHZ: fc_host_speed(shost) = FC_PORTSPEED_2GBIT; break; case LPFC_LINK_SPEED_4GHZ: fc_host_speed(shost) = FC_PORTSPEED_4GBIT; break; case LPFC_LINK_SPEED_8GHZ: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; break; case LPFC_LINK_SPEED_10GHZ: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; case LPFC_LINK_SPEED_16GHZ: fc_host_speed(shost) = FC_PORTSPEED_16GBIT; break; default: fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } } else fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; spin_unlock_irq(shost->host_lock); } /** * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name * @shost: kernel scsi host pointer. 
**/ static void lpfc_get_host_fabric_name (struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; u64 node_name; spin_lock_irq(shost->host_lock); if ((vport->fc_flag & FC_FABRIC) || ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && (vport->fc_flag & FC_PUBLIC_LOOP))) node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); else /* fabric is local port if there is no F/FL_Port */ node_name = 0; spin_unlock_irq(shost->host_lock); fc_host_fabric_name(shost) = node_name; } /** * lpfc_get_stats - Return statistical information about the adapter * @shost: kernel scsi host pointer. * * Notes: * NULL on error for link down, no mbox pool, sli2 active, * management not allowed, memory allocation error, or mbox error. * * Returns: * NULL for error * address of the adapter host statistics **/ static struct fc_host_statistics * lpfc_get_stats(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_sli *psli = &phba->sli; struct fc_host_statistics *hs = &phba->link_stats; struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; unsigned long seconds; int rc = 0; /* * prevent udev from issuing mailbox commands until the port is * configured. */ if (phba->link_state < LPFC_LINK_DOWN || !phba->mbox_mem_pool || (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) return NULL; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) return NULL; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) return NULL; memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); pmb = &pmboxq->u.mb; pmb->mbxCommand = MBX_READ_STATUS; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; pmboxq->vport = vport; if (vport->fc_flag & FC_OFFLINE_MODE) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (rc != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); return NULL; } memset(hs, 0, sizeof (struct fc_host_statistics)); hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt; hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256); hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt; hs->rx_words = (pmb->un.varRdStatus.rcvByteCnt * 256); memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); pmb->mbxCommand = MBX_READ_LNK_STAT; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; pmboxq->vport = vport; if (vport->fc_flag & FC_OFFLINE_MODE) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (rc != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); return NULL; } hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; hs->error_frames = pmb->un.varRdLnk.crcCnt; hs->link_failure_count -= lso->link_failure_count; hs->loss_of_sync_count -= lso->loss_of_sync_count; hs->loss_of_signal_count -= lso->loss_of_signal_count; hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; hs->invalid_tx_word_count -= lso->invalid_tx_word_count; hs->invalid_crc_count -= lso->invalid_crc_count; hs->error_frames -= lso->error_frames; if (phba->hba_flag & HBA_FCOE_MODE) { hs->lip_count = -1; hs->nos_count 
= (phba->link_events >> 1); hs->nos_count -= lso->link_events; } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { hs->lip_count = (phba->fc_eventTag >> 1); hs->lip_count -= lso->link_events; hs->nos_count = -1; } else { hs->lip_count = -1; hs->nos_count = (phba->fc_eventTag >> 1); hs->nos_count -= lso->link_events; } hs->dumped_frames = -1; seconds = get_seconds(); if (seconds < psli->stats_start) hs->seconds_since_last_reset = seconds + ((unsigned long)-1 - psli->stats_start); else hs->seconds_since_last_reset = seconds - psli->stats_start; mempool_free(pmboxq, phba->mbox_mem_pool); return hs; } /** * lpfc_reset_stats - Copy the adapter link stats information * @shost: kernel scsi host pointer. **/ static void lpfc_reset_stats(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_sli *psli = &phba->sli; struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; int rc = 0; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) return; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) return; memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); pmb = &pmboxq->u.mb; pmb->mbxCommand = MBX_READ_STATUS; pmb->mbxOwner = OWN_HOST; pmb->un.varWords[0] = 0x1; /* reset request */ pmboxq->context1 = NULL; pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || (!(psli->sli_flag & LPFC_SLI_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (rc != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); return; } memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); pmb->mbxCommand = MBX_READ_LNK_STAT; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || (!(psli->sli_flag & LPFC_SLI_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (rc != MBX_TIMEOUT) mempool_free( pmboxq, phba->mbox_mem_pool); return; } lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; lso->error_frames = pmb->un.varRdLnk.crcCnt; if (phba->hba_flag & HBA_FCOE_MODE) lso->link_events = (phba->link_events >> 1); else lso->link_events = (phba->fc_eventTag >> 1); psli->stats_start = get_seconds(); mempool_free(pmboxq, phba->mbox_mem_pool); return; } /* * The LPFC driver treats linkdown handling as target loss events so there * are no sysfs handlers for link_down_tmo. */ /** * lpfc_get_node_by_target - Return the nodelist for a target * @starget: kernel scsi target pointer. 
* * Returns: * address of the node list if found * NULL target not found **/ static struct lpfc_nodelist * lpfc_get_node_by_target(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_nodelist *ndlp; spin_lock_irq(shost->host_lock); /* Search for this, mapped, target ID */ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (NLP_CHK_NODE_ACT(ndlp) && ndlp->nlp_state == NLP_STE_MAPPED_NODE && starget->id == ndlp->nlp_sid) { spin_unlock_irq(shost->host_lock); return ndlp; } } spin_unlock_irq(shost->host_lock); return NULL; } /** * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1 * @starget: kernel scsi target pointer. **/ static void lpfc_get_starget_port_id(struct scsi_target *starget) { struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1; } /** * lpfc_get_starget_node_name - Set the target node name * @starget: kernel scsi target pointer. * * Description: Set the target node name to the ndlp node name wwn or zero. **/ static void lpfc_get_starget_node_name(struct scsi_target *starget) { struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); fc_starget_node_name(starget) = ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0; } /** * lpfc_get_starget_port_name - Set the target port name * @starget: kernel scsi target pointer. * * Description: set the target port name to the ndlp port name wwn or zero. **/ static void lpfc_get_starget_port_name(struct scsi_target *starget) { struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); fc_starget_port_name(starget) = ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0; } /** * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo * @rport: fc rport address. * @timeout: new value for dev loss tmo. * * Description: * If timeout is non zero set the dev_loss_tmo to timeout, else set * dev_loss_tmo to one. **/ static void lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) { if (timeout) rport->dev_loss_tmo = timeout; else rport->dev_loss_tmo = 1; } /** * lpfc_rport_show_function - Return rport target information * * Description: * Macro that uses field to generate a function with the name lpfc_show_rport_ * * lpfc_show_rport_##field: returns the bytes formatted in buf * @cdev: class converted to an fc_rport. * @buf: on return contains the target_field or zero. * * Returns: size of formatted string. **/ #define lpfc_rport_show_function(field, format_string, sz, cast) \ static ssize_t \ lpfc_show_rport_##field (struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct fc_rport *rport = transport_class_to_rport(dev); \ struct lpfc_rport_data *rdata = rport->hostdata; \ return snprintf(buf, sz, format_string, \ (rdata->target) ? cast rdata->target->field : 0); \ } #define lpfc_rport_rd_attr(field, format_string, sz) \ lpfc_rport_show_function(field, format_string, sz, ) \ static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL) /** * lpfc_set_vport_symbolic_name - Set the vport's symbolic name * @fc_vport: The fc_vport who's symbolic name has been changed. * * Description: * This function is called by the transport after the @fc_vport's symbolic name * has been changed. This function re-registers the symbolic name with the * switch to propagate the change into the fabric if the vport is active. 
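* The re-registration is done with an RSPN_ID request to the name server via lpfc_ns_cmd() once the vport reaches LPFC_VPORT_READY.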
**/ static void lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport) { struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; if (vport->port_state == LPFC_VPORT_READY) lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); } /** * lpfc_hba_log_verbose_init - Set hba's log verbose level * @phba: Pointer to lpfc_hba struct. * @verbose: module log verbose value to apply. * * This function is called by the lpfc_get_cfgparam() routine to copy the * module parameter lpfc_log_verbose into the @phba cfg_log_verbose field, * so that log messages are filtered according to the module's setting * before any hba port or vport is created. **/ static void lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose) { phba->cfg_log_verbose = verbose; } struct fc_function_template lpfc_transport_functions = { /* fixed attributes the driver supports */ .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, .show_host_symbolic_name = 1, /* dynamic attributes the driver supports */ .get_host_port_id = lpfc_get_host_port_id, .show_host_port_id = 1, .get_host_port_type = lpfc_get_host_port_type, .show_host_port_type = 1, .get_host_port_state = lpfc_get_host_port_state, .show_host_port_state = 1, /* active_fc4s is shown but doesn't change (thus no get function) */ .show_host_active_fc4s = 1, .get_host_speed = lpfc_get_host_speed, .show_host_speed = 1, .get_host_fabric_name = lpfc_get_host_fabric_name, .show_host_fabric_name = 1, /* * The LPFC driver treats linkdown handling as target loss events * so there are no sysfs handlers for link_down_tmo. */ .get_fc_host_stats = lpfc_get_stats, .reset_fc_host_stats = lpfc_reset_stats, .dd_fcrport_size = sizeof(struct lpfc_rport_data), .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_starget_port_id = lpfc_get_starget_port_id, .show_starget_port_id = 1, .get_starget_node_name = lpfc_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = lpfc_get_starget_port_name, .show_starget_port_name = 1, .issue_fc_host_lip = lpfc_issue_lip, .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, .terminate_rport_io = lpfc_terminate_rport_io, .dd_fcvport_size = sizeof(struct lpfc_vport *), .vport_disable = lpfc_vport_disable, .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, .bsg_request = lpfc_bsg_request, .bsg_timeout = lpfc_bsg_timeout, }; struct fc_function_template lpfc_vport_transport_functions = { /* fixed attributes the driver supports */ .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, .show_host_symbolic_name = 1, /* dynamic attributes the driver supports */ .get_host_port_id = lpfc_get_host_port_id, .show_host_port_id = 1, .get_host_port_type = lpfc_get_host_port_type, .show_host_port_type = 1, .get_host_port_state = lpfc_get_host_port_state, .show_host_port_state = 1, /* active_fc4s is shown but doesn't change (thus no get function) */ .show_host_active_fc4s = 1, .get_host_speed = lpfc_get_host_speed, .show_host_speed = 1, .get_host_fabric_name = lpfc_get_host_fabric_name, .show_host_fabric_name = 1, /* * The LPFC driver treats linkdown handling as target loss events * so there are no sysfs handlers for link_down_tmo.
*/ .get_fc_host_stats = lpfc_get_stats, .reset_fc_host_stats = lpfc_reset_stats, .dd_fcrport_size = sizeof(struct lpfc_rport_data), .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_starget_port_id = lpfc_get_starget_port_id, .show_starget_port_id = 1, .get_starget_node_name = lpfc_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = lpfc_get_starget_port_name, .show_starget_port_name = 1, .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, .terminate_rport_io = lpfc_terminate_rport_io, .vport_disable = lpfc_vport_disable, .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, }; /** * lpfc_get_cfgparam - Used during probe_one to init the adapter structure * @phba: lpfc_hba pointer. **/ void lpfc_get_cfgparam(struct lpfc_hba *phba) { lpfc_cr_delay_init(phba, lpfc_cr_delay); lpfc_cr_count_init(phba, lpfc_cr_count); lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support); lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl); lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type); lpfc_ack0_init(phba, lpfc_ack0); lpfc_topology_init(phba, lpfc_topology); lpfc_link_speed_init(phba, lpfc_link_speed); lpfc_poll_tmo_init(phba, lpfc_poll_tmo); lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_enable_rrq_init(phba, lpfc_enable_rrq); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); lpfc_enable_bg_init(phba, lpfc_enable_bg); if (phba->sli_rev == LPFC_SLI_REV4) phba->cfg_poll = 0; else phba->cfg_poll = lpfc_poll; phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); lpfc_aer_support_init(phba, lpfc_aer_support); lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn); lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt); phba->cfg_enable_dss = 1; return; } /** * lpfc_get_vport_cfgparam - Used during port create, init the vport structure * @vport: lpfc_vport pointer. **/ void lpfc_get_vport_cfgparam(struct lpfc_vport *vport) { lpfc_log_verbose_init(vport, lpfc_log_verbose); lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth); lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth); lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo); lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo); lpfc_peer_port_login_init(vport, lpfc_peer_port_login); lpfc_restrict_login_init(vport, lpfc_restrict_login); lpfc_fcp_class_init(vport, lpfc_fcp_class); lpfc_use_adisc_init(vport, lpfc_use_adisc); lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time); lpfc_fdmi_on_init(vport, lpfc_fdmi_on); lpfc_discovery_threads_init(vport, lpfc_discovery_threads); lpfc_max_luns_init(vport, lpfc_max_luns); lpfc_scan_down_init(vport, lpfc_scan_down); lpfc_enable_da_id_init(vport, lpfc_enable_da_id); return; }
gpl-2.0
djvoleur/S6_UniPR_BOI1
drivers/char/hw_random/picoxcell-rng.c
2316
4778
/* * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * All enquiries to support@picochip.com */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #define DATA_REG_OFFSET 0x0200 #define CSR_REG_OFFSET 0x0278 #define CSR_OUT_EMPTY_MASK (1 << 24) #define CSR_FAULT_MASK (1 << 1) #define TRNG_BLOCK_RESET_MASK (1 << 0) #define TAI_REG_OFFSET 0x0380 /* * The maximum amount of time in microseconds to spend waiting for data if the * core wants us to wait. The TRNG should generate 32 bits every 320ns so a * timeout of 20us seems reasonable. The TRNG does builtin tests of the data * for randomness so we can't always assume there is data present. */ #define PICO_TRNG_TIMEOUT 20 static void __iomem *rng_base; static struct clk *rng_clk; struct device *rng_dev; static inline u32 picoxcell_trng_read_csr(void) { return __raw_readl(rng_base + CSR_REG_OFFSET); } static inline bool picoxcell_trng_is_empty(void) { return picoxcell_trng_read_csr() & CSR_OUT_EMPTY_MASK; } /* * Take the random number generator out of reset and make sure the interrupts * are masked. We shouldn't need to get large amounts of random bytes so just * poll the status register. The hardware generates 32 bits every 320ns so we * shouldn't have to wait long enough to warrant waiting for an IRQ. */ static void picoxcell_trng_start(void) { __raw_writel(0, rng_base + TAI_REG_OFFSET); __raw_writel(0, rng_base + CSR_REG_OFFSET); } static void picoxcell_trng_reset(void) { __raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + CSR_REG_OFFSET); __raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + TAI_REG_OFFSET); picoxcell_trng_start(); } /* * Get some random data from the random number generator. The hw_random core * layer provides us with locking. */ static int picoxcell_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { int i; /* Wait for some data to become available. 
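* The hardware produces a new 32-bit word roughly every 320ns, so polling once per microsecond for up to PICO_TRNG_TIMEOUT iterations is cheap.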
*/ for (i = 0; i < PICO_TRNG_TIMEOUT && picoxcell_trng_is_empty(); ++i) { if (!wait) return 0; udelay(1); } if (picoxcell_trng_read_csr() & CSR_FAULT_MASK) { dev_err(rng_dev, "fault detected, resetting TRNG\n"); picoxcell_trng_reset(); return -EIO; } if (i == PICO_TRNG_TIMEOUT) return 0; *(u32 *)buf = __raw_readl(rng_base + DATA_REG_OFFSET); return sizeof(u32); } static struct hwrng picoxcell_trng = { .name = "picoxcell", .read = picoxcell_trng_read, }; static int picoxcell_trng_probe(struct platform_device *pdev) { int ret; struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_warn(&pdev->dev, "no memory resource\n"); return -ENOMEM; } if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), "picoxcell_trng")) { dev_warn(&pdev->dev, "unable to request io mem\n"); return -EBUSY; } rng_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (!rng_base) { dev_warn(&pdev->dev, "unable to remap io mem\n"); return -ENOMEM; } rng_clk = clk_get(&pdev->dev, NULL); if (IS_ERR(rng_clk)) { dev_warn(&pdev->dev, "no clk\n"); return PTR_ERR(rng_clk); } ret = clk_enable(rng_clk); if (ret) { dev_warn(&pdev->dev, "unable to enable clk\n"); goto err_enable; } picoxcell_trng_start(); ret = hwrng_register(&picoxcell_trng); if (ret) goto err_register; rng_dev = &pdev->dev; dev_info(&pdev->dev, "picoxcell random number generator active\n"); return 0; err_register: clk_disable(rng_clk); err_enable: clk_put(rng_clk); return ret; } static int picoxcell_trng_remove(struct platform_device *pdev) { hwrng_unregister(&picoxcell_trng); clk_disable(rng_clk); clk_put(rng_clk); return 0; } #ifdef CONFIG_PM static int picoxcell_trng_suspend(struct device *dev) { clk_disable(rng_clk); return 0; } static int picoxcell_trng_resume(struct device *dev) { return clk_enable(rng_clk); } static const struct dev_pm_ops picoxcell_trng_pm_ops = { .suspend = picoxcell_trng_suspend, .resume = picoxcell_trng_resume, }; #endif /* CONFIG_PM */ static struct platform_driver picoxcell_trng_driver = { .probe = picoxcell_trng_probe, .remove = picoxcell_trng_remove, .driver = { .name = "picoxcell-trng", .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &picoxcell_trng_pm_ops, #endif /* CONFIG_PM */ }, }; module_platform_driver(picoxcell_trng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamie Iles"); MODULE_DESCRIPTION("Picochip picoXcell TRNG driver");
gpl-2.0
ibazzi/rk3288-kernel
drivers/char/hw_random/pseries-rng.c
2572
2648
/* * Copyright (C) 2010 Michael Neuling IBM Corporation * * Driver for the pseries hardware RNG for POWER7+ and above * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/hw_random.h> #include <asm/vio.h> #define MODULE_NAME "pseries-rng" static int pseries_rng_data_read(struct hwrng *rng, u32 *data) { if (plpar_hcall(H_RANDOM, (unsigned long *)data) != H_SUCCESS) { printk(KERN_ERR "pseries rng hcall error\n"); return 0; } return 8; } /** * pseries_rng_get_desired_dma - Return desired DMA allocate for CMO operations * * This is a required function for a driver to operate in a CMO environment * but this device does not make use of DMA allocations, return 0. * * Return value: * Number of bytes of IO data the driver will need to perform well -> 0 */ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev) { return 0; }; static struct hwrng pseries_rng = { .name = MODULE_NAME, .data_read = pseries_rng_data_read, }; static int __init pseries_rng_probe(struct vio_dev *dev, const struct vio_device_id *id) { return hwrng_register(&pseries_rng); } static int __exit pseries_rng_remove(struct vio_dev *dev) { hwrng_unregister(&pseries_rng); return 0; } static struct vio_device_id pseries_rng_driver_ids[] = { { "ibm,random-v1", "ibm,random"}, { "", "" } }; MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids); static struct vio_driver pseries_rng_driver = { .name = MODULE_NAME, .probe = pseries_rng_probe, .remove = pseries_rng_remove, .get_desired_dma = pseries_rng_get_desired_dma, .id_table = pseries_rng_driver_ids }; static int __init rng_init(void) { printk(KERN_INFO "Registering IBM pSeries RNG driver\n"); return vio_register_driver(&pseries_rng_driver); } module_init(rng_init); static void __exit rng_exit(void) { vio_unregister_driver(&pseries_rng_driver); } module_exit(rng_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michael Neuling <mikey@neuling.org>"); MODULE_DESCRIPTION("H/W RNG driver for IBM pSeries processors");
gpl-2.0
vmobi-gogh/android_kernel_samsung_gogh
arch/mips/oprofile/common.c
3340
2862
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004, 2005 Ralf Baechle * Copyright (C) 2005 MIPS Technologies, Inc. */ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/oprofile.h> #include <linux/smp.h> #include <asm/cpu-info.h> #include "op_impl.h" extern struct op_mips_model op_model_mipsxx_ops __weak; extern struct op_mips_model op_model_rm9000_ops __weak; extern struct op_mips_model op_model_loongson2_ops __weak; static struct op_mips_model *model; static struct op_counter_config ctr[20]; static int op_mips_setup(void) { /* Pre-compute the values to stuff in the hardware registers. */ model->reg_setup(ctr); /* Configure the registers on all cpus. */ on_each_cpu(model->cpu_setup, NULL, 1); return 0; } static int op_mips_create_files(struct super_block *sb, struct dentry *root) { int i; for (i = 0; i < model->num_counters; ++i) { struct dentry *dir; char buf[4]; snprintf(buf, sizeof buf, "%d", i); dir = oprofilefs_mkdir(sb, root, buf); oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); oprofilefs_create_ulong(sb, dir, "exl", &ctr[i].exl); /* Dummy. */ oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask); } return 0; } static int op_mips_start(void) { on_each_cpu(model->cpu_start, NULL, 1); return 0; } static void op_mips_stop(void) { /* Disable performance monitoring for all counters. */ on_each_cpu(model->cpu_stop, NULL, 1); } int __init oprofile_arch_init(struct oprofile_operations *ops) { struct op_mips_model *lmodel = NULL; int res; switch (current_cpu_type()) { case CPU_5KC: case CPU_20KC: case CPU_24K: case CPU_25KF: case CPU_34K: case CPU_1004K: case CPU_74K: case CPU_SB1: case CPU_SB1A: case CPU_R10000: case CPU_R12000: case CPU_R14000: lmodel = &op_model_mipsxx_ops; break; case CPU_RM9000: lmodel = &op_model_rm9000_ops; break; case CPU_LOONGSON2: lmodel = &op_model_loongson2_ops; break; } if (!lmodel) return -ENODEV; res = lmodel->init(); if (res) return res; model = lmodel; ops->create_files = op_mips_create_files; ops->setup = op_mips_setup; //ops->shutdown = op_mips_shutdown; ops->start = op_mips_start; ops->stop = op_mips_stop; ops->cpu_type = lmodel->cpu_type; printk(KERN_INFO "oprofile: using %s performance monitoring.\n", lmodel->cpu_type); return 0; } void oprofile_arch_exit(void) { if (model) model->exit(); }
gpl-2.0
Ekylypse/android_kernel_kltesprsport
fs/xfs/xfs_iops.c
3596
29365
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_acl.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_alloc.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_itable.h" #include "xfs_rw.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_utils.h" #include "xfs_vnodeops.h" #include "xfs_inode_item.h" #include "xfs_trace.h" #include <linux/capability.h> #include <linux/xattr.h> #include <linux/namei.h> #include <linux/posix_acl.h> #include <linux/security.h> #include <linux/fiemap.h> #include <linux/slab.h> static int xfs_initxattrs( struct inode *inode, const struct xattr *xattr_array, void *fs_info) { const struct xattr *xattr; struct xfs_inode *ip = XFS_I(inode); int error = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { error = xfs_attr_set(ip, xattr->name, xattr->value, xattr->value_len, ATTR_SECURE); if (error < 0) break; } return error; } /* * Hook in SELinux. This is not quite correct yet, what we really need * here (as we do for default ACLs) is a mechanism by which creation of * these attrs can be journalled at inode creation time (along with the * inode, of course, such that log replay can't cause these to be lost). */ STATIC int xfs_init_security( struct inode *inode, struct inode *dir, const struct qstr *qstr) { return security_inode_init_security(inode, dir, qstr, &xfs_initxattrs, NULL); } static void xfs_dentry_to_name( struct xfs_name *namep, struct dentry *dentry) { namep->name = dentry->d_name.name; namep->len = dentry->d_name.len; } STATIC void xfs_cleanup_inode( struct inode *dir, struct inode *inode, struct dentry *dentry) { struct xfs_name teardown; /* Oh, the horror. * If we can't add the ACL or we fail in * xfs_init_security we must back out. * ENOSPC can hit here, among other things. */ xfs_dentry_to_name(&teardown, dentry); xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); iput(inode); } STATIC int xfs_vn_mknod( struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct inode *inode; struct xfs_inode *ip = NULL; struct posix_acl *default_acl = NULL; struct xfs_name name; int error; /* * Irix uses Missed'em'V split, but doesn't want to see * the upper 5 bits of (14bit) major. 
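* sysv_encode_dev() places the minor number in the low bits with the major above it; the MAJOR(rdev) & ~0x1ff test below additionally rejects majors wider than 9 bits.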
*/ if (S_ISCHR(mode) || S_ISBLK(mode)) { if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff)) return -EINVAL; rdev = sysv_encode_dev(rdev); } else { rdev = 0; } if (IS_POSIXACL(dir)) { default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(default_acl)) return PTR_ERR(default_acl); if (!default_acl) mode &= ~current_umask(); } xfs_dentry_to_name(&name, dentry); error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); if (unlikely(error)) goto out_free_acl; inode = VFS_I(ip); error = xfs_init_security(inode, dir, &dentry->d_name); if (unlikely(error)) goto out_cleanup_inode; if (default_acl) { error = -xfs_inherit_acl(inode, default_acl); default_acl = NULL; if (unlikely(error)) goto out_cleanup_inode; } d_instantiate(dentry, inode); return -error; out_cleanup_inode: xfs_cleanup_inode(dir, inode, dentry); out_free_acl: posix_acl_release(default_acl); return -error; } STATIC int xfs_vn_create( struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { return xfs_vn_mknod(dir, dentry, mode, 0); } STATIC int xfs_vn_mkdir( struct inode *dir, struct dentry *dentry, umode_t mode) { return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0); } STATIC struct dentry * xfs_vn_lookup( struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct xfs_inode *cip; struct xfs_name name; int error; if (dentry->d_name.len >= MAXNAMELEN) return ERR_PTR(-ENAMETOOLONG); xfs_dentry_to_name(&name, dentry); error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); if (unlikely(error)) { if (unlikely(error != ENOENT)) return ERR_PTR(-error); d_add(dentry, NULL); return NULL; } return d_splice_alias(VFS_I(cip), dentry); } STATIC struct dentry * xfs_vn_ci_lookup( struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct xfs_inode *ip; struct xfs_name xname; struct xfs_name ci_name; struct qstr dname; int error; if (dentry->d_name.len >= MAXNAMELEN) return ERR_PTR(-ENAMETOOLONG); xfs_dentry_to_name(&xname, dentry); error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); if (unlikely(error)) { if (unlikely(error != ENOENT)) return ERR_PTR(-error); /* * call d_add(dentry, NULL) here when d_drop_negative_children * is called in xfs_vn_mknod (ie. allow negative dentries * with CI filesystems). */ return NULL; } /* if exact match, just splice and exit */ if (!ci_name.name) return d_splice_alias(VFS_I(ip), dentry); /* else case-insensitive match... */ dname.name = ci_name.name; dname.len = ci_name.len; dentry = d_add_ci(dentry, VFS_I(ip), &dname); kmem_free(ci_name.name); return dentry; } STATIC int xfs_vn_link( struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; struct xfs_name name; int error; xfs_dentry_to_name(&name, dentry); error = xfs_link(XFS_I(dir), XFS_I(inode), &name); if (unlikely(error)) return -error; ihold(inode); d_instantiate(dentry, inode); return 0; } STATIC int xfs_vn_unlink( struct inode *dir, struct dentry *dentry) { struct xfs_name name; int error; xfs_dentry_to_name(&name, dentry); error = -xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode)); if (error) return error; /* * With unlink, the VFS makes the dentry "negative": no inode, * but still hashed. This is incompatible with case-insensitive * mode, so invalidate (unhash) the dentry in CI-mode. 
*/ if (xfs_sb_version_hasasciici(&XFS_M(dir->i_sb)->m_sb)) d_invalidate(dentry); return 0; } STATIC int xfs_vn_symlink( struct inode *dir, struct dentry *dentry, const char *symname) { struct inode *inode; struct xfs_inode *cip = NULL; struct xfs_name name; int error; umode_t mode; mode = S_IFLNK | (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); xfs_dentry_to_name(&name, dentry); error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip); if (unlikely(error)) goto out; inode = VFS_I(cip); error = xfs_init_security(inode, dir, &dentry->d_name); if (unlikely(error)) goto out_cleanup_inode; d_instantiate(dentry, inode); return 0; out_cleanup_inode: xfs_cleanup_inode(dir, inode, dentry); out: return -error; } STATIC int xfs_vn_rename( struct inode *odir, struct dentry *odentry, struct inode *ndir, struct dentry *ndentry) { struct inode *new_inode = ndentry->d_inode; struct xfs_name oname; struct xfs_name nname; xfs_dentry_to_name(&oname, odentry); xfs_dentry_to_name(&nname, ndentry); return -xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), XFS_I(ndir), &nname, new_inode ? XFS_I(new_inode) : NULL); } /* * careful here - this function can get called recursively, so * we need to be very careful about how much stack we use. * uio is kmalloced for this reason... */ STATIC void * xfs_vn_follow_link( struct dentry *dentry, struct nameidata *nd) { char *link; int error = -ENOMEM; link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); if (!link) goto out_err; error = -xfs_readlink(XFS_I(dentry->d_inode), link); if (unlikely(error)) goto out_kfree; nd_set_link(nd, link); return NULL; out_kfree: kfree(link); out_err: nd_set_link(nd, ERR_PTR(error)); return NULL; } STATIC void xfs_vn_put_link( struct dentry *dentry, struct nameidata *nd, void *p) { char *s = nd_get_link(nd); if (!IS_ERR(s)) kfree(s); } STATIC int xfs_vn_getattr( struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; trace_xfs_getattr(ip); if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); stat->size = XFS_ISIZE(ip); stat->dev = inode->i_sb->s_dev; stat->mode = ip->i_d.di_mode; stat->nlink = ip->i_d.di_nlink; stat->uid = ip->i_d.di_uid; stat->gid = ip->i_d.di_gid; stat->ino = ip->i_ino; stat->atime = inode->i_atime; stat->mtime = inode->i_mtime; stat->ctime = inode->i_ctime; stat->blocks = XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); switch (inode->i_mode & S_IFMT) { case S_IFBLK: case S_IFCHR: stat->blksize = BLKDEV_IOSIZE; stat->rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, sysv_minor(ip->i_df.if_u2.if_rdev)); break; default: if (XFS_IS_REALTIME_INODE(ip)) { /* * If the file blocks are being allocated from a * realtime volume, then return the inode's realtime * extent size or the realtime volume's extent size. 
*/ stat->blksize = xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog; } else stat->blksize = xfs_preferred_iosize(mp); stat->rdev = 0; break; } return 0; } int xfs_setattr_nonsize( struct xfs_inode *ip, struct iattr *iattr, int flags) { xfs_mount_t *mp = ip->i_mount; struct inode *inode = VFS_I(ip); int mask = iattr->ia_valid; xfs_trans_t *tp; int error; uid_t uid = 0, iuid = 0; gid_t gid = 0, igid = 0; struct xfs_dquot *udqp = NULL, *gdqp = NULL; struct xfs_dquot *olddquot1 = NULL, *olddquot2 = NULL; trace_xfs_setattr(ip); if (mp->m_flags & XFS_MOUNT_RDONLY) return XFS_ERROR(EROFS); if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); error = -inode_change_ok(inode, iattr); if (error) return XFS_ERROR(error); ASSERT((mask & ATTR_SIZE) == 0); /* * If disk quotas is on, we make sure that the dquots do exist on disk, * before we start any other transactions. Trying to do this later * is messy. We don't care to take a readlock to look at the ids * in inode here, because we can't hold it across the trans_reserve. * If the IDs do change before we take the ilock, we're covered * because the i_*dquot fields will get updated anyway. */ if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) { uint qflags = 0; if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) { uid = iattr->ia_uid; qflags |= XFS_QMOPT_UQUOTA; } else { uid = ip->i_d.di_uid; } if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) { gid = iattr->ia_gid; qflags |= XFS_QMOPT_GQUOTA; } else { gid = ip->i_d.di_gid; } /* * We take a reference when we initialize udqp and gdqp, * so it is important that we never blindly double trip on * the same variable. See xfs_create() for an example. */ ASSERT(udqp == NULL); ASSERT(gdqp == NULL); error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip), qflags, &udqp, &gdqp); if (error) return error; } tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); if (error) goto out_dqrele; xfs_ilock(ip, XFS_ILOCK_EXCL); /* * Change file ownership. Must be the owner or privileged. */ if (mask & (ATTR_UID|ATTR_GID)) { /* * These IDs could have changed since we last looked at them. * But, we're assured that if the ownership did change * while we didn't have the inode locked, inode's dquot(s) * would have changed also. */ iuid = ip->i_d.di_uid; igid = ip->i_d.di_gid; gid = (mask & ATTR_GID) ? iattr->ia_gid : igid; uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid; /* * Do a quota reservation only if uid/gid is actually * going to change. */ if (XFS_IS_QUOTA_RUNNING(mp) && ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || (XFS_IS_GQUOTA_ON(mp) && igid != gid))) { ASSERT(tp); error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0); if (error) /* out of quota */ goto out_trans_cancel; } } xfs_trans_ijoin(tp, ip, 0); /* * Change file ownership. Must be the owner or privileged. */ if (mask & (ATTR_UID|ATTR_GID)) { /* * CAP_FSETID overrides the following restrictions: * * The set-user-ID and set-group-ID bits of a file will be * cleared upon successful return from chown() */ if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && !capable(CAP_FSETID)) ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); /* * Change the ownerships and register quota modifications * in the transaction. 
*/ if (iuid != uid) { if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) { ASSERT(mask & ATTR_UID); ASSERT(udqp); olddquot1 = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp); } ip->i_d.di_uid = uid; inode->i_uid = uid; } if (igid != gid) { if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { ASSERT(!XFS_IS_PQUOTA_ON(mp)); ASSERT(mask & ATTR_GID); ASSERT(gdqp); olddquot2 = xfs_qm_vop_chown(tp, ip, &ip->i_gdquot, gdqp); } ip->i_d.di_gid = gid; inode->i_gid = gid; } } /* * Change file access modes. */ if (mask & ATTR_MODE) { umode_t mode = iattr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; ip->i_d.di_mode &= S_IFMT; ip->i_d.di_mode |= mode & ~S_IFMT; inode->i_mode &= S_IFMT; inode->i_mode |= mode & ~S_IFMT; } /* * Change file access or modified times. */ if (mask & ATTR_ATIME) { inode->i_atime = iattr->ia_atime; ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; } if (mask & ATTR_CTIME) { inode->i_ctime = iattr->ia_ctime; ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; } if (mask & ATTR_MTIME) { inode->i_mtime = iattr->ia_mtime; ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; } xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); XFS_STATS_INC(xs_ig_attrchg); if (mp->m_flags & XFS_MOUNT_WSYNC) xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); xfs_iunlock(ip, XFS_ILOCK_EXCL); /* * Release any dquot(s) the inode had kept before chown. */ xfs_qm_dqrele(olddquot1); xfs_qm_dqrele(olddquot2); xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); if (error) return XFS_ERROR(error); /* * XXX(hch): Updating the ACL entries is not atomic vs the i_mode * update. We could avoid this with linked transactions * and passing down the transaction pointer all the way * to attr_set. No previous user of the generic * Posix ACL code seems to care about this issue either. */ if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) { error = -xfs_acl_chmod(inode); if (error) return XFS_ERROR(error); } return 0; out_trans_cancel: xfs_trans_cancel(tp, 0); xfs_iunlock(ip, XFS_ILOCK_EXCL); out_dqrele: xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); return error; } /* * Truncate file. Must have write permission and not be a directory. */ int xfs_setattr_size( struct xfs_inode *ip, struct iattr *iattr, int flags) { struct xfs_mount *mp = ip->i_mount; struct inode *inode = VFS_I(ip); int mask = iattr->ia_valid; xfs_off_t oldsize, newsize; struct xfs_trans *tp; int error; uint lock_flags; uint commit_flags = 0; trace_xfs_setattr(ip); if (mp->m_flags & XFS_MOUNT_RDONLY) return XFS_ERROR(EROFS); if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); error = -inode_change_ok(inode, iattr); if (error) return XFS_ERROR(error); ASSERT(S_ISREG(ip->i_d.di_mode)); ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID| ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); lock_flags = XFS_ILOCK_EXCL; if (!(flags & XFS_ATTR_NOLOCK)) lock_flags |= XFS_IOLOCK_EXCL; xfs_ilock(ip, lock_flags); oldsize = inode->i_size; newsize = iattr->ia_size; /* * Short circuit the truncate case for zero length files. */ if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) { if (!(mask & (ATTR_CTIME|ATTR_MTIME))) goto out_unlock; /* * Use the regular setattr path to update the timestamps. 
*/ xfs_iunlock(ip, lock_flags); iattr->ia_valid &= ~ATTR_SIZE; return xfs_setattr_nonsize(ip, iattr, 0); } /* * Make sure that the dquots are attached to the inode. */ error = xfs_qm_dqattach_locked(ip, 0); if (error) goto out_unlock; /* * Now we can make the changes. Before we join the inode to the * transaction, take care of the part of the truncation that must be * done without the inode lock. This needs to be done before joining * the inode to the transaction, because the inode cannot be unlocked * once it is a part of the transaction. */ if (newsize > oldsize) { /* * Do the first part of growing a file: zero any data in the * last block that is beyond the old EOF. We need to do this * before the inode is joined to the transaction to modify * i_size. */ error = xfs_zero_eof(ip, newsize, oldsize); if (error) goto out_unlock; } xfs_iunlock(ip, XFS_ILOCK_EXCL); lock_flags &= ~XFS_ILOCK_EXCL; /* * We are going to log the inode size change in this transaction so * any previous writes that are beyond the on disk EOF and the new * EOF that have not been written out need to be written here. If we * do not write the data out, we expose ourselves to the null files * problem. * * Only flush from the on disk size to the smaller of the in memory * file size or the new size as that's the range we really care about * here and prevents waiting for other data not within the range we * care about here. */ if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) { error = xfs_flush_pages(ip, ip->i_d.di_size, newsize, 0, FI_NONE); if (error) goto out_unlock; } /* * Wait for all direct I/O to complete. */ inode_dio_wait(inode); error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks); if (error) goto out_unlock; tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE); error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); if (error) goto out_trans_cancel; truncate_setsize(inode, newsize); commit_flags = XFS_TRANS_RELEASE_LOG_RES; lock_flags |= XFS_ILOCK_EXCL; xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); /* * Only change the c/mtime if we are changing the size or we are * explicitly asked to change it. This handles the semantic difference * between truncate() and ftruncate() as implemented in the VFS. * * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a * special case where we need to update the times despite not having * these flags set. For all other operations the VFS set these flags * explicitly if it wants a timestamp update. */ if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) { iattr->ia_ctime = iattr->ia_mtime = current_fs_time(inode->i_sb); mask |= ATTR_CTIME | ATTR_MTIME; } /* * The first thing we do is set the size to new_size permanently on * disk. This way we don't have to worry about anyone ever being able * to look at the data being freed even in the face of a crash. * What we're getting around here is the case where we free a block, it * is allocated to another file, it is written to, and then we crash. * If the new data gets written to the file but the log buffers * containing the free and reallocation don't, then we'd end up with * garbage in the blocks being freed. As long as we make the new size * permanent before actually freeing any blocks it doesn't matter if * they get written to. 
*/ ip->i_d.di_size = newsize; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); if (newsize <= oldsize) { error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, newsize); if (error) goto out_trans_abort; /* * Truncated "down", so we're removing references to old data * here - if we delay flushing for a long time, we expose * ourselves unduly to the notorious NULL files problem. So, * we mark this inode and flush it when the file is closed, * and do not wait the usual (long) time for writeout. */ xfs_iflags_set(ip, XFS_ITRUNCATED); } if (mask & ATTR_CTIME) { inode->i_ctime = iattr->ia_ctime; ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; } if (mask & ATTR_MTIME) { inode->i_mtime = iattr->ia_mtime; ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; } xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); XFS_STATS_INC(xs_ig_attrchg); if (mp->m_flags & XFS_MOUNT_WSYNC) xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); out_unlock: if (lock_flags) xfs_iunlock(ip, lock_flags); return error; out_trans_abort: commit_flags |= XFS_TRANS_ABORT; out_trans_cancel: xfs_trans_cancel(tp, commit_flags); goto out_unlock; } STATIC int xfs_vn_setattr( struct dentry *dentry, struct iattr *iattr) { if (iattr->ia_valid & ATTR_SIZE) return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0); return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0); } #define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) /* * Call fiemap helper to fill in user data. * Returns positive errors to xfs_getbmap. */ STATIC int xfs_fiemap_format( void **arg, struct getbmapx *bmv, int *full) { int error; struct fiemap_extent_info *fieinfo = *arg; u32 fiemap_flags = 0; u64 logical, physical, length; /* Do nothing for a hole */ if (bmv->bmv_block == -1LL) return 0; logical = BBTOB(bmv->bmv_offset); physical = BBTOB(bmv->bmv_block); length = BBTOB(bmv->bmv_length); if (bmv->bmv_oflags & BMV_OF_PREALLOC) fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN; else if (bmv->bmv_oflags & BMV_OF_DELALLOC) { fiemap_flags |= FIEMAP_EXTENT_DELALLOC; physical = 0; /* no block yet */ } if (bmv->bmv_oflags & BMV_OF_LAST) fiemap_flags |= FIEMAP_EXTENT_LAST; error = fiemap_fill_next_extent(fieinfo, logical, physical, length, fiemap_flags); if (error > 0) { error = 0; *full = 1; /* user array now full */ } return -error; } STATIC int xfs_vn_fiemap( struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 length) { xfs_inode_t *ip = XFS_I(inode); struct getbmapx bm; int error; error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS); if (error) return error; /* Set up bmap header for xfs internal routine */ bm.bmv_offset = BTOBB(start); /* Special case for whole file */ if (length == FIEMAP_MAX_OFFSET) bm.bmv_length = -1LL; else bm.bmv_length = BTOBB(length); /* We add one because in getbmap world count includes the header */ bm.bmv_count = !fieinfo->fi_extents_max ? 
MAXEXTNUM : fieinfo->fi_extents_max + 1; bm.bmv_count = min_t(__s32, bm.bmv_count, (PAGE_SIZE * 16 / sizeof(struct getbmapx))); bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES; if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) bm.bmv_iflags |= BMV_IF_ATTRFORK; if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC)) bm.bmv_iflags |= BMV_IF_DELALLOC; error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo); if (error) return -error; return 0; } static const struct inode_operations xfs_inode_operations = { .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, .fiemap = xfs_vn_fiemap, }; static const struct inode_operations xfs_dir_inode_operations = { .create = xfs_vn_create, .lookup = xfs_vn_lookup, .link = xfs_vn_link, .unlink = xfs_vn_unlink, .symlink = xfs_vn_symlink, .mkdir = xfs_vn_mkdir, /* * Yes, XFS uses the same method for rmdir and unlink. * * There are some subtle differences deeper in the code, * but we use S_ISDIR to check for those. */ .rmdir = xfs_vn_unlink, .mknod = xfs_vn_mknod, .rename = xfs_vn_rename, .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, }; static const struct inode_operations xfs_dir_ci_inode_operations = { .create = xfs_vn_create, .lookup = xfs_vn_ci_lookup, .link = xfs_vn_link, .unlink = xfs_vn_unlink, .symlink = xfs_vn_symlink, .mkdir = xfs_vn_mkdir, /* * Yes, XFS uses the same method for rmdir and unlink. * * There are some subtle differences deeper in the code, * but we use S_ISDIR to check for those. */ .rmdir = xfs_vn_unlink, .mknod = xfs_vn_mknod, .rename = xfs_vn_rename, .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, }; static const struct inode_operations xfs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = xfs_vn_follow_link, .put_link = xfs_vn_put_link, .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, }; STATIC void xfs_diflags_to_iflags( struct inode *inode, struct xfs_inode *ip) { if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) inode->i_flags |= S_IMMUTABLE; else inode->i_flags &= ~S_IMMUTABLE; if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) inode->i_flags |= S_APPEND; else inode->i_flags &= ~S_APPEND; if (ip->i_d.di_flags & XFS_DIFLAG_SYNC) inode->i_flags |= S_SYNC; else inode->i_flags &= ~S_SYNC; if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME) inode->i_flags |= S_NOATIME; else inode->i_flags &= ~S_NOATIME; } /* * Initialize the Linux inode, set up the operation vectors and * unlock the inode. * * When reading existing inodes from disk this is called directly * from xfs_iget, when creating a new inode it is called from * xfs_ialloc after setting up the inode. * * We are always called with an uninitialised linux inode here. * We need to initialise the necessary fields and take a reference * on it.
*/ void xfs_setup_inode( struct xfs_inode *ip) { struct inode *inode = &ip->i_vnode; inode->i_ino = ip->i_ino; inode->i_state = I_NEW; inode_sb_list_add(inode); /* make the inode look hashed for the writeback code */ hlist_add_fake(&inode->i_hash); inode->i_mode = ip->i_d.di_mode; set_nlink(inode, ip->i_d.di_nlink); inode->i_uid = ip->i_d.di_uid; inode->i_gid = ip->i_d.di_gid; switch (inode->i_mode & S_IFMT) { case S_IFBLK: case S_IFCHR: inode->i_rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, sysv_minor(ip->i_df.if_u2.if_rdev)); break; default: inode->i_rdev = 0; break; } inode->i_generation = ip->i_d.di_gen; i_size_write(inode, ip->i_d.di_size); inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; xfs_diflags_to_iflags(inode, ip); switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_op = &xfs_inode_operations; inode->i_fop = &xfs_file_operations; inode->i_mapping->a_ops = &xfs_address_space_operations; break; case S_IFDIR: if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) inode->i_op = &xfs_dir_ci_inode_operations; else inode->i_op = &xfs_dir_inode_operations; inode->i_fop = &xfs_dir_file_operations; break; case S_IFLNK: inode->i_op = &xfs_symlink_inode_operations; if (!(ip->i_df.if_flags & XFS_IFINLINE)) inode->i_mapping->a_ops = &xfs_address_space_operations; break; default: inode->i_op = &xfs_inode_operations; init_special_inode(inode, inode->i_mode, inode->i_rdev); break; } /* * If there is no attribute fork no ACL can exist on this inode, * and it can't have any file capabilities attached to it either. */ if (!XFS_IFORK_Q(ip)) { inode_has_no_xattr(inode); cache_no_acl(inode); } xfs_iflags_clear(ip, XFS_INEW); barrier(); unlock_new_inode(inode); }
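/*
 * Minimal userspace sketch (illustration only, not part of the kernel file
 * above) showing how the ->fiemap operation implemented by xfs_vn_fiemap is
 * typically reached: through the FS_IOC_FIEMAP ioctl. The path
 * "/mnt/xfs/file" is a placeholder.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(void)
{
	int fd = open("/mnt/xfs/file", O_RDONLY);	/* placeholder path */
	struct fiemap *fm;
	unsigned int i;

	if (fd < 0)
		return 1;

	/* Room for the request header plus 32 extent records. */
	fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
	if (!fm) {
		close(fd);
		return 1;
	}

	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* whole file, as special-cased above */
	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* flush dirty data before mapping */
	fm->fm_extent_count = 32;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		free(fm);
		close(fd);
		return 1;
	}

	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical %llu physical %llu length %llu flags 0x%x\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);

	free(fm);
	close(fd);
	return 0;
}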
gpl-2.0
Kevindeving/android_kernel_lge_gee
drivers/mtd/ubi/kapi.c
4876
23221
/* * Copyright (c) International Business Machines Corp., 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: Artem Bityutskiy (Битюцкий Артём) */ /* This file mostly implements UBI kernel API functions */ #include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/namei.h> #include <linux/fs.h> #include <asm/div64.h> #include "ubi.h" /** * ubi_do_get_device_info - get information about UBI device. * @ubi: UBI device description object * @di: the information is stored here * * This function is the same as 'ubi_get_device_info()', but it assumes the UBI * device is locked and cannot disappear. */ void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di) { di->ubi_num = ubi->ubi_num; di->leb_size = ubi->leb_size; di->leb_start = ubi->leb_start; di->min_io_size = ubi->min_io_size; di->max_write_size = ubi->max_write_size; di->ro_mode = ubi->ro_mode; di->cdev = ubi->cdev.dev; } EXPORT_SYMBOL_GPL(ubi_do_get_device_info); /** * ubi_get_device_info - get information about UBI device. * @ubi_num: UBI device number * @di: the information is stored here * * This function returns %0 in case of success, %-EINVAL if the UBI device * number is invalid, and %-ENODEV if there is no such UBI device. */ int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) { struct ubi_device *ubi; if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) return -EINVAL; ubi = ubi_get_device(ubi_num); if (!ubi) return -ENODEV; ubi_do_get_device_info(ubi, di); ubi_put_device(ubi); return 0; } EXPORT_SYMBOL_GPL(ubi_get_device_info); /** * ubi_do_get_volume_info - get information about UBI volume. * @ubi: UBI device description object * @vol: volume description object * @vi: the information is stored here */ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, struct ubi_volume_info *vi) { vi->vol_id = vol->vol_id; vi->ubi_num = ubi->ubi_num; vi->size = vol->reserved_pebs; vi->used_bytes = vol->used_bytes; vi->vol_type = vol->vol_type; vi->corrupted = vol->corrupted; vi->upd_marker = vol->upd_marker; vi->alignment = vol->alignment; vi->usable_leb_size = vol->usable_leb_size; vi->name_len = vol->name_len; vi->name = vol->name; vi->cdev = vol->cdev.dev; } /** * ubi_get_volume_info - get information about UBI volume. * @desc: volume descriptor * @vi: the information is stored here */ void ubi_get_volume_info(struct ubi_volume_desc *desc, struct ubi_volume_info *vi) { ubi_do_get_volume_info(desc->vol->ubi, desc->vol, vi); } EXPORT_SYMBOL_GPL(ubi_get_volume_info); /** * ubi_open_volume - open UBI volume. * @ubi_num: UBI device number * @vol_id: volume ID * @mode: open mode * * The @mode parameter specifies if the volume should be opened in read-only * mode, read-write mode, or exclusive mode. The exclusive mode guarantees that * nobody else will be able to open this volume. 
UBI allows many volume * readers and one writer at a time. * * If a static volume is being opened for the first time since boot, it will be * checked by this function, which means it will be fully read and the CRC * checksum of each logical eraseblock will be checked. * * This function returns a volume descriptor in case of success and a negative * error code in case of failure. */ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) { int err; struct ubi_volume_desc *desc; struct ubi_device *ubi; struct ubi_volume *vol; dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode); if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) return ERR_PTR(-EINVAL); if (mode != UBI_READONLY && mode != UBI_READWRITE && mode != UBI_EXCLUSIVE) return ERR_PTR(-EINVAL); /* * First of all, we have to get the UBI device to prevent its removal. */ ubi = ubi_get_device(ubi_num); if (!ubi) return ERR_PTR(-ENODEV); if (vol_id < 0 || vol_id >= ubi->vtbl_slots) { err = -EINVAL; goto out_put_ubi; } desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL); if (!desc) { err = -ENOMEM; goto out_put_ubi; } err = -ENODEV; if (!try_module_get(THIS_MODULE)) goto out_free; spin_lock(&ubi->volumes_lock); vol = ubi->volumes[vol_id]; if (!vol) goto out_unlock; err = -EBUSY; switch (mode) { case UBI_READONLY: if (vol->exclusive) goto out_unlock; vol->readers += 1; break; case UBI_READWRITE: if (vol->exclusive || vol->writers > 0) goto out_unlock; vol->writers += 1; break; case UBI_EXCLUSIVE: if (vol->exclusive || vol->writers || vol->readers) goto out_unlock; vol->exclusive = 1; break; } get_device(&vol->dev); vol->ref_count += 1; spin_unlock(&ubi->volumes_lock); desc->vol = vol; desc->mode = mode; mutex_lock(&ubi->ckvol_mutex); if (!vol->checked) { /* This is the first open - check the volume */ err = ubi_check_volume(ubi, vol_id); if (err < 0) { mutex_unlock(&ubi->ckvol_mutex); ubi_close_volume(desc); return ERR_PTR(err); } if (err == 1) { ubi_warn("volume %d on UBI device %d is corrupted", vol_id, ubi->ubi_num); vol->corrupted = 1; } vol->checked = 1; } mutex_unlock(&ubi->ckvol_mutex); return desc; out_unlock: spin_unlock(&ubi->volumes_lock); module_put(THIS_MODULE); out_free: kfree(desc); out_put_ubi: ubi_put_device(ubi); dbg_err("cannot open device %d, volume %d, error %d", ubi_num, vol_id, err); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(ubi_open_volume); /** * ubi_open_volume_nm - open UBI volume by name. * @ubi_num: UBI device number * @name: volume name * @mode: open mode * * This function is similar to 'ubi_open_volume()', but opens a volume by name.
*/ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, int mode) { int i, vol_id = -1, len; struct ubi_device *ubi; struct ubi_volume_desc *ret; dbg_gen("open device %d, volume %s, mode %d", ubi_num, name, mode); if (!name) return ERR_PTR(-EINVAL); len = strnlen(name, UBI_VOL_NAME_MAX + 1); if (len > UBI_VOL_NAME_MAX) return ERR_PTR(-EINVAL); if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) return ERR_PTR(-EINVAL); ubi = ubi_get_device(ubi_num); if (!ubi) return ERR_PTR(-ENODEV); spin_lock(&ubi->volumes_lock); /* Walk all volumes of this UBI device */ for (i = 0; i < ubi->vtbl_slots; i++) { struct ubi_volume *vol = ubi->volumes[i]; if (vol && len == vol->name_len && !strcmp(name, vol->name)) { vol_id = i; break; } } spin_unlock(&ubi->volumes_lock); if (vol_id >= 0) ret = ubi_open_volume(ubi_num, vol_id, mode); else ret = ERR_PTR(-ENODEV); /* * We should put the UBI device even in case of success, because * 'ubi_open_volume()' took a reference as well. */ ubi_put_device(ubi); return ret; } EXPORT_SYMBOL_GPL(ubi_open_volume_nm); /** * ubi_open_volume_path - open UBI volume by its character device node path. * @pathname: volume character device node path * @mode: open mode * * This function is similar to 'ubi_open_volume()', but opens a volume by the * path to its character device node. */ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) { int error, ubi_num, vol_id, mod; struct inode *inode; struct path path; dbg_gen("open volume %s, mode %d", pathname, mode); if (!pathname || !*pathname) return ERR_PTR(-EINVAL); error = kern_path(pathname, LOOKUP_FOLLOW, &path); if (error) return ERR_PTR(error); inode = path.dentry->d_inode; mod = inode->i_mode; ubi_num = ubi_major2num(imajor(inode)); vol_id = iminor(inode) - 1; path_put(&path); if (!S_ISCHR(mod)) return ERR_PTR(-EINVAL); if (vol_id >= 0 && ubi_num >= 0) return ubi_open_volume(ubi_num, vol_id, mode); return ERR_PTR(-ENODEV); } EXPORT_SYMBOL_GPL(ubi_open_volume_path); /** * ubi_close_volume - close UBI volume. * @desc: volume descriptor */ void ubi_close_volume(struct ubi_volume_desc *desc) { struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; dbg_gen("close device %d, volume %d, mode %d", ubi->ubi_num, vol->vol_id, desc->mode); spin_lock(&ubi->volumes_lock); switch (desc->mode) { case UBI_READONLY: vol->readers -= 1; break; case UBI_READWRITE: vol->writers -= 1; break; case UBI_EXCLUSIVE: vol->exclusive = 0; } vol->ref_count -= 1; spin_unlock(&ubi->volumes_lock); kfree(desc); put_device(&vol->dev); ubi_put_device(ubi); module_put(THIS_MODULE); } EXPORT_SYMBOL_GPL(ubi_close_volume); /** * ubi_leb_read - read data. * @desc: volume descriptor * @lnum: logical eraseblock number to read from * @buf: buffer where to store the read data * @offset: offset within the logical eraseblock to read from * @len: how many bytes to read * @check: whether UBI has to check the read data's CRC or not. * * This function reads data from offset @offset of logical eraseblock @lnum and * stores the data at @buf. When reading from static volumes, @check specifies * whether the data has to be checked or not. If yes, the whole logical * eraseblock will be read and its CRC checksum will be checked (i.e., the CRC * checksum is per-eraseblock). So checking may substantially slow down the * read speed. The @check argument is ignored for dynamic volumes. * * In case of success, this function returns zero. In case of failure, this * function returns a negative error code.
* * %-EBADMSG error code is returned: * o for both static and dynamic volumes if the MTD driver has detected a data * integrity problem (unrecoverable ECC checksum mismatch in case of NAND); * o for static volumes in case of data CRC mismatch. * * If the volume is damaged because of an interrupted update this function just * returns immediately with %-EBADF error code. */ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, int len, int check) { struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; int err, vol_id = vol->vol_id; dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset); if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 || lnum >= vol->used_ebs || offset < 0 || len < 0 || offset + len > vol->usable_leb_size) return -EINVAL; if (vol->vol_type == UBI_STATIC_VOLUME) { if (vol->used_ebs == 0) /* Empty static UBI volume */ return 0; if (lnum == vol->used_ebs - 1 && offset + len > vol->last_eb_bytes) return -EINVAL; } if (vol->upd_marker) return -EBADF; if (len == 0) return 0; err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check); if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) { ubi_warn("mark volume %d as corrupted", vol_id); vol->corrupted = 1; } return err; } EXPORT_SYMBOL_GPL(ubi_leb_read); /** * ubi_leb_write - write data. * @desc: volume descriptor * @lnum: logical eraseblock number to write to * @buf: data to write * @offset: offset within the logical eraseblock where to write * @len: how many bytes to write * @dtype: expected data type * * This function writes @len bytes of data from @buf to offset @offset of * logical eraseblock @lnum. The @dtype argument describes expected lifetime of * the data. * * This function takes care of physical eraseblock write failures. If a write * to the physical eraseblock fails, the logical eraseblock is * re-mapped to another physical eraseblock, the data is recovered, and the * write finishes. UBI has a pool of reserved physical eraseblocks for this. * * If all the data were successfully written, zero is returned. If an error * occurred and UBI has not been able to recover from it, this function returns * a negative error code. Note, in case of an error, it is possible that * something was still written to the flash media, but that may be some * garbage. * * If the volume is damaged because of an interrupted update this function just * returns immediately with %-EBADF code. */ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, int offset, int len, int dtype) { struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; int vol_id = vol->vol_id; dbg_gen("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset); if (vol_id < 0 || vol_id >= ubi->vtbl_slots) return -EINVAL; if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) return -EROFS; if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 || offset + len > vol->usable_leb_size || offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1)) return -EINVAL; if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && dtype != UBI_UNKNOWN) return -EINVAL; if (vol->upd_marker) return -EBADF; if (len == 0) return 0; return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype); } EXPORT_SYMBOL_GPL(ubi_leb_write); /** * ubi_leb_change - change logical eraseblock atomically.
* @desc: volume descriptor * @lnum: logical eraseblock number to change * @buf: data to write * @len: how many bytes to write * @dtype: expected data type * * This function changes the contents of a logical eraseblock atomically. @buf * has to contain new logical eraseblock data, and @len - the length of the * data, which has to be aligned. The length may be shorter than the logical * eraseblock size, and the logical eraseblock may be appended to more times * later on. This function guarantees that in case of an unclean reboot the old * contents are preserved. Returns zero in case of success and a negative error * code in case of failure. */ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, int len, int dtype) { struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; int vol_id = vol->vol_id; dbg_gen("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum); if (vol_id < 0 || vol_id >= ubi->vtbl_slots) return -EINVAL; if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) return -EROFS; if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 || len > vol->usable_leb_size || len & (ubi->min_io_size - 1)) return -EINVAL; if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && dtype != UBI_UNKNOWN) return -EINVAL; if (vol->upd_marker) return -EBADF; if (len == 0) return 0; return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype); } EXPORT_SYMBOL_GPL(ubi_leb_change); /** * ubi_leb_erase - erase logical eraseblock. * @desc: volume descriptor * @lnum: logical eraseblock number * * This function un-maps logical eraseblock @lnum and synchronously erases the * corresponding physical eraseblock. Returns zero in case of success and a * negative error code in case of failure. * * If the volume is damaged because of an interrupted update this function just * returns immediately with %-EBADF code. */ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum) { struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; int err; dbg_gen("erase LEB %d:%d", vol->vol_id, lnum); if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) return -EROFS; if (lnum < 0 || lnum >= vol->reserved_pebs) return -EINVAL; if (vol->upd_marker) return -EBADF; err = ubi_eba_unmap_leb(ubi, vol, lnum); if (err) return err; return ubi_wl_flush(ubi); } EXPORT_SYMBOL_GPL(ubi_leb_erase); /** * ubi_leb_unmap - un-map logical eraseblock. * @desc: volume descriptor * @lnum: logical eraseblock number * * This function un-maps logical eraseblock @lnum and schedules the * corresponding physical eraseblock for erasure, so that it will eventually be * physically erased in background. This operation is much faster than the * erase operation. * * Unlike erase, the un-map operation does not guarantee that the logical * eraseblock will contain all 0xFF bytes when UBI is initialized again. For * example, if several logical eraseblocks are un-mapped, and an unclean reboot * happens after this, the logical eraseblocks will not necessarily be * un-mapped again when this MTD device is attached. They may actually be * mapped to the same physical eraseblocks again. So, this function has to be * used with care. * * In other words, when un-mapping a logical eraseblock, UBI does not store * any information about this on the flash media, it just marks the logical * eraseblock as "un-mapped" in RAM. If UBI is detached before the physical * eraseblock is physically erased, it will be mapped again to the same logical * eraseblock when the MTD device is attached again.
* * The main and obvious use-case of this function is when the contents of a * logical eraseblock have to be re-written. Then it is much more efficient to * first un-map it, then write new data, rather than first erase it, then write * new data. Note, once new data has been written to the logical eraseblock, * UBI guarantees that the old contents are gone forever. In other words, if an * unclean reboot happens after the logical eraseblock has been un-mapped and * then written to, it will contain the last written data. * * This function returns zero in case of success and a negative error code in * case of failure. If the volume is damaged because of an interrupted update * this function just returns immediately with %-EBADF code. */ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum) { struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum); if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) return -EROFS; if (lnum < 0 || lnum >= vol->reserved_pebs) return -EINVAL; if (vol->upd_marker) return -EBADF; return ubi_eba_unmap_leb(ubi, vol, lnum); } EXPORT_SYMBOL_GPL(ubi_leb_unmap); /** * ubi_leb_map - map logical eraseblock to a physical eraseblock. * @desc: volume descriptor * @lnum: logical eraseblock number * @dtype: expected data type * * This function maps an un-mapped logical eraseblock @lnum to a physical * eraseblock. This means that after a successful invocation of this * function the logical eraseblock @lnum will be empty (contain only %0xFF * bytes) and be mapped to a physical eraseblock, even if an unclean reboot * happens. * * This function returns zero in case of success, %-EBADF if the volume is * damaged because of an interrupted update, %-EBADMSG if the logical * eraseblock is already mapped, and other negative error codes in case of * other failures. */ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype) { struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; dbg_gen("map LEB %d:%d", vol->vol_id, lnum); if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) return -EROFS; if (lnum < 0 || lnum >= vol->reserved_pebs) return -EINVAL; if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && dtype != UBI_UNKNOWN) return -EINVAL; if (vol->upd_marker) return -EBADF; if (vol->eba_tbl[lnum] >= 0) return -EBADMSG; return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype); } EXPORT_SYMBOL_GPL(ubi_leb_map); /** * ubi_is_mapped - check if logical eraseblock is mapped. * @desc: volume descriptor * @lnum: logical eraseblock number * * This function checks if logical eraseblock @lnum is mapped to a physical * eraseblock. If a logical eraseblock is un-mapped, this does not necessarily * mean it will still be un-mapped after the UBI device is re-attached. The * logical eraseblock may become mapped to the physical eraseblock it was last * mapped to. * * This function returns %1 if the LEB is mapped, %0 if not, and a negative * error code in case of failure. If the volume is damaged because of an * interrupted update this function just returns immediately with %-EBADF error * code. */ int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum) { struct ubi_volume *vol = desc->vol; dbg_gen("test LEB %d:%d", vol->vol_id, lnum); if (lnum < 0 || lnum >= vol->reserved_pebs) return -EINVAL; if (vol->upd_marker) return -EBADF; return vol->eba_tbl[lnum] >= 0; } EXPORT_SYMBOL_GPL(ubi_is_mapped); /** * ubi_sync - synchronize UBI device buffers.
* @ubi_num: UBI device to synchronize * * The underlying MTD device may cache data in hardware or in software. This * function ensures the caches are flushed. Returns zero in case of success and * a negative error code in case of failure. */ int ubi_sync(int ubi_num) { struct ubi_device *ubi; ubi = ubi_get_device(ubi_num); if (!ubi) return -ENODEV; mtd_sync(ubi->mtd); ubi_put_device(ubi); return 0; } EXPORT_SYMBOL_GPL(ubi_sync); BLOCKING_NOTIFIER_HEAD(ubi_notifiers); /** * ubi_register_volume_notifier - register a volume notifier. * @nb: the notifier description object * @ignore_existing: if non-zero, do not send "added" notification for all * already existing volumes * * This function registers a volume notifier, which means that * 'nb->notifier_call()' will be invoked when an UBI volume is created, * removed, re-sized, re-named, or updated. The first argument of the function * is the notification type. The second argument is a pointer to a * &struct ubi_notification object which describes the notification event. * Using UBI API from the volume notifier is prohibited. * * This function returns zero in case of success and a negative error code * in case of failure. */ int ubi_register_volume_notifier(struct notifier_block *nb, int ignore_existing) { int err; err = blocking_notifier_chain_register(&ubi_notifiers, nb); if (err != 0) return err; if (ignore_existing) return 0; /* * We are going to walk all UBI devices and all volumes, and * notify the user about existing volumes by the %UBI_VOLUME_ADDED * event. We have to lock the @ubi_devices_mutex to make sure UBI * devices do not disappear. */ mutex_lock(&ubi_devices_mutex); ubi_enumerate_volumes(nb); mutex_unlock(&ubi_devices_mutex); return err; } EXPORT_SYMBOL_GPL(ubi_register_volume_notifier); /** * ubi_unregister_volume_notifier - unregister the volume notifier. * @nb: the notifier description object * * This function unregisters volume notifier @nb and returns zero in case of * success and a negative error code in case of failure. */ int ubi_unregister_volume_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&ubi_notifiers, nb); } EXPORT_SYMBOL_GPL(ubi_unregister_volume_notifier);
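/*
 * Minimal in-kernel usage sketch of the API exported above (illustration
 * only, not part of kapi.c): a hypothetical GPL module opens volume 0 on
 * UBI device 0 read-only, reads the first 64 bytes of LEB 0, and closes
 * the volume again. The device and volume numbers are placeholders.
 */
#include <linux/err.h>
#include <linux/mtd/ubi.h>

static int example_read_leb0(void)
{
	struct ubi_volume_desc *desc;
	char buf[64];
	int err;

	desc = ubi_open_volume(0, 0, UBI_READONLY);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* LEB 0, offset 0, 64 bytes, no full CRC check of the eraseblock. */
	err = ubi_leb_read(desc, 0, buf, 0, sizeof(buf), 0);

	ubi_close_volume(desc);
	return err;
}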
gpl-2.0
dianlujitao/android_kernel_huawei_msm8610
arch/powerpc/kvm/book3s_exports.c
4876
1116
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <agraf@suse.de> */ #include <linux/export.h> #include <asm/kvm_book3s.h> #ifdef CONFIG_KVM_BOOK3S_64_HV EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline); #else EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline); EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); #ifdef CONFIG_ALTIVEC EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec); #endif #ifdef CONFIG_VSX EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx); #endif #endif
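/*
 * Illustration (not part of the file above) of the pattern used here:
 * EXPORT_SYMBOL_GPL() makes a symbol resolvable by GPL-compatible modules
 * at load time, which is how the KVM modules reach these low-level entry
 * and register-load routines. The provider function below is hypothetical.
 */
#include <linux/export.h>

int example_provider_fn(int x)
{
	return x * 2;	/* stand-in for a real service routine */
}
EXPORT_SYMBOL_GPL(example_provider_fn);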
gpl-2.0
kamarush/android_kernel_lge_hammerhead
arch/sparc/kernel/chmc.c
7436
20646
/* chmc.c: Driver for UltraSPARC-III memory controller. * * Copyright (C) 2001, 2007, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/spitfire.h> #include <asm/chmctrl.h> #include <asm/cpudata.h> #include <asm/oplib.h> #include <asm/prom.h> #include <asm/head.h> #include <asm/io.h> #include <asm/memctrl.h> #define DRV_MODULE_NAME "chmc" #define PFX DRV_MODULE_NAME ": " #define DRV_MODULE_VERSION "0.2" MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("UltraSPARC-III memory controller driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); static int mc_type; #define MC_TYPE_SAFARI 1 #define MC_TYPE_JBUS 2 static dimm_printer_t us3mc_dimm_printer; #define CHMCTRL_NDGRPS 2 #define CHMCTRL_NDIMMS 4 #define CHMC_DIMMS_PER_MC (CHMCTRL_NDGRPS * CHMCTRL_NDIMMS) /* OBP memory-layout property format. */ struct chmc_obp_map { unsigned char dimm_map[144]; unsigned char pin_map[576]; }; #define DIMM_LABEL_SZ 8 struct chmc_obp_mem_layout { /* One max 8-byte string label per DIMM. Usually * this matches the label on the motherboard where * that DIMM resides. */ char dimm_labels[CHMC_DIMMS_PER_MC][DIMM_LABEL_SZ]; /* If symmetric use map[0], else it is * asymmetric and map[1] should be used. */ char symmetric; struct chmc_obp_map map[2]; }; #define CHMCTRL_NBANKS 4 struct chmc_bank_info { struct chmc *p; int bank_id; u64 raw_reg; int valid; int uk; int um; int lk; int lm; int interleave; unsigned long base; unsigned long size; }; struct chmc { struct list_head list; int portid; struct chmc_obp_mem_layout layout_prop; int layout_size; void __iomem *regs; u64 timing_control1; u64 timing_control2; u64 timing_control3; u64 timing_control4; u64 memaddr_control; struct chmc_bank_info logical_banks[CHMCTRL_NBANKS]; }; #define JBUSMC_REGS_SIZE 8 #define JB_MC_REG1_DIMM2_BANK3 0x8000000000000000UL #define JB_MC_REG1_DIMM1_BANK1 0x4000000000000000UL #define JB_MC_REG1_DIMM2_BANK2 0x2000000000000000UL #define JB_MC_REG1_DIMM1_BANK0 0x1000000000000000UL #define JB_MC_REG1_XOR 0x0000010000000000UL #define JB_MC_REG1_ADDR_GEN_2 0x000000e000000000UL #define JB_MC_REG1_ADDR_GEN_2_SHIFT 37 #define JB_MC_REG1_ADDR_GEN_1 0x0000001c00000000UL #define JB_MC_REG1_ADDR_GEN_1_SHIFT 34 #define JB_MC_REG1_INTERLEAVE 0x0000000001800000UL #define JB_MC_REG1_INTERLEAVE_SHIFT 23 #define JB_MC_REG1_DIMM2_PTYPE 0x0000000000200000UL #define JB_MC_REG1_DIMM2_PTYPE_SHIFT 21 #define JB_MC_REG1_DIMM1_PTYPE 0x0000000000100000UL #define JB_MC_REG1_DIMM1_PTYPE_SHIFT 20 #define PART_TYPE_X8 0 #define PART_TYPE_X4 1 #define INTERLEAVE_NONE 0 #define INTERLEAVE_SAME 1 #define INTERLEAVE_INTERNAL 2 #define INTERLEAVE_BOTH 3 #define ADDR_GEN_128MB 0 #define ADDR_GEN_256MB 1 #define ADDR_GEN_512MB 2 #define ADDR_GEN_1GB 3 #define JB_NUM_DIMM_GROUPS 2 #define JB_NUM_DIMMS_PER_GROUP 2 #define JB_NUM_DIMMS (JB_NUM_DIMM_GROUPS * JB_NUM_DIMMS_PER_GROUP) struct jbusmc_obp_map { unsigned char dimm_map[18]; unsigned char pin_map[144]; }; struct jbusmc_obp_mem_layout { /* One max 8-byte string label per DIMM. Usually * this matches the label on the motherboard where * that DIMM resides. */ char dimm_labels[JB_NUM_DIMMS][DIMM_LABEL_SZ]; /* If symmetric use map[0], else it is * asymmetric and map[1] should be used. 
*/ char symmetric; struct jbusmc_obp_map map; char _pad; }; struct jbusmc_dimm_group { struct jbusmc *controller; int index; u64 base_addr; u64 size; }; struct jbusmc { void __iomem *regs; u64 mc_reg_1; u32 portid; struct jbusmc_obp_mem_layout layout; int layout_len; int num_dimm_groups; struct jbusmc_dimm_group dimm_groups[JB_NUM_DIMM_GROUPS]; struct list_head list; }; static DEFINE_SPINLOCK(mctrl_list_lock); static LIST_HEAD(mctrl_list); static void mc_list_add(struct list_head *list) { spin_lock(&mctrl_list_lock); list_add(list, &mctrl_list); spin_unlock(&mctrl_list_lock); } static void mc_list_del(struct list_head *list) { spin_lock(&mctrl_list_lock); list_del_init(list); spin_unlock(&mctrl_list_lock); } #define SYNDROME_MIN -1 #define SYNDROME_MAX 144 /* Convert syndrome code into the way the bits are positioned * on the bus. */ static int syndrome_to_qword_code(int syndrome_code) { if (syndrome_code < 128) syndrome_code += 16; else if (syndrome_code < 128 + 9) syndrome_code -= (128 - 7); else if (syndrome_code < (128 + 9 + 3)) syndrome_code -= (128 + 9 - 4); else syndrome_code -= (128 + 9 + 3); return syndrome_code; } /* All this magic has to do with how a cache line comes over the wire * on Safari and JBUS. A 64-byte line comes over in 1 or more quadword * cycles, each of which transmits ECC/MTAG info as well as the actual * data. */ #define L2_LINE_SIZE 64 #define L2_LINE_ADDR_MSK (L2_LINE_SIZE - 1) #define QW_PER_LINE 4 #define QW_BYTES (L2_LINE_SIZE / QW_PER_LINE) #define QW_BITS 144 #define SAFARI_LAST_BIT (576 - 1) #define JBUS_LAST_BIT (144 - 1) static void get_pin_and_dimm_str(int syndrome_code, unsigned long paddr, int *pin_p, char **dimm_str_p, void *_prop, int base_dimm_offset) { int qword_code = syndrome_to_qword_code(syndrome_code); int cache_line_offset; int offset_inverse; int dimm_map_index; int map_val; if (mc_type == MC_TYPE_JBUS) { struct jbusmc_obp_mem_layout *p = _prop; /* JBUS */ cache_line_offset = qword_code; offset_inverse = (JBUS_LAST_BIT - cache_line_offset); dimm_map_index = offset_inverse / 8; map_val = p->map.dimm_map[dimm_map_index]; map_val = ((map_val >> ((7 - (offset_inverse & 7)))) & 1); *dimm_str_p = p->dimm_labels[base_dimm_offset + map_val]; *pin_p = p->map.pin_map[cache_line_offset]; } else { struct chmc_obp_mem_layout *p = _prop; struct chmc_obp_map *mp; int qword; /* Safari */ if (p->symmetric) mp = &p->map[0]; else mp = &p->map[1]; qword = (paddr & L2_LINE_ADDR_MSK) / QW_BYTES; cache_line_offset = ((3 - qword) * QW_BITS) + qword_code; offset_inverse = (SAFARI_LAST_BIT - cache_line_offset); dimm_map_index = offset_inverse >> 2; map_val = mp->dimm_map[dimm_map_index]; map_val = ((map_val >> ((3 - (offset_inverse & 3)) << 1)) & 0x3); *dimm_str_p = p->dimm_labels[base_dimm_offset + map_val]; *pin_p = mp->pin_map[cache_line_offset]; } } static struct jbusmc_dimm_group *jbusmc_find_dimm_group(unsigned long phys_addr) { struct jbusmc *p; list_for_each_entry(p, &mctrl_list, list) { int i; for (i = 0; i < p->num_dimm_groups; i++) { struct jbusmc_dimm_group *dp = &p->dimm_groups[i]; if (phys_addr < dp->base_addr || (dp->base_addr + dp->size) <= phys_addr) continue; return dp; } } return NULL; } static int jbusmc_print_dimm(int syndrome_code, unsigned long phys_addr, char *buf, int buflen) { struct jbusmc_obp_mem_layout *prop; struct jbusmc_dimm_group *dp; struct jbusmc *p; int first_dimm; dp = jbusmc_find_dimm_group(phys_addr); if (dp == NULL || syndrome_code < SYNDROME_MIN || syndrome_code > SYNDROME_MAX) { buf[0] = '?'; buf[1] = '?'; buf[2] = '?';
buf[3] = '\0'; return 0; } p = dp->controller; prop = &p->layout; first_dimm = dp->index * JB_NUM_DIMMS_PER_GROUP; if (syndrome_code != SYNDROME_MIN) { char *dimm_str; int pin; get_pin_and_dimm_str(syndrome_code, phys_addr, &pin, &dimm_str, prop, first_dimm); sprintf(buf, "%s, pin %3d", dimm_str, pin); } else { int dimm; /* Multi-bit error, we just dump out all the * dimm labels associated with this dimm group. */ for (dimm = 0; dimm < JB_NUM_DIMMS_PER_GROUP; dimm++) { sprintf(buf, "%s ", prop->dimm_labels[first_dimm + dimm]); buf += strlen(buf); } } return 0; } static u64 __devinit jbusmc_dimm_group_size(u64 base, const struct linux_prom64_registers *mem_regs, int num_mem_regs) { u64 max = base + (8UL * 1024 * 1024 * 1024); u64 max_seen = base; int i; for (i = 0; i < num_mem_regs; i++) { const struct linux_prom64_registers *ent; u64 this_base; u64 this_end; ent = &mem_regs[i]; this_base = ent->phys_addr; this_end = this_base + ent->reg_size; if (base < this_base || base >= this_end) continue; if (this_end > max) this_end = max; if (this_end > max_seen) max_seen = this_end; } return max_seen - base; } static void __devinit jbusmc_construct_one_dimm_group(struct jbusmc *p, unsigned long index, const struct linux_prom64_registers *mem_regs, int num_mem_regs) { struct jbusmc_dimm_group *dp = &p->dimm_groups[index]; dp->controller = p; dp->index = index; dp->base_addr = (p->portid * (64UL * 1024 * 1024 * 1024)); dp->base_addr += (index * (8UL * 1024 * 1024 * 1024)); dp->size = jbusmc_dimm_group_size(dp->base_addr, mem_regs, num_mem_regs); } static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p, const struct linux_prom64_registers *mem_regs, int num_mem_regs) { if (p->mc_reg_1 & JB_MC_REG1_DIMM1_BANK0) { jbusmc_construct_one_dimm_group(p, 0, mem_regs, num_mem_regs); p->num_dimm_groups++; } if (p->mc_reg_1 & JB_MC_REG1_DIMM2_BANK2) { jbusmc_construct_one_dimm_group(p, 1, mem_regs, num_mem_regs); p->num_dimm_groups++; } } static int __devinit jbusmc_probe(struct platform_device *op) { const struct linux_prom64_registers *mem_regs; struct device_node *mem_node; int err, len, num_mem_regs; struct jbusmc *p; const u32 *prop; const void *ml; err = -ENODEV; mem_node = of_find_node_by_path("/memory"); if (!mem_node) { printk(KERN_ERR PFX "Cannot find /memory node.\n"); goto out; } mem_regs = of_get_property(mem_node, "reg", &len); if (!mem_regs) { printk(KERN_ERR PFX "Cannot get reg property of /memory node.\n"); goto out; } num_mem_regs = len / sizeof(*mem_regs); err = -ENOMEM; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { printk(KERN_ERR PFX "Cannot allocate struct jbusmc.\n"); goto out; } INIT_LIST_HEAD(&p->list); err = -ENODEV; prop = of_get_property(op->dev.of_node, "portid", &len); if (!prop || len != 4) { printk(KERN_ERR PFX "Cannot find portid.\n"); goto out_free; } p->portid = *prop; prop = of_get_property(op->dev.of_node, "memory-control-register-1", &len); if (!prop || len != 8) { printk(KERN_ERR PFX "Cannot get memory control register 1.\n"); goto out_free; } p->mc_reg_1 = ((u64)prop[0] << 32) | (u64) prop[1]; err = -ENOMEM; p->regs = of_ioremap(&op->resource[0], 0, JBUSMC_REGS_SIZE, "jbusmc"); if (!p->regs) { printk(KERN_ERR PFX "Cannot map jbusmc regs.\n"); goto out_free; } err = -ENODEV; ml = of_get_property(op->dev.of_node, "memory-layout", &p->layout_len); if (!ml) { printk(KERN_ERR PFX "Cannot get memory layout property.\n"); goto out_iounmap; } if (p->layout_len > sizeof(p->layout)) { printk(KERN_ERR PFX "Unexpected memory-layout size %d\n", p->layout_len); goto 
out_iounmap; } memcpy(&p->layout, ml, p->layout_len); jbusmc_construct_dimm_groups(p, mem_regs, num_mem_regs); mc_list_add(&p->list); printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %s\n", op->dev.of_node->full_name); dev_set_drvdata(&op->dev, p); err = 0; out: return err; out_iounmap: of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE); out_free: kfree(p); goto out; } /* Does BANK decode PHYS_ADDR? */ static int chmc_bank_match(struct chmc_bank_info *bp, unsigned long phys_addr) { unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT; unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT; /* Bank must be enabled to match. */ if (bp->valid == 0) return 0; /* Would BANK match upper bits? */ upper_bits ^= bp->um; /* What bits are different? */ upper_bits = ~upper_bits; /* Invert. */ upper_bits |= bp->uk; /* What bits don't matter for matching? */ upper_bits = ~upper_bits; /* Invert. */ if (upper_bits) return 0; /* Would BANK match lower bits? */ lower_bits ^= bp->lm; /* What bits are different? */ lower_bits = ~lower_bits; /* Invert. */ lower_bits |= bp->lk; /* What bits don't matter for matching? */ lower_bits = ~lower_bits; /* Invert. */ if (lower_bits) return 0; /* I always knew you'd be the one. */ return 1; } /* Given PHYS_ADDR, search memory controller banks for a match. */ static struct chmc_bank_info *chmc_find_bank(unsigned long phys_addr) { struct chmc *p; list_for_each_entry(p, &mctrl_list, list) { int bank_no; for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) { struct chmc_bank_info *bp; bp = &p->logical_banks[bank_no]; if (chmc_bank_match(bp, phys_addr)) return bp; } } return NULL; } /* This is the main purpose of this driver. */ static int chmc_print_dimm(int syndrome_code, unsigned long phys_addr, char *buf, int buflen) { struct chmc_bank_info *bp; struct chmc_obp_mem_layout *prop; int bank_in_controller, first_dimm; bp = chmc_find_bank(phys_addr); if (bp == NULL || syndrome_code < SYNDROME_MIN || syndrome_code > SYNDROME_MAX) { buf[0] = '?'; buf[1] = '?'; buf[2] = '?'; buf[3] = '\0'; return 0; } prop = &bp->p->layout_prop; bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1); first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1)); first_dimm *= CHMCTRL_NDIMMS; if (syndrome_code != SYNDROME_MIN) { char *dimm_str; int pin; get_pin_and_dimm_str(syndrome_code, phys_addr, &pin, &dimm_str, prop, first_dimm); sprintf(buf, "%s, pin %3d", dimm_str, pin); } else { int dimm; /* Multi-bit error, we just dump out all the * dimm labels associated with this bank. */ for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) { sprintf(buf, "%s ", prop->dimm_labels[first_dimm + dimm]); buf += strlen(buf); } } return 0; } /* Accessing the registers is slightly complicated. If you want * to get at the memory controller which is on the same processor * the code is executing, you must use special ASI load/store else * you go through the global mapping. 
*/ static u64 chmc_read_mcreg(struct chmc *p, unsigned long offset) { unsigned long ret, this_cpu; preempt_disable(); this_cpu = real_hard_smp_processor_id(); if (p->portid == this_cpu) { __asm__ __volatile__("ldxa [%1] %2, %0" : "=r" (ret) : "r" (offset), "i" (ASI_MCU_CTRL_REG)); } else { __asm__ __volatile__("ldxa [%1] %2, %0" : "=r" (ret) : "r" (p->regs + offset), "i" (ASI_PHYS_BYPASS_EC_E)); } preempt_enable(); return ret; } #if 0 /* currently unused */ static void chmc_write_mcreg(struct chmc *p, unsigned long offset, u64 val) { if (p->portid == smp_processor_id()) { __asm__ __volatile__("stxa %0, [%1] %2" : : "r" (val), "r" (offset), "i" (ASI_MCU_CTRL_REG)); } else { __asm__ __volatile__("stxa %0, [%1] %2" : : "r" (val), "r" (p->regs + offset), "i" (ASI_PHYS_BYPASS_EC_E)); } } #endif static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 val) { struct chmc_bank_info *bp = &p->logical_banks[which_bank]; bp->p = p; bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank; bp->raw_reg = val; bp->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT; bp->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT; bp->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT; bp->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT; bp->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT; bp->base = (bp->um); bp->base &= ~(bp->uk); bp->base <<= PA_UPPER_BITS_SHIFT; switch(bp->lk) { case 0xf: default: bp->interleave = 1; break; case 0xe: bp->interleave = 2; break; case 0xc: bp->interleave = 4; break; case 0x8: bp->interleave = 8; break; case 0x0: bp->interleave = 16; break; } /* UK[10] is reserved, and UK[11] is not set for the SDRAM * bank size definition. */ bp->size = (((unsigned long)bp->uk & ((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT; bp->size /= bp->interleave; } static void chmc_fetch_decode_regs(struct chmc *p) { if (p->layout_size == 0) return; chmc_interpret_one_decode_reg(p, 0, chmc_read_mcreg(p, CHMCTRL_DECODE1)); chmc_interpret_one_decode_reg(p, 1, chmc_read_mcreg(p, CHMCTRL_DECODE2)); chmc_interpret_one_decode_reg(p, 2, chmc_read_mcreg(p, CHMCTRL_DECODE3)); chmc_interpret_one_decode_reg(p, 3, chmc_read_mcreg(p, CHMCTRL_DECODE4)); } static int __devinit chmc_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; unsigned long ver; const void *pval; int len, portid; struct chmc *p; int err; err = -ENODEV; __asm__ ("rdpr %%ver, %0" : "=r" (ver)); if ((ver >> 32UL) == __JALAPENO_ID || (ver >> 32UL) == __SERRANO_ID) goto out; portid = of_getintprop_default(dp, "portid", -1); if (portid == -1) goto out; pval = of_get_property(dp, "memory-layout", &len); if (pval && len > sizeof(p->layout_prop)) { printk(KERN_ERR PFX "Unexpected memory-layout property " "size %d.\n", len); goto out; } err = -ENOMEM; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { printk(KERN_ERR PFX "Could not allocate struct chmc.\n"); goto out; } p->portid = portid; p->layout_size = len; if (!pval) p->layout_size = 0; else memcpy(&p->layout_prop, pval, len); p->regs = of_ioremap(&op->resource[0], 0, 0x48, "chmc"); if (!p->regs) { printk(KERN_ERR PFX "Could not map registers.\n"); goto out_free; } if (p->layout_size != 0UL) { p->timing_control1 = chmc_read_mcreg(p, CHMCTRL_TCTRL1); p->timing_control2 = chmc_read_mcreg(p, CHMCTRL_TCTRL2); p->timing_control3 = chmc_read_mcreg(p, CHMCTRL_TCTRL3); p->timing_control4 = chmc_read_mcreg(p, CHMCTRL_TCTRL4); p->memaddr_control = chmc_read_mcreg(p, CHMCTRL_MACTRL); } chmc_fetch_decode_regs(p); mc_list_add(&p->list); printk(KERN_INFO PFX 
"UltraSPARC-III memory controller at %s [%s]\n", dp->full_name, (p->layout_size ? "ACTIVE" : "INACTIVE")); dev_set_drvdata(&op->dev, p); err = 0; out: return err; out_free: kfree(p); goto out; } static int __devinit us3mc_probe(struct platform_device *op) { if (mc_type == MC_TYPE_SAFARI) return chmc_probe(op); else if (mc_type == MC_TYPE_JBUS) return jbusmc_probe(op); return -ENODEV; } static void __devexit chmc_destroy(struct platform_device *op, struct chmc *p) { list_del(&p->list); of_iounmap(&op->resource[0], p->regs, 0x48); kfree(p); } static void __devexit jbusmc_destroy(struct platform_device *op, struct jbusmc *p) { mc_list_del(&p->list); of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE); kfree(p); } static int __devexit us3mc_remove(struct platform_device *op) { void *p = dev_get_drvdata(&op->dev); if (p) { if (mc_type == MC_TYPE_SAFARI) chmc_destroy(op, p); else if (mc_type == MC_TYPE_JBUS) jbusmc_destroy(op, p); } return 0; } static const struct of_device_id us3mc_match[] = { { .name = "memory-controller", }, {}, }; MODULE_DEVICE_TABLE(of, us3mc_match); static struct platform_driver us3mc_driver = { .driver = { .name = "us3mc", .owner = THIS_MODULE, .of_match_table = us3mc_match, }, .probe = us3mc_probe, .remove = __devexit_p(us3mc_remove), }; static inline bool us3mc_platform(void) { if (tlb_type == cheetah || tlb_type == cheetah_plus) return true; return false; } static int __init us3mc_init(void) { unsigned long ver; int ret; if (!us3mc_platform()) return -ENODEV; __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); if ((ver >> 32UL) == __JALAPENO_ID || (ver >> 32UL) == __SERRANO_ID) { mc_type = MC_TYPE_JBUS; us3mc_dimm_printer = jbusmc_print_dimm; } else { mc_type = MC_TYPE_SAFARI; us3mc_dimm_printer = chmc_print_dimm; } ret = register_dimm_printer(us3mc_dimm_printer); if (!ret) { ret = platform_driver_register(&us3mc_driver); if (ret) unregister_dimm_printer(us3mc_dimm_printer); } return ret; } static void __exit us3mc_cleanup(void) { if (us3mc_platform()) { unregister_dimm_printer(us3mc_dimm_printer); platform_driver_unregister(&us3mc_driver); } } module_init(us3mc_init); module_exit(us3mc_cleanup);
gpl-2.0
sakuraba001/android_kernel_samsung_klteactive
drivers/staging/tidspbridge/dynload/reloc.c
8460
14023
/* * reloc.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include "header.h" #if TMS32060 /* the magic symbol for the start of BSS */ static const char bsssymbol[] = { ".bss" }; #endif #if TMS32060 #include "reloc_table_c6000.c" #endif #if TMS32060 /* From coff.h - ignore these relocation operations */ #define R_C60ALIGN 0x76 /* C60: Alignment info for compressor */ #define R_C60FPHEAD 0x77 /* C60: Explicit assembly directive */ #define R_C60NOCMP 0x100 /* C60: Don't compress this code scn */ #endif /************************************************************************** * Procedure dload_unpack * * Parameters: * data pointer to storage unit containing lowest host address of * image data * fieldsz Size of bit field, 0 < fieldsz <= sizeof(rvalue)*BITS_PER_AU * offset Offset from LSB, 0 <= offset < BITS_PER_AU * sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY) * * Effect: * Extracts the specified field and returns it. ************************************************************************* */ rvalue dload_unpack(struct dload_state *dlthis, tgt_au_t * data, int fieldsz, int offset, unsigned sgn) { register rvalue objval; register int shift, direction; register tgt_au_t *dp = data; fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value */ /* * collect up enough bits to contain the desired field */ if (TARGET_BIG_ENDIAN) { dp += (fieldsz + offset) >> LOG_TGTAU_BITS; direction = -1; } else direction = 1; objval = *dp >> offset; shift = TGTAU_BITS - offset; while (shift <= fieldsz) { dp += direction; objval += (rvalue) *dp << shift; shift += TGTAU_BITS; } /* * sign or zero extend the value appropriately */ if (sgn == ROP_UNS) objval &= (2 << fieldsz) - 1; else { shift = sizeof(rvalue) * BITS_PER_AU - 1 - fieldsz; objval = (objval << shift) >> shift; } return objval; } /* dload_unpack */ /************************************************************************** * Procedure dload_repack * * Parameters: * val Value to insert * data Pointer to storage unit containing lowest host address of * image data * fieldsz Size of bit field, 0 < fieldsz <= sizeof(rvalue)*BITS_PER_AU * offset Offset from LSB, 0 <= offset < BITS_PER_AU * sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY) * * Effect: * Stuffs the specified value in the specified field. Returns 0 for * success * or 1 if the value will not fit in the specified field according to the * specified signedness rule. 
************************************************************************* */ static const unsigned char ovf_limit[] = { 1, 2, 2 }; int dload_repack(struct dload_state *dlthis, rvalue val, tgt_au_t * data, int fieldsz, int offset, unsigned sgn) { register urvalue objval, mask; register int shift, direction; register tgt_au_t *dp = data; fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value */ /* clip the bits */ mask = (2UL << fieldsz) - 1; objval = (val & mask); /* * store the bits through the specified mask */ if (TARGET_BIG_ENDIAN) { dp += (fieldsz + offset) >> LOG_TGTAU_BITS; direction = -1; } else direction = 1; /* insert LSBs */ *dp = (*dp & ~(mask << offset)) + (objval << offset); shift = TGTAU_BITS - offset; /* align mask and objval with AU boundary */ objval >>= shift; mask >>= shift; while (mask) { dp += direction; *dp = (*dp & ~mask) + objval; objval >>= TGTAU_BITS; mask >>= TGTAU_BITS; } /* * check for overflow */ if (sgn) { unsigned tmp = (val >> fieldsz) + (sgn & 0x1); if (tmp > ovf_limit[sgn - 1]) return 1; } return 0; } /* dload_repack */ /* lookup table for the scaling amount in a C6x instruction */ #if TMS32060 #define SCALE_BITS 4 /* there are 4 bits in the scale field */ #define SCALE_MASK 0x7 /* we really only use the bottom 3 bits */ static const u8 c60_scale[SCALE_MASK + 1] = { 1, 0, 0, 0, 1, 1, 2, 2 }; #endif /************************************************************************** * Procedure dload_relocate * * Parameters: * data Pointer to base of image data * rp Pointer to relocation operation * * Effect: * Performs the specified relocation operation ************************************************************************* */ void dload_relocate(struct dload_state *dlthis, tgt_au_t * data, struct reloc_record_t *rp, bool *tramps_generated, bool second_pass) { rvalue val, reloc_amt, orig_val = 0; unsigned int fieldsz = 0; unsigned int offset = 0; unsigned int reloc_info = 0; unsigned int reloc_action = 0; register int rx = 0; rvalue *stackp = NULL; int top; struct local_symbol *svp = NULL; #ifdef RFV_SCALE unsigned int scale = 0; #endif struct image_packet_t *img_pkt = NULL; /* The image packet data struct is only used during first pass * relocation in the event that a trampoline is needed. 2nd pass * relocation doesn't guarantee that data is coming from an * image_packet_t structure. See cload.c, dload_data for how img_data is * set. If that changes this needs to be updated!!! */ if (second_pass == false) img_pkt = (struct image_packet_t *)((u8 *) data - sizeof(struct image_packet_t)); rx = HASH_FUNC(rp->TYPE); while (rop_map1[rx] != rp->TYPE) { rx = HASH_L(rop_map2[rx]); if (rx < 0) { #if TMS32060 switch (rp->TYPE) { case R_C60ALIGN: case R_C60NOCMP: case R_C60FPHEAD: /* Ignore these reloc types and return */ break; default: /* Unknown reloc type, print error and return */ dload_error(dlthis, "Bad coff operator 0x%x", rp->TYPE); } #else dload_error(dlthis, "Bad coff operator 0x%x", rp->TYPE); #endif return; } } rx = HASH_I(rop_map2[rx]); if ((rx < (sizeof(rop_action) / sizeof(u16))) && (rx < (sizeof(rop_info) / sizeof(u16))) && (rx > 0)) { reloc_action = rop_action[rx]; reloc_info = rop_info[rx]; } else { dload_error(dlthis, "Buffer Overflow - Array Index Out " "of Bounds"); } /* Compute the relocation amount for the referenced symbol, if any */ reloc_amt = rp->UVAL; if (RFV_SYM(reloc_info)) { /* relocation uses a symbol reference */ /* If this is first pass, use the module local symbol table, * else use the trampoline symbol table. 
*/ if (second_pass == false) { if ((u32) rp->SYMNDX < dlthis->dfile_hdr.df_no_syms) { /* real symbol reference */ svp = &dlthis->local_symtab[rp->SYMNDX]; reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ? svp->delta : svp->value; } /* reloc references current section */ else if (rp->SYMNDX == -1) { reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ? dlthis->delta_runaddr : dlthis->image_secn->run_addr; } } } /* relocation uses a symbol reference */ /* Handle stack adjustment */ val = 0; top = RFV_STK(reloc_info); if (top) { top += dlthis->relstkidx - RSTK_UOP; if (top >= STATIC_EXPR_STK_SIZE) { dload_error(dlthis, "Expression stack overflow in %s at offset " FMT_UI32, dlthis->image_secn->name, rp->vaddr + dlthis->image_offset); return; } val = dlthis->relstk[dlthis->relstkidx]; dlthis->relstkidx = top; stackp = &dlthis->relstk[top]; } /* Derive field position and size, if we need them */ if (reloc_info & ROP_RW) { /* read or write action in our future */ fieldsz = RFV_WIDTH(reloc_action); if (fieldsz) { /* field info from table */ offset = RFV_POSN(reloc_action); if (TARGET_BIG_ENDIAN) /* make sure vaddr is the lowest target * address containing bits */ rp->vaddr += RFV_BIGOFF(reloc_info); } else { /* field info from relocation op */ fieldsz = rp->FIELDSZ; offset = rp->OFFSET; if (TARGET_BIG_ENDIAN) /* make sure vaddr is the lowest target address containing bits */ rp->vaddr += (rp->WORDSZ - offset - fieldsz) >> LOG_TARGET_AU_BITS; } data = (tgt_au_t *) ((char *)data + TADDR_TO_HOST(rp->vaddr)); /* compute lowest host location of referenced data */ #if BITS_PER_AU > TARGET_AU_BITS /* conversion from target address to host address may lose address bits; add loss to offset */ if (TARGET_BIG_ENDIAN) { offset += -((rp->vaddr << LOG_TARGET_AU_BITS) + offset + fieldsz) & (BITS_PER_AU - TARGET_AU_BITS); } else { offset += (rp->vaddr << LOG_TARGET_AU_BITS) & (BITS_PER_AU - 1); } #endif #ifdef RFV_SCALE scale = RFV_SCALE(reloc_info); #endif } /* read the object value from the current image, if so ordered */ if (reloc_info & ROP_R) { /* relocation reads current image value */ val = dload_unpack(dlthis, data, fieldsz, offset, RFV_SIGN(reloc_info)); /* Save off the original value in case the relo overflows and * we can trampoline it. */ orig_val = val; #ifdef RFV_SCALE val <<= scale; #endif } /* perform the necessary arithmetic */ switch (RFV_ACTION(reloc_action)) { /* relocation actions */ case RACT_VAL: break; case RACT_ASGN: val = reloc_amt; break; case RACT_ADD: val += reloc_amt; break; case RACT_PCR: /*----------------------------------------------------------- * Handle special cases of jumping from absolute sections * (special reloc type) or to absolute destination * (symndx == -1). In either case, set the appropriate * relocation amount to 0. 
*----------------------------------------------------------- */ if (rp->SYMNDX == -1) reloc_amt = 0; val += reloc_amt - dlthis->delta_runaddr; break; case RACT_ADDISP: val += rp->R_DISP + reloc_amt; break; case RACT_ASGPC: val = dlthis->image_secn->run_addr + reloc_amt; break; case RACT_PLUS: if (stackp != NULL) val += *stackp; break; case RACT_SUB: if (stackp != NULL) val = *stackp - val; break; case RACT_NEG: val = -val; break; case RACT_MPY: if (stackp != NULL) val *= *stackp; break; case RACT_DIV: if (stackp != NULL) val = *stackp / val; break; case RACT_MOD: if (stackp != NULL) val = *stackp % val; break; case RACT_SR: if (val >= sizeof(rvalue) * BITS_PER_AU) val = 0; else if (stackp != NULL) val = (urvalue) *stackp >> val; break; case RACT_ASR: if (val >= sizeof(rvalue) * BITS_PER_AU) val = sizeof(rvalue) * BITS_PER_AU - 1; else if (stackp != NULL) val = *stackp >> val; break; case RACT_SL: if (val >= sizeof(rvalue) * BITS_PER_AU) val = 0; else if (stackp != NULL) val = *stackp << val; break; case RACT_AND: if (stackp != NULL) val &= *stackp; break; case RACT_OR: if (stackp != NULL) val |= *stackp; break; case RACT_XOR: if (stackp != NULL) val ^= *stackp; break; case RACT_NOT: val = ~val; break; #if TMS32060 case RACT_C6SECT: /* actually needed address of secn containing symbol */ if (svp != NULL) { if (rp->SYMNDX >= 0) if (svp->secnn > 0) reloc_amt = dlthis->ldr_sections [svp->secnn - 1].run_addr; } /* !!! FALL THRU !!! */ case RACT_C6BASE: if (dlthis->bss_run_base == 0) { struct dynload_symbol *symp; symp = dlthis->mysym->find_matching_symbol (dlthis->mysym, bsssymbol); /* lookup value of global BSS base */ if (symp) dlthis->bss_run_base = symp->value; else dload_error(dlthis, "Global BSS base referenced in %s " "offset" FMT_UI32 " but not " "defined", dlthis->image_secn->name, rp->vaddr + dlthis->image_offset); } reloc_amt -= dlthis->bss_run_base; /* !!! FALL THRU !!! */ case RACT_C6DSPL: /* scale factor determined by 3 LSBs of field */ scale = c60_scale[val & SCALE_MASK]; offset += SCALE_BITS; fieldsz -= SCALE_BITS; val >>= SCALE_BITS; /* ignore the scale field hereafter */ val <<= scale; val += reloc_amt; /* do the usual relocation */ if (((1 << scale) - 1) & val) dload_error(dlthis, "Unaligned reference in %s offset " FMT_UI32, dlthis->image_secn->name, rp->vaddr + dlthis->image_offset); break; #endif } /* relocation actions */ /* * Put back result as required */ if (reloc_info & ROP_W) { /* relocation writes image value */ #ifdef RFV_SCALE val >>= scale; #endif if (dload_repack(dlthis, val, data, fieldsz, offset, RFV_SIGN(reloc_info))) { /* Check to see if this relo can be trampolined, * but only in first phase relocation. 2nd phase * relocation cannot trampoline. */ if ((second_pass == false) && (dload_tramp_avail(dlthis, rp) == true)) { /* Before generating the trampoline, restore * the value to its original so the 2nd pass * relo will work. 
*/ dload_repack(dlthis, orig_val, data, fieldsz, offset, RFV_SIGN(reloc_info)); if (!dload_tramp_generate(dlthis, (dlthis->image_secn - dlthis->ldr_sections), dlthis->image_offset, img_pkt, rp)) { dload_error(dlthis, "Failed to " "generate trampoline for " "bit overflow"); dload_error(dlthis, "Relocation val " FMT_UI32 " overflows %d bits in %s " "offset " FMT_UI32, val, fieldsz, dlthis->image_secn->name, dlthis->image_offset + rp->vaddr); } else *tramps_generated = true; } else { dload_error(dlthis, "Relocation value " FMT_UI32 " overflows %d bits in %s" " offset " FMT_UI32, val, fieldsz, dlthis->image_secn->name, dlthis->image_offset + rp->vaddr); } } } else if (top) *stackp = val; } /* reloc_value */
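/*
 * A self-contained sketch of the little-endian gather loop at the heart
 * of dload_unpack() above, specialized to 8-bit addressable units and a
 * 32-bit rvalue purely for illustration.  The real loader parametrizes
 * the unit width via TGTAU_BITS/BITS_PER_AU, also supports big-endian
 * images, and uses the same "fieldsz -= 1" trick to avoid a full-width
 * shift of a full-width value.
 */
#include <stdint.h>
#include <stdio.h>

static int32_t toy_unpack(const uint8_t *au, int fieldsz, int offset, int sgn)
{
	int32_t objval = au[0] >> offset;
	int shift = 8 - offset;

	fieldsz -= 1;			/* avoid a 32-bit shift of a 32-bit value */
	while (shift <= fieldsz) {	/* pull in bits from the following units */
		au++;
		objval += (int32_t)*au << shift;
		shift += 8;
	}
	if (!sgn)			/* unsigned: mask to the field width */
		return objval & ((2 << fieldsz) - 1);
	shift = 31 - fieldsz;		/* signed: shift up, arithmetic shift back */
	return (objval << shift) >> shift;
}

int main(void)
{
	const uint8_t img[] = { 0xd0, 0x0f };

	/* 8-bit signed field starting 4 bits in: 0xfd, i.e. -3 */
	printf("%d\n", toy_unpack(img, 8, 4, 1));
	return 0;
}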
gpl-2.0
davros-/elite_kernel_jf
drivers/media/dvb/frontends/cx22702.c
8716
14428
/* Conexant 22702 DVB OFDM demodulator driver based on: Alps TDMB7 DVB OFDM demodulator driver Copyright (C) 2001-2002 Convergence Integrated Media GmbH Holger Waechtler <holger@convergence.de> Copyright (C) 2004 Steven Toth <stoth@linuxtv.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/delay.h> #include "dvb_frontend.h" #include "cx22702.h" struct cx22702_state { struct i2c_adapter *i2c; /* configuration settings */ const struct cx22702_config *config; struct dvb_frontend frontend; /* previous uncorrected block counter */ u8 prevUCBlocks; }; static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Enable verbose debug messages"); #define dprintk if (debug) printk /* Register values to initialise the demod */ static const u8 init_tab[] = { 0x00, 0x00, /* Stop acquisition */ 0x0B, 0x06, 0x09, 0x01, 0x0D, 0x41, 0x16, 0x32, 0x20, 0x0A, 0x21, 0x17, 0x24, 0x3e, 0x26, 0xff, 0x27, 0x10, 0x28, 0x00, 0x29, 0x00, 0x2a, 0x10, 0x2b, 0x00, 0x2c, 0x10, 0x2d, 0x00, 0x48, 0xd4, 0x49, 0x56, 0x6b, 0x1e, 0xc8, 0x02, 0xf9, 0x00, 0xfa, 0x00, 0xfb, 0x00, 0xfc, 0x00, 0xfd, 0x00, }; static int cx22702_writereg(struct cx22702_state *state, u8 reg, u8 data) { int ret; u8 buf[] = { reg, data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; ret = i2c_transfer(state->i2c, &msg, 1); if (unlikely(ret != 1)) { printk(KERN_ERR "%s: error (reg == 0x%02x, val == 0x%02x, ret == %i)\n", __func__, reg, data, ret); return -1; } return 0; } static u8 cx22702_readreg(struct cx22702_state *state, u8 reg) { int ret; u8 data; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = &reg, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = &data, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); if (unlikely(ret != 2)) { printk(KERN_ERR "%s: error (reg == 0x%02x, ret == %i)\n", __func__, reg, ret); return 0; } return data; } static int cx22702_set_inversion(struct cx22702_state *state, int inversion) { u8 val; val = cx22702_readreg(state, 0x0C); switch (inversion) { case INVERSION_AUTO: return -EOPNOTSUPP; case INVERSION_ON: val |= 0x01; break; case INVERSION_OFF: val &= 0xfe; break; default: return -EINVAL; } return cx22702_writereg(state, 0x0C, val); } /* Retrieve the demod settings */ static int cx22702_get_tps(struct cx22702_state *state, struct dtv_frontend_properties *p) { u8 val; /* Make sure the TPS regs are valid */ if (!(cx22702_readreg(state, 0x0A) & 0x20)) return -EAGAIN; val = cx22702_readreg(state, 0x01); switch ((val & 0x18) >> 3) { case 0: p->modulation = QPSK; break; case 1: p->modulation = QAM_16; break; case 2: p->modulation = QAM_64; break; } switch (val & 0x07) { case 0: p->hierarchy = HIERARCHY_NONE; break; case 1: p->hierarchy = HIERARCHY_1; 
break; case 2: p->hierarchy = HIERARCHY_2; break; case 3: p->hierarchy = HIERARCHY_4; break; } val = cx22702_readreg(state, 0x02); switch ((val & 0x38) >> 3) { case 0: p->code_rate_HP = FEC_1_2; break; case 1: p->code_rate_HP = FEC_2_3; break; case 2: p->code_rate_HP = FEC_3_4; break; case 3: p->code_rate_HP = FEC_5_6; break; case 4: p->code_rate_HP = FEC_7_8; break; } switch (val & 0x07) { case 0: p->code_rate_LP = FEC_1_2; break; case 1: p->code_rate_LP = FEC_2_3; break; case 2: p->code_rate_LP = FEC_3_4; break; case 3: p->code_rate_LP = FEC_5_6; break; case 4: p->code_rate_LP = FEC_7_8; break; } val = cx22702_readreg(state, 0x03); switch ((val & 0x0c) >> 2) { case 0: p->guard_interval = GUARD_INTERVAL_1_32; break; case 1: p->guard_interval = GUARD_INTERVAL_1_16; break; case 2: p->guard_interval = GUARD_INTERVAL_1_8; break; case 3: p->guard_interval = GUARD_INTERVAL_1_4; break; } switch (val & 0x03) { case 0: p->transmission_mode = TRANSMISSION_MODE_2K; break; case 1: p->transmission_mode = TRANSMISSION_MODE_8K; break; } return 0; } static int cx22702_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct cx22702_state *state = fe->demodulator_priv; u8 val; dprintk("%s(%d)\n", __func__, enable); val = cx22702_readreg(state, 0x0D); if (enable) val &= 0xfe; else val |= 0x01; return cx22702_writereg(state, 0x0D, val); } /* Talk to the demod, set the FEC, GUARD, QAM settings etc */ static int cx22702_set_tps(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; u8 val; struct cx22702_state *state = fe->demodulator_priv; if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } /* set inversion */ cx22702_set_inversion(state, p->inversion); /* set bandwidth */ val = cx22702_readreg(state, 0x0C) & 0xcf; switch (p->bandwidth_hz) { case 6000000: val |= 0x20; break; case 7000000: val |= 0x10; break; case 8000000: break; default: dprintk("%s: invalid bandwidth\n", __func__); return -EINVAL; } cx22702_writereg(state, 0x0C, val); p->code_rate_LP = FEC_AUTO; /* temp hack as manual not working */ /* use auto configuration? 
*/ if ((p->hierarchy == HIERARCHY_AUTO) || (p->modulation == QAM_AUTO) || (p->code_rate_HP == FEC_AUTO) || (p->code_rate_LP == FEC_AUTO) || (p->guard_interval == GUARD_INTERVAL_AUTO) || (p->transmission_mode == TRANSMISSION_MODE_AUTO)) { /* TPS Source - use hardware driven values */ cx22702_writereg(state, 0x06, 0x10); cx22702_writereg(state, 0x07, 0x9); cx22702_writereg(state, 0x08, 0xC1); cx22702_writereg(state, 0x0B, cx22702_readreg(state, 0x0B) & 0xfc); cx22702_writereg(state, 0x0C, (cx22702_readreg(state, 0x0C) & 0xBF) | 0x40); cx22702_writereg(state, 0x00, 0x01); /* Begin acquisition */ dprintk("%s: Autodetecting\n", __func__); return 0; } /* manually programmed values */ switch (p->modulation) { /* mask 0x18 */ case QPSK: val = 0x00; break; case QAM_16: val = 0x08; break; case QAM_64: val = 0x10; break; default: dprintk("%s: invalid modulation\n", __func__); return -EINVAL; } switch (p->hierarchy) { /* mask 0x07 */ case HIERARCHY_NONE: break; case HIERARCHY_1: val |= 0x01; break; case HIERARCHY_2: val |= 0x02; break; case HIERARCHY_4: val |= 0x03; break; default: dprintk("%s: invalid hierarchy\n", __func__); return -EINVAL; } cx22702_writereg(state, 0x06, val); switch (p->code_rate_HP) { /* mask 0x38 */ case FEC_NONE: case FEC_1_2: val = 0x00; break; case FEC_2_3: val = 0x08; break; case FEC_3_4: val = 0x10; break; case FEC_5_6: val = 0x18; break; case FEC_7_8: val = 0x20; break; default: dprintk("%s: invalid code_rate_HP\n", __func__); return -EINVAL; } switch (p->code_rate_LP) { /* mask 0x07 */ case FEC_NONE: case FEC_1_2: break; case FEC_2_3: val |= 0x01; break; case FEC_3_4: val |= 0x02; break; case FEC_5_6: val |= 0x03; break; case FEC_7_8: val |= 0x04; break; default: dprintk("%s: invalid code_rate_LP\n", __func__); return -EINVAL; } cx22702_writereg(state, 0x07, val); switch (p->guard_interval) { /* mask 0x0c */ case GUARD_INTERVAL_1_32: val = 0x00; break; case GUARD_INTERVAL_1_16: val = 0x04; break; case GUARD_INTERVAL_1_8: val = 0x08; break; case GUARD_INTERVAL_1_4: val = 0x0c; break; default: dprintk("%s: invalid guard_interval\n", __func__); return -EINVAL; } switch (p->transmission_mode) { /* mask 0x03 */ case TRANSMISSION_MODE_2K: break; case TRANSMISSION_MODE_8K: val |= 0x1; break; default: dprintk("%s: invalid transmission_mode\n", __func__); return -EINVAL; } cx22702_writereg(state, 0x08, val); cx22702_writereg(state, 0x0B, (cx22702_readreg(state, 0x0B) & 0xfc) | 0x02); cx22702_writereg(state, 0x0C, (cx22702_readreg(state, 0x0C) & 0xBF) | 0x40); /* Begin channel acquisition */ cx22702_writereg(state, 0x00, 0x01); return 0; } /* Reset the demod hardware and reset all of the configuration registers to a default state. 
*/ static int cx22702_init(struct dvb_frontend *fe) { int i; struct cx22702_state *state = fe->demodulator_priv; cx22702_writereg(state, 0x00, 0x02); msleep(10); for (i = 0; i < ARRAY_SIZE(init_tab); i += 2) cx22702_writereg(state, init_tab[i], init_tab[i + 1]); cx22702_writereg(state, 0xf8, (state->config->output_mode << 1) & 0x02); cx22702_i2c_gate_ctrl(fe, 0); return 0; } static int cx22702_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct cx22702_state *state = fe->demodulator_priv; u8 reg0A; u8 reg23; *status = 0; reg0A = cx22702_readreg(state, 0x0A); reg23 = cx22702_readreg(state, 0x23); dprintk("%s: status demod=0x%02x agc=0x%02x\n" , __func__, reg0A, reg23); if (reg0A & 0x10) { *status |= FE_HAS_LOCK; *status |= FE_HAS_VITERBI; *status |= FE_HAS_SYNC; } if (reg0A & 0x20) *status |= FE_HAS_CARRIER; if (reg23 < 0xf0) *status |= FE_HAS_SIGNAL; return 0; } static int cx22702_read_ber(struct dvb_frontend *fe, u32 *ber) { struct cx22702_state *state = fe->demodulator_priv; if (cx22702_readreg(state, 0xE4) & 0x02) { /* Realtime statistics */ *ber = (cx22702_readreg(state, 0xDE) & 0x7F) << 7 | (cx22702_readreg(state, 0xDF) & 0x7F); } else { /* Averaging statistics */ *ber = (cx22702_readreg(state, 0xDE) & 0x7F) << 7 | cx22702_readreg(state, 0xDF); } return 0; } static int cx22702_read_signal_strength(struct dvb_frontend *fe, u16 *signal_strength) { struct cx22702_state *state = fe->demodulator_priv; u8 reg23; /* * Experience suggests that the strength signal register works as * follows: * - In the absence of signal, value is 0xff. * - In the presence of a weak signal, bit 7 is set, not sure what * the lower 7 bits mean. * - In the presence of a strong signal, the register holds a 7-bit * value (bit 7 is cleared), with greater values standing for * weaker signals. */ reg23 = cx22702_readreg(state, 0x23); if (reg23 & 0x80) { *signal_strength = 0; } else { reg23 = ~reg23 & 0x7f; /* Scale to 16 bit */ *signal_strength = (reg23 << 9) | (reg23 << 2) | (reg23 >> 5); } return 0; } static int cx22702_read_snr(struct dvb_frontend *fe, u16 *snr) { struct cx22702_state *state = fe->demodulator_priv; u16 rs_ber; if (cx22702_readreg(state, 0xE4) & 0x02) { /* Realtime statistics */ rs_ber = (cx22702_readreg(state, 0xDE) & 0x7F) << 7 | (cx22702_readreg(state, 0xDF) & 0x7F); } else { /* Averaging statistics */ rs_ber = (cx22702_readreg(state, 0xDE) & 0x7F) << 8 | cx22702_readreg(state, 0xDF); } *snr = ~rs_ber; return 0; } static int cx22702_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct cx22702_state *state = fe->demodulator_priv; u8 _ucblocks; /* RS Uncorrectable Packet Count then reset */ _ucblocks = cx22702_readreg(state, 0xE3); if (state->prevUCBlocks < _ucblocks) *ucblocks = (_ucblocks - state->prevUCBlocks); else *ucblocks = state->prevUCBlocks - _ucblocks; state->prevUCBlocks = _ucblocks; return 0; } static int cx22702_get_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct cx22702_state *state = fe->demodulator_priv; u8 reg0C = cx22702_readreg(state, 0x0C); c->inversion = reg0C & 0x1 ? 
INVERSION_ON : INVERSION_OFF; return cx22702_get_tps(state, c); } static int cx22702_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 1000; return 0; } static void cx22702_release(struct dvb_frontend *fe) { struct cx22702_state *state = fe->demodulator_priv; kfree(state); } static const struct dvb_frontend_ops cx22702_ops; struct dvb_frontend *cx22702_attach(const struct cx22702_config *config, struct i2c_adapter *i2c) { struct cx22702_state *state = NULL; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct cx22702_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->config = config; state->i2c = i2c; /* check if the demod is there */ if (cx22702_readreg(state, 0x1f) != 0x3) goto error; /* create dvb_frontend */ memcpy(&state->frontend.ops, &cx22702_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); return NULL; } EXPORT_SYMBOL(cx22702_attach); static const struct dvb_frontend_ops cx22702_ops = { .delsys = { SYS_DVBT }, .info = { .name = "Conexant CX22702 DVB-T", .frequency_min = 177000000, .frequency_max = 858000000, .frequency_stepsize = 166666, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER }, .release = cx22702_release, .init = cx22702_init, .i2c_gate_ctrl = cx22702_i2c_gate_ctrl, .set_frontend = cx22702_set_tps, .get_frontend = cx22702_get_frontend, .get_tune_settings = cx22702_get_tune_settings, .read_status = cx22702_read_status, .read_ber = cx22702_read_ber, .read_signal_strength = cx22702_read_signal_strength, .read_snr = cx22702_read_snr, .read_ucblocks = cx22702_read_ucblocks, }; MODULE_DESCRIPTION("Conexant CX22702 DVB-T Demodulator driver"); MODULE_AUTHOR("Steven Toth"); MODULE_LICENSE("GPL");
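/*
 * A quick user-space check of the 7-bit to 16-bit scaling performed in
 * cx22702_read_signal_strength() above.  Replicating the 7-bit value
 * into the low-order bit positions spreads it uniformly over the full
 * 16-bit range, so 0x00 maps to 0x0000 and 0x7f maps to exactly 0xffff.
 */
#include <stdio.h>

int main(void)
{
	unsigned int reg23;

	for (reg23 = 0; reg23 <= 0x7f; reg23 += 0x7f) {
		unsigned int strength =
			(reg23 << 9) | (reg23 << 2) | (reg23 >> 5);

		printf("reg23=0x%02x -> strength=0x%04x\n", reg23, strength);
	}
	return 0;
}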
gpl-2.0
samnazarko/linux-rbp-browser
arch/sh/kernel/cpu/sh4a/clock-sh7770.c
9228
1779
/* * arch/sh/kernel/cpu/sh4a/clock-sh7770.c * * SH7770 support for the clock framework * * Copyright (C) 2005 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <asm/clock.h> #include <asm/freq.h> #include <asm/io.h> static int ifc_divisors[] = { 1, 1, 1, 1, 1, 1, 1, 1 }; static int bfc_divisors[] = { 1, 1, 1, 1, 1, 8,12, 1 }; static int pfc_divisors[] = { 1, 8, 1,10,12,16, 1, 1 }; static void master_clk_init(struct clk *clk) { clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> 28) & 0x000f]; } static struct sh_clk_ops sh7770_master_clk_ops = { .init = master_clk_init, }; static unsigned long module_clk_recalc(struct clk *clk) { int idx = ((__raw_readl(FRQCR) >> 28) & 0x000f); return clk->parent->rate / pfc_divisors[idx]; } static struct sh_clk_ops sh7770_module_clk_ops = { .recalc = module_clk_recalc, }; static unsigned long bus_clk_recalc(struct clk *clk) { int idx = (__raw_readl(FRQCR) & 0x000f); return clk->parent->rate / bfc_divisors[idx]; } static struct sh_clk_ops sh7770_bus_clk_ops = { .recalc = bus_clk_recalc, }; static unsigned long cpu_clk_recalc(struct clk *clk) { int idx = ((__raw_readl(FRQCR) >> 24) & 0x000f); return clk->parent->rate / ifc_divisors[idx]; } static struct sh_clk_ops sh7770_cpu_clk_ops = { .recalc = cpu_clk_recalc, }; static struct sh_clk_ops *sh7770_clk_ops[] = { &sh7770_master_clk_ops, &sh7770_module_clk_ops, &sh7770_bus_clk_ops, &sh7770_cpu_clk_ops, }; void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) { if (idx < ARRAY_SIZE(sh7770_clk_ops)) *ops = sh7770_clk_ops[idx]; }
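/*
 * A user-space sketch of how the recalc hooks above turn FRQCR fields
 * into child clock rates.  The register value and master rate here are
 * hypothetical numbers chosen purely for illustration; on real hardware
 * the register is read with __raw_readl(FRQCR) and the parent rate comes
 * from the clock framework.
 */
#include <stdio.h>

static const int bfc_divisors[] = { 1, 1, 1, 1, 1, 8, 12, 1 };
static const int ifc_divisors[] = { 1, 1, 1, 1, 1, 1, 1, 1 };

int main(void)
{
	unsigned long frqcr = 0x05000005;	/* hypothetical FRQCR value */
	unsigned long parent = 400000000;	/* hypothetical master rate, Hz */

	/* bus clock: FRQCR[3:0] indexes bfc_divisors, as in bus_clk_recalc() */
	printf("bus: %lu Hz\n", parent / bfc_divisors[frqcr & 0x000f]);
	/* cpu clock: FRQCR[27:24] indexes ifc_divisors, as in cpu_clk_recalc() */
	printf("cpu: %lu Hz\n", parent / ifc_divisors[(frqcr >> 24) & 0x000f]);
	return 0;
}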
gpl-2.0
omnirom/android_kernel_google_msm
arch/parisc/kernel/hardware.c
9228
77839
/* * Hardware descriptions for HP 9000 based hardware, including * system types, SCSI controllers, DMA controllers, HPPB controllers * and lots more. * * Based on the document "PA-RISC 1.1 I/O Firmware Architecture * Reference Specification", March 7, 1999, version 0.96. This * is available at http://parisc-linux.org/documentation/ * * Copyright 1999 by Alex deVries <alex@onefishtwo.ca> * and copyright 1999 The Puffin Group Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <asm/hardware.h> #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> /* * HP PARISC Hardware Database * Access to this database is only possible during bootup * so don't reference this table after starting the init process */ static struct hp_hardware hp_hardware_list[] __devinitdata = { {HPHW_NPROC,0x01,0x4,0x0,"Indigo (840, 930)"}, {HPHW_NPROC,0x8,0x4,0x01,"Firefox(825,925)"}, {HPHW_NPROC,0xA,0x4,0x01,"Top Gun (835,834,935,635)"}, {HPHW_NPROC,0xB,0x4,0x01,"Technical Shogun (845, 645)"}, {HPHW_NPROC,0xF,0x4,0x01,"Commercial Shogun (949)"}, {HPHW_NPROC,0xC,0x4,0x01,"Cheetah (850, 950)"}, {HPHW_NPROC,0x80,0x4,0x01,"Cheetah (950S)"}, {HPHW_NPROC,0x81,0x4,0x01,"Jaguar (855, 955)"}, {HPHW_NPROC,0x82,0x4,0x01,"Cougar (860, 960)"}, {HPHW_NPROC,0x83,0x4,0x13,"Panther (865, 870, 980)"}, {HPHW_NPROC,0x100,0x4,0x01,"Burgundy (810)"}, {HPHW_NPROC,0x101,0x4,0x01,"SilverFox Low (822, 922)"}, {HPHW_NPROC,0x102,0x4,0x01,"SilverFox High (832, 932)"}, {HPHW_NPROC,0x103,0x4,0x01,"Lego, SilverLite (815, 808, 920)"}, {HPHW_NPROC,0x104,0x4,0x03,"SilverBullet Low (842, 948)"}, {HPHW_NPROC,0x105,0x4,0x03,"SilverBullet High (852, 958)"}, {HPHW_NPROC,0x106,0x4,0x81,"Oboe"}, {HPHW_NPROC,0x180,0x4,0x12,"Dragon"}, {HPHW_NPROC,0x181,0x4,0x13,"Chimera (890, 990, 992)"}, {HPHW_NPROC,0x182,0x4,0x91,"TNT 100 (891,T500)"}, {HPHW_NPROC,0x183,0x4,0x91,"TNT 120 (892,T520)"}, {HPHW_NPROC,0x184,0x4,0x91,"Jade 180 U (893,T540)"}, {HPHW_NPROC,0x1FF,0x4,0x91,"Hitachi X Processor"}, {HPHW_NPROC,0x200,0x4,0x81,"Cobra (720)"}, {HPHW_NPROC,0x201,0x4,0x81,"Coral (750)"}, {HPHW_NPROC,0x202,0x4,0x81,"King Cobra (730)"}, {HPHW_NPROC,0x203,0x4,0x81,"Hardball (735/99)"}, {HPHW_NPROC,0x204,0x4,0x81,"Coral II (755/99)"}, {HPHW_NPROC,0x205,0x4,0x81,"Coral II (755/125)"}, {HPHW_NPROC,0x205,0x4,0x91,"Snake Eagle "}, {HPHW_NPROC,0x206,0x4,0x81,"Snake Cheetah (735/130)"}, {HPHW_NPROC,0x280,0x4,0x81,"Nova Low (817, 827, 957, 957LX)"}, {HPHW_NPROC,0x281,0x4,0x81,"Nova High (837, 847, 857, 967, 967LX)"}, {HPHW_NPROC,0x282,0x4,0x81,"Nova8 (807, 917, 917LX, 927,927LX, 937, 937LX, 947,947LX)"}, {HPHW_NPROC,0x283,0x4,0x81,"Nova64 (867, 877, 977)"}, {HPHW_NPROC,0x284,0x4,0x81,"TNova (887, 897, 987)"}, {HPHW_NPROC,0x285,0x4,0x81,"TNova64"}, {HPHW_NPROC,0x286,0x4,0x91,"Hydra64 (Nova)"}, {HPHW_NPROC,0x287,0x4,0x91,"Hydra96 (Nova)"}, {HPHW_NPROC,0x288,0x4,0x81,"TNova96"}, {HPHW_NPROC,0x300,0x4,0x81,"Bushmaster (710)"}, 
{HPHW_NPROC,0x302,0x4,0x81,"Flounder (705)"}, {HPHW_NPROC,0x310,0x4,0x81,"Scorpio (715/50)"}, {HPHW_NPROC,0x311,0x4,0x81,"Scorpio Jr.(715/33)"}, {HPHW_NPROC,0x312,0x4,0x81,"Strider-50 (715S/50)"}, {HPHW_NPROC,0x313,0x4,0x81,"Strider-33 (715S/33)"}, {HPHW_NPROC,0x314,0x4,0x81,"Trailways-50 (715T/50)"}, {HPHW_NPROC,0x315,0x4,0x81,"Trailways-33 (715T/33)"}, {HPHW_NPROC,0x316,0x4,0x81,"Scorpio Sr.(715/75)"}, {HPHW_NPROC,0x317,0x4,0x81,"Scorpio 100 (715/100)"}, {HPHW_NPROC,0x318,0x4,0x81,"Spectra (725/50)"}, {HPHW_NPROC,0x319,0x4,0x81,"Spectra (725/75)"}, {HPHW_NPROC,0x320,0x4,0x81,"Spectra (725/100)"}, {HPHW_NPROC,0x401,0x4,0x81,"Pace (745i, 747i)"}, {HPHW_NPROC,0x402,0x4,0x81,"Sidewinder (742i)"}, {HPHW_NPROC,0x403,0x4,0x81,"Fast Pace"}, {HPHW_NPROC,0x480,0x4,0x81,"Orville (E23)"}, {HPHW_NPROC,0x481,0x4,0x81,"Wilbur (E25)"}, {HPHW_NPROC,0x482,0x4,0x81,"WB-80 (E35)"}, {HPHW_NPROC,0x483,0x4,0x81,"WB-96 (E45)"}, {HPHW_NPROC,0x484,0x4,0x81,"UL Proc L-100 (811/D210,D310)"}, {HPHW_NPROC,0x485,0x4,0x81,"UL Proc L-75 (801/D200)"}, {HPHW_NPROC,0x501,0x4,0x81,"Merlin L2 132 (9000/778/B132L)"}, {HPHW_NPROC,0x502,0x4,0x81,"Merlin L2 160 (9000/778/B160L)"}, {HPHW_NPROC,0x503,0x4,0x81,"Merlin L2+ 132 (9000/778/B132L)"}, {HPHW_NPROC,0x504,0x4,0x81,"Merlin L2+ 180 (9000/778/B180L)"}, {HPHW_NPROC,0x505,0x4,0x81,"Raven L2 132 (9000/778/C132L)"}, {HPHW_NPROC,0x506,0x4,0x81,"Raven L2 160 (9000/779/C160L)"}, {HPHW_NPROC,0x507,0x4,0x81,"Raven L2 180 (9000/779/C180L)"}, {HPHW_NPROC,0x508,0x4,0x81,"Raven L2 160 (9000/779/C160L)"}, {HPHW_NPROC,0x509,0x4,0x81,"712/132 L2 Upgrade"}, {HPHW_NPROC,0x50A,0x4,0x81,"712/160 L2 Upgrade"}, {HPHW_NPROC,0x50B,0x4,0x81,"715/132 L2 Upgrade"}, {HPHW_NPROC,0x50C,0x4,0x81,"715/160 L2 Upgrade"}, {HPHW_NPROC,0x50D,0x4,0x81,"Rocky2 L2 120"}, {HPHW_NPROC,0x50E,0x4,0x81,"Rocky2 L2 150"}, {HPHW_NPROC,0x50F,0x4,0x81,"Anole L2 132 (744)"}, {HPHW_NPROC,0x510,0x4,0x81,"Anole L2 165 (744)"}, {HPHW_NPROC,0x511,0x4,0x81,"Kiji L2 132"}, {HPHW_NPROC,0x512,0x4,0x81,"UL L2 132 (803/D220,D320)"}, {HPHW_NPROC,0x513,0x4,0x81,"UL L2 160 (813/D220,D320)"}, {HPHW_NPROC,0x514,0x4,0x81,"Merlin Jr L2 132"}, {HPHW_NPROC,0x515,0x4,0x81,"Staccato L2 132"}, {HPHW_NPROC,0x516,0x4,0x81,"Staccato L2 180 (A Class 180)"}, {HPHW_NPROC,0x580,0x4,0x81,"KittyHawk DC2-100 (K100)"}, {HPHW_NPROC,0x581,0x4,0x91,"KittyHawk DC3-120 (K210)"}, {HPHW_NPROC,0x582,0x4,0x91,"KittyHawk DC3 100 (K400)"}, {HPHW_NPROC,0x583,0x4,0x91,"KittyHawk DC3 120 (K410)"}, {HPHW_NPROC,0x584,0x4,0x91,"LighteningHawk T120"}, {HPHW_NPROC,0x585,0x4,0x91,"SkyHawk 100"}, {HPHW_NPROC,0x586,0x4,0x91,"SkyHawk 120"}, {HPHW_NPROC,0x587,0x4,0x81,"UL Proc 1-way T'120"}, {HPHW_NPROC,0x588,0x4,0x91,"UL Proc 2-way T'120"}, {HPHW_NPROC,0x589,0x4,0x81,"UL Proc 1-way T'100 (821/D250,D350)"}, {HPHW_NPROC,0x58A,0x4,0x91,"UL Proc 2-way T'100 (831/D250,D350)"}, {HPHW_NPROC,0x58B,0x4,0x91,"KittyHawk DC2 100 (K200)"}, {HPHW_NPROC,0x58C,0x4,0x91,"ThunderHawk DC3- 120 1M (K220)"}, {HPHW_NPROC,0x58D,0x4,0x91,"ThunderHawk DC3 120 1M (K420)"}, {HPHW_NPROC,0x58E,0x4,0x81,"Raven 120 T'"}, {HPHW_NPROC,0x58F,0x4,0x91,"Mohawk 160 U 1M DC3 (K450)"}, {HPHW_NPROC,0x590,0x4,0x91,"Mohawk 180 U 1M DC3 (K460)"}, {HPHW_NPROC,0x591,0x4,0x91,"Mohawk 200 U 1M DC3"}, {HPHW_NPROC,0x592,0x4,0x81,"Raven 100 T'"}, {HPHW_NPROC,0x593,0x4,0x91,"FireHawk 160 U"}, {HPHW_NPROC,0x594,0x4,0x91,"FireHawk 180 U"}, {HPHW_NPROC,0x595,0x4,0x91,"FireHawk 220 U"}, {HPHW_NPROC,0x596,0x4,0x91,"FireHawk 240 U"}, {HPHW_NPROC,0x597,0x4,0x91,"SPP2000 processor"}, {HPHW_NPROC,0x598,0x4,0x81,"Raven U 230 
(9000/780/C230)"}, {HPHW_NPROC,0x599,0x4,0x81,"Raven U 240 (9000/780/C240)"}, {HPHW_NPROC,0x59A,0x4,0x91,"Unlisted but reserved"}, {HPHW_NPROC,0x59A,0x4,0x81,"Unlisted but reserved"}, {HPHW_NPROC,0x59B,0x4,0x81,"Raven U 160 (9000/780/C160)"}, {HPHW_NPROC,0x59C,0x4,0x81,"Raven U 180 (9000/780/C180)"}, {HPHW_NPROC,0x59D,0x4,0x81,"Raven U 200 (9000/780/C200)"}, {HPHW_NPROC,0x59E,0x4,0x91,"ThunderHawk T' 120"}, {HPHW_NPROC,0x59F,0x4,0x91,"Raven U 180+ (9000/780)"}, {HPHW_NPROC,0x5A0,0x4,0x81,"UL 1w T120 1MB/1MB (841/D260,D360)"}, {HPHW_NPROC,0x5A1,0x4,0x91,"UL 2w T120 1MB/1MB (851/D260,D360)"}, {HPHW_NPROC,0x5A2,0x4,0x81,"UL 1w U160 512K/512K (861/D270,D370)"}, {HPHW_NPROC,0x5A3,0x4,0x91,"UL 2w U160 512K/512K (871/D270,D370)"}, {HPHW_NPROC,0x5A4,0x4,0x91,"Mohawk 160 U 1M DC3- (K250)"}, {HPHW_NPROC,0x5A5,0x4,0x91,"Mohawk 180 U 1M DC3- (K260)"}, {HPHW_NPROC,0x5A6,0x4,0x91,"Mohawk 200 U 1M DC3-"}, {HPHW_NPROC,0x5A7,0x4,0x81,"UL proc 1-way U160 1M/1M"}, {HPHW_NPROC,0x5A8,0x4,0x91,"UL proc 2-way U160 1M/1M"}, {HPHW_NPROC,0x5A9,0x4,0x81,"UL proc 1-way U180 1M/1M"}, {HPHW_NPROC,0x5AA,0x4,0x91,"UL proc 2-way U180 1M/1M"}, {HPHW_NPROC,0x5AB,0x4,0x91,"Obsolete"}, {HPHW_NPROC,0x5AB,0x4,0x81,"Obsolete"}, {HPHW_NPROC,0x5AC,0x4,0x91,"Obsolete"}, {HPHW_NPROC,0x5AC,0x4,0x81,"Obsolete"}, {HPHW_NPROC,0x5AD,0x4,0x91,"BraveHawk 180MHz DC3-"}, {HPHW_NPROC,0x5AE,0x4,0x91,"BraveHawk 200MHz DC3- (898/K370)"}, {HPHW_NPROC,0x5AF,0x4,0x91,"BraveHawk 220MHz DC3-"}, {HPHW_NPROC,0x5B0,0x4,0x91,"BraveHawk 180MHz DC3"}, {HPHW_NPROC,0x5B1,0x4,0x91,"BraveHawk 200MHz DC3 (899/K570)"}, {HPHW_NPROC,0x5B2,0x4,0x91,"BraveHawk 220MHz DC3"}, {HPHW_NPROC,0x5B3,0x4,0x91,"FireHawk 200"}, {HPHW_NPROC,0x5B4,0x4,0x91,"SPP2500"}, {HPHW_NPROC,0x5B5,0x4,0x91,"SummitHawk U+"}, {HPHW_NPROC,0x5B6,0x4,0x91,"DragonHawk U+ 240 DC3"}, {HPHW_NPROC,0x5B7,0x4,0x91,"DragonHawk U+ 240 DC3-"}, {HPHW_NPROC,0x5B8,0x4,0x91,"SPP2250 240 MHz"}, {HPHW_NPROC,0x5B9,0x4,0x81,"UL 1w U+/240 (350/550)"}, {HPHW_NPROC,0x5BA,0x4,0x91,"UL 2w U+/240 (350/550)"}, {HPHW_NPROC,0x5BB,0x4,0x81,"AllegroHigh W"}, {HPHW_NPROC,0x5BC,0x4,0x91,"AllegroLow W"}, {HPHW_NPROC,0x5BD,0x4,0x91,"Forte W 2-way"}, {HPHW_NPROC,0x5BE,0x4,0x91,"Prelude W"}, {HPHW_NPROC,0x5BF,0x4,0x91,"Forte W 4-way"}, {HPHW_NPROC,0x5C0,0x4,0x91,"M2250"}, {HPHW_NPROC,0x5C1,0x4,0x91,"M2500"}, {HPHW_NPROC,0x5C2,0x4,0x91,"Sonata 440"}, {HPHW_NPROC,0x5C3,0x4,0x91,"Sonata 360"}, {HPHW_NPROC,0x5C4,0x4,0x91,"Rhapsody 440"}, {HPHW_NPROC,0x5C5,0x4,0x91,"Rhapsody 360"}, {HPHW_NPROC,0x5C6,0x4,0x91,"Raven W 360 (9000/780)"}, {HPHW_NPROC,0x5C7,0x4,0x91,"Halfdome W 440"}, {HPHW_NPROC,0x5C8,0x4,0x81,"Lego 360 processor"}, {HPHW_NPROC,0x5C9,0x4,0x91,"Rhapsody DC- 440"}, {HPHW_NPROC,0x5CA,0x4,0x91,"Rhapsody DC- 360"}, {HPHW_NPROC,0x5CB,0x4,0x91,"Crescendo 440"}, {HPHW_NPROC,0x5CC,0x4,0x91,"Prelude W 440"}, {HPHW_NPROC,0x5CD,0x4,0x91,"SPP2600"}, {HPHW_NPROC,0x5CE,0x4,0x91,"M2600"}, {HPHW_NPROC,0x5CF,0x4,0x81,"Allegro W+"}, {HPHW_NPROC,0x5D0,0x4,0x81,"Kazoo W+"}, {HPHW_NPROC,0x5D1,0x4,0x91,"Forte W+ 2w"}, {HPHW_NPROC,0x5D2,0x4,0x91,"Forte W+ 4w"}, {HPHW_NPROC,0x5D3,0x4,0x91,"Prelude W+ 540"}, {HPHW_NPROC,0x5D4,0x4,0x91,"Duet W+"}, {HPHW_NPROC,0x5D5,0x4,0x91,"Crescendo 550"}, {HPHW_NPROC,0x5D6,0x4,0x81,"Crescendo DC- 440"}, {HPHW_NPROC,0x5D7,0x4,0x91,"Keystone W+"}, {HPHW_NPROC,0x5D8,0x4,0x91,"Rhapsody wave 2 W+ DC-"}, {HPHW_NPROC,0x5D9,0x4,0x91,"Rhapsody wave 2 W+"}, {HPHW_NPROC,0x5DA,0x4,0x91,"Marcato W+ DC-"}, {HPHW_NPROC,0x5DB,0x4,0x91,"Marcato W+"}, {HPHW_NPROC,0x5DC,0x4,0x91,"Allegro W2"}, {HPHW_NPROC,0x5DD,0x4,0x81,"Duet 
W2"}, {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"}, {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"}, {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"}, {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"}, {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"}, {HPHW_NPROC,0x5E3,0x4,0x91,"Crescendo 750 W2"}, {HPHW_NPROC,0x5E4,0x4,0x91,"Keystone/Matterhorn W2 750"}, {HPHW_NPROC,0x5E5,0x4,0x91,"PowerBar W+"}, {HPHW_NPROC,0x5E6,0x4,0x91,"Keystone/Matterhorn W2 650"}, {HPHW_NPROC,0x5E7,0x4,0x91,"Caribe W2 800"}, {HPHW_NPROC,0x5E8,0x4,0x91,"Pikes Peak W2"}, {HPHW_NPROC,0x5EB,0x4,0x91,"Perf/Leone 875 W2+"}, {HPHW_NPROC,0x5FF,0x4,0x91,"Hitachi W"}, {HPHW_NPROC,0x600,0x4,0x81,"Gecko (712/60)"}, {HPHW_NPROC,0x601,0x4,0x81,"Gecko 80 (712/80)"}, {HPHW_NPROC,0x602,0x4,0x81,"Gecko 100 (712/100)"}, {HPHW_NPROC,0x603,0x4,0x81,"Anole 64 (743/64)"}, {HPHW_NPROC,0x604,0x4,0x81,"Anole 100 (743/100)"}, {HPHW_NPROC,0x605,0x4,0x81,"Gecko 120 (712/120)"}, {HPHW_NPROC,0x606,0x4,0x81,"Gila 80"}, {HPHW_NPROC,0x607,0x4,0x81,"Gila 100"}, {HPHW_NPROC,0x608,0x4,0x81,"Gila 120"}, {HPHW_NPROC,0x609,0x4,0x81,"Scorpio-L 80"}, {HPHW_NPROC,0x60A,0x4,0x81,"Mirage Jr (715/64)"}, {HPHW_NPROC,0x60B,0x4,0x81,"Mirage 100"}, {HPHW_NPROC,0x60C,0x4,0x81,"Mirage 100+"}, {HPHW_NPROC,0x60D,0x4,0x81,"Electra 100"}, {HPHW_NPROC,0x60E,0x4,0x81,"Electra 120"}, {HPHW_NPROC,0x610,0x4,0x81,"Scorpio-L 100"}, {HPHW_NPROC,0x611,0x4,0x81,"Scorpio-L 120"}, {HPHW_NPROC,0x612,0x4,0x81,"Spectra-L 80"}, {HPHW_NPROC,0x613,0x4,0x81,"Spectra-L 100"}, {HPHW_NPROC,0x614,0x4,0x81,"Spectra-L 120"}, {HPHW_NPROC,0x615,0x4,0x81,"Piranha 100"}, {HPHW_NPROC,0x616,0x4,0x81,"Piranha 120"}, {HPHW_NPROC,0x617,0x4,0x81,"Jason 50"}, {HPHW_NPROC,0x618,0x4,0x81,"Jason 100"}, {HPHW_NPROC,0x619,0x4,0x81,"Mirage 80"}, {HPHW_NPROC,0x61A,0x4,0x81,"SAIC L-80"}, {HPHW_NPROC,0x61B,0x4,0x81,"Rocky1 L-60"}, {HPHW_NPROC,0x61C,0x4,0x81,"Anole T (743/T)"}, {HPHW_NPROC,0x67E,0x4,0x81,"Hitachi Tiny 80"}, {HPHW_NPROC,0x67F,0x4,0x81,"Hitachi Tiny 64"}, {HPHW_NPROC,0x700,0x4,0x91,"NEC Aska Processor"}, {HPHW_NPROC,0x880,0x4,0x91,"Orca Mako"}, {HPHW_NPROC,0x881,0x4,0x91,"Everest Mako"}, {HPHW_NPROC,0x882,0x4,0x91,"Rainier/Medel Mako Slow"}, {HPHW_NPROC,0x883,0x4,0x91,"Rainier/Medel Mako Fast"}, {HPHW_NPROC,0x884,0x4,0x91,"Mt. Hamilton"}, {HPHW_NPROC,0x885,0x4,0x91,"Mt. Hamilton DC-"}, {HPHW_NPROC,0x886,0x4,0x91,"Storm Peak Slow DC-"}, {HPHW_NPROC,0x887,0x4,0x91,"Storm Peak Slow"}, {HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"}, {HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"}, {HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"}, {HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"}, {HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"}, {HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"}, {HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"}, {HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"}, {HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"}, {HPHW_NPROC,0x897,0x4,0x91,"Storm Peak DC- Slow Mako+"}, {HPHW_NPROC,0x898,0x4,0x91,"Storm Peak DC- Fast Mako+"}, {HPHW_NPROC,0x899,0x4,0x91,"Mt. 
Hamilton Slow Mako+"}, {HPHW_NPROC,0x89B,0x4,0x91,"Crestone Peak Mako+ Slow"}, {HPHW_NPROC,0x89C,0x4,0x91,"Crestone Peak Mako+ Fast"}, {HPHW_A_DIRECT, 0x004, 0x0000D, 0x00, "Arrakis MUX"}, {HPHW_A_DIRECT, 0x005, 0x0000D, 0x00, "Dyun Kiuh MUX"}, {HPHW_A_DIRECT, 0x006, 0x0000D, 0x00, "Baat Kiuh AP/MUX (40299B)"}, {HPHW_A_DIRECT, 0x007, 0x0000D, 0x00, "Dino AP"}, {HPHW_A_DIRECT, 0x009, 0x0000D, 0x00, "Solaris Direct Connect MUX (J2092A)"}, {HPHW_A_DIRECT, 0x00A, 0x0000D, 0x00, "Solaris RS-422/423 MUX (J2093A)"}, {HPHW_A_DIRECT, 0x00B, 0x0000D, 0x00, "Solaris RS-422/423 Quadriloops MUX"}, {HPHW_A_DIRECT, 0x00C, 0x0000D, 0x00, "Solaris Modem MUX (J2094A)"}, {HPHW_A_DIRECT, 0x00D, 0x0000D, 0x00, "Twins Direct Connect MUX"}, {HPHW_A_DIRECT, 0x00E, 0x0000D, 0x00, "Twins Modem MUX"}, {HPHW_A_DIRECT, 0x00F, 0x0000D, 0x00, "Nautilus RS-485"}, {HPHW_A_DIRECT, 0x010, 0x0000D, 0x00, "UltraLight CAP/MUX"}, {HPHW_A_DIRECT, 0x015, 0x0000D, 0x00, "Eole CAP/MUX"}, {HPHW_A_DIRECT, 0x024, 0x0000D, 0x00, "Sahp Kiuh AP/MUX"}, {HPHW_A_DIRECT, 0x034, 0x0000D, 0x00, "Sahp Kiuh Low AP/MUX"}, {HPHW_A_DIRECT, 0x044, 0x0000D, 0x00, "Sahp Baat Kiuh AP/MUX"}, {HPHW_A_DIRECT, 0x004, 0x0000E, 0x80, "Burgundy RS-232"}, {HPHW_A_DIRECT, 0x005, 0x0000E, 0x80, "Silverfox RS-232"}, {HPHW_A_DIRECT, 0x006, 0x0000E, 0x80, "Lego RS-232"}, {HPHW_A_DIRECT, 0x004, 0x0000F, 0x00, "Peacock Graphics"}, {HPHW_A_DIRECT, 0x004, 0x00014, 0x80, "Burgundy HIL"}, {HPHW_A_DIRECT, 0x005, 0x00014, 0x80, "Peacock HIL"}, {HPHW_A_DIRECT, 0x004, 0x00015, 0x80, "Leonardo"}, {HPHW_A_DIRECT, 0x004, 0x00016, 0x80, "HP-PB HRM"}, {HPHW_A_DIRECT, 0x004, 0x00017, 0x80, "HP-PB HRC"}, {HPHW_A_DIRECT, 0x004, 0x0003A, 0x80, "Skunk Centronics (28655A)"}, {HPHW_A_DIRECT, 0x024, 0x0003A, 0x80, "Sahp Kiuh Centronics"}, {HPHW_A_DIRECT, 0x044, 0x0003A, 0x80, "Sahp Baat Kiuh Centronics"}, {HPHW_A_DIRECT, 0x004, 0x0004E, 0x80, "AT&T DataKit (AMSO)"}, {HPHW_A_DIRECT, 0x004, 0x0009B, 0x80, "Test&Meas GSC HPIB"}, {HPHW_A_DIRECT, 0x004, 0x000A8, 0x00, "Rocky2-120 Front Keyboard"}, {HPHW_A_DIRECT, 0x005, 0x000A8, 0x00, "Rocky2-150 Front Keyboard"}, {HPHW_A_DIRECT, 0x004, 0x00101, 0x80, "Hitachi Console Module"}, {HPHW_A_DIRECT, 0x004, 0x00102, 0x80, "Hitachi Boot Module"}, {HPHW_A_DIRECT, 0x004, 0x00203, 0x80, "MELCO HBMLA MLAIT"}, {HPHW_A_DIRECT, 0x004, 0x00208, 0x80, "MELCO HBDPC"}, {HPHW_A_DIRECT, 0x004, 0x00300, 0x00, "DCI TWINAX TERM IO MUX"}, {HPHW_A_DMA, 0x004, 0x00039, 0x80, "Skunk SCSI (28655A)"}, {HPHW_A_DMA, 0x005, 0x00039, 0x80, "KittyHawk CSY Core SCSI"}, {HPHW_A_DMA, 0x014, 0x00039, 0x80, "Diablo SCSI"}, {HPHW_A_DMA, 0x024, 0x00039, 0x80, "Sahp Kiuh SCSI"}, {HPHW_A_DMA, 0x034, 0x00039, 0x80, "Sahp Kiuh Low SCSI"}, {HPHW_A_DMA, 0x044, 0x00039, 0x80, "Sahp Baat Kiuh SCSI"}, {HPHW_A_DMA, 0x004, 0x0003B, 0x80, "Wizard SCSI"}, {HPHW_A_DMA, 0x005, 0x0003B, 0x80, "KittyHawk CSY Core FW-SCSI"}, {HPHW_A_DMA, 0x006, 0x0003B, 0x80, "Symbios EPIC FW-SCSI"}, {HPHW_A_DMA, 0x004, 0x00040, 0x80, "HP-PB Shazam HPIB (28650A)"}, {HPHW_A_DMA, 0x005, 0x00040, 0x80, "Burgundy HPIB"}, {HPHW_A_DMA, 0x004, 0x00041, 0x80, "HP-PB HP-FL"}, {HPHW_A_DMA, 0x004, 0x00042, 0x80, "HP-PB LoQuix HPIB (28650B)"}, {HPHW_A_DMA, 0x004, 0x00043, 0x80, "HP-PB Crypt LoQuix"}, {HPHW_A_DMA, 0x004, 0x00044, 0x80, "HP-PB Shazam GPIO (28651A)"}, {HPHW_A_DMA, 0x004, 0x00045, 0x80, "HP-PB LoQuix GPIO"}, {HPHW_A_DMA, 0x004, 0x00046, 0x80, "2-Port X.25 NIO_ACC (AMSO)"}, {HPHW_A_DMA, 0x004, 0x00047, 0x80, "4-Port X.25 NIO_ACC (AMSO)"}, {HPHW_A_DMA, 0x004, 0x0004B, 0x80, "LGB Control"}, {HPHW_A_DMA, 0x004, 0x0004C, 
0x80, "Martian RTI (AMSO)"}, {HPHW_A_DMA, 0x004, 0x0004D, 0x80, "ACC Mux (AMSO)"}, {HPHW_A_DMA, 0x004, 0x00050, 0x80, "Lanbrusca 802.3 (36967A)"}, {HPHW_A_DMA, 0x004, 0x00056, 0x80, "HP-PB LoQuix FDDI"}, {HPHW_A_DMA, 0x004, 0x00057, 0x80, "HP-PB LoQuix FDDI (28670A)"}, {HPHW_A_DMA, 0x004, 0x0005E, 0x00, "Gecko Add-on Token Ring"}, {HPHW_A_DMA, 0x012, 0x00089, 0x80, "Barracuda Add-on FW-SCSI"}, {HPHW_A_DMA, 0x013, 0x00089, 0x80, "Bluefish Add-on FW-SCSI"}, {HPHW_A_DMA, 0x014, 0x00089, 0x80, "Shrike Add-on FW-SCSI"}, {HPHW_A_DMA, 0x015, 0x00089, 0x80, "KittyHawk GSY Core FW-SCSI"}, {HPHW_A_DMA, 0x017, 0x00089, 0x80, "Shrike Jade Add-on FW-SCSI (A3644A)"}, {HPHW_A_DMA, 0x01F, 0x00089, 0x80, "SkyHawk 100/120 FW-SCSI"}, {HPHW_A_DMA, 0x027, 0x00089, 0x80, "Piranha 100 FW-SCSI"}, {HPHW_A_DMA, 0x032, 0x00089, 0x80, "Raven T' Core FW-SCSI"}, {HPHW_A_DMA, 0x03B, 0x00089, 0x80, "Raven U/L2 Core FW-SCSI"}, {HPHW_A_DMA, 0x03C, 0x00089, 0x80, "Merlin 132 Core FW-SCSI"}, {HPHW_A_DMA, 0x03D, 0x00089, 0x80, "Merlin 160 Core FW-SCSI"}, {HPHW_A_DMA, 0x044, 0x00089, 0x80, "Mohawk Core FW-SCSI"}, {HPHW_A_DMA, 0x051, 0x00089, 0x80, "Firehawk FW-SCSI"}, {HPHW_A_DMA, 0x058, 0x00089, 0x80, "FireHawk 200 FW-SCSI"}, {HPHW_A_DMA, 0x05C, 0x00089, 0x80, "SummitHawk 230 Ultra-SCSI"}, {HPHW_A_DMA, 0x014, 0x00091, 0x80, "Baby Hugo Add-on Net FC (A3406A)"}, {HPHW_A_DMA, 0x020, 0x00091, 0x80, "Baby Jade Add-on Net FC (A3638A)"}, {HPHW_A_DMA, 0x004, 0x00092, 0x80, "GSC+ YLIASTER ATM"}, {HPHW_A_DMA, 0x004, 0x00095, 0x80, "Hamlyn GSC+ Network Card"}, {HPHW_A_DMA, 0x004, 0x00098, 0x80, "Lo-fat Emulator"}, {HPHW_A_DMA, 0x004, 0x0009A, 0x80, "GSC+ Venus ATM"}, {HPHW_A_DMA, 0x005, 0x0009A, 0x80, "GSC+ Samorobrive ATM"}, {HPHW_A_DMA, 0x004, 0x0009D, 0x80, "HP HSC-PCI Cards"}, {HPHW_A_DMA, 0x004, 0x0009E, 0x80, "Alaxis GSC+ 155Mb ATM"}, {HPHW_A_DMA, 0x005, 0x0009E, 0x80, "Alaxis GSC+ 622Mb ATM"}, {HPHW_A_DMA, 0x05C, 0x0009F, 0x80, "SummitHawk 230 USB"}, {HPHW_A_DMA, 0x05C, 0x000A0, 0x80, "SummitHawk 230 100BaseT"}, {HPHW_A_DMA, 0x015, 0x000A7, 0x80, "Baby Hugo Add-on mass FC (A3404A)"}, {HPHW_A_DMA, 0x018, 0x000A7, 0x80, "Mombasa GS Add-on mass FC (A3591)"}, {HPHW_A_DMA, 0x021, 0x000A7, 0x80, "Baby Jade Add-on mass FC (A3636A)"}, {HPHW_A_DMA, 0x004, 0x00201, 0x80, "MELCO HCMAP"}, {HPHW_A_DMA, 0x004, 0x00202, 0x80, "MELCO HBMLA MLAMA"}, {HPHW_A_DMA, 0x004, 0x00205, 0x80, "MELCO HBRFU"}, {HPHW_A_DMA, 0x004, 0x00380, 0x80, "Interphase NIO-FC"}, {HPHW_A_DMA, 0x004, 0x00381, 0x80, "Interphase NIO-ATM"}, {HPHW_A_DMA, 0x004, 0x00382, 0x80, "Interphase NIO-100BaseTX"}, {HPHW_BA, 0x004, 0x00070, 0x0, "Cobra Core BA"}, {HPHW_BA, 0x005, 0x00070, 0x0, "Coral Core BA"}, {HPHW_BA, 0x006, 0x00070, 0x0, "Bushmaster Core BA"}, {HPHW_BA, 0x007, 0x00070, 0x0, "Scorpio Core BA"}, {HPHW_BA, 0x008, 0x00070, 0x0, "Flounder Core BA"}, {HPHW_BA, 0x009, 0x00070, 0x0, "Outfield Core BA"}, {HPHW_BA, 0x00A, 0x00070, 0x0, "CoralII Core BA"}, {HPHW_BA, 0x00B, 0x00070, 0x0, "Scorpio Jr. Core BA"}, {HPHW_BA, 0x00C, 0x00070, 0x0, "Strider-50 Core BA"}, {HPHW_BA, 0x00D, 0x00070, 0x0, "Strider-33 Core BA"}, {HPHW_BA, 0x00E, 0x00070, 0x0, "Trailways-50 Core BA"}, {HPHW_BA, 0x00F, 0x00070, 0x0, "Trailways-33 Core BA"}, {HPHW_BA, 0x010, 0x00070, 0x0, "Pace Core BA"}, {HPHW_BA, 0x011, 0x00070, 0x0, "Sidewinder Core BA"}, {HPHW_BA, 0x019, 0x00070, 0x0, "Scorpio Sr. 
Core BA"}, {HPHW_BA, 0x020, 0x00070, 0x0, "Scorpio 100 Core BA"}, {HPHW_BA, 0x021, 0x00070, 0x0, "Spectra 50 Core BA"}, {HPHW_BA, 0x022, 0x00070, 0x0, "Spectra 75 Core BA"}, {HPHW_BA, 0x023, 0x00070, 0x0, "Spectra 100 Core BA"}, {HPHW_BA, 0x024, 0x00070, 0x0, "Fast Pace Core BA"}, {HPHW_BA, 0x026, 0x00070, 0x0, "CoralII Jaguar Core BA"}, {HPHW_BA, 0x004, 0x00076, 0x0, "Cobra EISA BA"}, {HPHW_BA, 0x005, 0x00076, 0x0, "Coral EISA BA"}, {HPHW_BA, 0x007, 0x00076, 0x0, "Scorpio EISA BA"}, {HPHW_BA, 0x00A, 0x00076, 0x0, "CoralII EISA BA"}, {HPHW_BA, 0x00B, 0x00076, 0x0, "Scorpio Jr. EISA BA"}, {HPHW_BA, 0x00C, 0x00076, 0x0, "Strider-50 Core EISA"}, {HPHW_BA, 0x00D, 0x00076, 0x0, "Strider-33 Core EISA"}, {HPHW_BA, 0x00E, 0x00076, 0x0, "Trailways-50 Core EISA"}, {HPHW_BA, 0x00F, 0x00076, 0x0, "Trailways-33 Core EISA"}, {HPHW_BA, 0x010, 0x00076, 0x0, "Pace Core EISA"}, {HPHW_BA, 0x019, 0x00076, 0x0, "Scorpio Sr. EISA BA"}, {HPHW_BA, 0x020, 0x00076, 0x0, "Scorpio 100 EISA BA"}, {HPHW_BA, 0x021, 0x00076, 0x0, "Spectra 50 EISA BA"}, {HPHW_BA, 0x022, 0x00076, 0x0, "Spectra 75 EISA BA"}, {HPHW_BA, 0x023, 0x00076, 0x0, "Spectra 100 EISA BA"}, {HPHW_BA, 0x026, 0x00076, 0x0, "CoralII Jaguar EISA BA"}, {HPHW_BA, 0x010, 0x00078, 0x0, "Pace VME BA"}, {HPHW_BA, 0x011, 0x00078, 0x0, "Sidewinder VME BA"}, {HPHW_BA, 0x01A, 0x00078, 0x0, "Anole 64 VME BA"}, {HPHW_BA, 0x01B, 0x00078, 0x0, "Anole 100 VME BA"}, {HPHW_BA, 0x024, 0x00078, 0x0, "Fast Pace VME BA"}, {HPHW_BA, 0x034, 0x00078, 0x0, "Anole T VME BA"}, {HPHW_BA, 0x04A, 0x00078, 0x0, "Anole L2 132 VME BA"}, {HPHW_BA, 0x04C, 0x00078, 0x0, "Anole L2 165 VME BA"}, {HPHW_BA, 0x011, 0x00081, 0x0, "WB-96 Core BA"}, {HPHW_BA, 0x012, 0x00081, 0x0, "Orville UX Core BA"}, {HPHW_BA, 0x013, 0x00081, 0x0, "Wilbur UX Core BA"}, {HPHW_BA, 0x014, 0x00081, 0x0, "WB-80 Core BA"}, {HPHW_BA, 0x015, 0x00081, 0x0, "KittyHawk GSY Core BA"}, {HPHW_BA, 0x016, 0x00081, 0x0, "Gecko Core BA"}, {HPHW_BA, 0x018, 0x00081, 0x0, "Gecko Optional BA"}, {HPHW_BA, 0x01A, 0x00081, 0x0, "Anole 64 Core BA"}, {HPHW_BA, 0x01B, 0x00081, 0x0, "Anole 100 Core BA"}, {HPHW_BA, 0x01C, 0x00081, 0x0, "Gecko 80 Core BA"}, {HPHW_BA, 0x01D, 0x00081, 0x0, "Gecko 100 Core BA"}, {HPHW_BA, 0x01F, 0x00081, 0x0, "SkyHawk 100/120 Core BA"}, {HPHW_BA, 0x027, 0x00081, 0x0, "Piranha 100 Core BA"}, {HPHW_BA, 0x028, 0x00081, 0x0, "Mirage Jr Core BA"}, {HPHW_BA, 0x029, 0x00081, 0x0, "Mirage Core BA"}, {HPHW_BA, 0x02A, 0x00081, 0x0, "Electra Core BA"}, {HPHW_BA, 0x02B, 0x00081, 0x0, "Mirage 80 Core BA"}, {HPHW_BA, 0x02C, 0x00081, 0x0, "Mirage 100+ Core BA"}, {HPHW_BA, 0x02E, 0x00081, 0x0, "UL 350 Lasi Core BA"}, {HPHW_BA, 0x02F, 0x00081, 0x0, "UL 550 Lasi Core BA"}, {HPHW_BA, 0x032, 0x00081, 0x0, "Raven T' Core BA"}, {HPHW_BA, 0x033, 0x00081, 0x0, "Anole T Core BA"}, {HPHW_BA, 0x034, 0x00081, 0x0, "SAIC L-80 Core BA"}, {HPHW_BA, 0x035, 0x00081, 0x0, "PCX-L2 712/132 Core BA"}, {HPHW_BA, 0x036, 0x00081, 0x0, "PCX-L2 712/160 Core BA"}, {HPHW_BA, 0x03B, 0x00081, 0x0, "Raven U/L2 Core BA"}, {HPHW_BA, 0x03C, 0x00081, 0x0, "Merlin 132 Core BA"}, {HPHW_BA, 0x03D, 0x00081, 0x0, "Merlin 160 Core BA"}, {HPHW_BA, 0x03E, 0x00081, 0x0, "Merlin+ 132 Core BA"}, {HPHW_BA, 0x03F, 0x00081, 0x0, "Merlin+ 180 Core BA"}, {HPHW_BA, 0x044, 0x00081, 0x0, "Mohawk Core BA"}, {HPHW_BA, 0x045, 0x00081, 0x0, "Rocky1 Core BA"}, {HPHW_BA, 0x046, 0x00081, 0x0, "Rocky2 120 Core BA"}, {HPHW_BA, 0x047, 0x00081, 0x0, "Rocky2 150 Core BA"}, {HPHW_BA, 0x04B, 0x00081, 0x0, "Anole L2 132 Core BA"}, {HPHW_BA, 0x04D, 0x00081, 0x0, "Anole L2 165 Core BA"}, {HPHW_BA, 
0x04E, 0x00081, 0x0, "Kiji L2 132 Core BA"}, {HPHW_BA, 0x050, 0x00081, 0x0, "Merlin Jr 132 Core BA"}, {HPHW_BA, 0x051, 0x00081, 0x0, "Firehawk Core BA"}, {HPHW_BA, 0x056, 0x00081, 0x0, "Raven+ w SE FWSCSI Core BA"}, {HPHW_BA, 0x057, 0x00081, 0x0, "Raven+ w Diff FWSCSI Core BA"}, {HPHW_BA, 0x058, 0x00081, 0x0, "FireHawk 200 Core BA"}, {HPHW_BA, 0x05C, 0x00081, 0x0, "SummitHawk 230 Core BA"}, {HPHW_BA, 0x05E, 0x00081, 0x0, "Staccato 132 Core BA"}, {HPHW_BA, 0x05E, 0x00081, 0x0, "Staccato 180 Core BA"}, {HPHW_BA, 0x05F, 0x00081, 0x0, "Staccato 180 Lasi"}, {HPHW_BA, 0x800, 0x00081, 0x0, "Hitachi Tiny 64 Core BA"}, {HPHW_BA, 0x801, 0x00081, 0x0, "Hitachi Tiny 80 Core BA"}, {HPHW_BA, 0x004, 0x0008B, 0x0, "Anole Optional PCMCIA BA"}, {HPHW_BA, 0x004, 0x0008E, 0x0, "GSC ITR Wax BA"}, {HPHW_BA, 0x00C, 0x0008E, 0x0, "Gecko Optional Wax BA"}, {HPHW_BA, 0x010, 0x0008E, 0x0, "Pace Wax BA"}, {HPHW_BA, 0x011, 0x0008E, 0x0, "SuperPace Wax BA"}, {HPHW_BA, 0x012, 0x0008E, 0x0, "Mirage Jr Wax BA"}, {HPHW_BA, 0x013, 0x0008E, 0x0, "Mirage Wax BA"}, {HPHW_BA, 0x014, 0x0008E, 0x0, "Electra Wax BA"}, {HPHW_BA, 0x017, 0x0008E, 0x0, "Raven Backplane Wax BA"}, {HPHW_BA, 0x01E, 0x0008E, 0x0, "Raven T' Wax BA"}, {HPHW_BA, 0x01F, 0x0008E, 0x0, "SkyHawk Wax BA"}, {HPHW_BA, 0x023, 0x0008E, 0x0, "Rocky1 Wax BA"}, {HPHW_BA, 0x02B, 0x0008E, 0x0, "Mirage 80 Wax BA"}, {HPHW_BA, 0x02C, 0x0008E, 0x0, "Mirage 100+ Wax BA"}, {HPHW_BA, 0x030, 0x0008E, 0x0, "UL 350 Core Wax BA"}, {HPHW_BA, 0x031, 0x0008E, 0x0, "UL 550 Core Wax BA"}, {HPHW_BA, 0x034, 0x0008E, 0x0, "SAIC L-80 Wax BA"}, {HPHW_BA, 0x03A, 0x0008E, 0x0, "Merlin+ Wax BA"}, {HPHW_BA, 0x040, 0x0008E, 0x0, "Merlin 132 Wax BA"}, {HPHW_BA, 0x041, 0x0008E, 0x0, "Merlin 160 Wax BA"}, {HPHW_BA, 0x043, 0x0008E, 0x0, "Merlin 132/160 Wax BA"}, {HPHW_BA, 0x052, 0x0008E, 0x0, "Raven+ Hi Power Backplane w/EISA Wax BA"}, {HPHW_BA, 0x054, 0x0008E, 0x0, "Raven+ Lo Power Backplane w/EISA Wax BA"}, {HPHW_BA, 0x059, 0x0008E, 0x0, "FireHawk 200 Wax BA"}, {HPHW_BA, 0x05A, 0x0008E, 0x0, "Raven+ L2 Backplane w/EISA Wax BA"}, {HPHW_BA, 0x05D, 0x0008E, 0x0, "SummitHawk Wax BA"}, {HPHW_BA, 0x800, 0x0008E, 0x0, "Hitachi Tiny 64 Wax BA"}, {HPHW_BA, 0x801, 0x0008E, 0x0, "Hitachi Tiny 80 Wax BA"}, {HPHW_BA, 0x011, 0x00090, 0x0, "SuperPace Wax EISA BA"}, {HPHW_BA, 0x017, 0x00090, 0x0, "Raven Backplane Wax EISA BA"}, {HPHW_BA, 0x01E, 0x00090, 0x0, "Raven T' Wax EISA BA"}, {HPHW_BA, 0x01F, 0x00090, 0x0, "SkyHawk 100/120 Wax EISA BA"}, {HPHW_BA, 0x027, 0x00090, 0x0, "Piranha 100 Wax EISA BA"}, {HPHW_BA, 0x028, 0x00090, 0x0, "Mirage Jr Wax EISA BA"}, {HPHW_BA, 0x029, 0x00090, 0x0, "Mirage Wax EISA BA"}, {HPHW_BA, 0x02A, 0x00090, 0x0, "Electra Wax EISA BA"}, {HPHW_BA, 0x02B, 0x00090, 0x0, "Mirage 80 Wax EISA BA"}, {HPHW_BA, 0x02C, 0x00090, 0x0, "Mirage 100+ Wax EISA BA"}, {HPHW_BA, 0x030, 0x00090, 0x0, "UL 350 Wax EISA BA"}, {HPHW_BA, 0x031, 0x00090, 0x0, "UL 550 Wax EISA BA"}, {HPHW_BA, 0x034, 0x00090, 0x0, "SAIC L-80 Wax EISA BA"}, {HPHW_BA, 0x03A, 0x00090, 0x0, "Merlin+ Wax EISA BA"}, {HPHW_BA, 0x040, 0x00090, 0x0, "Merlin 132 Wax EISA BA"}, {HPHW_BA, 0x041, 0x00090, 0x0, "Merlin 160 Wax EISA BA"}, {HPHW_BA, 0x043, 0x00090, 0x0, "Merlin 132/160 Wax EISA BA"}, {HPHW_BA, 0x052, 0x00090, 0x0, "Raven Hi Power Backplane Wax EISA BA"}, {HPHW_BA, 0x054, 0x00090, 0x0, "Raven Lo Power Backplane Wax EISA BA"}, {HPHW_BA, 0x059, 0x00090, 0x0, "FireHawk 200 Wax EISA BA"}, {HPHW_BA, 0x05A, 0x00090, 0x0, "Raven L2 Backplane Wax EISA BA"}, {HPHW_BA, 0x05D, 0x00090, 0x0, "SummitHawk Wax EISA BA"}, {HPHW_BA, 0x800, 
0x00090, 0x0, "Hitachi Tiny 64 Wax EISA BA"}, {HPHW_BA, 0x801, 0x00090, 0x0, "Hitachi Tiny 80 Wax EISA BA"}, {HPHW_BA, 0x01A, 0x00093, 0x0, "Anole 64 TIMI BA"}, {HPHW_BA, 0x01B, 0x00093, 0x0, "Anole 100 TIMI BA"}, {HPHW_BA, 0x034, 0x00093, 0x0, "Anole T TIMI BA"}, {HPHW_BA, 0x04A, 0x00093, 0x0, "Anole L2 132 TIMI BA"}, {HPHW_BA, 0x04C, 0x00093, 0x0, "Anole L2 165 TIMI BA"}, {HPHW_BA, 0x582, 0x000A5, 0x00, "Epic PCI Bridge"}, {HPHW_BCPORT, 0x504, 0x00000, 0x00, "Phantom PseudoBC GSC+ Port"}, {HPHW_BCPORT, 0x505, 0x00000, 0x00, "Phantom PseudoBC GSC+ Port"}, {HPHW_BCPORT, 0x503, 0x0000C, 0x00, "Java BC GSC+ Port"}, {HPHW_BCPORT, 0x57F, 0x0000C, 0x00, "Hitachi Ghostview GSC+ Port"}, {HPHW_BCPORT, 0x501, 0x0000C, 0x00, "U2-IOA BC GSC+ Port"}, {HPHW_BCPORT, 0x502, 0x0000C, 0x00, "Uturn-IOA BC GSC+ Port"}, {HPHW_BCPORT, 0x780, 0x0000C, 0x00, "Astro BC Ropes Port"}, {HPHW_BCPORT, 0x506, 0x0000C, 0x00, "NEC-IOS BC HSC Port"}, {HPHW_BCPORT, 0x004, 0x0000C, 0x00, "Cheetah BC SMB Port"}, {HPHW_BCPORT, 0x006, 0x0000C, 0x00, "Cheetah BC MID_BUS Port"}, {HPHW_BCPORT, 0x005, 0x0000C, 0x00, "Condor BC MID_BUS Port"}, {HPHW_BCPORT, 0x100, 0x0000C, 0x00, "Condor BC HP-PB Port"}, {HPHW_BCPORT, 0x184, 0x0000C, 0x00, "Summit BC Port"}, {HPHW_BCPORT, 0x101, 0x0000C, 0x00, "Summit BC HP-PB Port"}, {HPHW_BCPORT, 0x102, 0x0000C, 0x00, "HP-PB Port (prefetch)"}, {HPHW_BCPORT, 0x500, 0x0000C, 0x00, "Gecko BOA BC GSC+ Port"}, {HPHW_BCPORT, 0x103, 0x0000C, 0x00, "Gecko BOA BC HP-PB Port"}, {HPHW_BCPORT, 0x507, 0x0000C, 0x00, "Keyaki BC GSC+ Port"}, {HPHW_BCPORT, 0x508, 0x0000C, 0x00, "Keyaki-DX BC GSC+ Port"}, {HPHW_BCPORT, 0x584, 0x0000C, 0x10, "DEW BC Runway Port"}, {HPHW_BCPORT, 0x800, 0x0000C, 0x10, "DEW BC Merced Port"}, {HPHW_BCPORT, 0x801, 0x0000C, 0x10, "SMC Bus Interface Merced Bus0"}, {HPHW_BCPORT, 0x802, 0x0000C, 0x10, "SMC Bus INterface Merced Bus1"}, {HPHW_BCPORT, 0x803, 0x0000C, 0x10, "IKE I/O BC Merced Port"}, {HPHW_BCPORT, 0x781, 0x0000C, 0x00, "IKE I/O BC Ropes Port"}, {HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"}, {HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"}, {HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"}, {HPHW_BRIDGE, 0x05D, 0x0000A, 0x00, "SummitHawk Dino PCI Bridge"}, {HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"}, {HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"}, {HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"}, {HPHW_BRIDGE, 0x583, 0x000A5, 0x00, "Saga PCI Bridge"}, {HPHW_BRIDGE, 0x783, 0x0000A, 0x00, "Mercury PCI Bridge"}, {HPHW_BRIDGE, 0x784, 0x0000A, 0x00, "Quicksilver AGP Bridge"}, {HPHW_B_DMA, 0x004, 0x00018, 0x00, "Parallel I/O"}, {HPHW_B_DMA, 0x004, 0x00019, 0x00, "Parallel RDB"}, {HPHW_B_DMA, 0x004, 0x00020, 0x80, "MID_BUS PSI"}, {HPHW_B_DMA, 0x004, 0x0002F, 0x80, "HP-PB Transit PSI (36960A)"}, {HPHW_B_DMA, 0x008, 0x00051, 0x80, "HP-PB Transit 802.3"}, {HPHW_B_DMA, 0x004, 0x00052, 0x80, "Miura LAN/Console (J2146A)"}, {HPHW_B_DMA, 0x008, 0x00058, 0x80, "HP-PB Transit 802.4"}, {HPHW_B_DMA, 0x005, 0x00060, 0x80, "KittyHawk CSY Core LAN/Console"}, {HPHW_B_DMA, 0x014, 0x00060, 0x80, "Diablo LAN/Console"}, {HPHW_B_DMA, 0x054, 0x00060, 0x80, "Countach LAN/Console"}, {HPHW_B_DMA, 0x004, 0x00094, 0x80, "KittyHawk GSC+ Exerciser"}, {HPHW_B_DMA, 0x004, 0x00100, 0x80, "HP-PB HF Interface"}, {HPHW_B_DMA, 0x000, 0x00206, 0x80, "MELCO HMPHA"}, {HPHW_B_DMA, 0x005, 0x00206, 0x80, "MELCO HMPHA_10"}, {HPHW_B_DMA, 0x006, 0x00206, 0x80, "MELCO HMQHA"}, {HPHW_B_DMA, 0x007, 0x00206, 0x80, "MELCO HMQHA_10"}, {HPHW_B_DMA, 0x004, 0x207, 0x80, 
"MELCO HNDWA MDWS-70"}, {HPHW_CIO, 0x004, 0x00010, 0x00, "VLSI CIO"}, {HPHW_CIO, 0x005, 0x00010, 0x00, "Silverfox CIO"}, {HPHW_CIO, 0x006, 0x00010, 0x00, "Emerald CIO"}, {HPHW_CIO, 0x008, 0x00010, 0x00, "Discrete CIO"}, {HPHW_CONSOLE, 0x004, 0x0001C, 0x00, "Cheetah console"}, {HPHW_CONSOLE, 0x005, 0x0001C, 0x00, "Emerald console"}, {HPHW_CONSOLE, 0x01A, 0x0001F, 0x00, "Jason/Anole 64 Null Console"}, {HPHW_CONSOLE, 0x01B, 0x0001F, 0x00, "Jason/Anole 100 Null Console"}, {HPHW_FABRIC, 0x004, 0x000AA, 0x80, "Halfdome DNA Central Agent"}, {HPHW_FABRIC, 0x005, 0x000AA, 0x80, "Keystone DNA Central Agent"}, {HPHW_FABRIC, 0x007, 0x000AA, 0x80, "Caribe DNA Central Agent"}, {HPHW_FABRIC, 0x004, 0x000AB, 0x00, "Halfdome TOGO Fabric Crossbar"}, {HPHW_FABRIC, 0x005, 0x000AB, 0x00, "Keystone TOGO Fabric Crossbar"}, {HPHW_FABRIC, 0x004, 0x000AC, 0x00, "Halfdome Sakura Fabric Router"}, {HPHW_FIO, 0x025, 0x0002E, 0x80, "Armyknife Optional X.25"}, {HPHW_FIO, 0x004, 0x0004F, 0x0, "8-Port X.25 EISA-ACC (AMSO)"}, {HPHW_FIO, 0x004, 0x00071, 0x0, "Cobra Core SCSI"}, {HPHW_FIO, 0x005, 0x00071, 0x0, "Coral Core SCSI"}, {HPHW_FIO, 0x006, 0x00071, 0x0, "Bushmaster Core SCSI"}, {HPHW_FIO, 0x007, 0x00071, 0x0, "Scorpio Core SCSI"}, {HPHW_FIO, 0x008, 0x00071, 0x0, "Flounder Core SCSI"}, {HPHW_FIO, 0x009, 0x00071, 0x0, "Outfield Core SCSI"}, {HPHW_FIO, 0x00A, 0x00071, 0x0, "CoralII Core SCSI"}, {HPHW_FIO, 0x00B, 0x00071, 0x0, "Scorpio Jr. Core SCSI"}, {HPHW_FIO, 0x00C, 0x00071, 0x0, "Strider-50 Core SCSI"}, {HPHW_FIO, 0x00D, 0x00071, 0x0, "Strider-33 Core SCSI"}, {HPHW_FIO, 0x00E, 0x00071, 0x0, "Trailways-50 Core SCSI"}, {HPHW_FIO, 0x00F, 0x00071, 0x0, "Trailways-33 Core SCSI"}, {HPHW_FIO, 0x010, 0x00071, 0x0, "Pace Core SCSI"}, {HPHW_FIO, 0x011, 0x00071, 0x0, "Sidewinder Core SCSI"}, {HPHW_FIO, 0x019, 0x00071, 0x0, "Scorpio Sr. Core SCSI"}, {HPHW_FIO, 0x020, 0x00071, 0x0, "Scorpio 100 Core SCSI"}, {HPHW_FIO, 0x021, 0x00071, 0x0, "Spectra 50 Core SCSI"}, {HPHW_FIO, 0x022, 0x00071, 0x0, "Spectra 75 Core SCSI"}, {HPHW_FIO, 0x023, 0x00071, 0x0, "Spectra 100 Core SCSI"}, {HPHW_FIO, 0x024, 0x00071, 0x0, "Fast Pace Core SCSI"}, {HPHW_FIO, 0x026, 0x00071, 0x0, "CoralII Jaguar Core SCSI"}, {HPHW_FIO, 0x004, 0x00072, 0x0, "Cobra Core LAN (802.3)"}, {HPHW_FIO, 0x005, 0x00072, 0x0, "Coral Core LAN (802.3)"}, {HPHW_FIO, 0x006, 0x00072, 0x0, "Bushmaster Core LAN (802.3)"}, {HPHW_FIO, 0x007, 0x00072, 0x0, "Scorpio Core LAN (802.3)"}, {HPHW_FIO, 0x008, 0x00072, 0x0, "Flounder Core LAN (802.3)"}, {HPHW_FIO, 0x009, 0x00072, 0x0, "Outfield Core LAN (802.3)"}, {HPHW_FIO, 0x00A, 0x00072, 0x0, "CoralII Core LAN (802.3)"}, {HPHW_FIO, 0x00B, 0x00072, 0x0, "Scorpio Jr. Core LAN (802.3)"}, {HPHW_FIO, 0x00C, 0x00072, 0x0, "Strider-50 Core LAN (802.3)"}, {HPHW_FIO, 0x00D, 0x00072, 0x0, "Strider-33 Core LAN (802.3)"}, {HPHW_FIO, 0x00E, 0x00072, 0x0, "Trailways-50 Core LAN (802.3)"}, {HPHW_FIO, 0x00F, 0x00072, 0x0, "Trailways-33 Core LAN (802.3)"}, {HPHW_FIO, 0x010, 0x00072, 0x0, "Pace Core LAN (802.3)"}, {HPHW_FIO, 0x011, 0x00072, 0x0, "Sidewinder Core LAN (802.3)"}, {HPHW_FIO, 0x019, 0x00072, 0x0, "Scorpio Sr. 
Core LAN (802.3)"}, {HPHW_FIO, 0x020, 0x00072, 0x0, "Scorpio 100 Core LAN (802.3)"}, {HPHW_FIO, 0x021, 0x00072, 0x0, "Spectra 50 Core LAN (802.3)"}, {HPHW_FIO, 0x022, 0x00072, 0x0, "Spectra 75 Core LAN (802.3)"}, {HPHW_FIO, 0x023, 0x00072, 0x0, "Spectra 100 Core LAN (802.3)"}, {HPHW_FIO, 0x024, 0x00072, 0x0, "Fast Pace Core LAN (802.3)"}, {HPHW_FIO, 0x026, 0x00072, 0x0, "CoralII Jaguar Core LAN (802.3)"}, {HPHW_FIO, 0x004, 0x00073, 0x0, "Cobra Core HIL"}, {HPHW_FIO, 0x005, 0x00073, 0x0, "Coral Core HIL"}, {HPHW_FIO, 0x006, 0x00073, 0x0, "Bushmaster Core HIL"}, {HPHW_FIO, 0x007, 0x00073, 0x0, "Scorpio Core HIL"}, {HPHW_FIO, 0x008, 0x00073, 0x0, "Flounder Core HIL"}, {HPHW_FIO, 0x009, 0x00073, 0x0, "Outfield Core HIL"}, {HPHW_FIO, 0x00A, 0x00073, 0x0, "CoralII Core HIL"}, {HPHW_FIO, 0x00B, 0x00073, 0x0, "Scorpio Jr. Core HIL"}, {HPHW_FIO, 0x00C, 0x00073, 0x0, "Strider-50 Core HIL"}, {HPHW_FIO, 0x00D, 0x00073, 0x0, "Strider-33 Core HIL"}, {HPHW_FIO, 0x00E, 0x00073, 0x0, "Trailways-50 Core HIL"}, {HPHW_FIO, 0x00F, 0x00073, 0x0, "Trailways-33 Core HIL"}, {HPHW_FIO, 0x010, 0x00073, 0x0, "Pace Core HIL"}, {HPHW_FIO, 0x011, 0x00073, 0xcc, "SuperPace Wax HIL"}, {HPHW_FIO, 0x012, 0x00073, 0x0, "Mirage Jr Wax HIL"}, {HPHW_FIO, 0x013, 0x00073, 0x0, "Mirage 100 Wax HIL"}, {HPHW_FIO, 0x014, 0x00073, 0x0, "Electra Wax HIL"}, {HPHW_FIO, 0x017, 0x00073, 0x0, "Raven Backplane Wax HIL"}, {HPHW_FIO, 0x019, 0x00073, 0x0, "Scorpio Sr. Core HIL"}, {HPHW_FIO, 0x01E, 0x00073, 0x0, "Raven T' Wax HIL"}, {HPHW_FIO, 0x01F, 0x00073, 0x0, "SkyHawk 100/120 Wax HIL"}, {HPHW_FIO, 0x020, 0x00073, 0x0, "Scorpio 100 Core HIL"}, {HPHW_FIO, 0x021, 0x00073, 0x0, "Spectra 50 Core HIL"}, {HPHW_FIO, 0x022, 0x00073, 0x0, "Spectra 75 Core HIL"}, {HPHW_FIO, 0x023, 0x00073, 0x0, "Spectra 100 Core HIL"}, {HPHW_FIO, 0x024, 0x00073, 0x0, "Fast Pace Core HIL"}, {HPHW_FIO, 0x026, 0x00073, 0x0, "CoralII Jaguar Core HIL"}, {HPHW_FIO, 0x02B, 0x00073, 0x0, "Mirage 80 Wax HIL"}, {HPHW_FIO, 0x02C, 0x00073, 0x0, "Mirage 100+ Wax HIL"}, {HPHW_FIO, 0x03A, 0x00073, 0x0, "Merlin+ Wax HIL"}, {HPHW_FIO, 0x040, 0x00073, 0x0, "Merlin 132 Wax HIL"}, {HPHW_FIO, 0x041, 0x00073, 0x0, "Merlin 160 Wax HIL"}, {HPHW_FIO, 0x043, 0x00073, 0x0, "Merlin 132/160 Wax HIL"}, {HPHW_FIO, 0x052, 0x00073, 0x0, "Raven+ Hi Power Backplane w/EISA Wax HIL"}, {HPHW_FIO, 0x053, 0x00073, 0x0, "Raven+ Hi Power Backplane wo/EISA Wax HIL"}, {HPHW_FIO, 0x054, 0x00073, 0x0, "Raven+ Lo Power Backplane w/EISA Wax HIL"}, {HPHW_FIO, 0x055, 0x00073, 0x0, "Raven+ Lo Power Backplane wo/EISA Wax HIL"}, {HPHW_FIO, 0x059, 0x00073, 0x0, "FireHawk 200 Wax HIL"}, {HPHW_FIO, 0x05A, 0x00073, 0x0, "Raven+ L2 Backplane w/EISA Wax HIL"}, {HPHW_FIO, 0x05B, 0x00073, 0x0, "Raven+ L2 Backplane wo/EISA Wax HIL"}, {HPHW_FIO, 0x05D, 0x00073, 0x0, "SummitHawk Wax HIL"}, {HPHW_FIO, 0x800, 0x00073, 0x0, "Hitachi Tiny 64 Wax HIL"}, {HPHW_FIO, 0x801, 0x00073, 0x0, "Hitachi Tiny 80 Wax HIL"}, {HPHW_FIO, 0x004, 0x00074, 0x0, "Cobra Core Centronics"}, {HPHW_FIO, 0x005, 0x00074, 0x0, "Coral Core Centronics"}, {HPHW_FIO, 0x006, 0x00074, 0x0, "Bushmaster Core Centronics"}, {HPHW_FIO, 0x007, 0x00074, 0x0, "Scorpio Core Centronics"}, {HPHW_FIO, 0x008, 0x00074, 0x0, "Flounder Core Centronics"}, {HPHW_FIO, 0x009, 0x00074, 0x0, "Outfield Core Centronics"}, {HPHW_FIO, 0x00A, 0x00074, 0x0, "CoralII Core Centronics"}, {HPHW_FIO, 0x00B, 0x00074, 0x0, "Scorpio Jr. 
Core Centronics"}, {HPHW_FIO, 0x00C, 0x00074, 0x0, "Strider-50 Core Centronics"}, {HPHW_FIO, 0x00D, 0x00074, 0x0, "Strider-33 Core Centronics"}, {HPHW_FIO, 0x00E, 0x00074, 0x0, "Trailways-50 Core Centronics"}, {HPHW_FIO, 0x00F, 0x00074, 0x0, "Trailways-33 Core Centronics"}, {HPHW_FIO, 0x010, 0x00074, 0x0, "Pace Core Centronics"}, {HPHW_FIO, 0x011, 0x00074, 0x0, "Sidewinder Core Centronics"}, {HPHW_FIO, 0x015, 0x00074, 0x0, "KittyHawk GSY Core Centronics"}, {HPHW_FIO, 0x016, 0x00074, 0x0, "Gecko Core Centronics"}, {HPHW_FIO, 0x019, 0x00074, 0x0, "Scorpio Sr. Core Centronics"}, {HPHW_FIO, 0x01A, 0x00074, 0x0, "Anole 64 Core Centronics"}, {HPHW_FIO, 0x01B, 0x00074, 0x0, "Anole 100 Core Centronics"}, {HPHW_FIO, 0x01C, 0x00074, 0x0, "Gecko 80 Core Centronics"}, {HPHW_FIO, 0x01D, 0x00074, 0x0, "Gecko 100 Core Centronics"}, {HPHW_FIO, 0x01F, 0x00074, 0x0, "SkyHawk 100/120 Core Centronics"}, {HPHW_FIO, 0x020, 0x00074, 0x0, "Scorpio 100 Core Centronics"}, {HPHW_FIO, 0x021, 0x00074, 0x0, "Spectra 50 Core Centronics"}, {HPHW_FIO, 0x022, 0x00074, 0x0, "Spectra 75 Core Centronics"}, {HPHW_FIO, 0x023, 0x00074, 0x0, "Spectra 100 Core Centronics"}, {HPHW_FIO, 0x024, 0x00074, 0x0, "Fast Pace Core Centronics"}, {HPHW_FIO, 0x026, 0x00074, 0x0, "CoralII Jaguar Core Centronics"}, {HPHW_FIO, 0x027, 0x00074, 0x0, "Piranha 100 Core Centronics"}, {HPHW_FIO, 0x028, 0x00074, 0x0, "Mirage Jr Core Centronics"}, {HPHW_FIO, 0x029, 0x00074, 0x0, "Mirage Core Centronics"}, {HPHW_FIO, 0x02A, 0x00074, 0x0, "Electra Core Centronics"}, {HPHW_FIO, 0x02B, 0x00074, 0x0, "Mirage 80 Core Centronics"}, {HPHW_FIO, 0x02C, 0x00074, 0x0, "Mirage 100+ Core Centronics"}, {HPHW_FIO, 0x02E, 0x00074, 0x0, "UL 350 Core Centronics"}, {HPHW_FIO, 0x02F, 0x00074, 0x0, "UL 550 Core Centronics"}, {HPHW_FIO, 0x032, 0x00074, 0x0, "Raven T' Core Centronics"}, {HPHW_FIO, 0x033, 0x00074, 0x0, "Anole T Core Centronics"}, {HPHW_FIO, 0x034, 0x00074, 0x0, "SAIC L-80 Core Centronics"}, {HPHW_FIO, 0x035, 0x00074, 0x0, "PCX-L2 712/132 Core Centronics"}, {HPHW_FIO, 0x036, 0x00074, 0x0, "PCX-L2 712/160 Core Centronics"}, {HPHW_FIO, 0x03B, 0x00074, 0x0, "Raven U/L2 Core Centronics"}, {HPHW_FIO, 0x03C, 0x00074, 0x0, "Merlin 132 Core Centronics"}, {HPHW_FIO, 0x03D, 0x00074, 0x0, "Merlin 160 Core Centronics"}, {HPHW_FIO, 0x03E, 0x00074, 0x0, "Merlin+ 132 Core Centronics"}, {HPHW_FIO, 0x03F, 0x00074, 0x0, "Merlin+ 180 Core Centronics"}, {HPHW_FIO, 0x044, 0x00074, 0x0, "Mohawk Core Centronics"}, {HPHW_FIO, 0x045, 0x00074, 0x0, "Rocky1 Core Centronics"}, {HPHW_FIO, 0x046, 0x00074, 0x0, "Rocky2 120 Core Centronics"}, {HPHW_FIO, 0x047, 0x00074, 0x0, "Rocky2 150 Core Centronics"}, {HPHW_FIO, 0x04B, 0x00074, 0x0, "Anole L2 132 Core Centronics"}, {HPHW_FIO, 0x04D, 0x00074, 0x0, "Anole L2 165 Core Centronics"}, {HPHW_FIO, 0x050, 0x00074, 0x0, "Merlin Jr 132 Core Centronics"}, {HPHW_FIO, 0x051, 0x00074, 0x0, "Firehawk Core Centronics"}, {HPHW_FIO, 0x056, 0x00074, 0x0, "Raven+ w SE FWSCSI Core Centronics"}, {HPHW_FIO, 0x057, 0x00074, 0x0, "Raven+ w Diff FWSCSI Core Centronics"}, {HPHW_FIO, 0x058, 0x00074, 0x0, "FireHawk 200 Core Centronics"}, {HPHW_FIO, 0x05C, 0x00074, 0x0, "SummitHawk 230 Core Centronics"}, {HPHW_FIO, 0x800, 0x00074, 0x0, "Hitachi Tiny 64 Core Centronics"}, {HPHW_FIO, 0x801, 0x00074, 0x0, "Hitachi Tiny 80 Core Centronics"}, {HPHW_FIO, 0x004, 0x00075, 0x0, "Cobra Core RS-232"}, {HPHW_FIO, 0x005, 0x00075, 0x0, "Coral Core RS-232"}, {HPHW_FIO, 0x006, 0x00075, 0x0, "Bushmaster Core RS-232"}, {HPHW_FIO, 0x007, 0x00075, 0x0, "Scorpio Core RS-232"}, {HPHW_FIO, 
0x008, 0x00075, 0x0, "Flounder Core RS-232"}, {HPHW_FIO, 0x009, 0x00075, 0x0, "Outfield Core RS-232"}, {HPHW_FIO, 0x00A, 0x00075, 0x0, "CoralII Core RS-232"}, {HPHW_FIO, 0x00B, 0x00075, 0x0, "Scorpio Jr. Core RS-232"}, {HPHW_FIO, 0x00C, 0x00075, 0x0, "Strider-50 Core RS-232"}, {HPHW_FIO, 0x00D, 0x00075, 0x0, "Strider-33 Core RS-232"}, {HPHW_FIO, 0x00E, 0x00075, 0x0, "Trailways-50 Core RS-232"}, {HPHW_FIO, 0x00F, 0x00075, 0x0, "Trailways-33 Core RS-232"}, {HPHW_FIO, 0x010, 0x00075, 0x0, "Pace Core RS-232"}, {HPHW_FIO, 0x011, 0x00075, 0x0, "Sidewinder Core RS-232"}, {HPHW_FIO, 0x019, 0x00075, 0x0, "Scorpio Sr. Core RS-232"}, {HPHW_FIO, 0x020, 0x00075, 0x0, "Scorpio 100 Core RS-232"}, {HPHW_FIO, 0x021, 0x00075, 0x0, "Spectra 50 Core RS-232"}, {HPHW_FIO, 0x022, 0x00075, 0x0, "Spectra 75 Core RS-232"}, {HPHW_FIO, 0x023, 0x00075, 0x0, "Spectra 100 Core RS-232"}, {HPHW_FIO, 0x024, 0x00075, 0x0, "Fast Pace Core RS-232"}, {HPHW_FIO, 0x026, 0x00075, 0x0, "CoralII Jaguar Core RS-232"}, {HPHW_FIO, 0x004, 0x00077, 0x0, "Coral SGC Graphics"}, {HPHW_FIO, 0x005, 0x00077, 0x0, "Hyperdrive Optional Graphics"}, {HPHW_FIO, 0x006, 0x00077, 0x0, "Stinger Optional Graphics"}, {HPHW_FIO, 0x007, 0x00077, 0x0, "Scorpio Builtin Graphics"}, {HPHW_FIO, 0x008, 0x00077, 0x0, "Anole Hyperdrive Optional Graphics"}, {HPHW_FIO, 0x009, 0x00077, 0x0, "Thunder II graphics EISA form"}, {HPHW_FIO, 0x00A, 0x00077, 0x0, "Thunder II graphics GSA form"}, {HPHW_FIO, 0x00B, 0x00077, 0x0, "Scorpio Jr Builtin Graphics"}, {HPHW_FIO, 0x00C, 0x00077, 0x0, "Strider-50 SSC Graphics"}, {HPHW_FIO, 0x00D, 0x00077, 0x0, "Strider-33 SSC Graphics"}, {HPHW_FIO, 0x00E, 0x00077, 0x0, "Trailways-50 SSC Graphics"}, {HPHW_FIO, 0x00F, 0x00077, 0x0, "Trailways-33 SSC Graphics"}, {HPHW_FIO, 0x010, 0x00077, 0x0, "Pace SGC Graphics"}, {HPHW_FIO, 0x011, 0x00077, 0x0, "Mohawk Opt. 2D Graphics (Kid)"}, {HPHW_FIO, 0x012, 0x00077, 0x0, "Raven Opt. 2D Graphics (Goat)"}, {HPHW_FIO, 0x016, 0x00077, 0x0, "Lego 24 SCG Graphics"}, {HPHW_FIO, 0x017, 0x00077, 0x0, "Lego 24Z SCG Graphics"}, {HPHW_FIO, 0x018, 0x00077, 0x0, "Lego 48Z SCG Graphics"}, {HPHW_FIO, 0x019, 0x00077, 0x0, "Scorpio Sr Builtin Graphics"}, {HPHW_FIO, 0x020, 0x00077, 0x0, "Scorpio 100 Builtin Graphics"}, {HPHW_FIO, 0x021, 0x00077, 0x0, "Spectra 50 Builtin Graphics"}, {HPHW_FIO, 0x022, 0x00077, 0x0, "Spectra 75 Builtin Graphics"}, {HPHW_FIO, 0x023, 0x00077, 0x0, "Spectra 100 Builtin Graphics"}, {HPHW_FIO, 0x024, 0x00077, 0x0, "Fast Pace SGC Graphics"}, {HPHW_FIO, 0x006, 0x0007A, 0x0, "Bushmaster Audio"}, {HPHW_FIO, 0x008, 0x0007A, 0x0, "Flounder Audio"}, {HPHW_FIO, 0x004, 0x0007B, 0x0, "UL Optional Audio"}, {HPHW_FIO, 0x007, 0x0007B, 0x0, "Scorpio Audio"}, {HPHW_FIO, 0x00B, 0x0007B, 0x0, "Scorpio Jr. Audio"}, {HPHW_FIO, 0x00C, 0x0007B, 0x0, "Strider-50 Audio"}, {HPHW_FIO, 0x00D, 0x0007B, 0x0, "Strider-33 Audio"}, {HPHW_FIO, 0x00E, 0x0007B, 0x0, "Trailways-50 Audio"}, {HPHW_FIO, 0x00F, 0x0007B, 0x0, "Trailways-33 Audio"}, {HPHW_FIO, 0x015, 0x0007B, 0x0, "KittyHawk GSY Core Audio"}, {HPHW_FIO, 0x016, 0x0007B, 0x0, "Gecko Audio"}, {HPHW_FIO, 0x019, 0x0007B, 0x0, "Scorpio Sr. 
Audio"}, {HPHW_FIO, 0x01A, 0x0007B, 0x0, "Anole 64 Audio"}, {HPHW_FIO, 0x01B, 0x0007B, 0x0, "Anole 100 Audio"}, {HPHW_FIO, 0x01C, 0x0007B, 0x0, "Gecko 80 Audio"}, {HPHW_FIO, 0x01D, 0x0007B, 0x0, "Gecko 100 Audio"}, {HPHW_FIO, 0x01F, 0x0007B, 0x0, "SkyHawk 100/120 Audio"}, {HPHW_FIO, 0x020, 0x0007B, 0x0, "Scorpio 100 Audio"}, {HPHW_FIO, 0x021, 0x0007B, 0x0, "Spectra 50 Audio"}, {HPHW_FIO, 0x022, 0x0007B, 0x0, "Spectra 75 Audio"}, {HPHW_FIO, 0x023, 0x0007B, 0x0, "Spectra 100 Audio"}, {HPHW_FIO, 0x028, 0x0007B, 0x0, "Mirage Jr Audio"}, {HPHW_FIO, 0x029, 0x0007B, 0x0, "Mirage Audio"}, {HPHW_FIO, 0x02A, 0x0007B, 0x0, "Electra Audio"}, {HPHW_FIO, 0x02B, 0x0007B, 0x0, "Mirage 80 Audio"}, {HPHW_FIO, 0x02C, 0x0007B, 0x0, "Mirage 100+ Audio"}, {HPHW_FIO, 0x032, 0x0007B, 0x0, "Raven T' Audio"}, {HPHW_FIO, 0x034, 0x0007B, 0x0, "SAIC L-80 Audio"}, {HPHW_FIO, 0x035, 0x0007B, 0x0, "PCX-L2 712/132 Core Audio"}, {HPHW_FIO, 0x036, 0x0007B, 0x0, "PCX-L2 712/160 Core Audio"}, {HPHW_FIO, 0x03B, 0x0007B, 0x0, "Raven U/L2 Core Audio"}, {HPHW_FIO, 0x03C, 0x0007B, 0x0, "Merlin 132 Core Audio"}, {HPHW_FIO, 0x03D, 0x0007B, 0x0, "Merlin 160 Core Audio"}, {HPHW_FIO, 0x03E, 0x0007B, 0x0, "Merlin+ 132 Core Audio"}, {HPHW_FIO, 0x03F, 0x0007B, 0x0, "Merlin+ 180 Core Audio"}, {HPHW_FIO, 0x044, 0x0007B, 0x0, "Mohawk Core Audio"}, {HPHW_FIO, 0x046, 0x0007B, 0x0, "Rocky2 120 Core Audio"}, {HPHW_FIO, 0x047, 0x0007B, 0x0, "Rocky2 150 Core Audio"}, {HPHW_FIO, 0x04B, 0x0007B, 0x0, "Anole L2 132 Core Audio"}, {HPHW_FIO, 0x04D, 0x0007B, 0x0, "Anole L2 165 Core Audio"}, {HPHW_FIO, 0x04E, 0x0007B, 0x0, "Kiji L2 132 Core Audio"}, {HPHW_FIO, 0x050, 0x0007B, 0x0, "Merlin Jr 132 Core Audio"}, {HPHW_FIO, 0x051, 0x0007B, 0x0, "Firehawk Audio"}, {HPHW_FIO, 0x056, 0x0007B, 0x0, "Raven+ w SE FWSCSI Core Audio"}, {HPHW_FIO, 0x057, 0x0007B, 0x0, "Raven+ w Diff FWSCSI Core Audio"}, {HPHW_FIO, 0x058, 0x0007B, 0x0, "FireHawk 200 Audio"}, {HPHW_FIO, 0x05C, 0x0007B, 0x0, "SummitHawk 230 Core Audio"}, {HPHW_FIO, 0x800, 0x0007B, 0x0, "Hitachi Tiny 64 Audio"}, {HPHW_FIO, 0x801, 0x0007B, 0x0, "Hitachi Tiny 80 Audio"}, {HPHW_FIO, 0x009, 0x0007C, 0x0, "Outfield FW SCSI"}, {HPHW_FIO, 0x00A, 0x0007C, 0x0, "CoralII FW SCSI"}, {HPHW_FIO, 0x026, 0x0007C, 0x0, "CoralII Jaguar FW SCSI"}, {HPHW_FIO, 0x009, 0x0007D, 0x0, "Outfield FDDI"}, {HPHW_FIO, 0x00A, 0x0007D, 0x0, "CoralII FDDI"}, {HPHW_FIO, 0x026, 0x0007D, 0x0, "CoralII Jaguar FDDI"}, {HPHW_FIO, 0x010, 0x0007E, 0x0, "Pace Audio"}, {HPHW_FIO, 0x024, 0x0007E, 0x0, "Fast Pace Audio"}, {HPHW_FIO, 0x009, 0x0007F, 0x0, "Outfield Audio"}, {HPHW_FIO, 0x00A, 0x0007F, 0x0, "CoralII Audio"}, {HPHW_FIO, 0x026, 0x0007F, 0x0, "CoralII Jaguar Audio"}, {HPHW_FIO, 0x010, 0x00080, 0x0, "Pace Core HPIB"}, {HPHW_FIO, 0x024, 0x00080, 0x0, "Fast Pace Core HPIB"}, {HPHW_FIO, 0x015, 0x00082, 0x0, "KittyHawk GSY Core SCSI"}, {HPHW_FIO, 0x016, 0x00082, 0x0, "Gecko Core SCSI"}, {HPHW_FIO, 0x01A, 0x00082, 0x0, "Anole 64 Core SCSI"}, {HPHW_FIO, 0x01B, 0x00082, 0x0, "Anole 100 Core SCSI"}, {HPHW_FIO, 0x01C, 0x00082, 0x0, "Gecko 80 Core SCSI"}, {HPHW_FIO, 0x01D, 0x00082, 0x0, "Gecko 100 Core SCSI"}, {HPHW_FIO, 0x01F, 0x00082, 0x0, "SkyHawk 100/120 Core SCSI"}, {HPHW_FIO, 0x027, 0x00082, 0x0, "Piranha 100 Core SCSI"}, {HPHW_FIO, 0x028, 0x00082, 0x0, "Mirage Jr Core SCSI"}, {HPHW_FIO, 0x029, 0x00082, 0x0, "Mirage Core SCSI"}, {HPHW_FIO, 0x02A, 0x00082, 0x0, "Electra Core SCSI"}, {HPHW_FIO, 0x02B, 0x00082, 0x0, "Mirage 80 Core SCSI"}, {HPHW_FIO, 0x02C, 0x00082, 0x0, "Mirage 100+ Core SCSI"}, {HPHW_FIO, 0x02E, 0x00082, 0x0, "UL 350 Core 
SCSI"}, {HPHW_FIO, 0x02F, 0x00082, 0x0, "UL 550 Core SCSI"}, {HPHW_FIO, 0x032, 0x00082, 0x0, "Raven T' Core SCSI"}, {HPHW_FIO, 0x033, 0x00082, 0x0, "Anole T Core SCSI"}, {HPHW_FIO, 0x034, 0x00082, 0x0, "SAIC L-80 Core SCSI"}, {HPHW_FIO, 0x035, 0x00082, 0x0, "PCX-L2 712/132 Core SCSI"}, {HPHW_FIO, 0x036, 0x00082, 0x0, "PCX-L2 712/160 Core SCSI"}, {HPHW_FIO, 0x03B, 0x00082, 0x0, "Raven U/L2 Core SCSI"}, {HPHW_FIO, 0x03C, 0x00082, 0x0, "Merlin 132 Core SCSI"}, {HPHW_FIO, 0x03D, 0x00082, 0x0, "Merlin 160 Core SCSI"}, {HPHW_FIO, 0x03E, 0x00082, 0x0, "Merlin+ 132 Core SCSI"}, {HPHW_FIO, 0x03F, 0x00082, 0x0, "Merlin+ 180 Core SCSI"}, {HPHW_FIO, 0x044, 0x00082, 0x0, "Mohawk Core SCSI"}, {HPHW_FIO, 0x045, 0x00082, 0x0, "Rocky1 Core SCSI"}, {HPHW_FIO, 0x046, 0x00082, 0x0, "Rocky2 120 Core SCSI"}, {HPHW_FIO, 0x047, 0x00082, 0x0, "Rocky2 150 Core SCSI"}, {HPHW_FIO, 0x04B, 0x00082, 0x0, "Anole L2 132 Core SCSI"}, {HPHW_FIO, 0x04D, 0x00082, 0x0, "Anole L2 165 Core SCSI"}, {HPHW_FIO, 0x04E, 0x00082, 0x0, "Kiji L2 132 Core SCSI"}, {HPHW_FIO, 0x050, 0x00082, 0x0, "Merlin Jr 132 Core SCSI"}, {HPHW_FIO, 0x051, 0x00082, 0x0, "Firehawk Core SCSI"}, {HPHW_FIO, 0x056, 0x00082, 0x0, "Raven+ w SE FWSCSI Core SCSI"}, {HPHW_FIO, 0x057, 0x00082, 0x0, "Raven+ w Diff FWSCSI Core SCSI"}, {HPHW_FIO, 0x058, 0x00082, 0x0, "FireHawk 200 Core SCSI"}, {HPHW_FIO, 0x05C, 0x00082, 0x0, "SummitHawk 230 Core SCSI"}, {HPHW_FIO, 0x05E, 0x00082, 0x0, "Staccato 132 Core SCSI"}, {HPHW_FIO, 0x05F, 0x00082, 0x0, "Staccato 180 Core SCSI"}, {HPHW_FIO, 0x800, 0x00082, 0x0, "Hitachi Tiny 64 Core SCSI"}, {HPHW_FIO, 0x801, 0x00082, 0x0, "Hitachi Tiny 80 Core SCSI"}, {HPHW_FIO, 0x016, 0x00083, 0x0, "Gecko Core PC Floppy"}, {HPHW_FIO, 0x01C, 0x00083, 0x0, "Gecko 80 Core PC Floppy"}, {HPHW_FIO, 0x01D, 0x00083, 0x0, "Gecko 100 Core PC Floppy"}, {HPHW_FIO, 0x051, 0x00083, 0x0, "Firehawk Core PC Floppy"}, {HPHW_FIO, 0x058, 0x00083, 0x0, "FireHawk 200 Core PC Floppy"}, {HPHW_FIO, 0x027, 0x00083, 0x0, "Piranha 100 Core PC Floppy"}, {HPHW_FIO, 0x028, 0x00083, 0x0, "Mirage Jr Core PC Floppy"}, {HPHW_FIO, 0x029, 0x00083, 0x0, "Mirage Core PC Floppy"}, {HPHW_FIO, 0x02A, 0x00083, 0x0, "Electra Core PC Floppy"}, {HPHW_FIO, 0x02B, 0x00083, 0x0, "Mirage 80 Core PC Floppy"}, {HPHW_FIO, 0x02C, 0x00083, 0x0, "Mirage 100+ Core PC Floppy"}, {HPHW_FIO, 0x02E, 0x00083, 0x0, "UL 350 Core PC Floppy"}, {HPHW_FIO, 0x02F, 0x00083, 0x0, "UL 550 Core PC Floppy"}, {HPHW_FIO, 0x032, 0x00083, 0x0, "Raven T' Core PC Floppy"}, {HPHW_FIO, 0x034, 0x00083, 0x0, "SAIC L-80 Core PC Floppy"}, {HPHW_FIO, 0x035, 0x00083, 0x0, "PCX-L2 712/132 Core Floppy"}, {HPHW_FIO, 0x036, 0x00083, 0x0, "PCX-L2 712/160 Core Floppy"}, {HPHW_FIO, 0x03B, 0x00083, 0x0, "Raven U/L2 Core PC Floppy"}, {HPHW_FIO, 0x03C, 0x00083, 0x0, "Merlin 132 Core PC Floppy"}, {HPHW_FIO, 0x03D, 0x00083, 0x0, "Merlin 160 Core PC Floppy"}, {HPHW_FIO, 0x03E, 0x00083, 0x0, "Merlin+ 132 Core PC Floppy"}, {HPHW_FIO, 0x03F, 0x00083, 0x0, "Merlin+ 180 Core PC Floppy"}, {HPHW_FIO, 0x045, 0x00083, 0x0, "Rocky1 Core PC Floppy"}, {HPHW_FIO, 0x046, 0x00083, 0x0, "Rocky2 120 Core PC Floppy"}, {HPHW_FIO, 0x047, 0x00083, 0x0, "Rocky2 150 Core PC Floppy"}, {HPHW_FIO, 0x04E, 0x00083, 0x0, "Kiji L2 132 Core PC Floppy"}, {HPHW_FIO, 0x050, 0x00083, 0x0, "Merlin Jr 132 Core PC Floppy"}, {HPHW_FIO, 0x056, 0x00083, 0x0, "Raven+ w SE FWSCSI Core PC Floppy"}, {HPHW_FIO, 0x057, 0x00083, 0x0, "Raven+ w Diff FWSCSI Core PC Floppy"}, {HPHW_FIO, 0x800, 0x00083, 0x0, "Hitachi Tiny 64 Core PC Floppy"}, {HPHW_FIO, 0x801, 0x00083, 0x0, "Hitachi Tiny 80 Core 
PC Floppy"}, {HPHW_FIO, 0x015, 0x00084, 0x0, "KittyHawk GSY Core PS/2 Port"}, {HPHW_FIO, 0x016, 0x00084, 0x0, "Gecko Core PS/2 Port"}, {HPHW_FIO, 0x018, 0x00084, 0x0, "Gecko Optional PS/2 Port"}, {HPHW_FIO, 0x01A, 0x00084, 0x0, "Anole 64 Core PS/2 Port"}, {HPHW_FIO, 0x01B, 0x00084, 0x0, "Anole 100 Core PS/2 Port"}, {HPHW_FIO, 0x01C, 0x00084, 0x0, "Gecko 80 Core PS/2 Port"}, {HPHW_FIO, 0x01D, 0x00084, 0x0, "Gecko 100 Core PS/2 Port"}, {HPHW_FIO, 0x01F, 0x00084, 0x0, "SkyHawk 100/120 Core PS/2 Port"}, {HPHW_FIO, 0x027, 0x00084, 0x0, "Piranha 100 Core PS/2 Port"}, {HPHW_FIO, 0x028, 0x00084, 0x0, "Mirage Jr Core PS/2 Port"}, {HPHW_FIO, 0x029, 0x00084, 0x0, "Mirage Core PS/2 Port"}, {HPHW_FIO, 0x02A, 0x00084, 0x0, "Electra Core PS/2 Port"}, {HPHW_FIO, 0x02B, 0x00084, 0x0, "Mirage 80 Core PS/2 Port"}, {HPHW_FIO, 0x02C, 0x00084, 0x0, "Mirage 100+ Core PS/2 Port"}, {HPHW_FIO, 0x02E, 0x00084, 0x0, "UL 350 Core PS/2 Port"}, {HPHW_FIO, 0x02F, 0x00084, 0x0, "UL 550 Core PS/2 Port"}, {HPHW_FIO, 0x032, 0x00084, 0x0, "Raven T' Core PS/2 Port"}, {HPHW_FIO, 0x033, 0x00084, 0x0, "Anole T Core PS/2 Port"}, {HPHW_FIO, 0x034, 0x00084, 0x0, "SAIC L-80 Core PS/2 Port"}, {HPHW_FIO, 0x035, 0x00084, 0x0, "PCX-L2 712/132 Core PS/2 Port"}, {HPHW_FIO, 0x036, 0x00084, 0x0, "PCX-L2 712/160 Core PS/2 Port"}, {HPHW_FIO, 0x03B, 0x00084, 0x0, "Raven U/L2 Core PS/2 Port"}, {HPHW_FIO, 0x03C, 0x00084, 0x0, "Merlin 132 Core PS/2 Port"}, {HPHW_FIO, 0x03D, 0x00084, 0x0, "Merlin 160 Core PS/2 Port"}, {HPHW_FIO, 0x03E, 0x00084, 0x0, "Merlin+ 132 Core PS/2 Port"}, {HPHW_FIO, 0x03F, 0x00084, 0x0, "Merlin+ 180 Core PS/2 Port"}, {HPHW_FIO, 0x044, 0x00084, 0x0, "Mohawk Core PS/2 Port"}, {HPHW_FIO, 0x045, 0x00084, 0x0, "Rocky1 Core PS/2 Port"}, {HPHW_FIO, 0x046, 0x00084, 0x0, "Rocky2 120 Core PS/2 Port"}, {HPHW_FIO, 0x047, 0x00084, 0x0, "Rocky2 150 Core PS/2 Port"}, {HPHW_FIO, 0x048, 0x00084, 0x0, "Rocky2 120 Dino PS/2 Port"}, {HPHW_FIO, 0x049, 0x00084, 0x0, "Rocky2 150 Dino PS/2 Port"}, {HPHW_FIO, 0x04B, 0x00084, 0x0, "Anole L2 132 Core PS/2 Port"}, {HPHW_FIO, 0x04D, 0x00084, 0x0, "Anole L2 165 Core PS/2 Port"}, {HPHW_FIO, 0x04E, 0x00084, 0x0, "Kiji L2 132 Core PS/2 Port"}, {HPHW_FIO, 0x050, 0x00084, 0x0, "Merlin Jr 132 Core PS/2 Port"}, {HPHW_FIO, 0x051, 0x00084, 0x0, "Firehawk Core PS/2 Port"}, {HPHW_FIO, 0x056, 0x00084, 0x0, "Raven+ w SE FWSCSI Core PS/2 Port"}, {HPHW_FIO, 0x057, 0x00084, 0x0, "Raven+ w Diff FWSCSI Core PS/2 Port"}, {HPHW_FIO, 0x058, 0x00084, 0x0, "FireHawk 200 Core PS/2 Port"}, {HPHW_FIO, 0x05C, 0x00084, 0x0, "SummitHawk 230 Core PS/2 Port"}, {HPHW_FIO, 0x800, 0x00084, 0x0, "Hitachi Tiny 64 Core PS/2 Port"}, {HPHW_FIO, 0x801, 0x00084, 0x0, "Hitachi Tiny 80 Core PS/2 Port"}, {HPHW_FIO, 0x004, 0x00085, 0x0, "Solo GSC Optional Graphics"}, {HPHW_FIO, 0x005, 0x00085, 0x0, "Duet GSC Optional Graphics"}, {HPHW_FIO, 0x008, 0x00085, 0x0, "Anole Artist Optional Graphics"}, {HPHW_FIO, 0x010, 0x00085, 0x0, "Mirage 80 GSC Builtin Graphics"}, {HPHW_FIO, 0x011, 0x00085, 0x0, "Mirage 100+ GSC Builtin Graphics"}, {HPHW_FIO, 0x012, 0x00085, 0x0, "Mirage Jr GSC Builtin Graphics"}, {HPHW_FIO, 0x013, 0x00085, 0x0, "Mirage GSC Builtin Graphics"}, {HPHW_FIO, 0x014, 0x00085, 0x0, "Electra GSC Builtin Graphics"}, {HPHW_FIO, 0x016, 0x00085, 0x0, "Gecko GSC Core Graphics"}, {HPHW_FIO, 0x017, 0x00085, 0x0, "Gecko GSC Optional Graphics"}, {HPHW_FIO, 0x01A, 0x00085, 0x0, "Anole 64 Artist Builtin Graphics"}, {HPHW_FIO, 0x01B, 0x00085, 0x0, "Anole 100 Artist Builtin Graphics"}, {HPHW_FIO, 0x01C, 0x00085, 0x0, "Gecko 80 GSC Core Graphics"}, 
{HPHW_FIO, 0x01D, 0x00085, 0x0, "Gecko 100 GSC Core Graphics"}, {HPHW_FIO, 0x032, 0x00085, 0x0, "Raven T' GSC Core Graphics"}, {HPHW_FIO, 0x033, 0x00085, 0x0, "Anole T Artist Builtin Graphics"}, {HPHW_FIO, 0x034, 0x00085, 0x0, "SAIC L-80 GSC Core Graphics"}, {HPHW_FIO, 0x035, 0x00085, 0x0, "PCX-L2 712/132 Core Graphics"}, {HPHW_FIO, 0x036, 0x00085, 0x0, "PCX-L2 712/160 Core Graphics"}, {HPHW_FIO, 0x03B, 0x00085, 0x0, "Raven U/L2 Core Graphics"}, {HPHW_FIO, 0x03C, 0x00085, 0x0, "Merlin 132 Core Graphics"}, {HPHW_FIO, 0x03D, 0x00085, 0x0, "Merlin 160 Core Graphics"}, {HPHW_FIO, 0x03E, 0x00085, 0x0, "Merlin+ 132 Core Graphics"}, {HPHW_FIO, 0x03F, 0x00085, 0x0, "Merlin+ 180 Core Graphics"}, {HPHW_FIO, 0x045, 0x00085, 0x0, "Rocky1 Core Graphics"}, {HPHW_FIO, 0x046, 0x00085, 0x0, "Rocky2 120 Core Graphics"}, {HPHW_FIO, 0x047, 0x00085, 0x0, "Rocky2 150 Core Graphics"}, {HPHW_FIO, 0x04B, 0x00085, 0x0, "Anole L2 132 Core Graphics"}, {HPHW_FIO, 0x04D, 0x00085, 0x0, "Anole L2 165 Core Graphics"}, {HPHW_FIO, 0x04E, 0x00085, 0x0, "Kiji L2 132 Core Graphics"}, {HPHW_FIO, 0x050, 0x00085, 0x0, "Merlin Jr 132 Core Graphics"}, {HPHW_FIO, 0x056, 0x00085, 0x0, "Raven+ w SE FWSCSI Core Graphics"}, {HPHW_FIO, 0x057, 0x00085, 0x0, "Raven+ w Diff FWSCSI Core Graphics"}, {HPHW_FIO, 0x800, 0x00085, 0x0, "Hitachi Tiny 64 Core Graphics"}, {HPHW_FIO, 0x801, 0x00085, 0x0, "Hitachi Tiny 80 Core Graphics"}, {HPHW_FIO, 0x004, 0x00086, 0x0, "GSC IBM Token Ring"}, {HPHW_FIO, 0x015, 0x00087, 0x0, "Gecko Optional ISDN"}, {HPHW_FIO, 0x016, 0x00087, 0x0, "Gecko Core ISDN"}, {HPHW_FIO, 0x01C, 0x00087, 0x0, "Gecko 80 Core ISDN"}, {HPHW_FIO, 0x01D, 0x00087, 0x0, "Gecko 100 Core ISDN"}, {HPHW_FIO, 0x010, 0x00088, 0x0, "Pace VME Networking"}, {HPHW_FIO, 0x011, 0x00088, 0x0, "Sidewinder VME Networking"}, {HPHW_FIO, 0x01A, 0x00088, 0x0, "Anole 64 VME Networking"}, {HPHW_FIO, 0x01B, 0x00088, 0x0, "Anole 100 VME Networking"}, {HPHW_FIO, 0x024, 0x00088, 0x0, "Fast Pace VME Networking"}, {HPHW_FIO, 0x034, 0x00088, 0x0, "Anole T VME Networking"}, {HPHW_FIO, 0x04A, 0x00088, 0x0, "Anole L2 132 VME Networking"}, {HPHW_FIO, 0x04C, 0x00088, 0x0, "Anole L2 165 VME Networking"}, {HPHW_FIO, 0x011, 0x0008A, 0x0, "WB-96 Core LAN (802.3)"}, {HPHW_FIO, 0x012, 0x0008A, 0x0, "Orville Core LAN (802.3)"}, {HPHW_FIO, 0x013, 0x0008A, 0x0, "Wilbur Core LAN (802.3)"}, {HPHW_FIO, 0x014, 0x0008A, 0x0, "WB-80 Core LAN (802.3)"}, {HPHW_FIO, 0x015, 0x0008A, 0x0, "KittyHawk GSY Core LAN (802.3)"}, {HPHW_FIO, 0x016, 0x0008A, 0x0, "Gecko Core LAN (802.3)"}, {HPHW_FIO, 0x018, 0x0008A, 0x0, "Gecko Optional LAN (802.3)"}, {HPHW_FIO, 0x01A, 0x0008A, 0x0, "Anole 64 Core LAN (802.3)"}, {HPHW_FIO, 0x01B, 0x0008A, 0x0, "Anole 100 Core LAN (802.3)"}, {HPHW_FIO, 0x01C, 0x0008A, 0x0, "Gecko 80 Core LAN (802.3)"}, {HPHW_FIO, 0x01D, 0x0008A, 0x0, "Gecko 100 Core LAN (802.3)"}, {HPHW_FIO, 0x01F, 0x0008A, 0x0, "SkyHawk 100/120 Core LAN (802.3)"}, {HPHW_FIO, 0x027, 0x0008A, 0x0, "Piranha 100 Core LAN (802.3)"}, {HPHW_FIO, 0x028, 0x0008A, 0x0, "Mirage Jr Core LAN (802.3)"}, {HPHW_FIO, 0x029, 0x0008A, 0x0, "Mirage Core LAN (802.3)"}, {HPHW_FIO, 0x02A, 0x0008A, 0x0, "Electra Core LAN (802.3)"}, {HPHW_FIO, 0x02B, 0x0008A, 0x0, "Mirage 80 Core LAN (802.3)"}, {HPHW_FIO, 0x02C, 0x0008A, 0x0, "Mirage 100+ Core LAN (802.3)"}, {HPHW_FIO, 0x02E, 0x0008A, 0x0, "UL 350 Core LAN (802.3)"}, {HPHW_FIO, 0x02F, 0x0008A, 0x0, "UL 350 Core LAN (802.3)"}, {HPHW_FIO, 0x032, 0x0008A, 0x0, "Raven T' Core LAN (802.3)"}, {HPHW_FIO, 0x033, 0x0008A, 0x0, "Anole T Core LAN (802.3)"}, {HPHW_FIO, 0x034, 0x0008A, 
0x0, "SAIC L-80 Core LAN (802.3)"}, {HPHW_FIO, 0x035, 0x0008A, 0x0, "PCX-L2 712/132 Core LAN (802.3)"}, {HPHW_FIO, 0x036, 0x0008A, 0x0, "PCX-L2 712/160 Core LAN (802.3)"}, {HPHW_FIO, 0x03B, 0x0008A, 0x0, "Raven U/L2 Core LAN (802.3)"}, {HPHW_FIO, 0x03C, 0x0008A, 0x0, "Merlin 132 Core LAN (802.3)"}, {HPHW_FIO, 0x03D, 0x0008A, 0x0, "Merlin 160 Core LAN (802.3)"}, {HPHW_FIO, 0x044, 0x0008A, 0x0, "Mohawk Core LAN (802.3)"}, {HPHW_FIO, 0x045, 0x0008A, 0x0, "Rocky1 Core LAN (802.3)"}, {HPHW_FIO, 0x046, 0x0008A, 0x0, "Rocky2 120 Core LAN (802.3)"}, {HPHW_FIO, 0x047, 0x0008A, 0x0, "Rocky2 150 Core LAN (802.3)"}, {HPHW_FIO, 0x04B, 0x0008A, 0x0, "Anole L2 132 Core LAN (802.3)"}, {HPHW_FIO, 0x04D, 0x0008A, 0x0, "Anole L2 165 Core LAN (802.3)"}, {HPHW_FIO, 0x04E, 0x0008A, 0x0, "Kiji L2 132 Core LAN (802.3)"}, {HPHW_FIO, 0x050, 0x0008A, 0x0, "Merlin Jr 132 Core LAN (802.3)"}, {HPHW_FIO, 0x058, 0x0008A, 0x0, "FireHawk 200 Core LAN (802.3)"}, {HPHW_FIO, 0x800, 0x0008A, 0x0, "Hitachi Tiny 64 Core LAN (802.3)"}, {HPHW_FIO, 0x801, 0x0008A, 0x0, "Hitachi Tiny 80 Core LAN (802.3)"}, {HPHW_FIO, 0x004, 0x0008C, 0x0, "SkyHawk 100/120 Wax RS-232"}, {HPHW_FIO, 0x005, 0x0008C, 0x0, "SAIC L-80 Wax RS-232"}, {HPHW_FIO, 0x006, 0x0008C, 0x0, "Raven U/L2 Dino RS-232"}, {HPHW_FIO, 0x007, 0x0008C, 0x0, "Dino RS-232"}, {HPHW_FIO, 0x008, 0x0008C, 0x0, "Merlin 132 Dino RS-232"}, {HPHW_FIO, 0x009, 0x0008C, 0x0, "Merlin 160 Dino RS-232"}, {HPHW_FIO, 0x00A, 0x0008C, 0x0, "Merlin Jr 132 Dino RS-232"}, {HPHW_FIO, 0x010, 0x0008C, 0x0, "Mirage 80 Wax RS-232"}, {HPHW_FIO, 0x011, 0x0008C, 0x0, "Mirage 100+ Wax RS-232"}, {HPHW_FIO, 0x012, 0x0008C, 0x0, "Mirage Jr Wax RS-232"}, {HPHW_FIO, 0x013, 0x0008C, 0x0, "Mirage Wax RS-232"}, {HPHW_FIO, 0x014, 0x0008C, 0x0, "Electra Wax RS-232"}, {HPHW_FIO, 0x015, 0x0008C, 0x0, "KittyHawk GSY Core RS-232"}, {HPHW_FIO, 0x016, 0x0008C, 0x0, "Gecko Core RS-232"}, {HPHW_FIO, 0x017, 0x0008C, 0x0, "Raven Backplane RS-232"}, {HPHW_FIO, 0x018, 0x0008C, 0x0, "Gecko Optional RS-232"}, {HPHW_FIO, 0x019, 0x0008C, 0x0, "Merlin+ 180 Dino RS-232"}, {HPHW_FIO, 0x01A, 0x0008C, 0x0, "Anole 64 Core RS-232"}, {HPHW_FIO, 0x01B, 0x0008C, 0x0, "Anole 100 Core RS-232"}, {HPHW_FIO, 0x01C, 0x0008C, 0x0, "Gecko 80 Core RS-232"}, {HPHW_FIO, 0x01D, 0x0008C, 0x0, "Gecko 100 Core RS-232"}, {HPHW_FIO, 0x01E, 0x0008C, 0x0, "Raven T' Wax RS-232"}, {HPHW_FIO, 0x01F, 0x0008C, 0x0, "SkyHawk 100/120 Core RS-232"}, {HPHW_FIO, 0x020, 0x0008C, 0x0, "Anole 64 Timi RS-232"}, {HPHW_FIO, 0x021, 0x0008C, 0x0, "Anole 100 Timi RS-232"}, {HPHW_FIO, 0x022, 0x0008C, 0x0, "Merlin+ 132 Dino RS-232"}, {HPHW_FIO, 0x023, 0x0008C, 0x0, "Rocky1 Wax RS-232"}, {HPHW_FIO, 0x025, 0x0008C, 0x0, "Armyknife Optional RS-232"}, {HPHW_FIO, 0x026, 0x0008C, 0x0, "Piranha 100 Wax RS-232"}, {HPHW_FIO, 0x027, 0x0008C, 0x0, "Piranha 100 Core RS-232"}, {HPHW_FIO, 0x028, 0x0008C, 0x0, "Mirage Jr Core RS-232"}, {HPHW_FIO, 0x029, 0x0008C, 0x0, "Mirage Core RS-232"}, {HPHW_FIO, 0x02A, 0x0008C, 0x0, "Electra Core RS-232"}, {HPHW_FIO, 0x02B, 0x0008C, 0x0, "Mirage 80 Core RS-232"}, {HPHW_FIO, 0x02C, 0x0008C, 0x0, "Mirage 100+ Core RS-232"}, {HPHW_FIO, 0x02E, 0x0008C, 0x0, "UL 350 Lasi Core RS-232"}, {HPHW_FIO, 0x02F, 0x0008C, 0x0, "UL 550 Lasi Core RS-232"}, {HPHW_FIO, 0x030, 0x0008C, 0x0, "UL 350 Wax Core RS-232"}, {HPHW_FIO, 0x031, 0x0008C, 0x0, "UL 550 Wax Core RS-232"}, {HPHW_FIO, 0x032, 0x0008C, 0x0, "Raven T' Lasi Core RS-232"}, {HPHW_FIO, 0x033, 0x0008C, 0x0, "Anole T Core RS-232"}, {HPHW_FIO, 0x034, 0x0008C, 0x0, "SAIC L-80 Core RS-232"}, {HPHW_FIO, 0x035, 0x0008C, 0x0, 
"PCX-L2 712/132 Core RS-232"}, {HPHW_FIO, 0x036, 0x0008C, 0x0, "PCX-L2 712/160 Core RS-232"}, {HPHW_FIO, 0x03A, 0x0008C, 0x0, "Merlin+ Wax RS-232"}, {HPHW_FIO, 0x03B, 0x0008C, 0x0, "Raven U/L2 Core RS-232"}, {HPHW_FIO, 0x03C, 0x0008C, 0x0, "Merlin 132 Core RS-232"}, {HPHW_FIO, 0x03D, 0x0008C, 0x0, "Merlin 160 Core RS-232"}, {HPHW_FIO, 0x03E, 0x0008C, 0x0, "Merlin+ 132 Core RS-232"}, {HPHW_FIO, 0x03F, 0x0008C, 0x0, "Merlin+ 180 Core RS-232"}, {HPHW_FIO, 0x040, 0x0008C, 0x0, "Merlin 132 Wax RS-232"}, {HPHW_FIO, 0x041, 0x0008C, 0x0, "Merlin 160 Wax RS-232"}, {HPHW_FIO, 0x043, 0x0008C, 0x0, "Merlin 132/160 Wax RS-232"}, {HPHW_FIO, 0x044, 0x0008C, 0x0, "Mohawk Core RS-232"}, {HPHW_FIO, 0x045, 0x0008C, 0x0, "Rocky1 Core RS-232"}, {HPHW_FIO, 0x046, 0x0008C, 0x0, "Rocky2 120 Core RS-232"}, {HPHW_FIO, 0x047, 0x0008C, 0x0, "Rocky2 150 Core RS-232"}, {HPHW_FIO, 0x048, 0x0008C, 0x0, "Rocky2 120 Dino RS-232"}, {HPHW_FIO, 0x049, 0x0008C, 0x0, "Rocky2 150 Dino RS-232"}, {HPHW_FIO, 0x04A, 0x0008C, 0x0, "Anole L2 132 TIMI RS-232"}, {HPHW_FIO, 0x04B, 0x0008C, 0x0, "Anole L2 l32 Core RS-232"}, {HPHW_FIO, 0x04C, 0x0008D, 0x0, "Anole L2 165 TIMI RS-232"}, {HPHW_FIO, 0x04D, 0x0008C, 0x0, "Anole L2 165 Core RS-232"}, {HPHW_FIO, 0x04E, 0x0008C, 0x0, "Kiji L2 132 Core RS-232"}, {HPHW_FIO, 0x04F, 0x0008C, 0x0, "Kiji L2 132 Dino RS-232"}, {HPHW_FIO, 0x050, 0x0008C, 0x0, "Merlin Jr 132 Core RS-232"}, {HPHW_FIO, 0x051, 0x0008C, 0x0, "Firehawk Core RS-232"}, {HPHW_FIO, 0x052, 0x0008C, 0x0, "Raven+ Hi Power Backplane w EISA RS-232"}, {HPHW_FIO, 0x053, 0x0008C, 0x0, "Raven+ Hi Power Backplane w/o EISA RS-232"}, {HPHW_FIO, 0x054, 0x0008C, 0x0, "Raven+ Lo Power Backplane w EISA RS-232"}, {HPHW_FIO, 0x055, 0x0008C, 0x0, "Raven+ Lo Power Backplane w/o EISA RS-232"}, {HPHW_FIO, 0x056, 0x0008C, 0x0, "Raven+ w SE FWSCSI Core RS-232"}, {HPHW_FIO, 0x057, 0x0008C, 0x0, "Raven+ w Diff FWSCSI Core RS-232"}, {HPHW_FIO, 0x058, 0x0008C, 0x0, "FireHawk 200 Core RS-232"}, {HPHW_FIO, 0x059, 0x0008C, 0x0, "FireHawk 200 Wax RS-232"}, {HPHW_FIO, 0x05A, 0x0008C, 0x0, "Raven+ L2 Backplane w EISA RS-232"}, {HPHW_FIO, 0x05B, 0x0008C, 0x0, "Raven+ L2 Backplane w/o EISA RS-232"}, {HPHW_FIO, 0x05D, 0x0008C, 0x0, "SummitHawk Dino RS-232"}, {HPHW_FIO, 0x05E, 0x0008C, 0x0, "Staccato 132 Core LAN RS-232"}, {HPHW_FIO, 0x05F, 0x0008C, 0x0, "Staccato 180 Core LAN RS-232"}, {HPHW_FIO, 0x800, 0x0008C, 0x0, "Hitachi Tiny 64 Core RS-232"}, {HPHW_FIO, 0x801, 0x0008C, 0x0, "Hitachi Tiny 80 Core RS-232"}, {HPHW_FIO, 0x015, 0x0008D, 0x0, "Gecko Optional RJ-16"}, {HPHW_FIO, 0x016, 0x0008D, 0x0, "Gecko Core RJ-16"}, {HPHW_FIO, 0x01C, 0x0008D, 0x0, "Gecko 80 Core RJ-16"}, {HPHW_FIO, 0x01D, 0x0008D, 0x0, "Gecko 100 Core RJ-16"}, {HPHW_FIO, 0x004, 0x0008F, 0x0, "Anole Boot Rom"}, {HPHW_FIO, 0x005, 0x0008F, 0x0, "Rocky1 Boot Rom"}, {HPHW_FIO, 0x006, 0x0008F, 0x0, "Rocky2 120 Boot Rom"}, {HPHW_FIO, 0x007, 0x0008F, 0x0, "Rocky2 150 Boot Rom"}, {HPHW_FIO, 0x01B, 0x0008F, 0x0, "Anole 100 Boot Rom"}, {HPHW_FIO, 0x006, 0x00096, 0x0, "Raven U/L2 Dino PS/2 Port"}, {HPHW_FIO, 0x007, 0x00096, 0x0, "Dino PS/2 Port"}, {HPHW_FIO, 0x008, 0x00096, 0x0, "Merlin 132 Dino PS/2 Port"}, {HPHW_FIO, 0x009, 0x00096, 0x0, "Merlin 160 Dino PS/2 Port"}, {HPHW_FIO, 0x00A, 0x00096, 0x0, "Merlin Jr 132 Dino PS/2 Port"}, {HPHW_FIO, 0x019, 0x00096, 0x0, "Merlin+ 180 Dino PS/2 Port"}, {HPHW_FIO, 0x022, 0x00096, 0x0, "Merlin+ 132 Dino PS/2 Port"}, {HPHW_FIO, 0x004, 0x00097, 0x0, "Cascade EISA 100VG LAN"}, {HPHW_FIO, 0x023, 0x00099, 0x0, "Rocky1 Wax HPIB"}, {HPHW_FIO, 0x048, 0x00099, 0x0, "Rocky2 120 
Clark/Dino HPIB"}, {HPHW_FIO, 0x049, 0x00099, 0x0, "Rocky2 150 Clark/Dino HPIB"}, {HPHW_FIO, 0x004, 0x000A1, 0x0, "SPP2000 Console TTY"}, {HPHW_FIO, 0x004, 0x000A2, 0x0, "Forte Core PCI 10/100BT LAN"}, {HPHW_FIO, 0x005, 0x000A2, 0x0, "AllegroLow PCI 10/100BT LAN"}, {HPHW_FIO, 0x006, 0x000A2, 0x0, "AllegroHIgh Core PCI 10/100BT LAN"}, {HPHW_FIO, 0x007, 0x000A2, 0x0, "PCI Plug-in LAN"}, {HPHW_FIO, 0x00A, 0x000A2, 0x0, "Lego 360 Core PCI 10/100BT LAN"}, {HPHW_FIO, 0x03E, 0x000A2, 0x0, "Merlin+ 132 Core PCI LAN"}, {HPHW_FIO, 0x03F, 0x000A2, 0x0, "Merlin+ 180 Core PCI LAN"}, {HPHW_FIO, 0x056, 0x000A2, 0x0, "Raven+ w SE FWSCSI Core PCI LAN"}, {HPHW_FIO, 0x057, 0x000A2, 0x0, "Raven+ w Diff FWSCSI Core PCI LAN"}, {HPHW_FIO, 0x05E, 0x000A2, 0x0, "Staccato 132 PCI LAN"}, {HPHW_FIO, 0x05F, 0x000A2, 0x0, "Staccato 180 PCI LAN"}, {HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI LVD Ultra2 SCSI"}, {HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI SE UltraSCSI"}, {HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI IDE/ATAPI CD-ROM"}, {HPHW_FIO, 0x005, 0x000A3, 0x0, "AllegroLow Core PCI LVD Ultra2 SCSI"}, {HPHW_FIO, 0x005, 0x000A3, 0x0, "AllegroLow Core PCI IDE/ATAPI CD-ROM"}, {HPHW_FIO, 0x006, 0x000A3, 0x0, "AllegroHigh Core PCI LVD Ultra2 SCSI"}, {HPHW_FIO, 0x006, 0x000A3, 0x0, "AllegroHigh Core PCI IDE/ATAPI CD-ROM"}, {HPHW_FIO, 0x007, 0x000A3, 0x0, "PCI Plug-in Disk"}, {HPHW_FIO, 0x008, 0x000A3, 0x0, "A5158A S FC Tachlite HBA"}, {HPHW_FIO, 0x009, 0x000A3, 0x0, "A5157A D FC HBA"}, {HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI LVD Ultra2 SCSI"}, {HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI NSE UltraSCSI"}, {HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI WSE UltraSCSI"}, {HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI IDE/ATAPI CD-ROM"}, {HPHW_FIO, 0x03E, 0x000A3, 0x0, "Merlin+ 132 Core SE FWSCSI PCI Disk"}, {HPHW_FIO, 0x03F, 0x000A3, 0x0, "Merlin+ 180 Core SE FWSCSI PCI Disk"}, {HPHW_FIO, 0x056, 0x000A3, 0x0, "Raven+ w SE FWSCSI Core PCI Disk"}, {HPHW_FIO, 0x057, 0x000A3, 0x0, "Raven+ w Diff FWSCSI Core PCI Disk"}, {HPHW_FIO, 0x004, 0x000A4, 0x0, "SPP2000 Core BA"}, {HPHW_FIO, 0x004, 0x000A6, 0x0, "Sonic Ethernet 802.3 Card"}, {HPHW_FIO, 0x004, 0x000A9, 0x00, "Forte Core PCI SuperIO RS-232"}, {HPHW_FIO, 0x004, 0x000A9, 0x00, "Forte Core PCI USB KB"}, {HPHW_FIO, 0x005, 0x000A9, 0x00, "AllegroLow Core PCI SuperIO RS-232"}, {HPHW_FIO, 0x005, 0x000A9, 0x00, "AllegroLow Core PCI USB KB"}, {HPHW_FIO, 0x006, 0x000A9, 0x00, "AllegroHigh Core PCI SuperIO RS-232"}, {HPHW_FIO, 0x006, 0x000A9, 0x00, "AllegroHigh Core PCI USB KB"}, {HPHW_FIO, 0x007, 0x000A9, 0x0, "Miscellaneous PCI Plug-in"}, {HPHW_FIO, 0x00A, 0x000A9, 0x0, "Lego 360 Core PCI SuperIO RS-232"}, {HPHW_FIO, 0x00A, 0x000A9, 0x0, "Lego 360 Core PCI USB KB"}, {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, {HPHW_IOA, 0x581, 0x0000B, 0x10, "Uturn-IOA BC Runway Port"}, {HPHW_IOA, 0x582, 0x0000B, 0x10, "Astro BC Runway Port"}, {HPHW_IOA, 0x700, 0x0000B, 0x00, "NEC-IOS BC System Bus Port"}, {HPHW_IOA, 0x880, 0x0000C, 0x10, "Pluto BC McKinley Port"}, {HPHW_MEMORY, 0x002, 0x00008, 0x00, "MID_BUS"}, {HPHW_MEMORY, 0x063, 0x00009, 0x00, "712/132 L2 Upgrade"}, {HPHW_MEMORY, 0x064, 0x00009, 0x00, "712/160 L2 Upgrade"}, {HPHW_MEMORY, 0x065, 0x00009, 
0x00, "715/132 L2 Upgrade"}, {HPHW_MEMORY, 0x066, 0x00009, 0x00, "715/160 L2 Upgrade"}, {HPHW_MEMORY, 0x0AF, 0x00009, 0x00, "Everest Mako Memory"}, {HPHW_OTHER, 0x004, 0x00030, 0x00, "Master"}, {HPHW_OTHER, 0x004, 0x00034, 0x00, "Slave"}, {HPHW_OTHER, 0x004, 0x00038, 0x00, "EDU"}, {HPHW_OTHER, 0x004, 0x00049, 0x00, "LGB Control"}, {HPHW_MC, 0x004, 0x000C0, 0x00, "BMC IPMI Mgmt Ctlr"}, {HPHW_FAULTY, 0, } /* Special Marker for last entry */ }; static struct hp_cpu_type_mask { unsigned short model; unsigned short mask; enum cpu_type cpu; } hp_cpu_type_mask_list[] __devinitdata = { { 0x0000, 0x0ff0, pcx }, /* 0x0000 - 0x000f */ { 0x0048, 0x0ff0, pcxl }, /* 0x0040 - 0x004f */ { 0x0080, 0x0ff0, pcx }, /* 0x0080 - 0x008f */ { 0x0100, 0x0ff0, pcx }, /* 0x0100 - 0x010f */ { 0x0182, 0x0ffe, pcx }, /* 0x0182 - 0x0183 */ { 0x0182, 0x0ffe, pcxt }, /* 0x0182 - 0x0183 */ { 0x0184, 0x0fff, pcxu }, /* 0x0184 - 0x0184 */ { 0x0200, 0x0ffe, pcxs }, /* 0x0200 - 0x0201 */ { 0x0202, 0x0fff, pcxs }, /* 0x0202 - 0x0202 */ { 0x0203, 0x0fff, pcxt }, /* 0x0203 - 0x0203 */ { 0x0204, 0x0ffc, pcxt }, /* 0x0204 - 0x0207 */ { 0x0280, 0x0ffc, pcxs }, /* 0x0280 - 0x0283 */ { 0x0284, 0x0ffc, pcxt }, /* 0x0284 - 0x0287 */ { 0x0288, 0x0fff, pcxt }, /* 0x0288 - 0x0288 */ { 0x0300, 0x0ffc, pcxs }, /* 0x0300 - 0x0303 */ { 0x0310, 0x0ff0, pcxt }, /* 0x0310 - 0x031f */ { 0x0320, 0x0ff0, pcxt }, /* 0x0320 - 0x032f */ { 0x0400, 0x0ff0, pcxt }, /* 0x0400 - 0x040f */ { 0x0480, 0x0ff0, pcxl }, /* 0x0480 - 0x048f */ { 0x0500, 0x0ff0, pcxl2 }, /* 0x0500 - 0x050f */ { 0x0510, 0x0ff0, pcxl2 }, /* 0x0510 - 0x051f */ { 0x0580, 0x0ff8, pcxt_ }, /* 0x0580 - 0x0587 */ { 0x0588, 0x0ffc, pcxt_ }, /* 0x0588 - 0x058b */ { 0x058c, 0x0ffe, pcxt_ }, /* 0x058c - 0x058d */ { 0x058e, 0x0fff, pcxt_ }, /* 0x058e - 0x058e */ { 0x058f, 0x0fff, pcxu }, /* 0x058f - 0x058f */ { 0x0590, 0x0ffe, pcxu }, /* 0x0590 - 0x0591 */ { 0x0592, 0x0fff, pcxt_ }, /* 0x0592 - 0x0592 */ { 0x0593, 0x0fff, pcxu }, /* 0x0593 - 0x0593 */ { 0x0594, 0x0ffc, pcxu }, /* 0x0594 - 0x0597 */ { 0x0598, 0x0ffe, pcxu_ }, /* 0x0598 - 0x0599 */ { 0x059a, 0x0ffe, pcxu }, /* 0x059a - 0x059b */ { 0x059c, 0x0fff, pcxu }, /* 0x059c - 0x059c */ { 0x059d, 0x0fff, pcxu_ }, /* 0x059d - 0x059d */ { 0x059e, 0x0fff, pcxt_ }, /* 0x059e - 0x059e */ { 0x059f, 0x0fff, pcxu }, /* 0x059f - 0x059f */ { 0x05a0, 0x0ffe, pcxt_ }, /* 0x05a0 - 0x05a1 */ { 0x05a2, 0x0ffe, pcxu }, /* 0x05a2 - 0x05a3 */ { 0x05a4, 0x0ffc, pcxu }, /* 0x05a4 - 0x05a7 */ { 0x05a8, 0x0ffc, pcxu }, /* 0x05a8 - 0x05ab */ { 0x05ad, 0x0fff, pcxu_ }, /* 0x05ad - 0x05ad */ { 0x05ae, 0x0ffe, pcxu_ }, /* 0x05ae - 0x05af */ { 0x05b0, 0x0ffe, pcxu_ }, /* 0x05b0 - 0x05b1 */ { 0x05b2, 0x0fff, pcxu_ }, /* 0x05b2 - 0x05b2 */ { 0x05b3, 0x0fff, pcxu }, /* 0x05b3 - 0x05b3 */ { 0x05b4, 0x0fff, pcxw }, /* 0x05b4 - 0x05b4 */ { 0x05b5, 0x0fff, pcxu_ }, /* 0x05b5 - 0x05b5 */ { 0x05b6, 0x0ffe, pcxu_ }, /* 0x05b6 - 0x05b7 */ { 0x05b8, 0x0ffe, pcxu_ }, /* 0x05b8 - 0x05b9 */ { 0x05ba, 0x0fff, pcxu_ }, /* 0x05ba - 0x05ba */ { 0x05bb, 0x0fff, pcxw }, /* 0x05bb - 0x05bb */ { 0x05bc, 0x0ffc, pcxw }, /* 0x05bc - 0x05bf */ { 0x05c0, 0x0ffc, pcxw }, /* 0x05c0 - 0x05c3 */ { 0x05c4, 0x0ffe, pcxw }, /* 0x05c4 - 0x05c5 */ { 0x05c6, 0x0fff, pcxw }, /* 0x05c6 - 0x05c6 */ { 0x05c7, 0x0fff, pcxw_ }, /* 0x05c7 - 0x05c7 */ { 0x05c8, 0x0ffc, pcxw }, /* 0x05c8 - 0x05cb */ { 0x05cc, 0x0ffe, pcxw }, /* 0x05cc - 0x05cd */ { 0x05ce, 0x0ffe, pcxw_ }, /* 0x05ce - 0x05cf */ { 0x05d0, 0x0ffc, pcxw_ }, /* 0x05d0 - 0x05d3 */ { 0x05d4, 0x0ffe, pcxw_ }, /* 0x05d4 - 0x05d5 */ { 0x05d6, 0x0fff, pcxw 
}, /* 0x05d6 - 0x05d6 */ { 0x05d7, 0x0fff, pcxw_ }, /* 0x05d7 - 0x05d7 */ { 0x05d8, 0x0ffc, pcxw_ }, /* 0x05d8 - 0x05db */ { 0x05dc, 0x0ffe, pcxw2 }, /* 0x05dc - 0x05dd */ { 0x05de, 0x0fff, pcxw_ }, /* 0x05de - 0x05de */ { 0x05df, 0x0fff, pcxw2 }, /* 0x05df - 0x05df */ { 0x05e0, 0x0ffc, pcxw2 }, /* 0x05e0 - 0x05e3 */ { 0x05e4, 0x0fff, pcxw2 }, /* 0x05e4 - 0x05e4 */ { 0x05e5, 0x0fff, pcxw_ }, /* 0x05e5 - 0x05e5 */ { 0x05e6, 0x0ffe, pcxw2 }, /* 0x05e6 - 0x05e7 */ { 0x05e8, 0x0ff8, pcxw2 }, /* 0x05e8 - 0x05ef */ { 0x05f0, 0x0ff0, pcxw2 }, /* 0x05f0 - 0x05ff */ { 0x0600, 0x0fe0, pcxl }, /* 0x0600 - 0x061f */ { 0x0880, 0x0ff0, mako }, /* 0x0880 - 0x088f */ { 0x0890, 0x0ff0, mako2 }, /* 0x0890 - 0x089f */ { 0x0000, 0x0000, pcx } /* terminate table */ }; const char * const cpu_name_version[][2] = { [pcx] = { "PA7000 (PCX)", "1.0" }, [pcxs] = { "PA7000 (PCX-S)", "1.1a" }, [pcxt] = { "PA7100 (PCX-T)", "1.1b" }, [pcxt_] = { "PA7200 (PCX-T')", "1.1c" }, [pcxl] = { "PA7100LC (PCX-L)", "1.1d" }, [pcxl2] = { "PA7300LC (PCX-L2)", "1.1e" }, [pcxu] = { "PA8000 (PCX-U)", "2.0" }, [pcxu_] = { "PA8200 (PCX-U+)", "2.0" }, [pcxw] = { "PA8500 (PCX-W)", "2.0" }, [pcxw_] = { "PA8600 (PCX-W+)", "2.0" }, [pcxw2] = { "PA8700 (PCX-W2)", "2.0" }, [mako] = { "PA8800 (Mako)", "2.0" }, [mako2] = { "PA8900 (Shortfin)", "2.0" } }; const char * __devinit parisc_hardware_description(struct parisc_device_id *id) { struct hp_hardware *listptr; for (listptr = hp_hardware_list; listptr->hw_type != HPHW_FAULTY; listptr++) { if ((listptr->hw_type == id->hw_type) && (listptr->hversion == id->hversion) && (listptr->sversion == id->sversion)){ return listptr->name; } } /* * ok, the above hardware table isn't complete, and we haven't found * our device in this table. So let's now try to find a generic name * to describe the given hardware... */ switch (id->hw_type) { case HPHW_NPROC: return "Unknown machine"; case HPHW_A_DIRECT: switch (id->sversion) { case 0x0D: return "MUX port"; case 0x0E: return "RS-232 port"; } break; case HPHW_MEMORY: return "Memory"; } return "unknown device"; } /* Interpret hversion (ret[0]) from PDC_MODEL(4)/PDC_MODEL_INFO(0) */ enum cpu_type __cpuinit parisc_get_cpu_type(unsigned long hversion) { struct hp_cpu_type_mask *ptr; unsigned short model = ((unsigned short) (hversion)) >> 4; for (ptr = hp_cpu_type_mask_list; 0 != ptr->mask; ptr++) { if (ptr->model == (model & ptr->mask)) return ptr->cpu; } panic("could not identify CPU type\n"); return pcx; /* not reached: */ }
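/*
 * Illustration (invented sketch, not part of hardware.c): the model/mask
 * lookup in parisc_get_cpu_type() above can be exercised in plain user
 * space. The trimmed three-row table and the sample hversion value below
 * are made up for the demo; only the >> 4 shift and the (model & mask)
 * comparison mirror the function itself.
 */
#include <stdio.h>

enum demo_cpu_type { demo_pcx, demo_pcxl, demo_pcxu };

struct demo_cpu_mask {
	unsigned short model;	/* value the masked model must equal */
	unsigned short mask;	/* which model bits are significant */
	enum demo_cpu_type cpu;
};

static const struct demo_cpu_mask demo_table[] = {
	{ 0x0000, 0x0ff0, demo_pcx  },	/* models 0x0000 - 0x000f */
	{ 0x0048, 0x0ff0, demo_pcxl },	/* models 0x0040 - 0x004f */
	{ 0x0184, 0x0fff, demo_pcxu },	/* model  0x0184 only     */
	{ 0x0000, 0x0000, demo_pcx  }	/* terminator: mask == 0  */
};

int main(void)
{
	/* Made-up hversion; the low 4 bits are dropped first, as above. */
	unsigned long hversion = 0x1840;
	unsigned short model = ((unsigned short)hversion) >> 4;
	const struct demo_cpu_mask *p;

	for (p = demo_table; p->mask != 0; p++) {
		if (p->model == (model & p->mask)) {
			/* prints: model 0x0184 -> cpu_type 2 (demo_pcxu) */
			printf("model 0x%04x -> cpu_type %d\n",
			       (unsigned)model, (int)p->cpu);
			return 0;
		}
	}
	printf("model 0x%04x not identified\n", (unsigned)model);
	return 1;
}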
gpl-2.0
Nold360/GC-Linux-Kernel-2.6.32
drivers/scsi/scsi_transport_srp.c
9996
10394
/*
 * SCSI RDMA (SRP) transport class
 *
 * Copyright (C) 2007 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>

#include "scsi_transport_srp_internal.h"

struct srp_host_attrs {
	atomic_t next_port_id;
};
#define to_srp_host_attrs(host)	((struct srp_host_attrs *)(host)->shost_data)

#define SRP_HOST_ATTRS 0
#define SRP_RPORT_ATTRS 2

struct srp_internal {
	struct scsi_transport_template t;
	struct srp_function_template *f;

	struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];

	struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
	struct device_attribute private_rport_attrs[SRP_RPORT_ATTRS];
	struct transport_container rport_attr_cont;
};

#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)

#define dev_to_rport(d)	container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)

static int srp_host_setup(struct transport_container *tc, struct device *dev,
			  struct device *cdev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);

	atomic_set(&srp_host->next_port_id, 0);
	return 0;
}

static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
			       NULL, NULL);

static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
			       NULL, NULL, NULL);

#define SETUP_TEMPLATE(attrb, field, perm, test, ro_test, ro_perm)	\
	i->private_##attrb[count] = dev_attr_##field;			\
	i->private_##attrb[count].attr.mode = perm;			\
	if (ro_test) {							\
		i->private_##attrb[count].attr.mode = ro_perm;		\
		i->private_##attrb[count].store = NULL;			\
	}								\
	i->attrb[count] = &i->private_##attrb[count];			\
	if (test)							\
		count++

#define SETUP_RPORT_ATTRIBUTE_RD(field)					\
	SETUP_TEMPLATE(rport_attrs, field, S_IRUGO, 1, 0, 0)

#define SETUP_RPORT_ATTRIBUTE_RW(field)					\
	SETUP_TEMPLATE(rport_attrs, field, S_IRUGO | S_IWUSR,		\
		       1, 1, S_IRUGO)

#define SRP_PID(p) \
	(p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
	(p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
	(p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
	(p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]

#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
	"%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"

static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
}

static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);

static const struct {
	u32 value;
	char *name;
} srp_rport_role_names[] = {
	{SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"},
	{SRP_RPORT_ROLE_TARGET, "SRP Target"},
};

static ssize_t
show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++)
		if (srp_rport_role_names[i].value == rport->roles) {
			name = srp_rport_role_names[i].name;
			break;
		}
	return sprintf(buf, "%s\n", name ? : "unknown");
}

static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);

static void srp_rport_release(struct device *dev)
{
	struct srp_rport *rport = dev_to_rport(dev);

	put_device(dev->parent);
	kfree(rport);
}

static int scsi_is_srp_rport(const struct device *dev)
{
	return dev->release == srp_rport_release;
}

static int srp_rport_match(struct attribute_container *cont,
			   struct device *dev)
{
	struct Scsi_Host *shost;
	struct srp_internal *i;

	if (!scsi_is_srp_rport(dev))
		return 0;

	shost = dev_to_shost(dev->parent);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
		return 0;

	i = to_srp_internal(shost->transportt);
	return &i->rport_attr_cont.ac == cont;
}

static int srp_host_match(struct attribute_container *cont, struct device *dev)
{
	struct Scsi_Host *shost;
	struct srp_internal *i;

	if (!scsi_is_host_device(dev))
		return 0;

	shost = dev_to_shost(dev);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
		return 0;

	i = to_srp_internal(shost->transportt);
	return &i->t.host_attrs.ac == cont;
}

/**
 * srp_rport_add - add a SRP remote port to the device hierarchy
 * @shost:	scsi host the remote port is connected to.
 * @ids:	The port id for the remote port.
 *
 * Publishes a port to the rest of the system.
 */
struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
				struct srp_rport_identifiers *ids)
{
	struct srp_rport *rport;
	struct device *parent = &shost->shost_gendev;
	int id, ret;

	rport = kzalloc(sizeof(*rport), GFP_KERNEL);
	if (!rport)
		return ERR_PTR(-ENOMEM);

	device_initialize(&rport->dev);

	rport->dev.parent = get_device(parent);
	rport->dev.release = srp_rport_release;

	memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
	rport->roles = ids->roles;

	id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
	dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);

	transport_setup_device(&rport->dev);

	ret = device_add(&rport->dev);
	if (ret) {
		transport_destroy_device(&rport->dev);
		put_device(&rport->dev);
		return ERR_PTR(ret);
	}

	if (shost->active_mode & MODE_TARGET &&
	    ids->roles == SRP_RPORT_ROLE_INITIATOR) {
		ret = srp_tgt_it_nexus_create(shost, (unsigned long)rport,
					      rport->port_id);
		if (ret) {
			device_del(&rport->dev);
			transport_destroy_device(&rport->dev);
			put_device(&rport->dev);
			return ERR_PTR(ret);
		}
	}

	transport_add_device(&rport->dev);
	transport_configure_device(&rport->dev);

	return rport;
}
EXPORT_SYMBOL_GPL(srp_rport_add);

/**
 * srp_rport_del - remove a SRP remote port
 * @rport:	SRP remote port to remove
 *
 * Removes the specified SRP remote port.
 */
void srp_rport_del(struct srp_rport *rport)
{
	struct device *dev = &rport->dev;
	struct Scsi_Host *shost = dev_to_shost(dev->parent);

	if (shost->active_mode & MODE_TARGET &&
	    rport->roles == SRP_RPORT_ROLE_INITIATOR)
		srp_tgt_it_nexus_destroy(shost, (unsigned long)rport);

	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);

static int do_srp_rport_del(struct device *dev, void *data)
{
	if (scsi_is_srp_rport(dev))
		srp_rport_del(dev_to_rport(dev));
	return 0;
}

/**
 * srp_remove_host - tear down a Scsi_Host's SRP data structures
 * @shost:	Scsi Host that is torn down
 *
 * Removes all SRP remote ports for a given Scsi_Host.
 * Must be called just before scsi_remove_host for SRP HBAs.
 */
void srp_remove_host(struct Scsi_Host *shost)
{
	device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
}
EXPORT_SYMBOL_GPL(srp_remove_host);

static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
				 int result)
{
	struct srp_internal *i = to_srp_internal(shost->transportt);
	return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
}

static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
{
	struct srp_internal *i = to_srp_internal(shost->transportt);
	return i->f->it_nexus_response(shost, nexus, result);
}

/**
 * srp_attach_transport - instantiate SRP transport template
 * @ft:		SRP transport class function template
 */
struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *ft)
{
	int count;
	struct srp_internal *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
	i->t.it_nexus_response = srp_it_nexus_response;

	i->t.host_size = sizeof(struct srp_host_attrs);
	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
	i->t.host_attrs.ac.class = &srp_host_class.class;
	i->t.host_attrs.ac.match = srp_host_match;
	i->host_attrs[0] = NULL;
	transport_container_register(&i->t.host_attrs);

	i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
	i->rport_attr_cont.ac.class = &srp_rport_class.class;
	i->rport_attr_cont.ac.match = srp_rport_match;
	transport_container_register(&i->rport_attr_cont);

	count = 0;
	SETUP_RPORT_ATTRIBUTE_RD(port_id);
	SETUP_RPORT_ATTRIBUTE_RD(roles);
	i->rport_attrs[count] = NULL;

	i->f = ft;

	return &i->t;
}
EXPORT_SYMBOL_GPL(srp_attach_transport);

/**
 * srp_release_transport - release SRP transport template instance
 * @t:		transport template instance
 */
void srp_release_transport(struct scsi_transport_template *t)
{
	struct srp_internal *i = to_srp_internal(t);

	transport_container_unregister(&i->t.host_attrs);
	transport_container_unregister(&i->rport_attr_cont);

	kfree(i);
}
EXPORT_SYMBOL_GPL(srp_release_transport);

static __init int srp_transport_init(void)
{
	int ret;

	ret = transport_class_register(&srp_host_class);
	if (ret)
		return ret;
	ret = transport_class_register(&srp_rport_class);
	if (ret)
		goto unregister_host_class;

	return 0;
unregister_host_class:
	transport_class_unregister(&srp_host_class);
	return ret;
}

static void __exit srp_transport_exit(void)
{
	transport_class_unregister(&srp_host_class);
	transport_class_unregister(&srp_rport_class);
}

MODULE_AUTHOR("FUJITA Tomonori");
MODULE_DESCRIPTION("SRP Transport Attributes");
MODULE_LICENSE("GPL");

module_init(srp_transport_init);
module_exit(srp_transport_exit);
gpl-2.0
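A minimal consumer sketch for the SRP transport class above, assuming an initiator-mode HBA driver; the names my_srp_ft and my_hba_add_target are hypothetical, and error/teardown paths are elided:

#include <linux/err.h>
#include <linux/string.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_srp.h>

/* initiator-only drivers can leave the response callbacks unset */
static struct srp_function_template my_srp_ft;

static int my_hba_add_target(struct Scsi_Host *shost, const u8 port_id[16])
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	memcpy(ids.port_id, port_id, sizeof(ids.port_id));
	ids.roles = SRP_RPORT_ROLE_TARGET;

	/* publishes /sys/class/srp_remote_ports/port-<host>:<n> with the
	 * port_id and roles attributes registered by srp_attach_transport() */
	rport = srp_rport_add(shost, &ids);
	return IS_ERR(rport) ? PTR_ERR(rport) : 0;
}

/* at probe: shost->transportt = srp_attach_transport(&my_srp_ft);
 * at teardown: srp_remove_host(shost) just before scsi_remove_host(),
 * then srp_release_transport(shost->transportt) */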
ftteam/kernel
arch/ia64/kvm/mmio.c
10764
8856
/* * mmio.c: MMIO emulation components. * Copyright (c) 2004, Intel Corporation. * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) * Kun Tian (Kevin Tian) (Kevin.tian@intel.com) * * Copyright (c) 2007 Intel Corporation KVM support. * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) * Xiantao Zhang (xiantao.zhang@intel.com) * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include <linux/kvm_host.h> #include "vcpu.h" static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val) { VLSAPIC_XTP(v) = val; } /* * LSAPIC OFFSET */ #define PIB_LOW_HALF(ofst) !(ofst & (1 << 20)) #define PIB_OFST_INTA 0x1E0000 #define PIB_OFST_XTP 0x1E0008 /* * execute write IPI op. */ static void vlsapic_write_ipi(struct kvm_vcpu *vcpu, uint64_t addr, uint64_t data) { struct exit_ctl_data *p = &current_vcpu->arch.exit_data; unsigned long psr; local_irq_save(psr); p->exit_reason = EXIT_REASON_IPI; p->u.ipi_data.addr.val = addr; p->u.ipi_data.data.val = data; vmm_transition(current_vcpu); local_irq_restore(psr); } void lsapic_write(struct kvm_vcpu *v, unsigned long addr, unsigned long length, unsigned long val) { addr &= (PIB_SIZE - 1); switch (addr) { case PIB_OFST_INTA: panic_vm(v, "Undefined write on PIB INTA\n"); break; case PIB_OFST_XTP: if (length == 1) { vlsapic_write_xtp(v, val); } else { panic_vm(v, "Undefined write on PIB XTP\n"); } break; default: if (PIB_LOW_HALF(addr)) { /*Lower half */ if (length != 8) panic_vm(v, "Can't LHF write with size %ld!\n", length); else vlsapic_write_ipi(v, addr, val); } else { /*Upper half */ panic_vm(v, "IPI-UHF write %lx\n", addr); } break; } } unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr, unsigned long length) { uint64_t result = 0; addr &= (PIB_SIZE - 1); switch (addr) { case PIB_OFST_INTA: if (length == 1) /* 1 byte load */ ; /* There is no i8259, there is no INTA access*/ else panic_vm(v, "Undefined read on PIB INTA\n"); break; case PIB_OFST_XTP: if (length == 1) { result = VLSAPIC_XTP(v); } else { panic_vm(v, "Undefined read on PIB XTP\n"); } break; default: panic_vm(v, "Undefined addr access for lsapic!\n"); break; } return result; } static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest, u16 s, int ma, int dir) { unsigned long iot; struct exit_ctl_data *p = &vcpu->arch.exit_data; unsigned long psr; iot = __gpfn_is_io(src_pa >> PAGE_SHIFT); local_irq_save(psr); /*Intercept the access for PIB range*/ if (iot == GPFN_PIB) { if (!dir) lsapic_write(vcpu, src_pa, s, *dest); else *dest = lsapic_read(vcpu, src_pa, s); goto out; } p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION; p->u.ioreq.addr = src_pa; p->u.ioreq.size = s; p->u.ioreq.dir = dir; if (dir == IOREQ_WRITE) p->u.ioreq.data = *dest; p->u.ioreq.state = STATE_IOREQ_READY; vmm_transition(vcpu); if (p->u.ioreq.state == STATE_IORESP_READY) { if (dir == IOREQ_READ) /* it's necessary to ensure zero extending */ *dest = p->u.ioreq.data & (~0UL >> (64-(s*8))); } else panic_vm(vcpu, "Unhandled mmio 
access returned!\n"); out: local_irq_restore(psr); return ; } /* dir 1: read 0:write inst_type 0:integer 1:floating point */ #define SL_INTEGER 0 /* store/load interger*/ #define SL_FLOATING 1 /* store/load floating*/ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma) { struct kvm_pt_regs *regs; IA64_BUNDLE bundle; int slot, dir = 0; int inst_type = -1; u16 size = 0; u64 data, slot1a, slot1b, temp, update_reg; s32 imm; INST64 inst; regs = vcpu_regs(vcpu); if (fetch_code(vcpu, regs->cr_iip, &bundle)) { /* if fetch code fail, return and try again */ return; } slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri; if (!slot) inst.inst = bundle.slot0; else if (slot == 1) { slot1a = bundle.slot1a; slot1b = bundle.slot1b; inst.inst = slot1a + (slot1b << 18); } else if (slot == 2) inst.inst = bundle.slot2; /* Integer Load/Store */ if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) { inst_type = SL_INTEGER; size = (inst.M1.x6 & 0x3); if ((inst.M1.x6 >> 2) > 0xb) { /*write*/ dir = IOREQ_WRITE; data = vcpu_get_gr(vcpu, inst.M4.r2); } else if ((inst.M1.x6 >> 2) < 0xb) { /*read*/ dir = IOREQ_READ; } } else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) { /* Integer Load + Reg update */ inst_type = SL_INTEGER; dir = IOREQ_READ; size = (inst.M2.x6 & 0x3); temp = vcpu_get_gr(vcpu, inst.M2.r3); update_reg = vcpu_get_gr(vcpu, inst.M2.r2); temp += update_reg; vcpu_set_gr(vcpu, inst.M2.r3, temp, 0); } else if (inst.M3.major == 5) { /*Integer Load/Store + Imm update*/ inst_type = SL_INTEGER; size = (inst.M3.x6&0x3); if ((inst.M5.x6 >> 2) > 0xb) { /*write*/ dir = IOREQ_WRITE; data = vcpu_get_gr(vcpu, inst.M5.r2); temp = vcpu_get_gr(vcpu, inst.M5.r3); imm = (inst.M5.s << 31) | (inst.M5.i << 30) | (inst.M5.imm7 << 23); temp += imm >> 23; vcpu_set_gr(vcpu, inst.M5.r3, temp, 0); } else if ((inst.M3.x6 >> 2) < 0xb) { /*read*/ dir = IOREQ_READ; temp = vcpu_get_gr(vcpu, inst.M3.r3); imm = (inst.M3.s << 31) | (inst.M3.i << 30) | (inst.M3.imm7 << 23); temp += imm >> 23; vcpu_set_gr(vcpu, inst.M3.r3, temp, 0); } } else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B && inst.M9.m == 0 && inst.M9.x == 0) { /* Floating-point spill*/ struct ia64_fpreg v; inst_type = SL_FLOATING; dir = IOREQ_WRITE; vcpu_get_fpreg(vcpu, inst.M9.f2, &v); /* Write high word. FIXME: this is a kludge! */ v.u.bits[1] &= 0x3ffff; mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8, ma, IOREQ_WRITE); data = v.u.bits[0]; size = 3; } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) { /* Floating-point spill + Imm update */ struct ia64_fpreg v; inst_type = SL_FLOATING; dir = IOREQ_WRITE; vcpu_get_fpreg(vcpu, inst.M10.f2, &v); temp = vcpu_get_gr(vcpu, inst.M10.r3); imm = (inst.M10.s << 31) | (inst.M10.i << 30) | (inst.M10.imm7 << 23); temp += imm >> 23; vcpu_set_gr(vcpu, inst.M10.r3, temp, 0); /* Write high word.FIXME: this is a kludge! */ v.u.bits[1] &= 0x3ffff; mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8, ma, IOREQ_WRITE); data = v.u.bits[0]; size = 3; } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) { /* Floating-point stf8 + Imm update */ struct ia64_fpreg v; inst_type = SL_FLOATING; dir = IOREQ_WRITE; size = 3; vcpu_get_fpreg(vcpu, inst.M10.f2, &v); data = v.u.bits[0]; /* Significand. 
*/ temp = vcpu_get_gr(vcpu, inst.M10.r3); imm = (inst.M10.s << 31) | (inst.M10.i << 30) | (inst.M10.imm7 << 23); temp += imm >> 23; vcpu_set_gr(vcpu, inst.M10.r3, temp, 0); } else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c && inst.M15.x6 <= 0x2f) { temp = vcpu_get_gr(vcpu, inst.M15.r3); imm = (inst.M15.s << 31) | (inst.M15.i << 30) | (inst.M15.imm7 << 23); temp += imm >> 23; vcpu_set_gr(vcpu, inst.M15.r3, temp, 0); vcpu_increment_iip(vcpu); return; } else if (inst.M12.major == 6 && inst.M12.m == 1 && inst.M12.x == 1 && inst.M12.x6 == 1) { /* Floating-point Load Pair + Imm ldfp8 M12*/ struct ia64_fpreg v; inst_type = SL_FLOATING; dir = IOREQ_READ; size = 8; /*ldfd*/ mmio_access(vcpu, padr, &data, size, ma, dir); v.u.bits[0] = data; v.u.bits[1] = 0x1003E; vcpu_set_fpreg(vcpu, inst.M12.f1, &v); padr += 8; mmio_access(vcpu, padr, &data, size, ma, dir); v.u.bits[0] = data; v.u.bits[1] = 0x1003E; vcpu_set_fpreg(vcpu, inst.M12.f2, &v); padr += 8; vcpu_set_gr(vcpu, inst.M12.r3, padr, 0); vcpu_increment_iip(vcpu); return; } else { inst_type = -1; panic_vm(vcpu, "Unsupported MMIO access instruction! " "Bundle[0]=0x%lx, Bundle[1]=0x%lx\n", bundle.i64[0], bundle.i64[1]); } size = 1 << size; if (dir == IOREQ_WRITE) { mmio_access(vcpu, padr, &data, size, ma, dir); } else { mmio_access(vcpu, padr, &data, size, ma, dir); if (inst_type == SL_INTEGER) vcpu_set_gr(vcpu, inst.M1.r1, data, 0); else panic_vm(vcpu, "Unsupported instruction type!\n"); } vcpu_increment_iip(vcpu); }
gpl-2.0
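The zero-extension step in mmio_access() above, "*dest = p->u.ioreq.data & (~0UL >> (64-(s*8)))", is easy to misread; this standalone userspace demo (not part of the kernel file) prints the mask and the masked value for each legal access size s:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t data = 0xDEADBEEFCAFEF00DULL;
	unsigned s;

	for (s = 1; s <= 8; s <<= 1) {
		/* keep only the low s bytes of the 64-bit I/O result */
		uint64_t mask = ~0ULL >> (64 - s * 8);
		printf("s=%u mask=0x%016llx value=0x%016llx\n",
		       s, (unsigned long long)mask,
		       (unsigned long long)(data & mask));
	}
	return 0;
}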
franzjesus/GTab-10.1-AOKP
net/netfilter/xt_LED.c
11020
5597
/* * xt_LED.c - netfilter target to make LEDs blink upon packet matches * * Copyright (C) 2008 Adam Nielsen <a.nielsen@shikadi.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301 USA. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter/x_tables.h> #include <linux/slab.h> #include <linux/leds.h> #include <linux/mutex.h> #include <linux/netfilter/xt_LED.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>"); MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match"); MODULE_ALIAS("ipt_LED"); MODULE_ALIAS("ip6t_LED"); static LIST_HEAD(xt_led_triggers); static DEFINE_MUTEX(xt_led_mutex); /* * This is declared in here (the kernel module) only, to avoid having these * dependencies in userspace code. This is what xt_led_info.internal_data * points to. */ struct xt_led_info_internal { struct list_head list; int refcnt; char *trigger_id; struct led_trigger netfilter_led_trigger; struct timer_list timer; }; static unsigned int led_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_led_info *ledinfo = par->targinfo; struct xt_led_info_internal *ledinternal = ledinfo->internal_data; /* * If "always blink" is enabled, and there's still some time until the * LED will switch off, briefly switch it off now. 
*/ if ((ledinfo->delay > 0) && ledinfo->always_blink && timer_pending(&ledinternal->timer)) led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF); led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL); /* If there's a positive delay, start/update the timer */ if (ledinfo->delay > 0) { mod_timer(&ledinternal->timer, jiffies + msecs_to_jiffies(ledinfo->delay)); /* Otherwise if there was no delay given, blink as fast as possible */ } else if (ledinfo->delay == 0) { led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF); } /* else the delay is negative, which means switch on and stay on */ return XT_CONTINUE; } static void led_timeout_callback(unsigned long data) { struct xt_led_info_internal *ledinternal = (struct xt_led_info_internal *)data; led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF); } static struct xt_led_info_internal *led_trigger_lookup(const char *name) { struct xt_led_info_internal *ledinternal; list_for_each_entry(ledinternal, &xt_led_triggers, list) { if (!strcmp(name, ledinternal->netfilter_led_trigger.name)) { return ledinternal; } } return NULL; } static int led_tg_check(const struct xt_tgchk_param *par) { struct xt_led_info *ledinfo = par->targinfo; struct xt_led_info_internal *ledinternal; int err; if (ledinfo->id[0] == '\0') { pr_info("No 'id' parameter given.\n"); return -EINVAL; } mutex_lock(&xt_led_mutex); ledinternal = led_trigger_lookup(ledinfo->id); if (ledinternal) { ledinternal->refcnt++; goto out; } err = -ENOMEM; ledinternal = kzalloc(sizeof(struct xt_led_info_internal), GFP_KERNEL); if (!ledinternal) goto exit_mutex_only; ledinternal->trigger_id = kstrdup(ledinfo->id, GFP_KERNEL); if (!ledinternal->trigger_id) goto exit_internal_alloc; ledinternal->refcnt = 1; ledinternal->netfilter_led_trigger.name = ledinternal->trigger_id; err = led_trigger_register(&ledinternal->netfilter_led_trigger); if (err) { pr_warning("led_trigger_register() failed\n"); if (err == -EEXIST) pr_warning("Trigger name is already in use.\n"); goto exit_alloc; } /* See if we need to set up a timer */ if (ledinfo->delay > 0) setup_timer(&ledinternal->timer, led_timeout_callback, (unsigned long)ledinternal); list_add_tail(&ledinternal->list, &xt_led_triggers); out: mutex_unlock(&xt_led_mutex); ledinfo->internal_data = ledinternal; return 0; exit_alloc: kfree(ledinternal->trigger_id); exit_internal_alloc: kfree(ledinternal); exit_mutex_only: mutex_unlock(&xt_led_mutex); return err; } static void led_tg_destroy(const struct xt_tgdtor_param *par) { const struct xt_led_info *ledinfo = par->targinfo; struct xt_led_info_internal *ledinternal = ledinfo->internal_data; mutex_lock(&xt_led_mutex); if (--ledinternal->refcnt) { mutex_unlock(&xt_led_mutex); return; } list_del(&ledinternal->list); if (ledinfo->delay > 0) del_timer_sync(&ledinternal->timer); led_trigger_unregister(&ledinternal->netfilter_led_trigger); mutex_unlock(&xt_led_mutex); kfree(ledinternal->trigger_id); kfree(ledinternal); } static struct xt_target led_tg_reg __read_mostly = { .name = "LED", .revision = 0, .family = NFPROTO_UNSPEC, .target = led_tg, .targetsize = sizeof(struct xt_led_info), .checkentry = led_tg_check, .destroy = led_tg_destroy, .me = THIS_MODULE, }; static int __init led_tg_init(void) { return xt_register_target(&led_tg_reg); } static void __exit led_tg_exit(void) { xt_unregister_target(&led_tg_reg); } module_init(led_tg_init); module_exit(led_tg_exit);
gpl-2.0
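In led_tg() above, the sign of ledinfo->delay selects one of three behaviours, and always_blink additionally forces a brief off/on transition when the off-timer is still pending so that back-to-back packets remain visible as separate blinks; a condensed restatement (standalone sketch, names hypothetical):

/* delay > 0:  LED on; a timer turns it off delay ms after the last match
 * delay == 0: LED pulsed on and immediately off (fastest visible blink)
 * delay < 0:  LED switched on and left on */
enum led_behaviour { LED_TIMED_OFF, LED_FAST_BLINK, LED_STAY_ON };

static enum led_behaviour classify_delay(int delay)
{
	if (delay > 0)
		return LED_TIMED_OFF;
	if (delay == 0)
		return LED_FAST_BLINK;
	return LED_STAY_ON;
}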
Dm47021/Android_kernel_f6mt_aosp
sound/core/timer_compat.c
13836
3619
/* * 32bit -> 64bit ioctl wrapper for timer API * Copyright (c) by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* This file included from timer.c */ #include <linux/compat.h> struct snd_timer_info32 { u32 flags; s32 card; unsigned char id[64]; unsigned char name[80]; u32 reserved0; u32 resolution; unsigned char reserved[64]; }; static int snd_timer_user_info_compat(struct file *file, struct snd_timer_info32 __user *_info) { struct snd_timer_user *tu; struct snd_timer_info32 info; struct snd_timer *t; tu = file->private_data; if (snd_BUG_ON(!tu->timeri)) return -ENXIO; t = tu->timeri->timer; if (snd_BUG_ON(!t)) return -ENXIO; memset(&info, 0, sizeof(info)); info.card = t->card ? t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) info.flags |= SNDRV_TIMER_FLG_SLAVE; strlcpy(info.id, t->id, sizeof(info.id)); strlcpy(info.name, t->name, sizeof(info.name)); info.resolution = t->hw.resolution; if (copy_to_user(_info, &info, sizeof(*_info))) return -EFAULT; return 0; } struct snd_timer_status32 { struct compat_timespec tstamp; u32 resolution; u32 lost; u32 overrun; u32 queue; unsigned char reserved[64]; }; static int snd_timer_user_status_compat(struct file *file, struct snd_timer_status32 __user *_status) { struct snd_timer_user *tu; struct snd_timer_status status; tu = file->private_data; if (snd_BUG_ON(!tu->timeri)) return -ENXIO; memset(&status, 0, sizeof(status)); status.tstamp = tu->tstamp; status.resolution = snd_timer_resolution(tu->timeri); status.lost = tu->timeri->lost; status.overrun = tu->overrun; spin_lock_irq(&tu->qlock); status.queue = tu->qused; spin_unlock_irq(&tu->qlock); if (copy_to_user(_status, &status, sizeof(status))) return -EFAULT; return 0; } /* */ enum { SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32), SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32), }; static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); switch (cmd) { case SNDRV_TIMER_IOCTL_PVERSION: case SNDRV_TIMER_IOCTL_TREAD: case SNDRV_TIMER_IOCTL_GINFO: case SNDRV_TIMER_IOCTL_GPARAMS: case SNDRV_TIMER_IOCTL_GSTATUS: case SNDRV_TIMER_IOCTL_SELECT: case SNDRV_TIMER_IOCTL_PARAMS: case SNDRV_TIMER_IOCTL_START: case SNDRV_TIMER_IOCTL_START_OLD: case SNDRV_TIMER_IOCTL_STOP: case SNDRV_TIMER_IOCTL_STOP_OLD: case SNDRV_TIMER_IOCTL_CONTINUE: case SNDRV_TIMER_IOCTL_CONTINUE_OLD: case SNDRV_TIMER_IOCTL_PAUSE: case SNDRV_TIMER_IOCTL_PAUSE_OLD: case SNDRV_TIMER_IOCTL_NEXT_DEVICE: return snd_timer_user_ioctl(file, cmd, (unsigned long)argp); case SNDRV_TIMER_IOCTL_INFO32: return snd_timer_user_info_compat(file, argp); case SNDRV_TIMER_IOCTL_STATUS32: return snd_timer_user_status_compat(file, argp); } return -ENOIOCTLCMD; }
gpl-2.0
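The wrapper above works because an ioctl command number encodes the sizeof() of its argument type: 32-bit userspace, where 'unsigned long' is 4 bytes and timespec is compat_timespec, computes different command values than 64-bit userspace does. A schematic of the pattern with hypothetical names:

#include <linux/ioctl.h>

/* the 32-bit mirror of a native struct: long-sized fields become u32,
 * which changes sizeof() and therefore the generated command value */
struct my_info32 {
	u32 flags;			/* 'unsigned long' in the 32-bit ABI */
	unsigned char reserved[64];
};

enum {
	MY_IOCTL_INFO32 = _IOR('X', 0x11, struct my_info32),
};

/* the compat handler forwards size-stable commands to the native ioctl
 * unchanged and gives each *32 command a field-by-field translator */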
minorua/QGIS
src/core/raster/qgsbrightnesscontrastfilter.cpp
13
6304
/*************************************************************************** qgsbrightnesscontrastfilter.cpp --------------------- begin : February 2013 copyright : (C) 2013 by Alexander Bruy email : alexander dot bruy at gmail dot com ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include "qgsrasterdataprovider.h" #include "qgsbrightnesscontrastfilter.h" #include <QDomDocument> #include <QDomElement> QgsBrightnessContrastFilter::QgsBrightnessContrastFilter( QgsRasterInterface *input ) : QgsRasterInterface( input ) { } QgsBrightnessContrastFilter *QgsBrightnessContrastFilter::clone() const { QgsDebugMsgLevel( QStringLiteral( "Entered" ), 4 ); QgsBrightnessContrastFilter *filter = new QgsBrightnessContrastFilter( nullptr ); filter->setBrightness( mBrightness ); filter->setContrast( mContrast ); return filter; } int QgsBrightnessContrastFilter::bandCount() const { if ( mOn ) { return 1; } if ( mInput ) { return mInput->bandCount(); } return 0; } Qgis::DataType QgsBrightnessContrastFilter::dataType( int bandNo ) const { if ( mOn ) { return Qgis::ARGB32_Premultiplied; } if ( mInput ) { return mInput->dataType( bandNo ); } return Qgis::UnknownDataType; } bool QgsBrightnessContrastFilter::setInput( QgsRasterInterface *input ) { QgsDebugMsgLevel( QStringLiteral( "Entered" ), 4 ); // Brightness filter can only work with single band ARGB32_Premultiplied if ( !input ) { QgsDebugMsgLevel( QStringLiteral( "No input" ), 4 ); return false; } if ( !mOn ) { // In off mode we can connect to anything QgsDebugMsgLevel( QStringLiteral( "OK" ), 4 ); mInput = input; return true; } if ( input->bandCount() < 1 ) { QgsDebugMsg( QStringLiteral( "No input band" ) ); return false; } if ( input->dataType( 1 ) != Qgis::ARGB32_Premultiplied && input->dataType( 1 ) != Qgis::ARGB32 ) { QgsDebugMsg( QStringLiteral( "Unknown input data type" ) ); return false; } mInput = input; QgsDebugMsgLevel( QStringLiteral( "OK" ), 4 ); return true; } QgsRasterBlock *QgsBrightnessContrastFilter::block( int bandNo, QgsRectangle const &extent, int width, int height, QgsRasterBlockFeedback *feedback ) { Q_UNUSED( bandNo ) QgsDebugMsgLevel( QStringLiteral( "width = %1 height = %2 extent = %3" ).arg( width ).arg( height ).arg( extent.toString() ), 4 ); std::unique_ptr< QgsRasterBlock > outputBlock( new QgsRasterBlock() ); if ( !mInput ) { return outputBlock.release(); } // At this moment we know that we read rendered image int bandNumber = 1; std::unique_ptr< QgsRasterBlock > inputBlock( mInput->block( bandNumber, extent, width, height, feedback ) ); if ( !inputBlock || inputBlock->isEmpty() ) { QgsDebugMsg( QStringLiteral( "No raster data!" ) ); return outputBlock.release(); } if ( mBrightness == 0 && mContrast == 0 ) { QgsDebugMsgLevel( QStringLiteral( "No brightness changes." 
), 4 ); return inputBlock.release(); } if ( !outputBlock->reset( Qgis::ARGB32_Premultiplied, width, height ) ) { return outputBlock.release(); } // adjust image QRgb myNoDataColor = qRgba( 0, 0, 0, 0 ); QRgb myColor; int r, g, b, alpha; double f = std::pow( ( mContrast + 100 ) / 100.0, 2 ); for ( qgssize i = 0; i < ( qgssize )width * height; i++ ) { if ( inputBlock->color( i ) == myNoDataColor ) { outputBlock->setColor( i, myNoDataColor ); continue; } myColor = inputBlock->color( i ); alpha = qAlpha( myColor ); r = adjustColorComponent( qRed( myColor ), alpha, mBrightness, f ); g = adjustColorComponent( qGreen( myColor ), alpha, mBrightness, f ); b = adjustColorComponent( qBlue( myColor ), alpha, mBrightness, f ); outputBlock->setColor( i, qRgba( r, g, b, alpha ) ); } return outputBlock.release(); } int QgsBrightnessContrastFilter::adjustColorComponent( int colorComponent, int alpha, int brightness, double contrastFactor ) const { if ( alpha == 255 ) { // Opaque pixel, do simpler math return qBound( 0, ( int )( ( ( ( ( ( colorComponent / 255.0 ) - 0.5 ) * contrastFactor ) + 0.5 ) * 255 ) + brightness ), 255 ); } else if ( alpha == 0 ) { // Totally transparent pixel return 0; } else { // Semi-transparent pixel. We need to adjust the math since we are using Qgis::ARGB32_Premultiplied // and color values have been premultiplied by alpha double alphaFactor = alpha / 255.; double adjustedColor = colorComponent / alphaFactor; // Make sure to return a premultiplied color return alphaFactor * qBound( 0., ( ( ( ( ( ( adjustedColor / 255.0 ) - 0.5 ) * contrastFactor ) + 0.5 ) * 255 ) + brightness ), 255. ); } } void QgsBrightnessContrastFilter::writeXml( QDomDocument &doc, QDomElement &parentElem ) const { if ( parentElem.isNull() ) { return; } QDomElement filterElem = doc.createElement( QStringLiteral( "brightnesscontrast" ) ); filterElem.setAttribute( QStringLiteral( "brightness" ), QString::number( mBrightness ) ); filterElem.setAttribute( QStringLiteral( "contrast" ), QString::number( mContrast ) ); parentElem.appendChild( filterElem ); } void QgsBrightnessContrastFilter::readXml( const QDomElement &filterElem ) { if ( filterElem.isNull() ) { return; } mBrightness = filterElem.attribute( QStringLiteral( "brightness" ), QStringLiteral( "0" ) ).toInt(); mContrast = filterElem.attribute( QStringLiteral( "contrast" ), QStringLiteral( "0" ) ).toInt(); }
gpl-2.0
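A worked, standalone restatement (plain C, illustration only) of the opaque-pixel path in adjustColorComponent() above: the contrast factor f = ((contrast + 100) / 100)^2 scales each component's distance from mid-grey, brightness is added afterwards, and the result is clamped to 0..255:

#include <math.h>
#include <stdio.h>

static int clampi(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

static int adjust(int c, int brightness, int contrast)
{
	double f = pow((contrast + 100) / 100.0, 2);
	return clampi((int)((((c / 255.0 - 0.5) * f + 0.5) * 255) + brightness));
}

int main(void)
{
	/* contrast = 50 gives f = 2.25: mid-grey stays put while darker
	 * and lighter values are pushed apart (clipping at the extremes) */
	printf("%d %d %d\n", adjust(64, 0, 50), adjust(128, 0, 50),
	       adjust(192, 0, 50));	/* prints: 0 128 255 */
	return 0;
}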
rispo/almas
src/server/scripts/Northrend/howling_fjord.cpp
13
13780
/* * Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* ScriptData SDName: Sholazar_Basin SD%Complete: 100 SDComment: Quest support: 11253, 11241. SDCategory: howling_fjord EndScriptData */ /* ContentData npc_plaguehound_tracker npc_apothecary_hanes EndContentData */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "ScriptedGossip.h" #include "ScriptedEscortAI.h" #include "Player.h" /*###### ## npc_apothecary_hanes ######*/ enum Entries { NPC_APOTHECARY_HANES = 23784, FACTION_ESCORTEE_A = 774, FACTION_ESCORTEE_H = 775, NPC_HANES_FIRE_TRIGGER = 23968, QUEST_TRAIL_OF_FIRE = 11241, SPELL_COSMETIC_LOW_POLY_FIRE = 56274 }; class npc_apothecary_hanes : public CreatureScript { public: npc_apothecary_hanes() : CreatureScript("npc_apothecary_hanes") { } bool OnQuestAccept(Player* player, Creature* creature, Quest const* quest) { if (quest->GetQuestId() == QUEST_TRAIL_OF_FIRE) { switch (player->GetTeam()) { case ALLIANCE: creature->setFaction(FACTION_ESCORTEE_A); break; case HORDE: creature->setFaction(FACTION_ESCORTEE_H); break; } CAST_AI(npc_escortAI, (creature->AI()))->Start(true, false, player->GetGUID()); } return true; } struct npc_Apothecary_HanesAI : public npc_escortAI { npc_Apothecary_HanesAI(Creature* creature) : npc_escortAI(creature){} uint32 PotTimer; void Reset() { SetDespawnAtFar(false); PotTimer = 10000; //10 sec cooldown on potion } void JustDied(Unit* /*killer*/) { if (Player* player = GetPlayerForEscort()) player->FailQuest(QUEST_TRAIL_OF_FIRE); } void UpdateEscortAI(const uint32 diff) { if (HealthBelowPct(75)) { if (PotTimer <= diff) { DoCast(me, 17534, true); PotTimer = 10000; } else PotTimer -= diff; } if (GetAttack() && UpdateVictim()) DoMeleeAttackIfReady(); } void WaypointReached(uint32 waypointId) { Player* player = GetPlayerForEscort(); if (!player) return; switch (waypointId) { case 1: me->SetReactState(REACT_AGGRESSIVE); SetRun(true); break; case 23: player->GroupEventHappens(QUEST_TRAIL_OF_FIRE, me); me->DespawnOrUnsummon(); break; case 5: if (Unit* Trigger = me->FindNearestCreature(NPC_HANES_FIRE_TRIGGER, 10.0f)) Trigger->CastSpell(Trigger, SPELL_COSMETIC_LOW_POLY_FIRE, false); SetRun(false); break; case 6: if (Unit* Trigger = me->FindNearestCreature(NPC_HANES_FIRE_TRIGGER, 10.0f)) Trigger->CastSpell(Trigger, SPELL_COSMETIC_LOW_POLY_FIRE, false); SetRun(true); break; case 8: if (Unit* Trigger = me->FindNearestCreature(NPC_HANES_FIRE_TRIGGER, 10.0f)) Trigger->CastSpell(Trigger, SPELL_COSMETIC_LOW_POLY_FIRE, false); SetRun(false); break; case 9: if (Unit* Trigger = me->FindNearestCreature(NPC_HANES_FIRE_TRIGGER, 10.0f)) Trigger->CastSpell(Trigger, SPELL_COSMETIC_LOW_POLY_FIRE, false); break; case 10: SetRun(true); break; case 13: SetRun(false); break; case 14: if (Unit* Trigger = me->FindNearestCreature(NPC_HANES_FIRE_TRIGGER, 10.0f)) Trigger->CastSpell(Trigger, SPELL_COSMETIC_LOW_POLY_FIRE, false); SetRun(true); 
break; } } }; CreatureAI* GetAI(Creature* creature) const { return new npc_Apothecary_HanesAI(creature); } }; /*###### ## npc_plaguehound_tracker ######*/ enum ePlaguehound { QUEST_SNIFF_OUT_ENEMY = 11253 }; class npc_plaguehound_tracker : public CreatureScript { public: npc_plaguehound_tracker() : CreatureScript("npc_plaguehound_tracker") { } struct npc_plaguehound_trackerAI : public npc_escortAI { npc_plaguehound_trackerAI(Creature* creature) : npc_escortAI(creature) { } void Reset() { uint64 summonerGUID = 0; if (me->isSummon()) if (Unit* summoner = me->ToTempSummon()->GetSummoner()) if (summoner->GetTypeId() == TYPEID_PLAYER) summonerGUID = summoner->GetGUID(); if (!summonerGUID) return; me->SetUnitMovementFlags(MOVEMENTFLAG_WALKING); Start(false, false, summonerGUID); } void WaypointReached(uint32 waypointId) { if (waypointId != 26) return; me->DespawnOrUnsummon(); } }; CreatureAI* GetAI(Creature* creature) const { return new npc_plaguehound_trackerAI(creature); } }; /*###### ## npc_razael_and_lyana ######*/ #define GOSSIP_RAZAEL_REPORT "High Executor Anselm wants a report on the situation." #define GOSSIP_LYANA_REPORT "High Executor Anselm requests your report." enum eRazael { QUEST_REPORTS_FROM_THE_FIELD = 11221, NPC_RAZAEL = 23998, NPC_LYANA = 23778, GOSSIP_TEXTID_RAZAEL1 = 11562, GOSSIP_TEXTID_RAZAEL2 = 11564, GOSSIP_TEXTID_LYANA1 = 11586, GOSSIP_TEXTID_LYANA2 = 11588 }; class npc_razael_and_lyana : public CreatureScript { public: npc_razael_and_lyana() : CreatureScript("npc_razael_and_lyana") { } bool OnGossipHello(Player* player, Creature* creature) { if (creature->isQuestGiver()) player->PrepareQuestMenu(creature->GetGUID()); if (player->GetQuestStatus(QUEST_REPORTS_FROM_THE_FIELD) == QUEST_STATUS_INCOMPLETE) switch (creature->GetEntry()) { case NPC_RAZAEL: if (!player->GetReqKillOrCastCurrentCount(QUEST_REPORTS_FROM_THE_FIELD, NPC_RAZAEL)) { player->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_RAZAEL_REPORT, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF + 1); player->SEND_GOSSIP_MENU(GOSSIP_TEXTID_RAZAEL1, creature->GetGUID()); return true; } break; case NPC_LYANA: if (!player->GetReqKillOrCastCurrentCount(QUEST_REPORTS_FROM_THE_FIELD, NPC_LYANA)) { player->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_LYANA_REPORT, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF + 2); player->SEND_GOSSIP_MENU(GOSSIP_TEXTID_LYANA1, creature->GetGUID()); return true; } break; } player->SEND_GOSSIP_MENU(player->GetGossipTextId(creature), creature->GetGUID()); return true; } bool OnGossipSelect(Player* player, Creature* creature, uint32 /*sender*/, uint32 action) { player->PlayerTalkClass->ClearMenus(); switch (action) { case GOSSIP_ACTION_INFO_DEF + 1: player->SEND_GOSSIP_MENU(GOSSIP_TEXTID_RAZAEL2, creature->GetGUID()); player->TalkedToCreature(NPC_RAZAEL, creature->GetGUID()); break; case GOSSIP_ACTION_INFO_DEF + 2: player->SEND_GOSSIP_MENU(GOSSIP_TEXTID_LYANA2, creature->GetGUID()); player->TalkedToCreature(NPC_LYANA, creature->GetGUID()); break; } return true; } }; /*###### ## npc_mcgoyver ######*/ #define GOSSIP_ITEM_MG_I "Walt sent me to pick up some dark iron ingots." #define GOSSIP_ITEM_MG_II "Yarp." 
enum eMcGoyver { QUEST_WE_CAN_REBUILD_IT = 11483, SPELL_CREATURE_DARK_IRON_INGOTS = 44512, SPELL_TAXI_EXPLORERS_LEAGUE = 44280, GOSSIP_TEXTID_MCGOYVER = 12193 }; class npc_mcgoyver : public CreatureScript { public: npc_mcgoyver() : CreatureScript("npc_mcgoyver") { } bool OnGossipHello(Player* player, Creature* creature) { if (player->GetQuestStatus(QUEST_WE_CAN_REBUILD_IT) == QUEST_STATUS_INCOMPLETE) player->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_ITEM_MG_I, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF+1); player->SEND_GOSSIP_MENU(player->GetGossipTextId(creature), creature->GetGUID()); return true; } bool OnGossipSelect(Player* player, Creature* creature, uint32 /*sender*/, uint32 action) { player->PlayerTalkClass->ClearMenus(); switch (action) { case GOSSIP_ACTION_INFO_DEF+1: player->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_ITEM_MG_II, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF+2); player->SEND_GOSSIP_MENU(GOSSIP_TEXTID_MCGOYVER, creature->GetGUID()); player->CastSpell(player, SPELL_CREATURE_DARK_IRON_INGOTS, true); break; case GOSSIP_ACTION_INFO_DEF+2: player->CastSpell(player, SPELL_TAXI_EXPLORERS_LEAGUE, true); player->CLOSE_GOSSIP_MENU(); break; } return true; } }; /*###### ## npc_daegarn ######*/ enum eDaegarnn { QUEST_DEFEAT_AT_RING = 11300, NPC_FIRJUS = 24213, NPC_JLARBORN = 24215, NPC_YOROS = 24214, NPC_OLUF = 23931, NPC_PRISONER_1 = 24253, // looks the same but has different abilities NPC_PRISONER_2 = 24254, NPC_PRISONER_3 = 24255, }; static float afSummon[] = {838.81f, -4678.06f, -94.182f}; static float afCenter[] = {801.88f, -4721.87f, -96.143f}; class npc_daegarn : public CreatureScript { public: npc_daegarn() : CreatureScript("npc_daegarn") { } bool OnQuestAccept(Player* player, Creature* creature, const Quest* quest) { if (quest->GetQuestId() == QUEST_DEFEAT_AT_RING) { if (npc_daegarnAI* pDaegarnAI = CAST_AI(npc_daegarn::npc_daegarnAI, creature->AI())) pDaegarnAI->StartEvent(player->GetGUID()); } return true; } // TODO: make prisoners help (unclear if summoned or using npc's from surrounding cages (summon inside small cages?)) struct npc_daegarnAI : public ScriptedAI { npc_daegarnAI(Creature* creature) : ScriptedAI(creature) { } bool bEventInProgress; uint64 uiPlayerGUID; void Reset() { bEventInProgress = false; uiPlayerGUID = 0; } void StartEvent(uint64 uiGUID) { if (bEventInProgress) return; uiPlayerGUID = uiGUID; SummonGladiator(NPC_FIRJUS); } void JustSummoned(Creature* summon) { if (Player* player = me->GetPlayer(*me, uiPlayerGUID)) { if (player->isAlive()) { summon->SetWalk(false); summon->GetMotionMaster()->MovePoint(0, afCenter[0], afCenter[1], afCenter[2]); summon->AI()->AttackStart(player); return; } } Reset(); } void SummonGladiator(uint32 uiEntry) { me->SummonCreature(uiEntry, afSummon[0], afSummon[1], afSummon[2], 0.0f, TEMPSUMMON_TIMED_DESPAWN_OUT_OF_COMBAT, 30*IN_MILLISECONDS); } void SummonedCreatureDies(Creature* summoned, Unit* /*killer*/) { uint32 uiEntry = 0; // will eventually reset the event if something goes wrong switch (summoned->GetEntry()) { case NPC_FIRJUS: uiEntry = NPC_JLARBORN; break; case NPC_JLARBORN: uiEntry = NPC_YOROS; break; case NPC_YOROS: uiEntry = NPC_OLUF; break; case NPC_OLUF: Reset(); return; } SummonGladiator(uiEntry); } }; CreatureAI* GetAI(Creature* creature) const { return new npc_daegarnAI(creature); } }; void AddSC_howling_fjord() { new npc_apothecary_hanes; new npc_plaguehound_tracker; new npc_razael_and_lyana; new npc_mcgoyver; new npc_daegarn; }
gpl-2.0
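The arena event in npc_daegarn above is a four-stage chain driven entirely by SummonedCreatureDies(): each kill summons the next gladiator, and killing the last one resets the event. Reduced to its core (standalone sketch, not TrinityCore API):

/* maps a dead gladiator's entry to the next one to summon; 0 ends it */
static unsigned next_gladiator(unsigned dead_entry)
{
	switch (dead_entry) {
	case 24213: return 24215;	/* NPC_FIRJUS   -> NPC_JLARBORN */
	case 24215: return 24214;	/* NPC_JLARBORN -> NPC_YOROS    */
	case 24214: return 23931;	/* NPC_YOROS    -> NPC_OLUF     */
	default:    return 0;		/* NPC_OLUF fell: event complete */
	}
}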
sub77-bkp/T530XXU1BOD8
drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
13
32496
/* Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/io.h> #include <linux/module.h> #include <linux/jiffies.h> #include <linux/of.h> #include <linux/videodev2.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/iopoll.h> #include <media/msmb_isp.h> #include "msm_ispif.h" #include "msm.h" #include "msm_sd.h" #include "msm_camera_io_util.h" #ifdef CONFIG_MSM_ISPIF_V1 #include "msm_ispif_hwreg_v1.h" #else #include "msm_ispif_hwreg_v2.h" #endif #define V4L2_IDENT_ISPIF 50001 #define MSM_ISPIF_DRV_NAME "msm_ispif" #define ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY 0x00 #define ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY 0x01 #define ISPIF_INTF_CMD_DISABLE_IMMEDIATELY 0x02 #define ISPIF_TIMEOUT_SLEEP_US 1000 #if defined(CONFIG_MACH_VICTORLTE_CTC) || defined(CONFIG_MACH_AFYONLTE_TMO) \ || defined (CONFIG_MACH_AFYONLTE_MTR) || defined (CONFIG_MACH_AFYONLTE_CAN) #define ISPIF_TIMEOUT_ALL_US 1000000 #else #define ISPIF_TIMEOUT_ALL_US 500000 #endif #define CSID_VERSION_V30 0x30000000 #undef CDBG #ifdef CONFIG_MSMB_CAMERA_DEBUG #define CDBG(fmt, args...) pr_debug(fmt, ##args) #else #define CDBG(fmt, args...) do { } while (0) #endif static void msm_camera_io_dump_3(void __iomem *addr, int size) { char line_str[128], *p_str; int i; u32 *p = (u32 *) addr; u32 data; printk("%s: %p %d\n", __func__, addr, size); line_str[0] = '\0'; p_str = line_str; for (i = 0; i < size/4; i++) { if (i % 4 == 0) { snprintf(p_str, 12, "%08x: ", (u32) p); p_str += 10; } data = readl_relaxed(p++); snprintf(p_str, 12, "%08x ", data); p_str += 9; if ((i + 1) % 4 == 0) { printk("%s\n", line_str); line_str[0] = '\0'; p_str = line_str; } } if (line_str[0] != '\0') printk("%s\n", line_str); } static void msm_ispif_io_dump_reg(struct ispif_device *ispif) { if (!ispif->enb_dump_reg) return; msm_camera_io_dump(ispif->base, 0x250); } static void msm_ispif_io_dump_start_reg(struct ispif_device *ispif) { if (!ispif->enb_dump_reg) return; msm_camera_io_dump_3(ispif->base, 0x270); } static inline int msm_ispif_is_intf_valid(uint32_t csid_version, uint8_t intf_type) { return ((csid_version <= CSID_VERSION_V2 && intf_type != VFE0) || (intf_type >= VFE_MAX)) ? 
false : true; } static struct msm_cam_clk_info ispif_8974_ahb_clk_info[] = { {"ispif_ahb_clk", -1}, }; static struct msm_cam_clk_info ispif_8974_reset_clk_info[] = { {"csi0_src_clk", INIT_RATE}, {"csi0_clk", NO_SET_RATE}, {"csi0_pix_clk", NO_SET_RATE}, {"csi0_rdi_clk", NO_SET_RATE}, {"csi1_src_clk", INIT_RATE}, {"csi1_clk", NO_SET_RATE}, {"csi1_pix_clk", NO_SET_RATE}, {"csi1_rdi_clk", NO_SET_RATE}, {"csi2_src_clk", INIT_RATE}, {"csi2_clk", NO_SET_RATE}, {"csi2_pix_clk", NO_SET_RATE}, {"csi2_rdi_clk", NO_SET_RATE}, {"csi3_src_clk", INIT_RATE}, {"csi3_clk", NO_SET_RATE}, {"csi3_pix_clk", NO_SET_RATE}, {"csi3_rdi_clk", NO_SET_RATE}, {"vfe0_clk_src", INIT_RATE}, {"camss_vfe_vfe0_clk", NO_SET_RATE}, {"camss_csi_vfe0_clk", NO_SET_RATE}, {"vfe1_clk_src", INIT_RATE}, {"camss_vfe_vfe1_clk", NO_SET_RATE}, {"camss_csi_vfe1_clk", NO_SET_RATE}, }; static int msm_ispif_reset_hw(struct ispif_device *ispif) { int rc = 0; long timeout = 0; struct clk *reset_clk[ARRAY_SIZE(ispif_8974_reset_clk_info)]; if (ispif->csid_version < CSID_VERSION_V30) { /* currently reset is done only for 8974 */ return 0; } rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8974_reset_clk_info, reset_clk, ARRAY_SIZE(ispif_8974_reset_clk_info), 1); if (rc < 0) { pr_err("%s: cannot enable clock, error = %d", __func__, rc); } init_completion(&ispif->reset_complete[VFE0]); if (ispif->hw_num_isps > 1) init_completion(&ispif->reset_complete[VFE1]); /* initiate reset of ISPIF */ msm_camera_io_w(ISPIF_RST_CMD_MASK, ispif->base + ISPIF_RST_CMD_ADDR); if (ispif->hw_num_isps > 1) msm_camera_io_w(ISPIF_RST_CMD_1_MASK, ispif->base + ISPIF_RST_CMD_1_ADDR); timeout = wait_for_completion_interruptible_timeout( &ispif->reset_complete[VFE0], msecs_to_jiffies(500)); CDBG("%s: VFE0 done\n", __func__); if (timeout <= 0) { pr_err("%s: VFE0 reset wait timeout\n", __func__); return -ETIMEDOUT; } if (ispif->hw_num_isps > 1) { timeout = wait_for_completion_interruptible_timeout( &ispif->reset_complete[VFE1], msecs_to_jiffies(500)); CDBG("%s: VFE1 done\n", __func__); if (timeout <= 0) { pr_err("%s: VFE1 reset wait timeout\n", __func__); return -ETIMEDOUT; } } rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8974_reset_clk_info, reset_clk, ARRAY_SIZE(ispif_8974_reset_clk_info), 0); if (rc < 0) { pr_err("%s: cannot disable clock, error = %d", __func__, rc); } return rc; } static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable) { int rc = 0; if (ispif->csid_version < CSID_VERSION_V3) { /* Older ISPIF versiond don't need ahb clokc */ return 0; } rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_8974_ahb_clk_info, &ispif->ahb_clk, ARRAY_SIZE(ispif_8974_ahb_clk_info), enable); if (rc < 0) { pr_err("%s: cannot enable clock, error = %d", __func__, rc); } return rc; } static int msm_ispif_reset(struct ispif_device *ispif) { int rc = 0; int i; BUG_ON(!ispif); memset(ispif->sof_count, 0, sizeof(ispif->sof_count)); for (i = 0; i < ispif->vfe_info.num_vfe; i++) { msm_camera_io_w(1 << PIX0_LINE_BUF_EN_BIT, ispif->base + ISPIF_VFE_m_CTRL_0(i)); msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(i)); msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(i)); msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(i)); msm_camera_io_w(0xFFFFFFFF, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(i)); msm_camera_io_w(0xFFFFFFFF, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(i)); msm_camera_io_w(0xFFFFFFFF, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(i)); msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_INPUT_SEL(i)); msm_camera_io_w(0xAAAAAAAA, ispif->base + 
ISPIF_VFE_m_INTF_CMD_0(i)); msm_camera_io_w(0xAAAAAAAA, ispif->base + ISPIF_VFE_m_INTF_CMD_1(i)); msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 0)); msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 1)); msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 0)); msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 1)); msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 2)); msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(i, 0)); msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(i, 1)); } msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR); return rc; } static int msm_ispif_subdev_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { BUG_ON(!chip); chip->ident = V4L2_IDENT_ISPIF; chip->revision = 0; return 0; } static void msm_ispif_sel_csid_core(struct ispif_device *ispif, uint8_t intftype, uint8_t csid, uint8_t vfe_intf) { uint32_t data; BUG_ON(!ispif); if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) { pr_err("%s: invalid interface type\n", __func__); return; } data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_INPUT_SEL(vfe_intf)); switch (intftype) { case PIX0: data &= ~(BIT(1) | BIT(0)); data |= csid; break; case RDI0: data &= ~(BIT(5) | BIT(4)); data |= (csid << 4); break; case PIX1: data &= ~(BIT(9) | BIT(8)); data |= (csid << 8); break; case RDI1: data &= ~(BIT(13) | BIT(12)); data |= (csid << 12); break; case RDI2: data &= ~(BIT(21) | BIT(20)); data |= (csid << 20); break; } msm_camera_io_w_mb(data, ispif->base + ISPIF_VFE_m_INPUT_SEL(vfe_intf)); } static void msm_ispif_enable_crop(struct ispif_device *ispif, uint8_t intftype, uint8_t vfe_intf, uint16_t start_pixel, uint16_t end_pixel) { uint32_t data; BUG_ON(!ispif); if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) { pr_err("%s: invalid interface type\n", __func__); return; } data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf)); data |= (1 << (intftype + 7)); if (intftype == PIX0) data |= 1 << PIX0_LINE_BUF_EN_BIT; msm_camera_io_w(data, ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf)); if (intftype == PIX0) msm_camera_io_w_mb(start_pixel | (end_pixel << 16), ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(vfe_intf, 0)); else if (intftype == PIX1) msm_camera_io_w_mb(start_pixel | (end_pixel << 16), ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(vfe_intf, 1)); else { pr_err("%s: invalid intftype=%d\n", __func__, intftype); BUG_ON(1); return; } } static void msm_ispif_enable_intf_cids(struct ispif_device *ispif, uint8_t intftype, uint16_t cid_mask, uint8_t vfe_intf, uint8_t enable) { uint32_t intf_addr, data; BUG_ON(!ispif); if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) { pr_err("%s: invalid interface type\n", __func__); return; } switch (intftype) { case PIX0: intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 0); break; case RDI0: intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 0); break; case PIX1: intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 1); break; case RDI1: intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 1); break; case RDI2: intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 2); break; default: pr_err("%s: invalid intftype=%d\n", __func__, intftype); BUG_ON(1); return; } data = msm_camera_io_r(ispif->base + intf_addr); if (enable) data |= cid_mask; else data &= ~cid_mask; msm_camera_io_w_mb(data, ispif->base + intf_addr); } static int msm_ispif_validate_intf_status(struct ispif_device *ispif, 
uint8_t intftype, uint8_t vfe_intf) { int rc = 0; uint32_t data = 0; BUG_ON(!ispif); if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) { pr_err("%s: invalid interface type\n", __func__); return -EINVAL; } switch (intftype) { case PIX0: data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0)); break; case RDI0: data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0)); break; case PIX1: data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1)); break; case RDI1: data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1)); break; case RDI2: data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2)); break; } if ((data & 0xf) != 0xf) rc = -EBUSY; return rc; } static void msm_ispif_select_clk_mux(struct ispif_device *ispif, uint8_t intftype, uint8_t csid, uint8_t vfe_intf) { uint32_t data = 0; switch (intftype) { case PIX0: data = msm_camera_io_r(ispif->clk_mux_base); data &= ~(0xf << (vfe_intf * 8)); data |= (csid << (vfe_intf * 8)); msm_camera_io_w(data, ispif->clk_mux_base); break; case RDI0: data = msm_camera_io_r(ispif->clk_mux_base + ISPIF_RDI_CLK_MUX_SEL_ADDR); data &= ~(0xf << (vfe_intf * 12)); data |= (csid << (vfe_intf * 12)); msm_camera_io_w(data, ispif->clk_mux_base + ISPIF_RDI_CLK_MUX_SEL_ADDR); break; case PIX1: data = msm_camera_io_r(ispif->clk_mux_base); data &= ~(0xf0 << (vfe_intf * 8)); data |= (csid << (4 + (vfe_intf * 8))); msm_camera_io_w(data, ispif->clk_mux_base); break; case RDI1: data = msm_camera_io_r(ispif->clk_mux_base + ISPIF_RDI_CLK_MUX_SEL_ADDR); data &= ~(0xf << (4 + (vfe_intf * 12))); data |= (csid << (4 + (vfe_intf * 12))); msm_camera_io_w(data, ispif->clk_mux_base + ISPIF_RDI_CLK_MUX_SEL_ADDR); break; case RDI2: data = msm_camera_io_r(ispif->clk_mux_base + ISPIF_RDI_CLK_MUX_SEL_ADDR); data &= ~(0xf << (8 + (vfe_intf * 12))); data |= (csid << (8 + (vfe_intf * 12))); msm_camera_io_w(data, ispif->clk_mux_base + ISPIF_RDI_CLK_MUX_SEL_ADDR); break; } CDBG("%s intftype %d data %x\n", __func__, intftype, data); mb(); return; } static uint16_t msm_ispif_get_cids_mask_from_cfg( struct msm_ispif_params_entry *entry) { int i; uint16_t cids_mask = 0; BUG_ON(!entry); for (i = 0; i < entry->num_cids; i++) cids_mask |= (1 << entry->cids[i]); return cids_mask; } static int msm_ispif_config(struct ispif_device *ispif, struct msm_ispif_param_data *params) { int rc = 0, i = 0; uint16_t cid_mask; enum msm_ispif_intftype intftype; enum msm_ispif_vfe_intf vfe_intf; BUG_ON(!ispif); BUG_ON(!params); if (ispif->ispif_state != ISPIF_POWER_UP) { pr_err("%s: ispif invalid state %d\n", __func__, ispif->ispif_state); rc = -EPERM; return rc; } for (i = 0; i < params->num; i++) { vfe_intf = params->entries[i].vfe_intf; if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) { pr_err("%s: invalid interface type\n", __func__); return -EINVAL; } msm_camera_io_w(0x0, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe_intf)); msm_camera_io_w(0x0, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe_intf)); msm_camera_io_w_mb(0x0, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe_intf)); } for (i = 0; i < params->num; i++) { intftype = params->entries[i].intftype; vfe_intf = params->entries[i].vfe_intf; CDBG("%s intftype %x, vfe_intf %d, csid %d\n", __func__, intftype, vfe_intf, params->entries[i].csid); if ((intftype >= INTF_MAX) || (vfe_intf >= ispif->vfe_info.num_vfe) || (ispif->csid_version <= CSID_VERSION_V2 && (vfe_intf > VFE0))) { pr_err("%s: VFEID %d and CSID version %d mismatch\n", __func__, 
vfe_intf, ispif->csid_version); return -EINVAL; } if (ispif->csid_version >= CSID_VERSION_V3) msm_ispif_select_clk_mux(ispif, intftype, params->entries[i].csid, vfe_intf); rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf); if (rc) { pr_err("%s:validate_intf_status failed, rc = %d\n", __func__, rc); return rc; } msm_ispif_sel_csid_core(ispif, intftype, params->entries[i].csid, vfe_intf); cid_mask = msm_ispif_get_cids_mask_from_cfg( &params->entries[i]); msm_ispif_enable_intf_cids(ispif, intftype, cid_mask, vfe_intf, 1); if (params->entries[i].crop_enable) msm_ispif_enable_crop(ispif, intftype, vfe_intf, params->entries[i].crop_start_pixel, params->entries[i].crop_end_pixel); } for (vfe_intf = 0; vfe_intf < 2; vfe_intf++) { msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe_intf)); msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe_intf)); msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe_intf)); msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe_intf)); msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe_intf)); msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(vfe_intf)); } msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR); return rc; } static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint32_t cmd_bits, struct msm_ispif_param_data *params) { uint8_t vc; int i, k; enum msm_ispif_intftype intf_type; enum msm_ispif_cid cid; enum msm_ispif_vfe_intf vfe_intf; BUG_ON(!ispif); BUG_ON(!params); for (i = 0; i < params->num; i++) { vfe_intf = params->entries[i].vfe_intf; if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) { pr_err("%s: invalid interface type\n", __func__); return; } } for (i = 0; i < params->num; i++) { intf_type = params->entries[i].intftype; vfe_intf = params->entries[i].vfe_intf; for (k = 0; k < params->entries[i].num_cids; k++) { cid = params->entries[i].cids[k]; vc = cid / 4; if (intf_type == RDI2) { /* zero out two bits */ ispif->applied_intf_cmd[vfe_intf].intf_cmd1 &= ~(0x3 << (vc * 2 + 8)); /* set cmd bits */ ispif->applied_intf_cmd[vfe_intf].intf_cmd1 |= (cmd_bits << (vc * 2 + 8)); } else { /* zero 2 bits */ ispif->applied_intf_cmd[vfe_intf].intf_cmd &= ~(0x3 << (vc * 2 + intf_type * 8)); /* set cmd bits */ ispif->applied_intf_cmd[vfe_intf].intf_cmd |= (cmd_bits << (vc * 2 + intf_type * 8)); } } /* cmd for PIX0, PIX1, RDI0, RDI1 */ if (ispif->applied_intf_cmd[vfe_intf].intf_cmd != 0xFFFFFFFF) { msm_camera_io_w_mb( ispif->applied_intf_cmd[vfe_intf].intf_cmd, ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe_intf)); } /* cmd for RDI2 */ if (ispif->applied_intf_cmd[vfe_intf].intf_cmd1 != 0xFFFFFFFF) msm_camera_io_w_mb( ispif->applied_intf_cmd[vfe_intf].intf_cmd1, ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe_intf)); } } static int msm_ispif_stop_immediately(struct ispif_device *ispif, struct msm_ispif_param_data *params) { int i, rc = 0; uint16_t cid_mask = 0; BUG_ON(!ispif); BUG_ON(!params); if (ispif->ispif_state != ISPIF_POWER_UP) { pr_err("%s: ispif invalid state %d\n", __func__, ispif->ispif_state); rc = -EPERM; return rc; } msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_DISABLE_IMMEDIATELY, params); /* after stop the interface we need to unmask the CID enable bits */ for (i = 0; i < params->num; i++) { cid_mask = msm_ispif_get_cids_mask_from_cfg( &params->entries[i]); msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype, 
cid_mask, params->entries[i].vfe_intf, 0); } return rc; } static int msm_ispif_start_frame_boundary(struct ispif_device *ispif, struct msm_ispif_param_data *params) { int rc = 0; if (ispif->ispif_state != ISPIF_POWER_UP) { pr_err("%s: ispif invalid state %d\n", __func__, ispif->ispif_state); rc = -EPERM; return rc; } msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params); return rc; } static int msm_ispif_stop_frame_boundary(struct ispif_device *ispif, struct msm_ispif_param_data *params) { int i, rc = 0; uint16_t cid_mask = 0; uint32_t intf_addr; enum msm_ispif_vfe_intf vfe_intf; uint32_t stop_flag = 0; BUG_ON(!ispif); BUG_ON(!params); if (ispif->ispif_state != ISPIF_POWER_UP) { pr_err("%s: ispif invalid state %d\n", __func__, ispif->ispif_state); rc = -EPERM; return rc; } for (i = 0; i < params->num; i++) { if (!msm_ispif_is_intf_valid(ispif->csid_version, params->entries[i].vfe_intf)) { pr_err("%s: invalid interface type\n", __func__); rc = -EINVAL; goto end; } } msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY, params); for (i = 0; i < params->num; i++) { cid_mask = msm_ispif_get_cids_mask_from_cfg(&params->entries[i]); vfe_intf = params->entries[i].vfe_intf; switch (params->entries[i].intftype) { case PIX0: intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0); break; case RDI0: intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0); break; case PIX1: intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1); break; case RDI1: intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1); break; case RDI2: intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2); break; default: pr_err("%s: invalid intftype=%d\n", __func__, params->entries[i].intftype); rc = -EPERM; goto end; } #if 0 /* todo_bug_fix? very bad. use readl_poll_timeout */ while ((msm_camera_io_r(ispif->base + intf_addr) & 0xF) != 0xF) CDBG("%s: Wait for %d Idle\n", __func__, params->entries[i].intftype); #else rc = readl_poll_timeout(ispif->base + intf_addr, stop_flag, (stop_flag & 0xF) == 0xF, ISPIF_TIMEOUT_SLEEP_US, ISPIF_TIMEOUT_ALL_US); if (rc < 0) goto end; #endif /* disable CIDs in CID_MASK register */ msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype, cid_mask, vfe_intf, 0); } end: return rc; } static void ispif_process_irq(struct ispif_device *ispif, struct ispif_irq_status *out, enum msm_ispif_vfe_intf vfe_id) { BUG_ON(!ispif); BUG_ON(!out); if (out[vfe_id].ispifIrqStatus0 & ISPIF_IRQ_STATUS_PIX_SOF_MASK) { ispif->sof_count[vfe_id].sof_cnt[PIX0]++; } if (out[vfe_id].ispifIrqStatus0 & ISPIF_IRQ_STATUS_RDI0_SOF_MASK) { ispif->sof_count[vfe_id].sof_cnt[RDI0]++; } if (out[vfe_id].ispifIrqStatus1 & ISPIF_IRQ_STATUS_RDI1_SOF_MASK) { ispif->sof_count[vfe_id].sof_cnt[RDI1]++; } if (out[vfe_id].ispifIrqStatus2 & ISPIF_IRQ_STATUS_RDI2_SOF_MASK) { ispif->sof_count[vfe_id].sof_cnt[RDI2]++; } } static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out, void *data) { struct ispif_device *ispif = (struct ispif_device *)data; BUG_ON(!ispif); BUG_ON(!out); out[VFE0].ispifIrqStatus0 = msm_camera_io_r(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(VFE0)); msm_camera_io_w(out[VFE0].ispifIrqStatus0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE0)); out[VFE0].ispifIrqStatus1 = msm_camera_io_r(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(VFE0)); msm_camera_io_w(out[VFE0].ispifIrqStatus1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE0)); out[VFE0].ispifIrqStatus2 = msm_camera_io_r(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(VFE0)); msm_camera_io_w_mb(out[VFE0].ispifIrqStatus2, ispif->base + 
ISPIF_VFE_m_IRQ_CLEAR_2(VFE0)); if (ispif->vfe_info.num_vfe > 1) { out[VFE1].ispifIrqStatus0 = msm_camera_io_r(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(VFE1)); msm_camera_io_w(out[VFE1].ispifIrqStatus0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE1)); out[VFE1].ispifIrqStatus1 = msm_camera_io_r(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(VFE1)); msm_camera_io_w(out[VFE1].ispifIrqStatus1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE1)); out[VFE1].ispifIrqStatus2 = msm_camera_io_r(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(VFE1)); msm_camera_io_w_mb(out[VFE1].ispifIrqStatus2, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE1)); } msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR); if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) { if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ) complete(&ispif->reset_complete[VFE0]); if (out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ) pr_err("%s: VFE0 pix0 overflow.\n", __func__); if (out[VFE0].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ) pr_err("%s: VFE0 rdi0 overflow.\n", __func__); if (out[VFE0].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ) pr_err("%s: VFE0 rdi1 overflow.\n", __func__); if (out[VFE0].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ) pr_err("%s: VFE0 rdi2 overflow.\n", __func__); ispif_process_irq(ispif, out, VFE0); } if (ispif->vfe_info.num_vfe > 1) { if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ) complete(&ispif->reset_complete[VFE1]); if (out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ) pr_err("%s: VFE1 pix0 overflow.\n", __func__); if (out[VFE1].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ) pr_err("%s: VFE1 rdi0 overflow.\n", __func__); if (out[VFE1].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ) pr_err("%s: VFE1 rdi1 overflow.\n", __func__); if (out[VFE1].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ) pr_err("%s: VFE1 rdi2 overflow.\n", __func__); ispif_process_irq(ispif, out, VFE1); } } static irqreturn_t msm_io_ispif_irq(int irq_num, void *data) { struct ispif_irq_status irq[VFE_MAX]; msm_ispif_read_irq_status(irq, data); return IRQ_HANDLED; } static int msm_ispif_set_vfe_info(struct ispif_device *ispif, struct msm_ispif_vfe_info *vfe_info) { memcpy(&ispif->vfe_info, vfe_info, sizeof(struct msm_ispif_vfe_info)); return 0; } static int msm_ispif_init(struct ispif_device *ispif, uint32_t csid_version) { int rc = 0; BUG_ON(!ispif); if (ispif->ispif_state == ISPIF_POWER_UP) { pr_err("%s: ispif already initted state = %d\n", __func__, ispif->ispif_state); rc = -EPERM; return rc; } /* can we set to zero? 
*/ ispif->applied_intf_cmd[VFE0].intf_cmd = 0xFFFFFFFF; ispif->applied_intf_cmd[VFE0].intf_cmd1 = 0xFFFFFFFF; ispif->applied_intf_cmd[VFE1].intf_cmd = 0xFFFFFFFF; ispif->applied_intf_cmd[VFE1].intf_cmd1 = 0xFFFFFFFF; memset(ispif->sof_count, 0, sizeof(ispif->sof_count)); ispif->csid_version = csid_version; if (ispif->csid_version >= CSID_VERSION_V3) { if (!ispif->clk_mux_mem || !ispif->clk_mux_io) { pr_err("%s csi clk mux mem %p io %p\n", __func__, ispif->clk_mux_mem, ispif->clk_mux_io); rc = -ENOMEM; return rc; } ispif->clk_mux_base = ioremap(ispif->clk_mux_mem->start, resource_size(ispif->clk_mux_mem)); if (!ispif->clk_mux_base) { pr_err("%s: clk_mux_mem ioremap failed\n", __func__); rc = -ENOMEM; return rc; } } ispif->base = ioremap(ispif->mem->start, resource_size(ispif->mem)); if (!ispif->base) { rc = -ENOMEM; pr_err("%s: nomem\n", __func__); goto end; } rc = request_irq(ispif->irq->start, msm_io_ispif_irq, IRQF_TRIGGER_RISING, "ispif", ispif); if (rc) { pr_err("%s: request_irq error = %d\n", __func__, rc); goto error_irq; } rc = msm_ispif_clk_ahb_enable(ispif, 1); if (rc) { pr_err("%s: ahb_clk enable failed", __func__); goto error_ahb; } if(of_device_is_compatible(ispif->pdev->dev.of_node, "qcom,ispif-v3.0")) { /*Currently HW reset is implemented for 8974 only*/ msm_ispif_reset_hw(ispif); } rc = msm_ispif_reset(ispif); if (rc == 0) { ispif->ispif_state = ISPIF_POWER_UP; CDBG("%s: power up done\n", __func__); goto end; } error_ahb: free_irq(ispif->irq->start, ispif); error_irq: iounmap(ispif->base); end: return rc; } static void msm_ispif_release(struct ispif_device *ispif) { BUG_ON(!ispif); if (ispif->ispif_state != ISPIF_POWER_UP) { pr_err("%s: ispif invalid state %d\n", __func__, ispif->ispif_state); return; } /* make sure no streaming going on */ msm_ispif_reset(ispif); msm_ispif_clk_ahb_enable(ispif, 0); free_irq(ispif->irq->start, ispif); iounmap(ispif->base); iounmap(ispif->clk_mux_base); ispif->ispif_state = ISPIF_POWER_DOWN; } static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg) { long rc = 0; struct ispif_cfg_data *pcdata = (struct ispif_cfg_data *)arg; struct ispif_device *ispif = (struct ispif_device *)v4l2_get_subdevdata(sd); BUG_ON(!sd); BUG_ON(!pcdata); mutex_lock(&ispif->mutex); switch (pcdata->cfg_type) { case ISPIF_ENABLE_REG_DUMP: ispif->enb_dump_reg = pcdata->reg_dump; /* save dump config */ break; case ISPIF_INIT: rc = msm_ispif_init(ispif, pcdata->csid_version); msm_ispif_io_dump_reg(ispif); break; case ISPIF_CFG: rc = msm_ispif_config(ispif, &pcdata->params); msm_ispif_io_dump_reg(ispif); break; case ISPIF_START_FRAME_BOUNDARY: rc = msm_ispif_start_frame_boundary(ispif, &pcdata->params); msm_ispif_io_dump_start_reg(ispif); break; case ISPIF_STOP_FRAME_BOUNDARY: rc = msm_ispif_stop_frame_boundary(ispif, &pcdata->params); msm_ispif_io_dump_reg(ispif); break; case ISPIF_STOP_IMMEDIATELY: rc = msm_ispif_stop_immediately(ispif, &pcdata->params); msm_ispif_io_dump_reg(ispif); break; case ISPIF_RELEASE: msm_ispif_release(ispif); break; case ISPIF_SET_VFE_INFO: rc = msm_ispif_set_vfe_info(ispif, &pcdata->vfe_info); break; default: pr_err("%s: invalid cfg_type\n", __func__); rc = -EINVAL; break; } mutex_unlock(&ispif->mutex); return rc; } static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { switch (cmd) { case VIDIOC_MSM_ISPIF_CFG: return msm_ispif_cmd(sd, arg); case MSM_SD_SHUTDOWN: { struct ispif_device *ispif = (struct ispif_device *)v4l2_get_subdevdata(sd); msm_ispif_release(ispif); return 0; } default: pr_err("%s: invalid 
cmd 0x%x received\n", __func__, cmd); return -ENOIOCTLCMD; } } static int ispif_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct ispif_device *ispif = v4l2_get_subdevdata(sd); mutex_lock(&ispif->mutex); /* mem remap is done in init when the clock is on */ ispif->open_cnt++; mutex_unlock(&ispif->mutex); return 0; } static int ispif_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { int rc = 0; struct ispif_device *ispif = v4l2_get_subdevdata(sd); if (!ispif) { pr_err("%s: invalid input\n", __func__); return -EINVAL; } mutex_lock(&ispif->mutex); if (ispif->open_cnt == 0) { pr_err("%s: Invalid close\n", __func__); rc = -ENODEV; goto end; } ispif->open_cnt--; if (ispif->open_cnt == 0) msm_ispif_release(ispif); end: mutex_unlock(&ispif->mutex); return rc; } static struct v4l2_subdev_core_ops msm_ispif_subdev_core_ops = { .g_chip_ident = &msm_ispif_subdev_g_chip_ident, .ioctl = &msm_ispif_subdev_ioctl, }; static const struct v4l2_subdev_ops msm_ispif_subdev_ops = { .core = &msm_ispif_subdev_core_ops, }; static const struct v4l2_subdev_internal_ops msm_ispif_internal_ops = { .open = ispif_open_node, .close = ispif_close_node, }; static int __devinit ispif_probe(struct platform_device *pdev) { int rc; struct ispif_device *ispif; ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL); if (!ispif) { pr_err("%s: no enough memory\n", __func__); return -ENOMEM; } v4l2_subdev_init(&ispif->msm_sd.sd, &msm_ispif_subdev_ops); ispif->msm_sd.sd.internal_ops = &msm_ispif_internal_ops; ispif->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; snprintf(ispif->msm_sd.sd.name, ARRAY_SIZE(ispif->msm_sd.sd.name), MSM_ISPIF_DRV_NAME); v4l2_set_subdevdata(&ispif->msm_sd.sd, ispif); platform_set_drvdata(pdev, &ispif->msm_sd.sd); mutex_init(&ispif->mutex); media_entity_init(&ispif->msm_sd.sd.entity, 0, NULL, 0); ispif->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV; ispif->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_ISPIF; ispif->msm_sd.sd.entity.name = pdev->name; ispif->msm_sd.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x1; rc = msm_sd_register(&ispif->msm_sd); if (rc) { pr_err("%s: msm_sd_register error = %d\n", __func__, rc); goto error_sd_register; } if (pdev->dev.of_node) { of_property_read_u32((&pdev->dev)->of_node, "cell-index", &pdev->id); rc = of_property_read_u32((&pdev->dev)->of_node, "qcom,num-isps", &ispif->hw_num_isps); if (rc) /* backward compatibility */ ispif->hw_num_isps = 1; /* not an error condition */ rc = 0; } ispif->mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ispif"); if (!ispif->mem) { pr_err("%s: no mem resource?\n", __func__); rc = -ENODEV; goto error; } ispif->irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ispif"); if (!ispif->irq) { pr_err("%s: no irq resource?\n", __func__); rc = -ENODEV; goto error; } ispif->io = request_mem_region(ispif->mem->start, resource_size(ispif->mem), pdev->name); if (!ispif->io) { pr_err("%s: no valid mem region\n", __func__); rc = -EBUSY; goto error; } ispif->clk_mux_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csi_clk_mux"); if (ispif->clk_mux_mem) { ispif->clk_mux_io = request_mem_region( ispif->clk_mux_mem->start, resource_size(ispif->clk_mux_mem), ispif->clk_mux_mem->name); if (!ispif->clk_mux_io) pr_err("%s: no valid csi_mux region\n", __func__); } ispif->pdev = pdev; ispif->ispif_state = ISPIF_POWER_DOWN; ispif->open_cnt = 0; return 0; error: msm_sd_unregister(&ispif->msm_sd); error_sd_register: mutex_destroy(&ispif->mutex); kfree(ispif); return rc; } static const struct of_device_id 
msm_ispif_dt_match[] = {
	{.compatible = "qcom,ispif"},
	{},	/* sentinel: of_device_id tables must be zero-terminated */
};

MODULE_DEVICE_TABLE(of, msm_ispif_dt_match);

static struct platform_driver ispif_driver = {
	.probe = ispif_probe,
	.driver = {
		.name = MSM_ISPIF_DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = msm_ispif_dt_match,
	},
};

static int __init msm_ispif_init_module(void)
{
	return platform_driver_register(&ispif_driver);
}

static void __exit msm_ispif_exit_module(void)
{
	platform_driver_unregister(&ispif_driver);
}

module_init(msm_ispif_init_module);
module_exit(msm_ispif_exit_module);

MODULE_DESCRIPTION("MSM ISP Interface driver");
MODULE_LICENSE("GPL v2");
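/*
 * Worked example (illustrative only, not part of the original source):
 * the command-word packing in msm_ispif_intf_cmd() above gives each
 * interface one byte of INTF_CMD_0 and each virtual channel a 2-bit
 * field inside that byte.  Assuming the usual enum ordering in which
 * RDI0 has the value 1, enabling cid 5 (vc = 5 / 4 = 1) on RDI0 clears
 * and then sets the field at shift vc * 2 + intf_type * 8 =
 * 1 * 2 + 1 * 8 = 10, i.e.
 *
 *	intf_cmd &= ~(0x3 << 10);
 *	intf_cmd |=  (cmd_bits << 10);
 *
 * RDI2 is the exception: its field lives in INTF_CMD_1 at shift
 * vc * 2 + 8.
 */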
gpl-2.0
lizhekang/TCJDK
sources/openjdk8/jdk/src/windows/native/sun/nio/fs/WindowsNativeDispatcher.c
13
45234
/* * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ #ifndef _WIN32_WINNT #define _WIN32_WINNT 0x0501 #endif #include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <direct.h> #include <malloc.h> #include <io.h> #include <windows.h> #include <aclapi.h> #include <winioctl.h> #include <Sddl.h> #include "jni.h" #include "jni_util.h" #include "jlong.h" #include "sun_nio_fs_WindowsNativeDispatcher.h" /** * jfieldIDs */ static jfieldID findFirst_handle; static jfieldID findFirst_name; static jfieldID findFirst_attributes; static jfieldID findStream_handle; static jfieldID findStream_name; static jfieldID volumeInfo_fsName; static jfieldID volumeInfo_volName; static jfieldID volumeInfo_volSN; static jfieldID volumeInfo_flags; static jfieldID diskSpace_bytesAvailable; static jfieldID diskSpace_totalBytes; static jfieldID diskSpace_totalFree; static jfieldID account_domain; static jfieldID account_name; static jfieldID account_use; static jfieldID aclInfo_aceCount; static jfieldID completionStatus_error; static jfieldID completionStatus_bytesTransferred; static jfieldID completionStatus_completionKey; static jfieldID backupResult_bytesTransferred; static jfieldID backupResult_context; /** * Win32 APIs not available in Windows XP */ typedef HANDLE (WINAPI* FindFirstStream_Proc)(LPCWSTR, STREAM_INFO_LEVELS, LPVOID, DWORD); typedef BOOL (WINAPI* FindNextStream_Proc)(HANDLE, LPVOID); typedef BOOLEAN (WINAPI* CreateSymbolicLinkProc) (LPCWSTR, LPCWSTR, DWORD); typedef BOOL (WINAPI* GetFinalPathNameByHandleProc) (HANDLE, LPWSTR, DWORD, DWORD); static FindFirstStream_Proc FindFirstStream_func; static FindNextStream_Proc FindNextStream_func; static CreateSymbolicLinkProc CreateSymbolicLink_func; static GetFinalPathNameByHandleProc GetFinalPathNameByHandle_func; static void throwWindowsException(JNIEnv* env, DWORD lastError) { jobject x = JNU_NewObjectByName(env, "sun/nio/fs/WindowsException", "(I)V", lastError); if (x != NULL) { (*env)->Throw(env, x); } } /** * Initializes jfieldIDs and get address of Win32 calls that are located * at runtime. 
*/ JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_initIDs(JNIEnv* env, jclass this) { jclass clazz; HMODULE h; clazz = (*env)->FindClass(env, "sun/nio/fs/WindowsNativeDispatcher$FirstFile"); CHECK_NULL(clazz); findFirst_handle = (*env)->GetFieldID(env, clazz, "handle", "J"); CHECK_NULL(findFirst_handle); findFirst_name = (*env)->GetFieldID(env, clazz, "name", "Ljava/lang/String;"); CHECK_NULL(findFirst_name); findFirst_attributes = (*env)->GetFieldID(env, clazz, "attributes", "I"); CHECK_NULL(findFirst_attributes); clazz = (*env)->FindClass(env, "sun/nio/fs/WindowsNativeDispatcher$FirstStream"); CHECK_NULL(clazz); findStream_handle = (*env)->GetFieldID(env, clazz, "handle", "J"); CHECK_NULL(findStream_handle); findStream_name = (*env)->GetFieldID(env, clazz, "name", "Ljava/lang/String;"); CHECK_NULL(findStream_name); clazz = (*env)->FindClass(env, "sun/nio/fs/WindowsNativeDispatcher$VolumeInformation"); CHECK_NULL(clazz); volumeInfo_fsName = (*env)->GetFieldID(env, clazz, "fileSystemName", "Ljava/lang/String;"); CHECK_NULL(volumeInfo_fsName); volumeInfo_volName = (*env)->GetFieldID(env, clazz, "volumeName", "Ljava/lang/String;"); CHECK_NULL(volumeInfo_volName); volumeInfo_volSN = (*env)->GetFieldID(env, clazz, "volumeSerialNumber", "I"); CHECK_NULL(volumeInfo_volSN); volumeInfo_flags = (*env)->GetFieldID(env, clazz, "flags", "I"); CHECK_NULL(volumeInfo_flags); clazz = (*env)->FindClass(env, "sun/nio/fs/WindowsNativeDispatcher$DiskFreeSpace"); CHECK_NULL(clazz); diskSpace_bytesAvailable = (*env)->GetFieldID(env, clazz, "freeBytesAvailable", "J"); CHECK_NULL(diskSpace_bytesAvailable); diskSpace_totalBytes = (*env)->GetFieldID(env, clazz, "totalNumberOfBytes", "J"); CHECK_NULL(diskSpace_totalBytes); diskSpace_totalFree = (*env)->GetFieldID(env, clazz, "totalNumberOfFreeBytes", "J"); CHECK_NULL(diskSpace_totalFree); clazz = (*env)->FindClass(env, "sun/nio/fs/WindowsNativeDispatcher$Account"); CHECK_NULL(clazz); account_domain = (*env)->GetFieldID(env, clazz, "domain", "Ljava/lang/String;"); CHECK_NULL(account_domain); account_name = (*env)->GetFieldID(env, clazz, "name", "Ljava/lang/String;"); CHECK_NULL(account_name); account_use = (*env)->GetFieldID(env, clazz, "use", "I"); CHECK_NULL(account_use); clazz = (*env)->FindClass(env, "sun/nio/fs/WindowsNativeDispatcher$AclInformation"); CHECK_NULL(clazz); aclInfo_aceCount = (*env)->GetFieldID(env, clazz, "aceCount", "I"); CHECK_NULL(aclInfo_aceCount); clazz = (*env)->FindClass(env, "sun/nio/fs/WindowsNativeDispatcher$CompletionStatus"); CHECK_NULL(clazz); completionStatus_error = (*env)->GetFieldID(env, clazz, "error", "I"); CHECK_NULL(completionStatus_error); completionStatus_bytesTransferred = (*env)->GetFieldID(env, clazz, "bytesTransferred", "I"); CHECK_NULL(completionStatus_bytesTransferred); completionStatus_completionKey = (*env)->GetFieldID(env, clazz, "completionKey", "J"); CHECK_NULL(completionStatus_completionKey); clazz = (*env)->FindClass(env, "sun/nio/fs/WindowsNativeDispatcher$BackupResult"); CHECK_NULL(clazz); backupResult_bytesTransferred = (*env)->GetFieldID(env, clazz, "bytesTransferred", "I"); CHECK_NULL(backupResult_bytesTransferred); backupResult_context = (*env)->GetFieldID(env, clazz, "context", "J"); CHECK_NULL(backupResult_context); // get handle to kernel32 if (GetModuleHandleExW((GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT), (LPCWSTR)&CreateFileW, &h) != 0) { // requires Windows Server 2003 or newer FindFirstStream_func = (FindFirstStream_Proc)GetProcAddress(h, 
"FindFirstStreamW"); FindNextStream_func = (FindNextStream_Proc)GetProcAddress(h, "FindNextStreamW"); // requires Windows Vista or newer CreateSymbolicLink_func = (CreateSymbolicLinkProc)GetProcAddress(h, "CreateSymbolicLinkW"); GetFinalPathNameByHandle_func = (GetFinalPathNameByHandleProc)GetProcAddress(h, "GetFinalPathNameByHandleW"); } } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_CreateEvent(JNIEnv* env, jclass this, jboolean bManualReset, jboolean bInitialState) { HANDLE hEvent = CreateEventW(NULL, bManualReset, bInitialState, NULL); if (hEvent == NULL) { throwWindowsException(env, GetLastError()); } return ptr_to_jlong(hEvent); } JNIEXPORT jstring JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_FormatMessage(JNIEnv* env, jclass this, jint errorCode) { WCHAR message[255]; DWORD len = FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM, NULL, (DWORD)errorCode, 0, &message[0], 255, NULL); if (len == 0) { return NULL; } else { return (*env)->NewString(env, (const jchar *)message, (jsize)wcslen(message)); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_LocalFree(JNIEnv* env, jclass this, jlong address) { HLOCAL hMem = (HLOCAL)jlong_to_ptr(address); LocalFree(hMem); } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_CreateFile0(JNIEnv* env, jclass this, jlong address, jint dwDesiredAccess, jint dwShareMode, jlong sdAddress, jint dwCreationDisposition, jint dwFlagsAndAttributes) { HANDLE handle; LPCWSTR lpFileName = jlong_to_ptr(address); SECURITY_ATTRIBUTES securityAttributes; LPSECURITY_ATTRIBUTES lpSecurityAttributes; PSECURITY_DESCRIPTOR lpSecurityDescriptor = jlong_to_ptr(sdAddress); if (lpSecurityDescriptor == NULL) { lpSecurityAttributes = NULL; } else { securityAttributes.nLength = sizeof(SECURITY_ATTRIBUTES); securityAttributes.lpSecurityDescriptor = lpSecurityDescriptor; securityAttributes.bInheritHandle = FALSE; lpSecurityAttributes = &securityAttributes; } handle = CreateFileW(lpFileName, (DWORD)dwDesiredAccess, (DWORD)dwShareMode, lpSecurityAttributes, (DWORD)dwCreationDisposition, (DWORD)dwFlagsAndAttributes, NULL); if (handle == INVALID_HANDLE_VALUE) { throwWindowsException(env, GetLastError()); } return ptr_to_jlong(handle); } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_DeviceIoControlSetSparse(JNIEnv* env, jclass this, jlong handle) { DWORD bytesReturned; HANDLE h = (HANDLE)jlong_to_ptr(handle); if (DeviceIoControl(h, FSCTL_SET_SPARSE, NULL, 0, NULL, 0, &bytesReturned, NULL) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_DeviceIoControlGetReparsePoint(JNIEnv* env, jclass this, jlong handle, jlong bufferAddress, jint bufferSize) { DWORD bytesReturned; HANDLE h = (HANDLE)jlong_to_ptr(handle); LPVOID outBuffer = (LPVOID)jlong_to_ptr(bufferAddress); if (DeviceIoControl(h, FSCTL_GET_REPARSE_POINT, NULL, 0, outBuffer, (DWORD)bufferSize, &bytesReturned, NULL) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_DeleteFile0(JNIEnv* env, jclass this, jlong address) { LPCWSTR lpFileName = jlong_to_ptr(address); if (DeleteFileW(lpFileName) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_CreateDirectory0(JNIEnv* env, jclass this, jlong address, jlong sdAddress) { LPCWSTR lpFileName = jlong_to_ptr(address); SECURITY_ATTRIBUTES securityAttributes; LPSECURITY_ATTRIBUTES lpSecurityAttributes; PSECURITY_DESCRIPTOR 
lpSecurityDescriptor = jlong_to_ptr(sdAddress); if (lpSecurityDescriptor == NULL) { lpSecurityAttributes = NULL; } else { securityAttributes.nLength = sizeof(SECURITY_ATTRIBUTES); securityAttributes.lpSecurityDescriptor = lpSecurityDescriptor; securityAttributes.bInheritHandle = FALSE; lpSecurityAttributes = &securityAttributes; } if (CreateDirectoryW(lpFileName, lpSecurityAttributes) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_RemoveDirectory0(JNIEnv* env, jclass this, jlong address) { LPCWSTR lpFileName = jlong_to_ptr(address); if (RemoveDirectoryW(lpFileName) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_CloseHandle(JNIEnv* env, jclass this, jlong handle) { HANDLE h = (HANDLE)jlong_to_ptr(handle); CloseHandle(h); } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_FindFirstFile0(JNIEnv* env, jclass this, jlong address, jobject obj) { WIN32_FIND_DATAW data; LPCWSTR lpFileName = jlong_to_ptr(address); HANDLE handle = FindFirstFileW(lpFileName, &data); if (handle != INVALID_HANDLE_VALUE) { jstring name = (*env)->NewString(env, data.cFileName, (jsize)wcslen(data.cFileName)); if (name == NULL) return; (*env)->SetLongField(env, obj, findFirst_handle, ptr_to_jlong(handle)); (*env)->SetObjectField(env, obj, findFirst_name, name); (*env)->SetIntField(env, obj, findFirst_attributes, data.dwFileAttributes); } else { throwWindowsException(env, GetLastError()); } } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_FindFirstFile1(JNIEnv* env, jclass this, jlong pathAddress, jlong dataAddress) { LPCWSTR lpFileName = jlong_to_ptr(pathAddress); WIN32_FIND_DATAW* data = (WIN32_FIND_DATAW*)jlong_to_ptr(dataAddress); HANDLE handle = FindFirstFileW(lpFileName, data); if (handle == INVALID_HANDLE_VALUE) { throwWindowsException(env, GetLastError()); } return ptr_to_jlong(handle); } JNIEXPORT jstring JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_FindNextFile(JNIEnv* env, jclass this, jlong handle, jlong dataAddress) { HANDLE h = (HANDLE)jlong_to_ptr(handle); WIN32_FIND_DATAW* data = (WIN32_FIND_DATAW*)jlong_to_ptr(dataAddress); if (FindNextFileW(h, data) != 0) { return (*env)->NewString(env, data->cFileName, (jsize)wcslen(data->cFileName)); } else { if (GetLastError() != ERROR_NO_MORE_FILES) throwWindowsException(env, GetLastError()); return NULL; } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_FindFirstStream0(JNIEnv* env, jclass this, jlong address, jobject obj) { WIN32_FIND_STREAM_DATA data; LPCWSTR lpFileName = jlong_to_ptr(address); HANDLE handle; if (FindFirstStream_func == NULL) { JNU_ThrowInternalError(env, "Should not get here"); return; } handle = (*FindFirstStream_func)(lpFileName, FindStreamInfoStandard, &data, 0); if (handle != INVALID_HANDLE_VALUE) { jstring name = (*env)->NewString(env, data.cStreamName, (jsize)wcslen(data.cStreamName)); if (name == NULL) return; (*env)->SetLongField(env, obj, findStream_handle, ptr_to_jlong(handle)); (*env)->SetObjectField(env, obj, findStream_name, name); } else { if (GetLastError() == ERROR_HANDLE_EOF) { (*env)->SetLongField(env, obj, findStream_handle, ptr_to_jlong(handle)); } else { throwWindowsException(env, GetLastError()); } } } JNIEXPORT jstring JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_FindNextStream(JNIEnv* env, jclass this, jlong handle) { WIN32_FIND_STREAM_DATA data; HANDLE h = (HANDLE)jlong_to_ptr(handle); if (FindNextStream_func == NULL) { 
JNU_ThrowInternalError(env, "Should not get here"); return NULL; } if ((*FindNextStream_func)(h, &data) != 0) { return (*env)->NewString(env, data.cStreamName, (jsize)wcslen(data.cStreamName)); } else { if (GetLastError() != ERROR_HANDLE_EOF) throwWindowsException(env, GetLastError()); return NULL; } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_FindClose(JNIEnv* env, jclass this, jlong handle) { HANDLE h = (HANDLE)jlong_to_ptr(handle); if (FindClose(h) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetFileInformationByHandle(JNIEnv* env, jclass this, jlong handle, jlong address) { HANDLE h = (HANDLE)jlong_to_ptr(handle); BY_HANDLE_FILE_INFORMATION* info = (BY_HANDLE_FILE_INFORMATION*)jlong_to_ptr(address); if (GetFileInformationByHandle(h, info) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_CopyFileEx0(JNIEnv* env, jclass this, jlong existingAddress, jlong newAddress, jint flags, jlong cancelAddress) { LPCWSTR lpExistingFileName = jlong_to_ptr(existingAddress); LPCWSTR lpNewFileName = jlong_to_ptr(newAddress); LPBOOL cancel = (LPBOOL)jlong_to_ptr(cancelAddress); if (CopyFileExW(lpExistingFileName, lpNewFileName, NULL, NULL, cancel, (DWORD)flags) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_MoveFileEx0(JNIEnv* env, jclass this, jlong existingAddress, jlong newAddress, jint flags) { LPCWSTR lpExistingFileName = jlong_to_ptr(existingAddress); LPCWSTR lpNewFileName = jlong_to_ptr(newAddress); if (MoveFileExW(lpExistingFileName, lpNewFileName, (DWORD)flags) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT jint JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetLogicalDrives(JNIEnv* env, jclass this) { DWORD res = GetLogicalDrives(); if (res == 0) { throwWindowsException(env, GetLastError()); } return (jint)res; } JNIEXPORT jint JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetFileAttributes0(JNIEnv* env, jclass this, jlong address) { LPCWSTR lpFileName = jlong_to_ptr(address); DWORD value = GetFileAttributesW(lpFileName); if (value == INVALID_FILE_ATTRIBUTES) { throwWindowsException(env, GetLastError()); } return (jint)value; } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_SetFileAttributes0(JNIEnv* env, jclass this, jlong address, jint value) { LPCWSTR lpFileName = jlong_to_ptr(address); if (SetFileAttributesW(lpFileName, (DWORD)value) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetFileAttributesEx0(JNIEnv* env, jclass this, jlong pathAddress, jlong dataAddress) { LPCWSTR lpFileName = jlong_to_ptr(pathAddress); WIN32_FILE_ATTRIBUTE_DATA* data = (WIN32_FILE_ATTRIBUTE_DATA*)jlong_to_ptr(dataAddress); BOOL res = GetFileAttributesExW(lpFileName, GetFileExInfoStandard, (LPVOID)data); if (res == 0) throwWindowsException(env, GetLastError()); } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_SetFileTime(JNIEnv* env, jclass this, jlong handle, jlong createTime, jlong lastAccessTime, jlong lastWriteTime) { HANDLE h = (HANDLE)jlong_to_ptr(handle); if (SetFileTime(h, (createTime == (jlong)-1) ? NULL : (CONST FILETIME *)&createTime, (lastAccessTime == (jlong)-1) ? NULL : (CONST FILETIME *)&lastAccessTime, (lastWriteTime == (jlong)-1) ? 
NULL : (CONST FILETIME *)&lastWriteTime) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_SetEndOfFile(JNIEnv* env, jclass this, jlong handle) { HANDLE h = (HANDLE)jlong_to_ptr(handle); if (SetEndOfFile(h) == 0) throwWindowsException(env, GetLastError()); } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetVolumeInformation0(JNIEnv* env, jclass this, jlong address, jobject obj) { WCHAR volumeName[MAX_PATH+1]; DWORD volumeSerialNumber; DWORD maxComponentLength; DWORD flags; WCHAR fileSystemName[MAX_PATH+1]; LPCWSTR lpFileName = jlong_to_ptr(address); jstring str; BOOL res = GetVolumeInformationW(lpFileName, &volumeName[0], MAX_PATH+1, &volumeSerialNumber, &maxComponentLength, &flags, &fileSystemName[0], MAX_PATH+1); if (res == 0) { throwWindowsException(env, GetLastError()); return; } str = (*env)->NewString(env, (const jchar *)fileSystemName, (jsize)wcslen(fileSystemName)); if (str == NULL) return; (*env)->SetObjectField(env, obj, volumeInfo_fsName, str); str = (*env)->NewString(env, (const jchar *)volumeName, (jsize)wcslen(volumeName)); if (str == NULL) return; (*env)->SetObjectField(env, obj, volumeInfo_volName, str); (*env)->SetIntField(env, obj, volumeInfo_volSN, (jint)volumeSerialNumber); (*env)->SetIntField(env, obj, volumeInfo_flags, (jint)flags); } JNIEXPORT jint JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetDriveType0(JNIEnv* env, jclass this, jlong address) { LPCWSTR lpRootPathName = jlong_to_ptr(address); return (jint)GetDriveTypeW(lpRootPathName); } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetDiskFreeSpaceEx0(JNIEnv* env, jclass this, jlong address, jobject obj) { ULARGE_INTEGER freeBytesAvailable; ULARGE_INTEGER totalNumberOfBytes; ULARGE_INTEGER totalNumberOfFreeBytes; LPCWSTR lpDirName = jlong_to_ptr(address); BOOL res = GetDiskFreeSpaceExW(lpDirName, &freeBytesAvailable, &totalNumberOfBytes, &totalNumberOfFreeBytes); if (res == 0) { throwWindowsException(env, GetLastError()); return; } (*env)->SetLongField(env, obj, diskSpace_bytesAvailable, long_to_jlong(freeBytesAvailable.QuadPart)); (*env)->SetLongField(env, obj, diskSpace_totalBytes, long_to_jlong(totalNumberOfBytes.QuadPart)); (*env)->SetLongField(env, obj, diskSpace_totalFree, long_to_jlong(totalNumberOfFreeBytes.QuadPart)); } JNIEXPORT jstring JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetVolumePathName0(JNIEnv* env, jclass this, jlong address) { WCHAR volumeName[MAX_PATH+1]; LPCWSTR lpFileName = jlong_to_ptr(address); BOOL res = GetVolumePathNameW(lpFileName, &volumeName[0], MAX_PATH+1); if (res == 0) { throwWindowsException(env, GetLastError()); return NULL; } else { return (*env)->NewString(env, (const jchar *)volumeName, (jsize)wcslen(volumeName)); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_InitializeSecurityDescriptor(JNIEnv* env, jclass this, jlong address) { PSECURITY_DESCRIPTOR pSecurityDescriptor = (PSECURITY_DESCRIPTOR)jlong_to_ptr(address); if (InitializeSecurityDescriptor(pSecurityDescriptor, SECURITY_DESCRIPTOR_REVISION) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_InitializeAcl(JNIEnv* env, jclass this, jlong address, jint size) { PACL pAcl = (PACL)jlong_to_ptr(address); if (InitializeAcl(pAcl, (DWORD)size, ACL_REVISION) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_SetFileSecurity0(JNIEnv* env, jclass this, jlong 
pathAddress, jint requestedInformation, jlong descAddress) { LPCWSTR lpFileName = jlong_to_ptr(pathAddress); PSECURITY_DESCRIPTOR pSecurityDescriptor = jlong_to_ptr(descAddress); DWORD lengthNeeded = 0; BOOL res = SetFileSecurityW(lpFileName, (SECURITY_INFORMATION)requestedInformation, pSecurityDescriptor); if (res == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT jint JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetFileSecurity0(JNIEnv* env, jclass this, jlong pathAddress, jint requestedInformation, jlong descAddress, jint nLength) { LPCWSTR lpFileName = jlong_to_ptr(pathAddress); PSECURITY_DESCRIPTOR pSecurityDescriptor = jlong_to_ptr(descAddress); DWORD lengthNeeded = 0; BOOL res = GetFileSecurityW(lpFileName, (SECURITY_INFORMATION)requestedInformation, pSecurityDescriptor, (DWORD)nLength, &lengthNeeded); if (res == 0) { if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) { return (jint)lengthNeeded; } else { throwWindowsException(env, GetLastError()); return 0; } } else { return (jint)nLength; } } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetSecurityDescriptorOwner(JNIEnv* env, jclass this, jlong address) { PSECURITY_DESCRIPTOR pSecurityDescriptor = jlong_to_ptr(address); PSID pOwner; BOOL bOwnerDefaulted; if (GetSecurityDescriptorOwner(pSecurityDescriptor, &pOwner, &bOwnerDefaulted) == 0) { throwWindowsException(env, GetLastError()); } return ptr_to_jlong(pOwner); } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_SetSecurityDescriptorOwner(JNIEnv* env, jclass this, jlong descAddress, jlong ownerAddress) { PSECURITY_DESCRIPTOR pSecurityDescriptor = jlong_to_ptr(descAddress); PSID pOwner = jlong_to_ptr(ownerAddress); if (SetSecurityDescriptorOwner(pSecurityDescriptor, pOwner, FALSE) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetSecurityDescriptorDacl(JNIEnv* env, jclass this, jlong address) { PSECURITY_DESCRIPTOR pSecurityDescriptor = jlong_to_ptr(address); BOOL bDaclPresent; PACL pDacl; BOOL bDaclDefaulted; if (GetSecurityDescriptorDacl(pSecurityDescriptor, &bDaclPresent, &pDacl, &bDaclDefaulted) == 0) { throwWindowsException(env, GetLastError()); return (jlong)0; } else { return (bDaclPresent) ? 
ptr_to_jlong(pDacl) : (jlong)0; } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_SetSecurityDescriptorDacl(JNIEnv* env, jclass this, jlong descAddress, jlong aclAddress) { PSECURITY_DESCRIPTOR pSecurityDescriptor = (PSECURITY_DESCRIPTOR)jlong_to_ptr(descAddress); PACL pAcl = (PACL)jlong_to_ptr(aclAddress); if (SetSecurityDescriptorDacl(pSecurityDescriptor, TRUE, pAcl, FALSE) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetAclInformation0(JNIEnv* env, jclass this, jlong address, jobject obj) { PACL pAcl = (PACL)jlong_to_ptr(address); ACL_SIZE_INFORMATION acl_size_info; if (GetAclInformation(pAcl, (void *) &acl_size_info, sizeof(acl_size_info), AclSizeInformation) == 0) { throwWindowsException(env, GetLastError()); } else { (*env)->SetIntField(env, obj, aclInfo_aceCount, (jint)acl_size_info.AceCount); } } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetAce(JNIEnv* env, jclass this, jlong address, jint aceIndex) { PACL pAcl = (PACL)jlong_to_ptr(address); LPVOID pAce; if (GetAce(pAcl, (DWORD)aceIndex, &pAce) == 0) { throwWindowsException(env, GetLastError()); return (jlong)0; } else { return ptr_to_jlong(pAce); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_AddAccessAllowedAceEx(JNIEnv* env, jclass this, jlong aclAddress, jint flags, jint mask, jlong sidAddress) { PACL pAcl = (PACL)jlong_to_ptr(aclAddress); PSID pSid = (PSID)jlong_to_ptr(sidAddress); if (AddAccessAllowedAceEx(pAcl, ACL_REVISION, (DWORD)flags, (DWORD)mask, pSid) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_AddAccessDeniedAceEx(JNIEnv* env, jclass this, jlong aclAddress, jint flags, jint mask, jlong sidAddress) { PACL pAcl = (PACL)jlong_to_ptr(aclAddress); PSID pSid = (PSID)jlong_to_ptr(sidAddress); if (AddAccessDeniedAceEx(pAcl, ACL_REVISION, (DWORD)flags, (DWORD)mask, pSid) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_LookupAccountSid0(JNIEnv* env, jclass this, jlong address, jobject obj) { WCHAR domain[255]; WCHAR name[255]; DWORD domainLen = sizeof(domain); DWORD nameLen = sizeof(name); SID_NAME_USE use; PSID sid = jlong_to_ptr(address); jstring s; if (LookupAccountSidW(NULL, sid, &name[0], &nameLen, &domain[0], &domainLen, &use) == 0) { throwWindowsException(env, GetLastError()); return; } s = (*env)->NewString(env, (const jchar *)domain, (jsize)wcslen(domain)); if (s == NULL) return; (*env)->SetObjectField(env, obj, account_domain, s); s = (*env)->NewString(env, (const jchar *)name, (jsize)wcslen(name)); if (s == NULL) return; (*env)->SetObjectField(env, obj, account_name, s); (*env)->SetIntField(env, obj, account_use, (jint)use); } JNIEXPORT jint JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_LookupAccountName0(JNIEnv* env, jclass this, jlong nameAddress, jlong sidAddress, jint cbSid) { LPCWSTR accountName = jlong_to_ptr(nameAddress); PSID sid = jlong_to_ptr(sidAddress); WCHAR domain[255]; DWORD domainLen = sizeof(domain); SID_NAME_USE use; if (LookupAccountNameW(NULL, accountName, sid, (LPDWORD)&cbSid, &domain[0], &domainLen, &use) == 0) { if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) { throwWindowsException(env, GetLastError()); } } return cbSid; } JNIEXPORT jint JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetLengthSid(JNIEnv* env, jclass this, jlong address) { PSID sid = jlong_to_ptr(address); return (jint)GetLengthSid(sid); } JNIEXPORT 
jstring JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_ConvertSidToStringSid(JNIEnv* env, jclass this, jlong address) { PSID sid = jlong_to_ptr(address); LPWSTR string; if (ConvertSidToStringSidW(sid, &string) == 0) { throwWindowsException(env, GetLastError()); return NULL; } else { jstring s = (*env)->NewString(env, (const jchar *)string, (jsize)wcslen(string)); LocalFree(string); return s; } } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_ConvertStringSidToSid0(JNIEnv* env, jclass this, jlong address) { LPWSTR lpStringSid = jlong_to_ptr(address); PSID pSid; if (ConvertStringSidToSidW(lpStringSid, &pSid) == 0) throwWindowsException(env, GetLastError()); return ptr_to_jlong(pSid); } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetCurrentProcess(JNIEnv* env, jclass this) { HANDLE hProcess = GetCurrentProcess(); return ptr_to_jlong(hProcess); } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetCurrentThread(JNIEnv* env, jclass this) { HANDLE hThread = GetCurrentThread(); return ptr_to_jlong(hThread); } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_OpenProcessToken(JNIEnv* env, jclass this, jlong process, jint desiredAccess) { HANDLE hProcess = (HANDLE)jlong_to_ptr(process); HANDLE hToken; if (OpenProcessToken(hProcess, (DWORD)desiredAccess, &hToken) == 0) throwWindowsException(env, GetLastError()); return ptr_to_jlong(hToken); } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_OpenThreadToken(JNIEnv* env, jclass this, jlong thread, jint desiredAccess, jboolean openAsSelf) { HANDLE hThread = (HANDLE)jlong_to_ptr(thread); HANDLE hToken; BOOL bOpenAsSelf = (openAsSelf == JNI_TRUE) ? TRUE : FALSE; if (OpenThreadToken(hThread, (DWORD)desiredAccess, bOpenAsSelf, &hToken) == 0) { if (GetLastError() == ERROR_NO_TOKEN) return (jlong)0; throwWindowsException(env, GetLastError()); } return ptr_to_jlong(hToken); } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_DuplicateTokenEx(JNIEnv* env, jclass this, jlong token, jint desiredAccess) { HANDLE hToken = (HANDLE)jlong_to_ptr(token); HANDLE resultToken; BOOL res; res = DuplicateTokenEx(hToken, (DWORD)desiredAccess, NULL, SecurityImpersonation, TokenImpersonation, &resultToken); if (res == 0) throwWindowsException(env, GetLastError()); return ptr_to_jlong(resultToken); } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_SetThreadToken(JNIEnv* env, jclass this, jlong thread, jlong token) { HANDLE hThread = (HANDLE)jlong_to_ptr(thread); HANDLE hToken = (HANDLE)jlong_to_ptr(token); if (SetThreadToken(hThread, hToken) == 0) throwWindowsException(env, GetLastError()); } JNIEXPORT jint JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetTokenInformation(JNIEnv* env, jclass this, jlong token, jint tokenInfoClass, jlong tokenInfo, jint tokenInfoLength) { BOOL res; DWORD lengthNeeded; HANDLE hToken = (HANDLE)jlong_to_ptr(token); LPVOID result = (LPVOID)jlong_to_ptr(tokenInfo); res = GetTokenInformation(hToken, (TOKEN_INFORMATION_CLASS)tokenInfoClass, (LPVOID)result, tokenInfoLength, &lengthNeeded); if (res == 0) { if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) { return (jint)lengthNeeded; } else { throwWindowsException(env, GetLastError()); return 0; } } else { return tokenInfoLength; } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_AdjustTokenPrivileges(JNIEnv* env, jclass this, jlong token, jlong luid, jint attributes) { TOKEN_PRIVILEGES privs[1]; HANDLE hToken = (HANDLE)jlong_to_ptr(token); PLUID pLuid = 
(PLUID)jlong_to_ptr(luid); privs[0].PrivilegeCount = 1; privs[0].Privileges[0].Luid = *pLuid; privs[0].Privileges[0].Attributes = (DWORD)attributes; if (AdjustTokenPrivileges(hToken, FALSE, &privs[0], 1, NULL, NULL) == 0) throwWindowsException(env, GetLastError()); } JNIEXPORT jboolean JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_AccessCheck(JNIEnv* env, jclass this, jlong token, jlong securityInfo, jint accessMask, jint genericRead, jint genericWrite, jint genericExecute, jint genericAll) { HANDLE hImpersonatedToken = (HANDLE)jlong_to_ptr(token); PSECURITY_DESCRIPTOR security = (PSECURITY_DESCRIPTOR)jlong_to_ptr(securityInfo); DWORD checkAccessRights = (DWORD)accessMask; GENERIC_MAPPING mapping = { genericRead, genericWrite, genericExecute, genericAll}; PRIVILEGE_SET privileges = {0}; DWORD privilegesLength = sizeof(privileges); DWORD grantedAccess = 0; BOOL result = FALSE; /* checkAccessRights is in-out parameter */ MapGenericMask(&checkAccessRights, &mapping); if (AccessCheck(security, hImpersonatedToken, checkAccessRights, &mapping, &privileges, &privilegesLength, &grantedAccess, &result) == 0) throwWindowsException(env, GetLastError()); return (result == FALSE) ? JNI_FALSE : JNI_TRUE; } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_LookupPrivilegeValue0(JNIEnv* env, jclass this, jlong name) { LPCWSTR lpName = (LPCWSTR)jlong_to_ptr(name); PLUID pLuid = LocalAlloc(0, sizeof(LUID)); if (pLuid == NULL) { JNU_ThrowInternalError(env, "Unable to allocate LUID structure"); } else { if (LookupPrivilegeValueW(NULL, lpName, pLuid) == 0) throwWindowsException(env, GetLastError()); } return ptr_to_jlong(pLuid); } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_CreateSymbolicLink0(JNIEnv* env, jclass this, jlong linkAddress, jlong targetAddress, jint flags) { LPCWSTR link = jlong_to_ptr(linkAddress); LPCWSTR target = jlong_to_ptr(targetAddress); if (CreateSymbolicLink_func == NULL) { JNU_ThrowInternalError(env, "Should not get here"); return; } /* On Windows 64-bit this appears to succeed even when there is insufficient privileges */ if ((*CreateSymbolicLink_func)(link, target, (DWORD)flags) == 0) throwWindowsException(env, GetLastError()); } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_CreateHardLink0(JNIEnv* env, jclass this, jlong newFileAddress, jlong existingFileAddress) { LPCWSTR newFile = jlong_to_ptr(newFileAddress); LPCWSTR existingFile = jlong_to_ptr(existingFileAddress); if (CreateHardLinkW(newFile, existingFile, NULL) == 0) throwWindowsException(env, GetLastError()); } JNIEXPORT jstring JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetFullPathName0(JNIEnv *env, jclass clz, jlong pathAddress) { jstring rv = NULL; WCHAR *lpBuf = NULL; WCHAR buf[MAX_PATH]; DWORD len; LPCWSTR lpFileName = jlong_to_ptr(pathAddress); len = GetFullPathNameW(lpFileName, MAX_PATH, buf, NULL); if (len > 0) { if (len < MAX_PATH) { rv = (*env)->NewString(env, buf, len); } else { len += 1; /* return length does not include terminator */ lpBuf = (WCHAR*)malloc(len * sizeof(WCHAR)); if (lpBuf != NULL) { len = GetFullPathNameW(lpFileName, len, lpBuf, NULL); if (len > 0) { rv = (*env)->NewString(env, lpBuf, len); } else { JNU_ThrowInternalError(env, "GetFullPathNameW failed"); } free(lpBuf); } else { JNU_ThrowOutOfMemoryError(env, "native memory allocation failure"); } } } else { throwWindowsException(env, GetLastError()); } return rv; } JNIEXPORT jstring JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetFinalPathNameByHandle(JNIEnv* env, jclass this, jlong 
handle) { jstring rv = NULL; WCHAR *lpBuf = NULL; WCHAR path[MAX_PATH]; HANDLE h = (HANDLE)jlong_to_ptr(handle); DWORD len; if (GetFinalPathNameByHandle_func == NULL) { JNU_ThrowInternalError(env, "Should not get here"); return NULL; } len = (*GetFinalPathNameByHandle_func)(h, path, MAX_PATH, 0); if (len > 0) { if (len < MAX_PATH) { rv = (*env)->NewString(env, (const jchar *)path, (jsize)len); } else { len += 1; /* return length does not include terminator */ lpBuf = (WCHAR*)malloc(len * sizeof(WCHAR)); if (lpBuf != NULL) { len = (*GetFinalPathNameByHandle_func)(h, lpBuf, len, 0); if (len > 0) { rv = (*env)->NewString(env, (const jchar *)lpBuf, (jsize)len); } else { JNU_ThrowInternalError(env, "GetFinalPathNameByHandleW failed"); } free(lpBuf); } else { JNU_ThrowOutOfMemoryError(env, "native memory allocation failure"); } } } else { throwWindowsException(env, GetLastError()); } return rv; } JNIEXPORT jlong JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_CreateIoCompletionPort(JNIEnv* env, jclass this, jlong fileHandle, jlong existingPort, jlong completionKey) { HANDLE port = CreateIoCompletionPort((HANDLE)jlong_to_ptr(fileHandle), (HANDLE)jlong_to_ptr(existingPort), (ULONG_PTR)completionKey, 0); if (port == NULL) { throwWindowsException(env, GetLastError()); } return ptr_to_jlong(port); } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetQueuedCompletionStatus0(JNIEnv* env, jclass this, jlong completionPort, jobject obj) { DWORD bytesTransferred; ULONG_PTR completionKey; OVERLAPPED *lpOverlapped; BOOL res; res = GetQueuedCompletionStatus((HANDLE)jlong_to_ptr(completionPort), &bytesTransferred, &completionKey, &lpOverlapped, INFINITE); if (res == 0 && lpOverlapped == NULL) { throwWindowsException(env, GetLastError()); } else { DWORD ioResult = (res == 0) ? GetLastError() : 0; (*env)->SetIntField(env, obj, completionStatus_error, ioResult); (*env)->SetIntField(env, obj, completionStatus_bytesTransferred, (jint)bytesTransferred); (*env)->SetLongField(env, obj, completionStatus_completionKey, (jlong)completionKey); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_PostQueuedCompletionStatus(JNIEnv* env, jclass this, jlong completionPort, jlong completionKey) { BOOL res; res = PostQueuedCompletionStatus((HANDLE)jlong_to_ptr(completionPort), (DWORD)0, /* dwNumberOfBytesTransferred */ (ULONG_PTR)completionKey, NULL); /* lpOverlapped */ if (res == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_CancelIo(JNIEnv* env, jclass this, jlong hFile) { if (CancelIo((HANDLE)jlong_to_ptr(hFile)) == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT jint JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_GetOverlappedResult(JNIEnv *env, jclass this, jlong hFile, jlong lpOverlapped) { BOOL res; DWORD bytesTransferred = -1; res = GetOverlappedResult((HANDLE)jlong_to_ptr(hFile), (LPOVERLAPPED)jlong_to_ptr(lpOverlapped), &bytesTransferred, TRUE); if (res == 0) { throwWindowsException(env, GetLastError()); } return (jint)bytesTransferred; } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_ReadDirectoryChangesW(JNIEnv* env, jclass this, jlong hDirectory, jlong bufferAddress, jint bufferLength, jboolean watchSubTree, jint filter, jlong bytesReturnedAddress, jlong pOverlapped) { BOOL res; BOOL subtree = (watchSubTree == JNI_TRUE) ? 
TRUE : FALSE; LPOVERLAPPED ov = (LPOVERLAPPED)jlong_to_ptr(pOverlapped); res = ReadDirectoryChangesW((HANDLE)jlong_to_ptr(hDirectory), (LPVOID)jlong_to_ptr(bufferAddress), (DWORD)bufferLength, subtree, (DWORD)filter, (LPDWORD)jlong_to_ptr(bytesReturnedAddress), (LPOVERLAPPED)jlong_to_ptr(pOverlapped), NULL); if (res == 0) { throwWindowsException(env, GetLastError()); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_BackupRead0(JNIEnv* env, jclass this, jlong hFile, jlong bufferAddress, jint bufferSize, jboolean abort, jlong context, jobject obj) { BOOL res; DWORD bytesTransferred; BOOL a = (abort == JNI_TRUE) ? TRUE : FALSE; VOID* pContext = (VOID*)jlong_to_ptr(context); res = BackupRead((HANDLE)jlong_to_ptr(hFile), (LPBYTE)jlong_to_ptr(bufferAddress), (DWORD)bufferSize, &bytesTransferred, a, FALSE, &pContext); if (res == 0) { throwWindowsException(env, GetLastError()); } else { (*env)->SetIntField(env, obj, backupResult_bytesTransferred, bytesTransferred); (*env)->SetLongField(env, obj, backupResult_context, ptr_to_jlong(pContext)); } } JNIEXPORT void JNICALL Java_sun_nio_fs_WindowsNativeDispatcher_BackupSeek(JNIEnv* env, jclass this, jlong hFile, jlong bytesToSeek, jlong context) { BOOL res; jint lowBytesToSeek = (jint)bytesToSeek; jint highBytesToSeek = (jint)(bytesToSeek >> 32); DWORD lowBytesSeeked; DWORD highBytesSeeked; VOID* pContext = jlong_to_ptr(context); res = BackupSeek((HANDLE)jlong_to_ptr(hFile), (DWORD)lowBytesToSeek, (DWORD)highBytesToSeek, &lowBytesSeeked, &highBytesSeeked, &pContext); if (res == 0) { throwWindowsException(env, GetLastError()); } }
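/*
 * Illustrative sketch (not part of the original file): the two-call
 * buffer-sizing idiom that GetFileSecurity0() above exposes to Java,
 * written as a plain Win32 helper.  The helper name and the
 * malloc()-based error handling are hypothetical; the JNI code instead
 * reports failures by throwing sun.nio.fs.WindowsException.
 */
static PSECURITY_DESCRIPTOR
getFileSecurityByName(LPCWSTR path, SECURITY_INFORMATION info)
{
    DWORD lengthNeeded = 0;
    PSECURITY_DESCRIPTOR sd;

    /* First call with no buffer just to learn the required size. */
    if (GetFileSecurityW(path, info, NULL, 0, &lengthNeeded) == 0 &&
        GetLastError() != ERROR_INSUFFICIENT_BUFFER)
        return NULL;

    sd = (PSECURITY_DESCRIPTOR)malloc(lengthNeeded);
    if (sd == NULL)
        return NULL;

    /* Second call with a buffer of exactly the reported size. */
    if (GetFileSecurityW(path, info, sd, lengthNeeded, &lengthNeeded) == 0) {
        free(sd);
        return NULL;
    }
    return sd; /* caller releases with free() */
}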
gpl-2.0
cuviper/binutils-gdb
gdb/testsuite/gdb.threads/dlopen-libpthread.c
13
1148
/* This testcase is part of GDB, the GNU debugger.

   Copyright 2011-2016 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <dlfcn.h>
#include <stddef.h>
#include <assert.h>

static const char *volatile filename;

static void
notify (void)
{
  filename = NULL; /* notify-here */
}

int
main (void)
{
  void *h;
  void (*fp) (void (*) (void));

  assert (filename != NULL);

  h = dlopen (filename, RTLD_LAZY);
  assert (h != NULL);

  fp = dlsym (h, "f");
  assert (fp != NULL);

  fp (notify);

  return 0;
}
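/* Illustrative sketch of the companion shared library (a reconstructed
   guess, not part of this testcase source; the real one is built by the
   accompanying .exp file from a separate -lib.c): it links against
   libpthread so the dlopen above drags libpthread in, and its "f" runs
   the callback so GDB can stop at the notify-here line.  */

#include <pthread.h>

static void *
thread_func (void *arg)
{
  return arg;
}

void
f (void (*notify) (void))
{
  pthread_t thr;

  /* Actually use libpthread so the dependency is real.  */
  pthread_create (&thr, NULL, thread_func, NULL);
  pthread_join (thr, NULL);

  notify ();
}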
gpl-2.0
asis92/kernel-lp-lg-d802
drivers/usb/gadget/f_mbim.c
525
49476
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/device.h> #include <linux/usb/cdc.h> #include <linux/usb/composite.h> #include <linux/platform_device.h> #include <linux/spinlock.h> /* * This function is a "Mobile Broadband Interface Model" (MBIM) link. * MBIM is intended to be used with high-speed network attachments. * * Note that MBIM requires the use of "alternate settings" for its data * interface. This means that the set_alt() method has real work to do, * and also means that a get_alt() method is required. */ #define MBIM_BULK_BUFFER_SIZE 4096 #define MBIM_IOCTL_MAGIC 'o' #define MBIM_GET_NTB_SIZE _IOR(MBIM_IOCTL_MAGIC, 2, u32) #define MBIM_GET_DATAGRAM_COUNT _IOR(MBIM_IOCTL_MAGIC, 3, u16) #define NR_MBIM_PORTS 1 /* ID for Microsoft OS String */ #define MBIM_OS_STRING_ID 0xEE struct ctrl_pkt { void *buf; int len; struct list_head list; }; struct mbim_ep_descs { struct usb_endpoint_descriptor *in; struct usb_endpoint_descriptor *out; struct usb_endpoint_descriptor *notify; }; struct mbim_notify_port { struct usb_ep *notify; struct usb_request *notify_req; u8 notify_state; atomic_t notify_count; }; enum mbim_notify_state { MBIM_NOTIFY_NONE, MBIM_NOTIFY_CONNECT, MBIM_NOTIFY_SPEED, MBIM_NOTIFY_RESPONSE_AVAILABLE, }; struct f_mbim { struct usb_function function; struct usb_composite_dev *cdev; atomic_t online; atomic_t open_excl; atomic_t ioctl_excl; atomic_t read_excl; atomic_t write_excl; wait_queue_head_t read_wq; wait_queue_head_t write_wq; enum transport_type xport; u8 port_num; struct data_port bam_port; struct mbim_notify_port not_port; struct mbim_ep_descs fs; struct mbim_ep_descs hs; u8 ctrl_id, data_id; u8 data_alt_int; struct mbim_ndp_parser_opts *parser_opts; spinlock_t lock; struct list_head cpkt_req_q; struct list_head cpkt_resp_q; u32 ntb_input_size; u16 ntb_max_datagrams; atomic_t error; }; struct mbim_ntb_input_size { u32 ntb_input_size; u16 ntb_max_datagrams; u16 reserved; }; /* temporary variable used between mbim_open() and mbim_gadget_bind() */ static struct f_mbim *_mbim_dev; static unsigned int nr_mbim_ports; static struct mbim_ports { struct f_mbim *port; unsigned port_num; } mbim_ports[NR_MBIM_PORTS]; static inline struct f_mbim *func_to_mbim(struct usb_function *f) { return container_of(f, struct f_mbim, function); } /* peak (theoretical) bulk transfer rate in bits-per-second */ static inline unsigned mbim_bitrate(struct usb_gadget *g) { if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) return 13 * 512 * 8 * 1000 * 8; else return 19 * 64 * 1 * 1000 * 8; } /*-------------------------------------------------------------------------*/ #define MBIM_NTB_DEFAULT_IN_SIZE (0x4000) #define MBIM_NTB_OUT_SIZE (0x1000) #define MBIM_NDP_IN_DIVISOR (0x4) #define NTB_DEFAULT_IN_SIZE_IPA (0x2000) #define MBIM_NTB_OUT_SIZE_IPA (0x2000) #define MBIM_FORMATS_SUPPORTED USB_CDC_NCM_NTB16_SUPPORTED static struct usb_cdc_ncm_ntb_parameters mbim_ntb_parameters = { .wLength = sizeof mbim_ntb_parameters, .bmNtbFormatsSupported = 
cpu_to_le16(MBIM_FORMATS_SUPPORTED), .dwNtbInMaxSize = cpu_to_le32(MBIM_NTB_DEFAULT_IN_SIZE), .wNdpInDivisor = cpu_to_le16(MBIM_NDP_IN_DIVISOR), .wNdpInPayloadRemainder = cpu_to_le16(0), .wNdpInAlignment = cpu_to_le16(4), .dwNtbOutMaxSize = cpu_to_le32(MBIM_NTB_OUT_SIZE), .wNdpOutDivisor = cpu_to_le16(4), .wNdpOutPayloadRemainder = cpu_to_le16(0), .wNdpOutAlignment = cpu_to_le16(4), .wNtbOutMaxDatagrams = 0, }; /* * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one * packet, to simplify cancellation; and a big transfer interval, to * waste less bandwidth. */ #define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */ #define NCM_STATUS_BYTECOUNT 16 /* 8 byte header + data */ static struct usb_interface_assoc_descriptor mbim_iad_desc = { .bLength = sizeof mbim_iad_desc, .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, /* .bFirstInterface = DYNAMIC, */ .bInterfaceCount = 2, /* control + data */ .bFunctionClass = 2, .bFunctionSubClass = 0x0e, .bFunctionProtocol = 0, /* .iFunction = DYNAMIC */ }; /* interface descriptor: */ static struct usb_interface_descriptor mbim_control_intf = { .bLength = sizeof mbim_control_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bNumEndpoints = 1, .bInterfaceClass = 0x02, .bInterfaceSubClass = 0x0e, .bInterfaceProtocol = 0, /* .iInterface = DYNAMIC */ }; static struct usb_cdc_header_desc mbim_header_desc = { .bLength = sizeof mbim_header_desc, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_HEADER_TYPE, .bcdCDC = cpu_to_le16(0x0110), }; static struct usb_cdc_union_desc mbim_union_desc = { .bLength = sizeof(mbim_union_desc), .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_UNION_TYPE, /* .bMasterInterface0 = DYNAMIC */ /* .bSlaveInterface0 = DYNAMIC */ }; static struct usb_cdc_mbb_desc mbb_desc = { .bLength = sizeof mbb_desc, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_MBB_TYPE, .bcdMbbVersion = cpu_to_le16(0x0100), .wMaxControlMessage = cpu_to_le16(0x1000), .bNumberFilters = 0x20, .bMaxFilterSize = 0x80, .wMaxSegmentSize = cpu_to_le16(0xfe0), .bmNetworkCapabilities = 0x20, }; static struct usb_cdc_ext_mbb_desc ext_mbb_desc = { .bLength = sizeof ext_mbb_desc, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_EXT_MBB_TYPE, .bcdMbbExtendedVersion = cpu_to_le16(0x0100), .bMaxOutstandingCmdMsges = 64, .wMTU = 1500, }; /* the default data interface has no endpoints ... */ static struct usb_interface_descriptor mbim_data_nop_intf = { .bLength = sizeof mbim_data_nop_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bAlternateSetting = 0, .bNumEndpoints = 0, .bInterfaceClass = 0x0a, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0x02, /* .iInterface = DYNAMIC */ }; /* ... 
but the "real" data interface has two bulk endpoints */ static struct usb_interface_descriptor mbim_data_intf = { .bLength = sizeof mbim_data_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bAlternateSetting = 1, .bNumEndpoints = 2, .bInterfaceClass = 0x0a, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0x02, /* .iInterface = DYNAMIC */ }; /* full speed support: */ static struct usb_endpoint_descriptor fs_mbim_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = 4*cpu_to_le16(NCM_STATUS_BYTECOUNT), .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC, }; static struct usb_endpoint_descriptor fs_mbim_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor fs_mbim_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_descriptor_header *mbim_fs_function[] = { (struct usb_descriptor_header *) &mbim_iad_desc, /* MBIM control descriptors */ (struct usb_descriptor_header *) &mbim_control_intf, (struct usb_descriptor_header *) &mbim_header_desc, (struct usb_descriptor_header *) &mbim_union_desc, (struct usb_descriptor_header *) &mbb_desc, (struct usb_descriptor_header *) &ext_mbb_desc, (struct usb_descriptor_header *) &fs_mbim_notify_desc, /* data interface, altsettings 0 and 1 */ (struct usb_descriptor_header *) &mbim_data_nop_intf, (struct usb_descriptor_header *) &mbim_data_intf, (struct usb_descriptor_header *) &fs_mbim_in_desc, (struct usb_descriptor_header *) &fs_mbim_out_desc, NULL, }; /* high speed support: */ static struct usb_endpoint_descriptor hs_mbim_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = 4*cpu_to_le16(NCM_STATUS_BYTECOUNT), .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4, }; static struct usb_endpoint_descriptor hs_mbim_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_endpoint_descriptor hs_mbim_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_descriptor_header *mbim_hs_function[] = { (struct usb_descriptor_header *) &mbim_iad_desc, /* MBIM control descriptors */ (struct usb_descriptor_header *) &mbim_control_intf, (struct usb_descriptor_header *) &mbim_header_desc, (struct usb_descriptor_header *) &mbim_union_desc, (struct usb_descriptor_header *) &mbb_desc, (struct usb_descriptor_header *) &ext_mbb_desc, (struct usb_descriptor_header *) &hs_mbim_notify_desc, /* data interface, altsettings 0 and 1 */ (struct usb_descriptor_header *) &mbim_data_nop_intf, (struct usb_descriptor_header *) &mbim_data_intf, (struct usb_descriptor_header *) &hs_mbim_in_desc, (struct usb_descriptor_header *) &hs_mbim_out_desc, NULL, }; /* string descriptors: */ #define STRING_CTRL_IDX 0 #define STRING_DATA_IDX 1 static struct usb_string mbim_string_defs[] = { [STRING_CTRL_IDX].s = "MBIM Control", [STRING_DATA_IDX].s = "MBIM Data", { } /* end of list */ }; static 
struct usb_gadget_strings mbim_string_table = { .language = 0x0409, /* en-us */ .strings = mbim_string_defs, }; static struct usb_gadget_strings *mbim_strings[] = { &mbim_string_table, NULL, }; /* Microsoft OS Descriptors */ /* * We specify our own bMS_VendorCode byte which Windows will use * as the bRequest value in subsequent device get requests. */ #define MBIM_VENDOR_CODE 0xA5 /* Microsoft OS String */ static u8 mbim_os_string[] = { 18, /* sizeof(mtp_os_string) */ USB_DT_STRING, /* Signature field: "MSFT100" */ 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0, /* vendor code */ MBIM_VENDOR_CODE, /* padding */ 0 }; /* Microsoft Extended Configuration Descriptor Header Section */ struct mbim_ext_config_desc_header { __le32 dwLength; __u16 bcdVersion; __le16 wIndex; __u8 bCount; __u8 reserved[7]; }; /* Microsoft Extended Configuration Descriptor Function Section */ struct mbim_ext_config_desc_function { __u8 bFirstInterfaceNumber; __u8 bInterfaceCount; __u8 compatibleID[8]; __u8 subCompatibleID[8]; __u8 reserved[6]; }; /* Microsoft Extended Configuration Descriptor */ static struct { struct mbim_ext_config_desc_header header; struct mbim_ext_config_desc_function function; } mbim_ext_config_desc = { .header = { .dwLength = __constant_cpu_to_le32(sizeof mbim_ext_config_desc), .bcdVersion = __constant_cpu_to_le16(0x0100), .wIndex = __constant_cpu_to_le16(4), .bCount = 1, }, .function = { .bFirstInterfaceNumber = 0, .bInterfaceCount = 1, .compatibleID = { 'A', 'L', 'T', 'R', 'C', 'F', 'G' }, /* .subCompatibleID = DYNAMIC */ }, }; /* * Here are options for the Datagram Pointer table (NDP) parser. * There are 2 different formats: NDP16 and NDP32 in the spec (ch. 3), * in NDP16 offsets and sizes fields are 1 16bit word wide, * in NDP32 -- 2 16bit words wide. Also signatures are different. * To make the parser code the same, put the differences in the structure, * and switch pointers to the structures when the format is changed. 
*/ struct mbim_ndp_parser_opts { u32 nth_sign; u32 ndp_sign; unsigned nth_size; unsigned ndp_size; unsigned ndplen_align; /* sizes in u16 units */ unsigned dgram_item_len; /* index or length */ unsigned block_length; unsigned fp_index; unsigned reserved1; unsigned reserved2; unsigned next_fp_index; }; #define INIT_NDP16_OPTS { \ .nth_sign = USB_CDC_NCM_NTH16_SIGN, \ .ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN, \ .nth_size = sizeof(struct usb_cdc_ncm_nth16), \ .ndp_size = sizeof(struct usb_cdc_ncm_ndp16), \ .ndplen_align = 4, \ .dgram_item_len = 1, \ .block_length = 1, \ .fp_index = 1, \ .reserved1 = 0, \ .reserved2 = 0, \ .next_fp_index = 1, \ } #define INIT_NDP32_OPTS { \ .nth_sign = USB_CDC_NCM_NTH32_SIGN, \ .ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN, \ .nth_size = sizeof(struct usb_cdc_ncm_nth32), \ .ndp_size = sizeof(struct usb_cdc_ncm_ndp32), \ .ndplen_align = 8, \ .dgram_item_len = 2, \ .block_length = 2, \ .fp_index = 2, \ .reserved1 = 1, \ .reserved2 = 2, \ .next_fp_index = 2, \ } static struct mbim_ndp_parser_opts mbim_ndp16_opts = INIT_NDP16_OPTS; static struct mbim_ndp_parser_opts mbim_ndp32_opts = INIT_NDP32_OPTS; static inline int mbim_lock(atomic_t *excl) { if (atomic_inc_return(excl) == 1) { return 0; } else { atomic_dec(excl); return -EBUSY; } } static inline void mbim_unlock(atomic_t *excl) { atomic_dec(excl); } static struct ctrl_pkt *mbim_alloc_ctrl_pkt(unsigned len, gfp_t flags) { struct ctrl_pkt *pkt; pkt = kzalloc(sizeof(struct ctrl_pkt), flags); if (!pkt) return ERR_PTR(-ENOMEM); pkt->buf = kmalloc(len, flags); if (!pkt->buf) { kfree(pkt); return ERR_PTR(-ENOMEM); } pkt->len = len; return pkt; } static void mbim_free_ctrl_pkt(struct ctrl_pkt *pkt) { if (pkt) { kfree(pkt->buf); kfree(pkt); } } static struct usb_request *mbim_alloc_req(struct usb_ep *ep, int buffer_size) { struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); if (!req) return NULL; req->buf = kmalloc(buffer_size, GFP_KERNEL); if (!req->buf) { usb_ep_free_request(ep, req); return NULL; } req->length = buffer_size; return req; } void fmbim_free_req(struct usb_ep *ep, struct usb_request *req) { if (req) { kfree(req->buf); usb_ep_free_request(ep, req); } } static void fmbim_ctrl_response_available(struct f_mbim *dev) { struct usb_request *req = dev->not_port.notify_req; struct usb_cdc_notification *event = NULL; unsigned long flags; int ret; pr_debug("dev:%p portno#%d\n", dev, dev->port_num); spin_lock_irqsave(&dev->lock, flags); if (!atomic_read(&dev->online)) { pr_err("dev:%p is not online\n", dev); spin_unlock_irqrestore(&dev->lock, flags); return; } if (!req) { pr_err("dev:%p req is NULL\n", dev); spin_unlock_irqrestore(&dev->lock, flags); return; } if (!req->buf) { pr_err("dev:%p req->buf is NULL\n", dev); spin_unlock_irqrestore(&dev->lock, flags); return; } if (atomic_inc_return(&dev->not_port.notify_count) != 1) { pr_debug("delay ep_queue: notifications queue is busy[%d]", atomic_read(&dev->not_port.notify_count)); spin_unlock_irqrestore(&dev->lock, flags); return; } req->length = sizeof *event; event = req->buf; event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE; event->wValue = cpu_to_le16(0); event->wIndex = cpu_to_le16(dev->ctrl_id); event->wLength = cpu_to_le16(0); spin_unlock_irqrestore(&dev->lock, flags); ret = usb_ep_queue(dev->not_port.notify, req, GFP_ATOMIC); if (ret) { atomic_dec(&dev->not_port.notify_count); pr_err("ep enqueue error %d\n", ret); } pr_debug("Successful Exit"); } static int 
fmbim_send_cpkt_response(struct f_mbim *gr, struct ctrl_pkt *cpkt) { struct f_mbim *dev = gr; unsigned long flags; if (!gr || !cpkt) { pr_err("Invalid cpkt, dev:%p cpkt:%p\n", gr, cpkt); return -ENODEV; } pr_debug("dev:%p port_num#%d\n", dev, dev->port_num); if (!atomic_read(&dev->online)) { pr_err("dev:%p is not connected\n", dev); mbim_free_ctrl_pkt(cpkt); return 0; } if (dev->not_port.notify_state != MBIM_NOTIFY_RESPONSE_AVAILABLE) { pr_err("dev:%p state=%d, recover!!\n", dev, dev->not_port.notify_state); mbim_free_ctrl_pkt(cpkt); return 0; } spin_lock_irqsave(&dev->lock, flags); list_add_tail(&cpkt->list, &dev->cpkt_resp_q); spin_unlock_irqrestore(&dev->lock, flags); fmbim_ctrl_response_available(dev); return 0; } /* ---------------------------- BAM INTERFACE ----------------------------- */ static int mbim_bam_setup(int no_ports) { int ret; pr_info("no_ports:%d\n", no_ports); ret = bam_data_setup(no_ports); if (ret) { pr_err("bam_data_setup failed err: %d\n", ret); return ret; } pr_info("Initialized %d ports\n", no_ports); return 0; } int mbim_configure_params(void) { struct teth_aggr_params aggr_params; int ret = 0; aggr_params.dl.aggr_prot = TETH_AGGR_PROTOCOL_MBIM; aggr_params.dl.max_datagrams = mbim_ntb_parameters.wNtbOutMaxDatagrams; aggr_params.dl.max_transfer_size_byte = mbim_ntb_parameters.dwNtbInMaxSize; aggr_params.ul.aggr_prot = TETH_AGGR_PROTOCOL_MBIM; aggr_params.ul.max_datagrams = mbim_ntb_parameters.wNtbOutMaxDatagrams; aggr_params.ul.max_transfer_size_byte = mbim_ntb_parameters.dwNtbOutMaxSize; ret = teth_bridge_set_aggr_params(&aggr_params); if (ret) pr_err("%s: teth_bridge_set_aggr_params failed\n", __func__); return ret; } static int mbim_bam_connect(struct f_mbim *dev) { int ret; int src_connection_idx, dst_connection_idx; struct usb_gadget *gadget = dev->cdev->gadget; enum peer_bam bam_name = (dev->xport == USB_GADGET_XPORT_BAM2BAM_IPA) ? IPA_P_BAM : A2_P_BAM; pr_info("dev:%p portno:%d\n", dev, dev->port_num); src_connection_idx = usb_bam_get_connection_idx(gadget->name, bam_name, USB_TO_PEER_PERIPHERAL, dev->port_num); dst_connection_idx = usb_bam_get_connection_idx(gadget->name, bam_name, PEER_PERIPHERAL_TO_USB, dev->port_num); if (src_connection_idx < 0 || dst_connection_idx < 0) { pr_err("%s: usb_bam_get_connection_idx failed\n", __func__); return -EINVAL; } ret = bam_data_connect(&dev->bam_port, dev->port_num, dev->xport, src_connection_idx, dst_connection_idx, USB_FUNC_MBIM); if (ret) { pr_err("bam_data_connect failed: err:%d\n", ret); return ret; } pr_info("mbim bam connected\n"); return 0; } static int mbim_bam_disconnect(struct f_mbim *dev) { pr_info("dev:%p port:%d. 
Do nothing.\n", dev, dev->port_num); bam_data_disconnect(&dev->bam_port, dev->port_num); return 0; } /* -------------------------------------------------------------------------*/ static inline void mbim_reset_values(struct f_mbim *mbim) { mbim->parser_opts = &mbim_ndp16_opts; mbim->ntb_input_size = MBIM_NTB_DEFAULT_IN_SIZE; atomic_set(&mbim->online, 0); } static void mbim_reset_function_queue(struct f_mbim *dev) { struct ctrl_pkt *cpkt = NULL; pr_debug("Queue empty packet for QBI"); spin_lock(&dev->lock); cpkt = mbim_alloc_ctrl_pkt(0, GFP_ATOMIC); if (!cpkt) { pr_err("%s: Unable to allocate reset function pkt\n", __func__); spin_unlock(&dev->lock); return; } list_add_tail(&cpkt->list, &dev->cpkt_req_q); spin_unlock(&dev->lock); pr_debug("%s: Wake up read queue", __func__); wake_up(&dev->read_wq); } static void fmbim_reset_cmd_complete(struct usb_ep *ep, struct usb_request *req) { struct f_mbim *dev = req->context; mbim_reset_function_queue(dev); } static void mbim_clear_queues(struct f_mbim *mbim) { struct ctrl_pkt *cpkt = NULL; struct list_head *act, *tmp; spin_lock(&mbim->lock); list_for_each_safe(act, tmp, &mbim->cpkt_req_q) { cpkt = list_entry(act, struct ctrl_pkt, list); list_del(&cpkt->list); mbim_free_ctrl_pkt(cpkt); } list_for_each_safe(act, tmp, &mbim->cpkt_resp_q) { cpkt = list_entry(act, struct ctrl_pkt, list); list_del(&cpkt->list); mbim_free_ctrl_pkt(cpkt); } spin_unlock(&mbim->lock); } /* * Context: mbim->lock held */ static void mbim_do_notify(struct f_mbim *mbim) { struct usb_request *req = mbim->not_port.notify_req; struct usb_cdc_notification *event; int status; pr_debug("notify_state: %d", mbim->not_port.notify_state); if (!req) return; event = req->buf; switch (mbim->not_port.notify_state) { case MBIM_NOTIFY_NONE: if (atomic_read(&mbim->not_port.notify_count) > 0) pr_err("Pending notifications in MBIM_NOTIFY_NONE\n"); else pr_debug("No pending notifications\n"); return; case MBIM_NOTIFY_RESPONSE_AVAILABLE: pr_debug("Notification %02x sent\n", event->bNotificationType); if (atomic_read(&mbim->not_port.notify_count) <= 0) { pr_debug("notify_response_avaliable: done"); return; } spin_unlock(&mbim->lock); status = usb_ep_queue(mbim->not_port.notify, req, GFP_ATOMIC); spin_lock(&mbim->lock); if (status) { atomic_dec(&mbim->not_port.notify_count); pr_err("Queue notify request failed, err: %d", status); } return; } event->bmRequestType = 0xA1; event->wIndex = cpu_to_le16(mbim->ctrl_id); /* * In double buffering if there is a space in FIFO, * completion callback can be called right after the call, * so unlocking */ atomic_inc(&mbim->not_port.notify_count); pr_debug("queue request: notify_count = %d", atomic_read(&mbim->not_port.notify_count)); spin_unlock(&mbim->lock); status = usb_ep_queue(mbim->not_port.notify, req, GFP_ATOMIC); spin_lock(&mbim->lock); if (status) { atomic_dec(&mbim->not_port.notify_count); pr_err("usb_ep_queue failed, err: %d", status); } } static void mbim_notify_complete(struct usb_ep *ep, struct usb_request *req) { struct f_mbim *mbim = req->context; struct usb_cdc_notification *event = req->buf; pr_debug("dev:%p\n", mbim); spin_lock(&mbim->lock); switch (req->status) { case 0: atomic_dec(&mbim->not_port.notify_count); pr_debug("notify_count = %d", atomic_read(&mbim->not_port.notify_count)); break; case -ECONNRESET: case -ESHUTDOWN: /* connection gone */ mbim->not_port.notify_state = MBIM_NOTIFY_NONE; atomic_set(&mbim->not_port.notify_count, 0); pr_info("ESHUTDOWN/ECONNRESET, connection gone"); spin_unlock(&mbim->lock); mbim_clear_queues(mbim); 
mbim_reset_function_queue(mbim); spin_lock(&mbim->lock); break; default: pr_err("Unknown event %02x --> %d\n", event->bNotificationType, req->status); break; } mbim_do_notify(mbim); spin_unlock(&mbim->lock); pr_debug("dev:%p Exit\n", mbim); } static void mbim_ep0out_complete(struct usb_ep *ep, struct usb_request *req) { /* now for SET_NTB_INPUT_SIZE only */ unsigned in_size = 0; struct usb_function *f = req->context; struct f_mbim *mbim = func_to_mbim(f); struct mbim_ntb_input_size *ntb = NULL; pr_debug("dev:%p\n", mbim); req->context = NULL; if (req->status || req->actual != req->length) { pr_err("Bad control-OUT transfer\n"); goto invalid; } if (req->length == 4) { in_size = get_unaligned_le32(req->buf); if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE || in_size > le32_to_cpu(mbim_ntb_parameters.dwNtbInMaxSize)) { pr_err("Illegal INPUT SIZE (%d) from host\n", in_size); goto invalid; } } else if (req->length == 8) { ntb = (struct mbim_ntb_input_size *)req->buf; in_size = get_unaligned_le32(&(ntb->ntb_input_size)); if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE || in_size > le32_to_cpu(mbim_ntb_parameters.dwNtbInMaxSize)) { pr_err("Illegal INPUT SIZE (%d) from host\n", in_size); goto invalid; } mbim->ntb_max_datagrams = get_unaligned_le16(&(ntb->ntb_max_datagrams)); } else { pr_err("Illegal NTB length %d\n", in_size); goto invalid; } pr_debug("Set NTB INPUT SIZE %d\n", in_size); mbim->ntb_input_size = in_size; return; invalid: usb_ep_set_halt(ep); pr_err("dev:%p Failed\n", mbim); return; } static void fmbim_cmd_complete(struct usb_ep *ep, struct usb_request *req) { struct f_mbim *dev = req->context; struct ctrl_pkt *cpkt = NULL; int len = req->actual; if (!dev) { pr_err("mbim dev is null\n"); return; } if (req->status < 0) { pr_err("mbim command error %d\n", req->status); return; } pr_debug("dev:%p port#%d\n", dev, dev->port_num); cpkt = mbim_alloc_ctrl_pkt(len, GFP_ATOMIC); if (!cpkt) { pr_err("Unable to allocate ctrl pkt\n"); return; } pr_debug("Add to cpkt_req_q packet with len = %d\n", len); memcpy(cpkt->buf, req->buf, len); spin_lock(&dev->lock); list_add_tail(&cpkt->list, &dev->cpkt_req_q); spin_unlock(&dev->lock); /* wakeup read thread */ pr_debug("Wake up read queue"); wake_up(&dev->read_wq); return; } static int mbim_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct f_mbim *mbim = func_to_mbim(f); struct usb_composite_dev *cdev = mbim->cdev; struct usb_request *req = cdev->req; struct ctrl_pkt *cpkt = NULL; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); /* * composite driver infrastructure handles everything except * CDC class messages; interface activation uses set_alt(). 
*/ if (!atomic_read(&mbim->online)) { pr_info("usb cable is not connected\n"); return -ENOTCONN; } switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_RESET_FUNCTION: pr_debug("USB_CDC_RESET_FUNCTION"); value = 0; req->complete = fmbim_reset_cmd_complete; req->context = mbim; break; case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_SEND_ENCAPSULATED_COMMAND: pr_debug("USB_CDC_SEND_ENCAPSULATED_COMMAND"); if (w_length > req->length) { pr_debug("w_length > req->length: %d > %d", w_length, req->length); } value = w_length; req->complete = fmbim_cmd_complete; req->context = mbim; break; case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_GET_ENCAPSULATED_RESPONSE: pr_debug("USB_CDC_GET_ENCAPSULATED_RESPONSE"); if (w_value) { pr_err("w_length > 0: %d", w_length); break; } pr_debug("req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); spin_lock(&mbim->lock); if (list_empty(&mbim->cpkt_resp_q)) { pr_err("ctrl resp queue empty\n"); spin_unlock(&mbim->lock); break; } cpkt = list_first_entry(&mbim->cpkt_resp_q, struct ctrl_pkt, list); list_del(&cpkt->list); spin_unlock(&mbim->lock); value = min_t(unsigned, w_length, cpkt->len); memcpy(req->buf, cpkt->buf, value); mbim_free_ctrl_pkt(cpkt); pr_debug("copied encapsulated_response %d bytes", value); break; case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_GET_NTB_PARAMETERS: pr_debug("USB_CDC_GET_NTB_PARAMETERS"); if (w_length == 0 || w_value != 0 || w_index != mbim->ctrl_id) break; value = w_length > sizeof mbim_ntb_parameters ? sizeof mbim_ntb_parameters : w_length; memcpy(req->buf, &mbim_ntb_parameters, value); break; case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_GET_NTB_INPUT_SIZE: pr_debug("USB_CDC_GET_NTB_INPUT_SIZE"); if (w_length < 4 || w_value != 0 || w_index != mbim->ctrl_id) break; put_unaligned_le32(mbim->ntb_input_size, req->buf); value = 4; pr_debug("Reply to host INPUT SIZE %d\n", mbim->ntb_input_size); break; case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_SET_NTB_INPUT_SIZE: pr_debug("USB_CDC_SET_NTB_INPUT_SIZE"); if (w_length != 4 && w_length != 8) { pr_err("wrong NTB length %d", w_length); break; } if (w_value != 0 || w_index != mbim->ctrl_id) break; req->complete = mbim_ep0out_complete; req->length = w_length; req->context = f; value = req->length; break; case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_GET_NTB_FORMAT: { uint16_t format; pr_debug("USB_CDC_GET_NTB_FORMAT"); if (w_length < 2 || w_value != 0 || w_index != mbim->ctrl_id) break; format = (mbim->parser_opts == &mbim_ndp16_opts) ? 
0 : 1; put_unaligned_le16(format, req->buf); value = 2; pr_debug("NTB FORMAT: sending %d\n", format); break; } case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_SET_NTB_FORMAT: { pr_debug("USB_CDC_SET_NTB_FORMAT"); if (w_length != 0 || w_index != mbim->ctrl_id) break; switch (w_value) { case 0x0000: mbim->parser_opts = &mbim_ndp16_opts; pr_debug("NCM16 selected\n"); break; case 0x0001: mbim->parser_opts = &mbim_ndp32_opts; pr_debug("NCM32 selected\n"); break; default: break; } value = 0; break; } /* optional in mbim descriptor: */ /* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */ /* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */ default: pr_err("invalid control req: %02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); } /* respond with data transfer or status phase? */ if (value >= 0) { pr_debug("control request: %02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); req->zero = (value < w_length); req->length = value; value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); if (value < 0) { pr_err("queueing req failed: %02x.%02x, err %d\n", ctrl->bRequestType, ctrl->bRequest, value); } } else { pr_err("ctrl req err %d: %02x.%02x v%04x i%04x l%d\n", value, ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); } /* device either stalls (value < 0) or reports success */ return value; } /* * This function handles the Microsoft-specific OS descriptor control * requests that are issued by Windows host drivers to determine the * configuration containing the MBIM function. * * Unlike mbim_setup() this function handles two specific device requests, * and only when a configuration has not yet been selected. */ static int mbim_ctrlrequest(struct usb_composite_dev *cdev, const struct usb_ctrlrequest *ctrl) { int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); /* only respond to OS desciptors when no configuration selected */ if (cdev->config || !mbim_ext_config_desc.function.subCompatibleID[0]) return value; pr_debug("%02x.%02x v%04x i%04x l%u", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); /* Handle MSFT OS string */ if (ctrl->bRequestType == (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE) && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR && (w_value >> 8) == USB_DT_STRING && (w_value & 0xFF) == MBIM_OS_STRING_ID) { value = (w_length < sizeof(mbim_os_string) ? w_length : sizeof(mbim_os_string)); memcpy(cdev->req->buf, mbim_os_string, value); } else if (ctrl->bRequestType == (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE) && ctrl->bRequest == MBIM_VENDOR_CODE && w_index == 4) { /* Handle Extended OS descriptor */ value = (w_length < sizeof(mbim_ext_config_desc) ? w_length : sizeof(mbim_ext_config_desc)); memcpy(cdev->req->buf, &mbim_ext_config_desc, value); } /* respond with data transfer or status phase? 
*/ if (value >= 0) { int rc; cdev->req->zero = value < w_length; cdev->req->length = value; rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); if (rc < 0) pr_err("response queue error: %d", rc); } return value; } static int mbim_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_mbim *mbim = func_to_mbim(f); struct usb_composite_dev *cdev = mbim->cdev; int ret = 0; /* Control interface has only altsetting 0 */ if (intf == mbim->ctrl_id) { pr_info("CONTROL_INTERFACE"); if (alt != 0) goto fail; if (mbim->not_port.notify->driver_data) { pr_info("reset mbim control %d\n", intf); usb_ep_disable(mbim->not_port.notify); } ret = config_ep_by_speed(cdev->gadget, f, mbim->not_port.notify); if (ret) { mbim->not_port.notify->desc = NULL; pr_err("Failed configuring notify ep %s: err %d\n", mbim->not_port.notify->name, ret); return ret; } ret = usb_ep_enable(mbim->not_port.notify); if (ret) { pr_err("usb ep#%s enable failed, err#%d\n", mbim->not_port.notify->name, ret); return ret; } mbim->not_port.notify->driver_data = mbim; /* Data interface has two altsettings, 0 and 1 */ } else if (intf == mbim->data_id) { pr_info("DATA_INTERFACE"); if (alt > 1) goto fail; if (mbim->bam_port.in->driver_data) { pr_info("reset mbim\n"); mbim_reset_values(mbim); } /* * CDC Network only sends data in non-default altsettings. * Changing altsettings resets filters, statistics, etc. */ if (alt == 1) { pr_info("Alt set 1, initialize ports"); if (!mbim->bam_port.in->desc) { pr_info("Choose endpoints"); ret = config_ep_by_speed(cdev->gadget, f, mbim->bam_port.in); if (ret) { mbim->bam_port.in->desc = NULL; pr_err("IN ep %s failed: %d\n", mbim->bam_port.in->name, ret); return ret; } pr_info("Set mbim port in_desc = 0x%p", mbim->bam_port.in->desc); ret = config_ep_by_speed(cdev->gadget, f, mbim->bam_port.out); if (ret) { mbim->bam_port.out->desc = NULL; pr_err("OUT ep %s failed: %d\n", mbim->bam_port.out->name, ret); return ret; } pr_info("Set mbim port out_desc = 0x%p", mbim->bam_port.out->desc); pr_debug("Activate mbim\n"); mbim_bam_connect(mbim); } else { pr_info("PORTS already SET"); } } mbim->data_alt_int = alt; spin_lock(&mbim->lock); mbim->not_port.notify_state = MBIM_NOTIFY_RESPONSE_AVAILABLE; spin_unlock(&mbim->lock); } else { goto fail; } atomic_set(&mbim->online, 1); pr_info("SET DEVICE ONLINE"); /* wakeup file threads */ wake_up(&mbim->read_wq); wake_up(&mbim->write_wq); return 0; fail: pr_err("ERROR: Illegal Interface"); return -EINVAL; } /* * Because the data interface supports multiple altsettings, * this MBIM function *MUST* implement a get_alt() method. 
*/ static int mbim_get_alt(struct usb_function *f, unsigned intf) { struct f_mbim *mbim = func_to_mbim(f); if (intf == mbim->ctrl_id) return 0; else if (intf == mbim->data_id) return mbim->data_alt_int; return -EINVAL; } static void mbim_disable(struct usb_function *f) { struct f_mbim *mbim = func_to_mbim(f); pr_info("SET DEVICE OFFLINE"); atomic_set(&mbim->online, 0); mbim->not_port.notify_state = MBIM_NOTIFY_NONE; mbim_clear_queues(mbim); mbim_reset_function_queue(mbim); mbim_bam_disconnect(mbim); if (mbim->not_port.notify->driver_data) { usb_ep_disable(mbim->not_port.notify); mbim->not_port.notify->driver_data = NULL; } atomic_set(&mbim->not_port.notify_count, 0); pr_info("mbim deactivated\n"); } #define MBIM_ACTIVE_PORT 0 static void mbim_suspend(struct usb_function *f) { pr_info("mbim suspended\n"); bam_data_suspend(MBIM_ACTIVE_PORT); } static void mbim_resume(struct usb_function *f) { pr_info("mbim resumed\n"); bam_data_resume(MBIM_ACTIVE_PORT); } /*---------------------- function driver setup/binding ---------------------*/ static int mbim_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct f_mbim *mbim = func_to_mbim(f); int status; struct usb_ep *ep; pr_info("Enter"); mbim->cdev = cdev; /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 0) goto fail; mbim->ctrl_id = status; mbim_iad_desc.bFirstInterface = status; mbim_control_intf.bInterfaceNumber = status; mbim_union_desc.bMasterInterface0 = status; status = usb_interface_id(c, f); if (status < 0) goto fail; mbim->data_id = status; mbim->data_alt_int = 0; mbim_data_nop_intf.bInterfaceNumber = status; mbim_data_intf.bInterfaceNumber = status; mbim_union_desc.bSlaveInterface0 = status; mbim->bam_port.cdev = cdev; mbim->bam_port.func = &mbim->function; status = -ENODEV; /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_in_desc); if (!ep) { pr_err("usb epin autoconfig failed\n"); goto fail; } pr_info("usb epin autoconfig succeeded\n"); ep->driver_data = cdev; /* claim */ mbim->bam_port.in = ep; ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_out_desc); if (!ep) { pr_err("usb epout autoconfig failed\n"); goto fail; } pr_info("usb epout autoconfig succeeded\n"); ep->driver_data = cdev; /* claim */ mbim->bam_port.out = ep; ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_notify_desc); if (!ep) { pr_err("usb notify ep autoconfig failed\n"); goto fail; } pr_info("usb notify ep autoconfig succeeded\n"); mbim->not_port.notify = ep; ep->driver_data = cdev; /* claim */ status = -ENOMEM; /* allocate notification request and buffer */ mbim->not_port.notify_req = mbim_alloc_req(ep, NCM_STATUS_BYTECOUNT); if (!mbim->not_port.notify_req) { pr_info("failed to allocate notify request\n"); goto fail; } pr_info("allocated notify ep request & request buffer\n"); mbim->not_port.notify_req->context = mbim; mbim->not_port.notify_req->complete = mbim_notify_complete; if (mbim->xport == USB_GADGET_XPORT_BAM2BAM_IPA) mbb_desc.wMaxSegmentSize = cpu_to_le16(0x800); else mbb_desc.wMaxSegmentSize = cpu_to_le16(0xfe0); /* copy descriptors, and track endpoint copies */ f->descriptors = usb_copy_descriptors(mbim_fs_function); if (!f->descriptors) goto fail; /* * support all relevant hardware speeds... 
we expect that when * hardware is dual speed, all bulk-capable endpoints work at * both speeds */ if (gadget_is_dualspeed(c->cdev->gadget)) { hs_mbim_in_desc.bEndpointAddress = fs_mbim_in_desc.bEndpointAddress; hs_mbim_out_desc.bEndpointAddress = fs_mbim_out_desc.bEndpointAddress; hs_mbim_notify_desc.bEndpointAddress = fs_mbim_notify_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->hs_descriptors = usb_copy_descriptors(mbim_hs_function); if (!f->hs_descriptors) goto fail; } /* * If MBIM is bound in a config other than the first, tell Windows * about it by returning the num as a string in the OS descriptor's * subCompatibleID field. Windows only supports up to config #4. */ if (c->bConfigurationValue >= 2 && c->bConfigurationValue <= 4) { pr_debug("MBIM in configuration %d", c->bConfigurationValue); mbim_ext_config_desc.function.subCompatibleID[0] = c->bConfigurationValue + '0'; } pr_info("mbim(%d): %s speed IN/%s OUT/%s NOTIFY/%s\n", mbim->port_num, gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", mbim->bam_port.in->name, mbim->bam_port.out->name, mbim->not_port.notify->name); return 0; fail: pr_err("%s failed to bind, err %d\n", f->name, status); if (f->descriptors) usb_free_descriptors(f->descriptors); if (mbim->not_port.notify_req) { kfree(mbim->not_port.notify_req->buf); usb_ep_free_request(mbim->not_port.notify, mbim->not_port.notify_req); } /* we might as well release our claims on endpoints */ if (mbim->not_port.notify) mbim->not_port.notify->driver_data = NULL; if (mbim->bam_port.out) mbim->bam_port.out->driver_data = NULL; if (mbim->bam_port.in) mbim->bam_port.in->driver_data = NULL; return status; } static void mbim_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_mbim *mbim = func_to_mbim(f); bam_data_destroy(mbim->port_num); if (gadget_is_dualspeed(c->cdev->gadget)) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); kfree(mbim->not_port.notify_req->buf); usb_ep_free_request(mbim->not_port.notify, mbim->not_port.notify_req); mbim_ext_config_desc.function.subCompatibleID[0] = 0; } /** * mbim_bind_config - add MBIM link to a configuration * @c: the configuration to support the network link * Context: single threaded during gadget setup * Returns zero on success, else negative errno. */ int mbim_bind_config(struct usb_configuration *c, unsigned portno, char *xport_name) { struct f_mbim *mbim = NULL; int status = 0; pr_info("port number %u", portno); if (portno >= nr_mbim_ports) { pr_err("Can not add port %u. 
Max ports = %d", portno, nr_mbim_ports); return -ENODEV; } status = mbim_bam_setup(nr_mbim_ports); if (status) { pr_err("bam setup failed"); return status; } /* maybe allocate device-global string IDs */ if (mbim_string_defs[0].id == 0) { /* control interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; mbim_string_defs[STRING_CTRL_IDX].id = status; mbim_control_intf.iInterface = status; /* data interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; mbim_string_defs[STRING_DATA_IDX].id = status; mbim_data_nop_intf.iInterface = status; mbim_data_intf.iInterface = status; } /* allocate and initialize one new instance */ mbim = mbim_ports[0].port; if (!mbim) { pr_info("mbim struct not allocated"); return -ENOMEM; } mbim->cdev = c->cdev; mbim_reset_values(mbim); mbim->function.name = "usb_mbim"; mbim->function.strings = mbim_strings; mbim->function.bind = mbim_bind; mbim->function.unbind = mbim_unbind; mbim->function.set_alt = mbim_set_alt; mbim->function.get_alt = mbim_get_alt; mbim->function.setup = mbim_setup; mbim->function.disable = mbim_disable; mbim->function.suspend = mbim_suspend; mbim->function.resume = mbim_resume; mbim->xport = str_to_xport(xport_name); if (mbim->xport != USB_GADGET_XPORT_BAM2BAM_IPA) { /* Use BAM2BAM by default if not IPA */ mbim->xport = USB_GADGET_XPORT_BAM2BAM; } else { /* For IPA we use limit of 16 */ mbim_ntb_parameters.wNtbOutMaxDatagrams = 16; /* For IPA this is proven to give maximum throughput */ mbim_ntb_parameters.dwNtbInMaxSize = cpu_to_le32(NTB_DEFAULT_IN_SIZE_IPA); mbim_ntb_parameters.dwNtbOutMaxSize = cpu_to_le32(MBIM_NTB_OUT_SIZE_IPA); mbim_ntb_parameters.wNdpInDivisor = 1; } INIT_LIST_HEAD(&mbim->cpkt_req_q); INIT_LIST_HEAD(&mbim->cpkt_resp_q); status = usb_add_function(c, &mbim->function); pr_info("Exit status %d", status); return status; } /* ------------ MBIM DRIVER File Operations API for USER SPACE ------------ */ static ssize_t mbim_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) { struct f_mbim *dev = fp->private_data; struct ctrl_pkt *cpkt = NULL; unsigned long flags; int ret = 0; pr_debug("Enter(%d)\n", count); if (!dev) { pr_err("Received NULL mbim pointer\n"); return -ENODEV; } if (count > MBIM_BULK_BUFFER_SIZE) { pr_err("Buffer size is too big %d, should be at most %d\n", count, MBIM_BULK_BUFFER_SIZE); return -EINVAL; } if (mbim_lock(&dev->read_excl)) { pr_err("Previous reading is not finished yet\n"); return -EBUSY; } /* block until mbim online */ while (!(atomic_read(&dev->online) || atomic_read(&dev->error))) { pr_err("USB cable not connected. Wait.\n"); ret = wait_event_interruptible(dev->read_wq, (atomic_read(&dev->online) || atomic_read(&dev->error))); if (ret < 0) { mbim_unlock(&dev->read_excl); return -ERESTARTSYS; } } if (atomic_read(&dev->error)) { mbim_unlock(&dev->read_excl); return -EIO; } spin_lock_irqsave(&dev->lock, flags); while (list_empty(&dev->cpkt_req_q)) { pr_debug("Requests list is empty. 
Wait.\n"); spin_unlock_irqrestore(&dev->lock, flags); ret = wait_event_interruptible(dev->read_wq, !list_empty(&dev->cpkt_req_q)); if (ret < 0) { pr_err("Waiting failed\n"); mbim_unlock(&dev->read_excl); return -ERESTARTSYS; } pr_debug("Received request packet\n"); spin_lock_irqsave(&dev->lock, flags); } cpkt = list_first_entry(&dev->cpkt_req_q, struct ctrl_pkt, list); if (cpkt->len > count) { spin_unlock_irqrestore(&dev->lock, flags); mbim_unlock(&dev->read_excl); pr_err("cpkt size too big:%d > buf size:%d\n", cpkt->len, count); return -ENOMEM; } pr_debug("cpkt size:%d\n", cpkt->len); list_del(&cpkt->list); spin_unlock_irqrestore(&dev->lock, flags); mbim_unlock(&dev->read_excl); ret = copy_to_user(buf, cpkt->buf, cpkt->len); if (ret) { pr_err("copy_to_user failed: err %d\n", ret); ret = -ENOMEM; } else { pr_debug("copied %d bytes to user\n", cpkt->len); ret = cpkt->len; } mbim_free_ctrl_pkt(cpkt); return ret; } static ssize_t mbim_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos) { struct f_mbim *dev = fp->private_data; struct ctrl_pkt *cpkt = NULL; int ret = 0; pr_debug("Enter(%d)", count); if (!dev) { pr_err("Received NULL mbim pointer\n"); return -ENODEV; } if (!count) { pr_err("zero length ctrl pkt\n"); return -ENODEV; } if (count > MAX_CTRL_PKT_SIZE) { pr_err("given pkt size too big:%d > max_pkt_size:%d\n", count, MAX_CTRL_PKT_SIZE); return -ENOMEM; } if (mbim_lock(&dev->write_excl)) { pr_err("Previous writing not finished yet\n"); return -EBUSY; } if (!atomic_read(&dev->online)) { pr_err("USB cable not connected\n"); mbim_unlock(&dev->write_excl); return -EPIPE; } cpkt = mbim_alloc_ctrl_pkt(count, GFP_KERNEL); if (!cpkt) { pr_err("failed to allocate ctrl pkt\n"); mbim_unlock(&dev->write_excl); return -ENOMEM; } ret = copy_from_user(cpkt->buf, buf, count); if (ret) { pr_err("copy_from_user failed err:%d\n", ret); mbim_free_ctrl_pkt(cpkt); mbim_unlock(&dev->write_excl); return 0; } fmbim_send_cpkt_response(dev, cpkt); mbim_unlock(&dev->write_excl); pr_debug("Exit(%d)", count); return count; } static int mbim_open(struct inode *ip, struct file *fp) { pr_info("Open mbim driver\n"); while (!_mbim_dev) { pr_err("mbim_dev not created yet\n"); return -ENODEV; } if (mbim_lock(&_mbim_dev->open_excl)) { pr_err("Already opened\n"); return -EBUSY; } pr_info("Lock mbim_dev->open_excl for open\n"); if (!atomic_read(&_mbim_dev->online)) pr_err("USB cable not connected\n"); fp->private_data = _mbim_dev; atomic_set(&_mbim_dev->error, 0); pr_info("Exit, mbim file opened\n"); return 0; } static int mbim_release(struct inode *ip, struct file *fp) { pr_info("Close mbim file"); mbim_unlock(&_mbim_dev->open_excl); return 0; } static long mbim_ioctl(struct file *fp, unsigned cmd, unsigned long arg) { struct f_mbim *mbim = fp->private_data; int ret = 0; pr_debug("Received command %d", cmd); if (mbim_lock(&mbim->ioctl_excl)) return -EBUSY; switch (cmd) { case MBIM_GET_NTB_SIZE: ret = copy_to_user((void __user *)arg, &mbim->ntb_input_size, sizeof(mbim->ntb_input_size)); if (ret) { pr_err("copying to user space failed"); ret = -EFAULT; } pr_info("Sent NTB size %d", mbim->ntb_input_size); break; case MBIM_GET_DATAGRAM_COUNT: ret = copy_to_user((void __user *)arg, &mbim->ntb_max_datagrams, sizeof(mbim->ntb_max_datagrams)); if (ret) { pr_err("copying to user space failed"); ret = -EFAULT; } pr_info("Sent NTB datagrams count %d", mbim->ntb_max_datagrams); break; default: pr_err("wrong parameter"); ret = -EINVAL; } mbim_unlock(&mbim->ioctl_excl); return ret; } /* file operations for MBIM 
device /dev/android_mbim */ static const struct file_operations mbim_fops = { .owner = THIS_MODULE, .open = mbim_open, .release = mbim_release, .read = mbim_read, .write = mbim_write, .unlocked_ioctl = mbim_ioctl, }; static struct miscdevice mbim_device = { .minor = MISC_DYNAMIC_MINOR, .name = "android_mbim", .fops = &mbim_fops, }; static int mbim_init(int instances) { int i; struct f_mbim *dev = NULL; int ret; pr_info("initialize %d instances\n", instances); if (instances > NR_MBIM_PORTS) { pr_err("Max-%d instances supported\n", NR_MBIM_PORTS); return -EINVAL; } for (i = 0; i < instances; i++) { dev = kzalloc(sizeof(struct f_mbim), GFP_KERNEL); if (!dev) { pr_err("Failed to allocate mbim dev\n"); ret = -ENOMEM; goto fail_probe; } dev->port_num = i; spin_lock_init(&dev->lock); INIT_LIST_HEAD(&dev->cpkt_req_q); INIT_LIST_HEAD(&dev->cpkt_resp_q); mbim_ports[i].port = dev; mbim_ports[i].port_num = i; init_waitqueue_head(&dev->read_wq); init_waitqueue_head(&dev->write_wq); atomic_set(&dev->open_excl, 0); atomic_set(&dev->ioctl_excl, 0); atomic_set(&dev->read_excl, 0); atomic_set(&dev->write_excl, 0); nr_mbim_ports++; } _mbim_dev = dev; ret = misc_register(&mbim_device); if (ret) { pr_err("mbim driver failed to register"); goto fail_probe; } pr_info("Initialized %d ports\n", nr_mbim_ports); return ret; fail_probe: pr_err("Failed"); for (i = 0; i < nr_mbim_ports; i++) { kfree(mbim_ports[i].port); mbim_ports[i].port = NULL; } return ret; } static void fmbim_cleanup(void) { int i = 0; pr_info("Enter"); for (i = 0; i < nr_mbim_ports; i++) { kfree(mbim_ports[i].port); mbim_ports[i].port = NULL; } nr_mbim_ports = 0; misc_deregister(&mbim_device); _mbim_dev = NULL; }
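/*
 * Illustrative sketch, not part of the original driver: a minimal
 * user-space loop for the /dev/android_mbim node exported above. One
 * read() returns one encapsulated MBIM command queued by
 * fmbim_cmd_complete(); write() feeds the response back through
 * fmbim_send_cpkt_response(). The 4096-byte buffer mirrors the
 * wMaxControlMessage (0x1000) advertised in mbb_desc, and
 * MBIM_GET_NTB_SIZE is assumed to be visible to user space via the
 * platform's f_mbim uapi header.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
	uint32_t ntb_size = 0;
	char cmd[4096];
	ssize_t n;
	int fd = open("/dev/android_mbim", O_RDWR);

	if (fd < 0)
		return 1;

	/* mirrors mbim_ioctl(): report the negotiated NTB input size */
	if (ioctl(fd, MBIM_GET_NTB_SIZE, &ntb_size) == 0)
		printf("NTB input size: %u\n", ntb_size);

	/* blocks in mbim_read() until the host sends a command */
	n = read(fd, cmd, sizeof(cmd));
	if (n > 0) {
		/* ... parse the command, build a real MBIM response ... */
		write(fd, cmd, n);	/* placeholder: echo the command */
	}

	close(fd);
	return 0;
}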
gpl-2.0
TeamExodus/kernel_moto_shamu
arch/arm/mach-mvebu/armada-370-xp.c
1549
2529
/* * Device Tree support for Armada 370 and XP platforms. * * Copyright (C) 2012 Marvell * * Lior Amsalem <alior@marvell.com> * Gregory CLEMENT <gregory.clement@free-electrons.com> * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/of_platform.h> #include <linux/io.h> #include <linux/time-armada-370-xp.h> #include <linux/clk/mvebu.h> #include <linux/dma-mapping.h> #include <linux/mbus.h> #include <linux/irqchip.h> #include <asm/hardware/cache-l2x0.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/time.h> #include "armada-370-xp.h" #include "common.h" #include "coherency.h" static struct map_desc armada_370_xp_io_desc[] __initdata = { { .virtual = (unsigned long) ARMADA_370_XP_REGS_VIRT_BASE, .pfn = __phys_to_pfn(ARMADA_370_XP_REGS_PHYS_BASE), .length = ARMADA_370_XP_REGS_SIZE, .type = MT_DEVICE, }, }; void __init armada_370_xp_map_io(void) { iotable_init(armada_370_xp_io_desc, ARRAY_SIZE(armada_370_xp_io_desc)); } void __init armada_370_xp_timer_and_clk_init(void) { mvebu_clocks_init(); armada_370_xp_timer_init(); } void __init armada_370_xp_init_early(void) { char *mbus_soc_name; /* * This initialization will be replaced by a DT-based * initialization once the mvebu-mbus driver gains DT support. */ if (of_machine_is_compatible("marvell,armada370")) mbus_soc_name = "marvell,armada370-mbus"; else mbus_soc_name = "marvell,armadaxp-mbus"; mvebu_mbus_init(mbus_soc_name, ARMADA_370_XP_MBUS_WINS_BASE, ARMADA_370_XP_MBUS_WINS_SIZE, ARMADA_370_XP_SDRAM_WINS_BASE, ARMADA_370_XP_SDRAM_WINS_SIZE); #ifdef CONFIG_CACHE_L2X0 l2x0_of_init(0, ~0UL); #endif } static void __init armada_370_xp_dt_init(void) { of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); coherency_init(); } static const char * const armada_370_xp_dt_compat[] = { "marvell,armada-370-xp", NULL, }; DT_MACHINE_START(ARMADA_XP_DT, "Marvell Armada 370/XP (Device Tree)") .smp = smp_ops(armada_xp_smp_ops), .init_machine = armada_370_xp_dt_init, .map_io = armada_370_xp_map_io, .init_early = armada_370_xp_init_early, .init_irq = irqchip_init, .init_time = armada_370_xp_timer_and_clk_init, .restart = mvebu_restart, .dt_compat = armada_370_xp_dt_compat, MACHINE_END
gpl-2.0
elefher/Donkey_Kernel_MotoG
drivers/cpuidle/coupled.c
2061
23980
/* * coupled.c - helper functions to enter the same idle state on multiple cpus * * Copyright (c) 2011 Google, Inc. * * Author: Colin Cross <ccross@android.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/kernel.h> #include <linux/cpu.h> #include <linux/cpuidle.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "cpuidle.h" /** * DOC: Coupled cpuidle states * * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the * cpus cannot be independently powered down, either due to * sequencing restrictions (on Tegra 2, cpu 0 must be the last to * power down), or due to HW bugs (on OMAP4460, a cpu powering up * will corrupt the gic state unless the other cpu runs a work * around). Each cpu has a power state that it can enter without * coordinating with the other cpu (usually Wait For Interrupt, or * WFI), and one or more "coupled" power states that affect blocks * shared between the cpus (L2 cache, interrupt controller, and * sometimes the whole SoC). Entering a coupled power state must * be tightly controlled on both cpus. * * This file implements a solution, where each cpu will wait in the * WFI state until all cpus are ready to enter a coupled state, at * which point the coupled state function will be called on all * cpus at approximately the same time. * * Once all cpus are ready to enter idle, they are woken by an smp * cross call. At this point, there is a chance that one of the * cpus will find work to do, and choose not to enter idle. A * final pass is needed to guarantee that all cpus will call the * power state enter function at the same time. During this pass, * each cpu will increment the ready counter, and continue once the * ready counter matches the number of online coupled cpus. If any * cpu exits idle, the other cpus will decrement their counter and * retry. * * requested_state stores the deepest coupled idle state each cpu * is ready for. It is assumed that the states are indexed from * shallowest (highest power, lowest exit latency) to deepest * (lowest power, highest exit latency). The requested_state * variable is not locked. It is only written from the cpu that * it stores (or by the on/offlining cpu if that cpu is offline), * and only read after all the cpus are ready for the coupled idle * state are are no longer updating it. * * Three atomic counters are used. alive_count tracks the number * of cpus in the coupled set that are currently or soon will be * online. waiting_count tracks the number of cpus that are in * the waiting loop, in the ready loop, or in the coupled idle state. * ready_count tracks the number of cpus that are in the ready loop * or in the coupled idle state. * * To use coupled cpuidle states, a cpuidle driver must: * * Set struct cpuidle_device.coupled_cpus to the mask of all * coupled cpus, usually the same as cpu_possible_mask if all cpus * are part of the same cluster. The coupled_cpus mask must be * set in the struct cpuidle_device for each cpu. 
* * Set struct cpuidle_device.safe_state to a state that is not a * coupled state. This is usually WFI. * * Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each * state that affects multiple cpus. * * Provide a struct cpuidle_state.enter function for each state * that affects multiple cpus. This function is guaranteed to be * called on all cpus at approximately the same time. The driver * should ensure that the cpus all abort together if any cpu tries * to abort once the function is called. The function should return * with interrupts still disabled. */ /** * struct cpuidle_coupled - data for set of cpus that share a coupled idle state * @coupled_cpus: mask of cpus that are part of the coupled set * @requested_state: array of requested states for cpus in the coupled set * @ready_waiting_counts: combined count of cpus in ready or waiting loops * @online_count: count of cpus that are online * @refcnt: reference count of cpuidle devices that are using this struct * @prevent: flag to prevent coupled idle while a cpu is hotplugging */ struct cpuidle_coupled { cpumask_t coupled_cpus; int requested_state[NR_CPUS]; atomic_t ready_waiting_counts; int online_count; int refcnt; int prevent; }; #define WAITING_BITS 16 #define MAX_WAITING_CPUS (1 << WAITING_BITS) #define WAITING_MASK (MAX_WAITING_CPUS - 1) #define READY_MASK (~WAITING_MASK) #define CPUIDLE_COUPLED_NOT_IDLE (-1) static DEFINE_MUTEX(cpuidle_coupled_lock); static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb); /* * The cpuidle_coupled_poked_mask mask is used to avoid calling * __smp_call_function_single with the per cpu call_single_data struct already * in use. This prevents a deadlock where two cpus are waiting for each others * call_single_data struct to be available */ static cpumask_t cpuidle_coupled_poked_mask; /** * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus * @dev: cpuidle_device of the calling cpu * @a: atomic variable to hold the barrier * * No caller to this function will return from this function until all online * cpus in the same coupled group have called this function. Once any caller * has returned from this function, the barrier is immediately available for * reuse. * * The atomic variable a must be initialized to 0 before any cpu calls * this function, will be reset to 0 before any cpu returns from this function. * * Must only be called from within a coupled idle state handler * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set). * * Provides full smp barrier semantics before and after calling. 
*/ void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a) { int n = dev->coupled->online_count; smp_mb__before_atomic_inc(); atomic_inc(a); while (atomic_read(a) < n) cpu_relax(); if (atomic_inc_return(a) == n * 2) { atomic_set(a, 0); return; } while (atomic_read(a) > n) cpu_relax(); } /** * cpuidle_state_is_coupled - check if a state is part of a coupled set * @dev: struct cpuidle_device for the current cpu * @drv: struct cpuidle_driver for the platform * @state: index of the target state in drv->states * * Returns true if the target state is coupled with cpus besides this one */ bool cpuidle_state_is_coupled(struct cpuidle_device *dev, struct cpuidle_driver *drv, int state) { return drv->states[state].flags & CPUIDLE_FLAG_COUPLED; } /** * cpuidle_coupled_set_ready - mark a cpu as ready * @coupled: the struct coupled that contains the current cpu */ static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled) { atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts); } /** * cpuidle_coupled_set_not_ready - mark a cpu as not ready * @coupled: the struct coupled that contains the current cpu * * Decrements the ready counter, unless the ready (and thus the waiting) counter * is equal to the number of online cpus. Prevents a race where one cpu * decrements the waiting counter and then re-increments it just before another * cpu has decremented its ready counter, leading to the ready counter going * down from the number of online cpus without going through the coupled idle * state. * * Returns 0 if the counter was decremented successfully, -EINVAL if the ready * counter was equal to the number of online cpus. */ static inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled) { int all; int ret; all = coupled->online_count | (coupled->online_count << WAITING_BITS); ret = atomic_add_unless(&coupled->ready_waiting_counts, -MAX_WAITING_CPUS, all); return ret ? 0 : -EINVAL; } /** * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready * @coupled: the struct coupled that contains the current cpu * * Returns true if all of the cpus in a coupled set are out of the ready loop. */ static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled) { int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS; return r == 0; } /** * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready * @coupled: the struct coupled that contains the current cpu * * Returns true if all cpus coupled to this target state are in the ready loop */ static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled) { int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS; return r == coupled->online_count; } /** * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting * @coupled: the struct coupled that contains the current cpu * * Returns true if all cpus coupled to this target state are in the wait loop */ static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled) { int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK; return w == coupled->online_count; } /** * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting * @coupled: the struct coupled that contains the current cpu * * Returns true if all of the cpus in a coupled set are out of the waiting loop. 
*/ static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled) { int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK; return w == 0; } /** * cpuidle_coupled_get_state - determine the deepest idle state * @dev: struct cpuidle_device for this cpu * @coupled: the struct coupled that contains the current cpu * * Returns the deepest idle state that all coupled cpus can enter */ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev, struct cpuidle_coupled *coupled) { int i; int state = INT_MAX; /* * Read barrier ensures that read of requested_state is ordered after * reads of ready_count. Matches the write barriers * cpuidle_set_state_waiting. */ smp_rmb(); for_each_cpu_mask(i, coupled->coupled_cpus) if (cpu_online(i) && coupled->requested_state[i] < state) state = coupled->requested_state[i]; return state; } static void cpuidle_coupled_poked(void *info) { int cpu = (unsigned long)info; cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask); } /** * cpuidle_coupled_poke - wake up a cpu that may be waiting * @cpu: target cpu * * Ensures that the target cpu exits it's waiting idle state (if it is in it) * and will see updates to waiting_count before it re-enters it's waiting idle * state. * * If cpuidle_coupled_poked_mask is already set for the target cpu, that cpu * either has or will soon have a pending IPI that will wake it out of idle, * or it is currently processing the IPI and is not in idle. */ static void cpuidle_coupled_poke(int cpu) { struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu); if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask)) __smp_call_function_single(cpu, csd, 0); } /** * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting * @dev: struct cpuidle_device for this cpu * @coupled: the struct coupled that contains the current cpu * * Calls cpuidle_coupled_poke on all other online cpus. */ static void cpuidle_coupled_poke_others(int this_cpu, struct cpuidle_coupled *coupled) { int cpu; for_each_cpu_mask(cpu, coupled->coupled_cpus) if (cpu != this_cpu && cpu_online(cpu)) cpuidle_coupled_poke(cpu); } /** * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop * @dev: struct cpuidle_device for this cpu * @coupled: the struct coupled that contains the current cpu * @next_state: the index in drv->states of the requested state for this cpu * * Updates the requested idle state for the specified cpuidle device, * poking all coupled cpus out of idle if necessary to let them see the new * state. */ static void cpuidle_coupled_set_waiting(int cpu, struct cpuidle_coupled *coupled, int next_state) { int w; coupled->requested_state[cpu] = next_state; /* * If this is the last cpu to enter the waiting state, poke * all the other cpus out of their waiting state so they can * enter a deeper state. This can race with one of the cpus * exiting the waiting state due to an interrupt and * decrementing waiting_count, see comment below. * * The atomic_inc_return provides a write barrier to order the write * to requested_state with the later write that increments ready_count. */ w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK; if (w == coupled->online_count) cpuidle_coupled_poke_others(cpu, coupled); } /** * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop * @dev: struct cpuidle_device for this cpu * @coupled: the struct coupled that contains the current cpu * * Removes the requested idle state for the specified cpuidle device. 
*/ static void cpuidle_coupled_set_not_waiting(int cpu, struct cpuidle_coupled *coupled) { /* * Decrementing waiting count can race with incrementing it in * cpuidle_coupled_set_waiting, but that's OK. Worst case, some * cpus will increment ready_count and then spin until they * notice that this cpu has cleared it's requested_state. */ atomic_dec(&coupled->ready_waiting_counts); coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE; } /** * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop * @cpu: the current cpu * @coupled: the struct coupled that contains the current cpu * * Marks this cpu as no longer in the ready and waiting loops. Decrements * the waiting count first to prevent another cpu looping back in and seeing * this cpu as waiting just before it exits idle. */ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled) { cpuidle_coupled_set_not_waiting(cpu, coupled); atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts); } /** * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed * @cpu - this cpu * * Turns on interrupts and spins until any outstanding poke interrupts have * been processed and the poke bit has been cleared. * * Other interrupts may also be processed while interrupts are enabled, so * need_resched() must be tested after turning interrupts off again to make sure * the interrupt didn't schedule work that should take the cpu out of idle. * * Returns 0 if need_resched was false, -EINTR if need_resched was true. */ static int cpuidle_coupled_clear_pokes(int cpu) { local_irq_enable(); while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask)) cpu_relax(); local_irq_disable(); return need_resched() ? -EINTR : 0; } /** * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus * @dev: struct cpuidle_device for the current cpu * @drv: struct cpuidle_driver for the platform * @next_state: index of the requested state in drv->states * * Coordinate with coupled cpus to enter the target state. This is a two * stage process. In the first stage, the cpus are operating independently, * and may call into cpuidle_enter_state_coupled at completely different times. * To save as much power as possible, the first cpus to call this function will * go to an intermediate state (the cpuidle_device's safe state), and wait for * all the other cpus to call this function. Once all coupled cpus are idle, * the second stage will start. Each coupled cpu will spin until all cpus have * guaranteed that they will call the target_state. * * This function must be called with interrupts disabled. It may enable * interrupts while preparing for idle, and it will always return with * interrupts enabled. */ int cpuidle_enter_state_coupled(struct cpuidle_device *dev, struct cpuidle_driver *drv, int next_state) { int entered_state = -1; struct cpuidle_coupled *coupled = dev->coupled; if (!coupled) return -EINVAL; while (coupled->prevent) { if (cpuidle_coupled_clear_pokes(dev->cpu)) { local_irq_enable(); return entered_state; } entered_state = cpuidle_enter_state(dev, drv, dev->safe_state_index); } /* Read barrier ensures online_count is read after prevent is cleared */ smp_rmb(); cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state); retry: /* * Wait for all coupled cpus to be idle, using the deepest state * allowed for a single cpu. 
*/ while (!cpuidle_coupled_cpus_waiting(coupled)) { if (cpuidle_coupled_clear_pokes(dev->cpu)) { cpuidle_coupled_set_not_waiting(dev->cpu, coupled); goto out; } if (coupled->prevent) { cpuidle_coupled_set_not_waiting(dev->cpu, coupled); goto out; } entered_state = cpuidle_enter_state(dev, drv, dev->safe_state_index); } if (cpuidle_coupled_clear_pokes(dev->cpu)) { cpuidle_coupled_set_not_waiting(dev->cpu, coupled); goto out; } /* * All coupled cpus are probably idle. There is a small chance that * one of the other cpus just became active. Increment the ready count, * and spin until all coupled cpus have incremented the counter. Once a * cpu has incremented the ready counter, it cannot abort idle and must * spin until either all cpus have incremented the ready counter, or * another cpu leaves idle and decrements the waiting counter. */ cpuidle_coupled_set_ready(coupled); while (!cpuidle_coupled_cpus_ready(coupled)) { /* Check if any other cpus bailed out of idle. */ if (!cpuidle_coupled_cpus_waiting(coupled)) if (!cpuidle_coupled_set_not_ready(coupled)) goto retry; cpu_relax(); } /* all cpus have acked the coupled state */ next_state = cpuidle_coupled_get_state(dev, coupled); entered_state = cpuidle_enter_state(dev, drv, next_state); cpuidle_coupled_set_done(dev->cpu, coupled); out: /* * Normal cpuidle states are expected to return with irqs enabled. * That leads to an inefficiency where a cpu receiving an interrupt * that brings it out of idle will process that interrupt before * exiting the idle enter function and decrementing ready_count. All * other cpus will need to spin waiting for the cpu that is processing * the interrupt. If the driver returns with interrupts disabled, * all other cpus will loop back into the safe idle state instead of * spinning, saving power. * * Calling local_irq_enable here allows coupled states to return with * interrupts disabled, but won't cause problems for drivers that * exit with interrupts enabled. */ local_irq_enable(); /* * Wait until all coupled cpus have exited idle. There is no risk that * a cpu exits and re-enters the ready state because this cpu has * already decremented its waiting_count. */ while (!cpuidle_coupled_no_cpus_ready(coupled)) cpu_relax(); return entered_state; } static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled) { cpumask_t cpus; cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); coupled->online_count = cpumask_weight(&cpus); } /** * cpuidle_coupled_register_device - register a coupled cpuidle device * @dev: struct cpuidle_device for the current cpu * * Called from cpuidle_register_device to handle coupled idle init. Finds the * cpuidle_coupled struct for this set of coupled cpus, or creates one if none * exists yet. 
*/ int cpuidle_coupled_register_device(struct cpuidle_device *dev) { int cpu; struct cpuidle_device *other_dev; struct call_single_data *csd; struct cpuidle_coupled *coupled; if (cpumask_empty(&dev->coupled_cpus)) return 0; for_each_cpu_mask(cpu, dev->coupled_cpus) { other_dev = per_cpu(cpuidle_devices, cpu); if (other_dev && other_dev->coupled) { coupled = other_dev->coupled; goto have_coupled; } } /* No existing coupled info found, create a new one */ coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL); if (!coupled) return -ENOMEM; coupled->coupled_cpus = dev->coupled_cpus; have_coupled: dev->coupled = coupled; if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus))) coupled->prevent++; cpuidle_coupled_update_online_cpus(coupled); coupled->refcnt++; csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu); csd->func = cpuidle_coupled_poked; csd->info = (void *)(unsigned long)dev->cpu; return 0; } /** * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device * @dev: struct cpuidle_device for the current cpu * * Called from cpuidle_unregister_device to tear down coupled idle. Removes the * cpu from the coupled idle set, and frees the cpuidle_coupled struct if * this was the last cpu in the set. */ void cpuidle_coupled_unregister_device(struct cpuidle_device *dev) { struct cpuidle_coupled *coupled = dev->coupled; if (cpumask_empty(&dev->coupled_cpus)) return; /* Free the shared struct only when the last cpu in the set unregisters */ if (!--coupled->refcnt) kfree(coupled); dev->coupled = NULL; } /** * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state * @coupled: the struct coupled that contains the cpu that is changing state * * Disables coupled cpuidle on a coupled set of cpus. Used to ensure that * cpu_online_mask doesn't change while cpus are coordinating coupled idle. */ static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled) { int cpu = get_cpu(); /* Force all cpus out of the waiting loop. */ coupled->prevent++; cpuidle_coupled_poke_others(cpu, coupled); put_cpu(); while (!cpuidle_coupled_no_cpus_waiting(coupled)) cpu_relax(); } /** * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state * @coupled: the struct coupled that contains the cpu that is changing state * * Enables coupled cpuidle on a coupled set of cpus. Used to ensure that * cpu_online_mask doesn't change while cpus are coordinating coupled idle. */ static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled) { int cpu = get_cpu(); /* * Write barrier ensures readers see the new online_count when they * see prevent == 0. */ smp_wmb(); coupled->prevent--; /* Force cpus out of the prevent loop. */ cpuidle_coupled_poke_others(cpu, coupled); put_cpu(); } /** * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions * @nb: notifier block * @action: hotplug transition * @hcpu: target cpu number * * Called when a cpu is brought online or offline using hotplug.
Updates the * coupled cpu set appropriately. */ static int cpuidle_coupled_cpu_notify(struct notifier_block *nb, unsigned long action, void *hcpu) { int cpu = (unsigned long)hcpu; struct cpuidle_device *dev; switch (action & ~CPU_TASKS_FROZEN) { case CPU_UP_PREPARE: case CPU_DOWN_PREPARE: case CPU_ONLINE: case CPU_DEAD: case CPU_UP_CANCELED: case CPU_DOWN_FAILED: break; default: return NOTIFY_OK; } mutex_lock(&cpuidle_lock); dev = per_cpu(cpuidle_devices, cpu); /* The cpu may not have registered a cpuidle device yet */ if (!dev || !dev->coupled) goto out; switch (action & ~CPU_TASKS_FROZEN) { case CPU_UP_PREPARE: case CPU_DOWN_PREPARE: cpuidle_coupled_prevent_idle(dev->coupled); break; case CPU_ONLINE: case CPU_DEAD: cpuidle_coupled_update_online_cpus(dev->coupled); /* Fall through */ case CPU_UP_CANCELED: case CPU_DOWN_FAILED: cpuidle_coupled_allow_idle(dev->coupled); break; } out: mutex_unlock(&cpuidle_lock); return NOTIFY_OK; } static struct notifier_block cpuidle_coupled_cpu_notifier = { .notifier_call = cpuidle_coupled_cpu_notify, }; static int __init cpuidle_coupled_init(void) { return register_cpu_notifier(&cpuidle_coupled_cpu_notifier); } core_initcall(cpuidle_coupled_init);
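The ready/waiting handshake above packs two counters into the single atomic ready_waiting_counts: the waiting count is adjusted by 1 and the ready count by MAX_WAITING_CPUS, so both can be read and updated together in one atomic operation. The following is a minimal, self-contained userspace sketch of that packing technique using C11 atomics; the 16-bit split and the helper names are illustrative assumptions inferred from the atomic_dec/atomic_sub calls above, not the driver's actual definitions.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative split: low 16 bits count waiting cpus, high bits count ready cpus. */
#define WAITING_BITS     16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK     (MAX_WAITING_CPUS - 1)

static atomic_int ready_waiting_counts;

static void set_waiting(void)     { atomic_fetch_add(&ready_waiting_counts, 1); }
static void set_not_waiting(void) { atomic_fetch_sub(&ready_waiting_counts, 1); }
static void set_ready(void)       { atomic_fetch_add(&ready_waiting_counts, MAX_WAITING_CPUS); }

static void set_done(void)
{
        /* Drop out of waiting first, then out of ready, mirroring the order above. */
        set_not_waiting();
        atomic_fetch_sub(&ready_waiting_counts, MAX_WAITING_CPUS);
}

static int waiting_count(void) { return atomic_load(&ready_waiting_counts) & WAITING_MASK; }
static int ready_count(void)   { return atomic_load(&ready_waiting_counts) >> WAITING_BITS; }

int main(void)
{
        set_waiting(); set_waiting();   /* two cpus enter the waiting loop */
        set_ready(); set_ready();       /* both ack the coupled state */
        printf("waiting=%d ready=%d\n", waiting_count(), ready_count()); /* 2 2 */
        set_done(); set_done();         /* both leave idle */
        printf("waiting=%d ready=%d\n", waiting_count(), ready_count()); /* 0 0 */
        return 0;
}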
gpl-2.0
e-mailky/kernel-comment
arch/arm/mach-imx/tzic.c
2573
6085
/* * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/irqdomain.h> #include <linux/of.h> #include <asm/mach/irq.h> #include <asm/exception.h> #include "common.h" #include "hardware.h" #include "irq-common.h" /* ***************************************** * TZIC Registers * ***************************************** */ #define TZIC_INTCNTL 0x0000 /* Control register */ #define TZIC_INTTYPE 0x0004 /* Controller Type register */ #define TZIC_IMPID 0x0008 /* Distributor Implementer Identification */ #define TZIC_PRIOMASK 0x000C /* Priority Mask Reg */ #define TZIC_SYNCCTRL 0x0010 /* Synchronizer Control register */ #define TZIC_DSMINT 0x0014 /* DSM Interrupt Holdoff register */ #define TZIC_INTSEC0(i) (0x0080 + ((i) << 2)) /* Interrupt Security Reg 0 */ #define TZIC_ENSET0(i) (0x0100 + ((i) << 2)) /* Enable Set Reg 0 */ #define TZIC_ENCLEAR0(i) (0x0180 + ((i) << 2)) /* Enable Clear Reg 0 */ #define TZIC_SRCSET0 0x0200 /* Source Set Register 0 */ #define TZIC_SRCCLAR0 0x0280 /* Source Clear Register 0 */ #define TZIC_PRIORITY0 0x0400 /* Priority Register 0 */ #define TZIC_PND0 0x0D00 /* Pending Register 0 */ #define TZIC_HIPND(i) (0x0D80 + ((i) << 2)) /* High Priority Pending Register */ #define TZIC_WAKEUP0(i) (0x0E00 + ((i) << 2)) /* Wakeup Config Register */ #define TZIC_SWINT 0x0F00 /* Software Interrupt Trigger Register */ #define TZIC_ID0 0x0FD0 /* Identification Register 0 */ static void __iomem *tzic_base; static struct irq_domain *domain; #define TZIC_NUM_IRQS 128 #ifdef CONFIG_FIQ static int tzic_set_irq_fiq(unsigned int irq, unsigned int type) { unsigned int index, mask, value; index = irq >> 5; if (unlikely(index >= 4)) return -EINVAL; mask = 1U << (irq & 0x1F); value = __raw_readl(tzic_base + TZIC_INTSEC0(index)) | mask; if (type) value &= ~mask; __raw_writel(value, tzic_base + TZIC_INTSEC0(index)); return 0; } #else #define tzic_set_irq_fiq NULL #endif #ifdef CONFIG_PM static void tzic_irq_suspend(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); int idx = d->hwirq >> 5; __raw_writel(gc->wake_active, tzic_base + TZIC_WAKEUP0(idx)); } static void tzic_irq_resume(struct irq_data *d) { int idx = d->hwirq >> 5; __raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(idx)), tzic_base + TZIC_WAKEUP0(idx)); } #else #define tzic_irq_suspend NULL #define tzic_irq_resume NULL #endif static struct mxc_extra_irq tzic_extra_irq = { #ifdef CONFIG_FIQ .set_irq_fiq = tzic_set_irq_fiq, #endif }; static __init void tzic_init_gc(int idx, unsigned int irq_start) { struct irq_chip_generic *gc; struct irq_chip_type *ct; gc = irq_alloc_generic_chip("tzic", 1, irq_start, tzic_base, handle_level_irq); gc->private = &tzic_extra_irq; gc->wake_enabled = IRQ_MSK(32); ct = gc->chip_types; ct->chip.irq_mask = irq_gc_mask_disable_reg; ct->chip.irq_unmask = irq_gc_unmask_enable_reg; ct->chip.irq_set_wake = irq_gc_set_wake; ct->chip.irq_suspend = tzic_irq_suspend; ct->chip.irq_resume = tzic_irq_resume; ct->regs.disable = TZIC_ENCLEAR0(idx); ct->regs.enable = TZIC_ENSET0(idx); irq_setup_generic_chip(gc, IRQ_MSK(32), 0,
IRQ_NOREQUEST, 0); } asmlinkage void __exception_irq_entry tzic_handle_irq(struct pt_regs *regs) { u32 stat; int i, irqofs, handled; do { handled = 0; for (i = 0; i < 4; i++) { stat = __raw_readl(tzic_base + TZIC_HIPND(i)) & __raw_readl(tzic_base + TZIC_INTSEC0(i)); while (stat) { handled = 1; irqofs = fls(stat) - 1; handle_IRQ(irq_find_mapping(domain, irqofs + i * 32), regs); stat &= ~(1 << irqofs); } } } while (handled); } /* * This function initializes the TZIC hardware and disables all the * interrupts. It registers the interrupt enable and disable functions * to the kernel for each interrupt source. */ void __init tzic_init_irq(void __iomem *irqbase) { struct device_node *np; int irq_base; int i; tzic_base = irqbase; /* put the TZIC into the reset value with * all interrupts disabled */ i = __raw_readl(tzic_base + TZIC_INTCNTL); __raw_writel(0x80010001, tzic_base + TZIC_INTCNTL); __raw_writel(0x1f, tzic_base + TZIC_PRIOMASK); __raw_writel(0x02, tzic_base + TZIC_SYNCCTRL); for (i = 0; i < 4; i++) __raw_writel(0xFFFFFFFF, tzic_base + TZIC_INTSEC0(i)); /* disable all interrupts */ for (i = 0; i < 4; i++) __raw_writel(0xFFFFFFFF, tzic_base + TZIC_ENCLEAR0(i)); /* all IRQ no FIQ Warning :: No selection */ irq_base = irq_alloc_descs(-1, 0, TZIC_NUM_IRQS, numa_node_id()); WARN_ON(irq_base < 0); np = of_find_compatible_node(NULL, NULL, "fsl,tzic"); domain = irq_domain_add_legacy(np, TZIC_NUM_IRQS, irq_base, 0, &irq_domain_simple_ops, NULL); WARN_ON(!domain); for (i = 0; i < 4; i++, irq_base += 32) tzic_init_gc(i, irq_base); #ifdef CONFIG_FIQ /* Initialize FIQ */ init_FIQ(FIQ_START); #endif pr_info("TrustZone Interrupt Controller (TZIC) initialized\n"); } /** * tzic_enable_wake() - enable wakeup interrupt * * @return 0 if successful; non-zero otherwise * * This function provides an interrupt synchronization point that is required * by tzic enabled platforms before entering imx specific low power modes (ie, * those low power modes beyond the WAIT_CLOCKED basic ARM WFI only mode). */ int tzic_enable_wake(void) { unsigned int i; __raw_writel(1, tzic_base + TZIC_DSMINT); if (unlikely(__raw_readl(tzic_base + TZIC_DSMINT) == 0)) return -EAGAIN; for (i = 0; i < 4; i++) __raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(i)), tzic_base + TZIC_WAKEUP0(i)); return 0; }
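tzic_handle_irq drains pending interrupts by repeatedly scanning the four 32-bit pending words and dispatching the highest set bit of each with fls() until a full pass finds nothing pending. Below is a compact userspace model of that scan loop; the pending array and handler are stand-ins for the TZIC_HIPND/TZIC_INTSEC0 reads and the handle_IRQ() call, and it assumes a gcc/clang __builtin_clz.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the four TZIC_HIPND/TZIC_INTSEC0 register pairs. */
static uint32_t pending[4] = { 0x00000005, 0, 0x80000000, 0 };

static int fls32(uint32_t x) /* like the kernel's fls(): 1-based index of highest set bit */
{
        return x ? 32 - __builtin_clz(x) : 0;
}

static void handle_irq(int hwirq) /* stand-in for handle_IRQ(irq_find_mapping(...)) */
{
        printf("dispatch hwirq %d\n", hwirq);
}

int main(void)
{
        int i, irqofs, handled;

        do {
                handled = 0;
                for (i = 0; i < 4; i++) {
                        uint32_t stat = pending[i];
                        while (stat) {
                                handled = 1;
                                irqofs = fls32(stat) - 1;   /* highest pending bit first */
                                handle_irq(irqofs + i * 32);
                                stat &= ~(1U << irqofs);
                                pending[i] = stat;          /* the real handler re-reads HIPND */
                        }
                }
        } while (handled);
        return 0;
}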
gpl-2.0
sub77/matissewifi
drivers/target/tcm_fc/tfc_cmd.c
2829
14140
/* * Copyright (c) 2010 Cisco Systems, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ /* XXX TBD some includes may be extraneous */ #include <linux/module.h> #include <linux/moduleparam.h> #include <generated/utsrelease.h> #include <linux/utsname.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/types.h> #include <linux/string.h> #include <linux/configfs.h> #include <linux/ctype.h> #include <linux/hash.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <scsi/libfc.h> #include <scsi/fc_encode.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> #include "tcm_fc.h" /* * Dump cmd state for debugging. */ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) { struct fc_exch *ep; struct fc_seq *sp; struct se_cmd *se_cmd; struct scatterlist *sg; int count; se_cmd = &cmd->se_cmd; pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", caller, cmd, cmd->sess, cmd->seq, se_cmd); pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", caller, cmd, se_cmd->t_data_nents, se_cmd->data_length, se_cmd->se_cmd_flags); for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count) pr_debug("%s: cmd %p sg %p page %p " "len 0x%x off 0x%x\n", caller, cmd, sg, sg_page(sg), sg->length, sg->offset); sp = cmd->seq; if (sp) { ep = fc_seq_exch(sp); pr_debug("%s: cmd %p sid %x did %x " "ox_id %x rx_id %x seq_id %x e_stat %x\n", caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, sp->id, ep->esb_stat); } } static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; struct fc_lport *lport; if (!cmd) return; fp = cmd->req_frame; lport = fr_dev(fp); if (fr_seq(fp)) lport->tt.seq_release(fr_seq(fp)); fc_frame_free(fp); ft_sess_put(cmd->sess); /* undo get from lookup at recv */ kfree(cmd); } void ft_release_cmd(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); ft_free_cmd(cmd); } int ft_check_stop_free(struct se_cmd *se_cmd) { transport_generic_free_cmd(se_cmd, 0); return 1; } /* * Send response. */ int ft_queue_status(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); struct fc_frame *fp; struct fcp_resp_with_ext *fcp; struct fc_lport *lport; struct fc_exch *ep; size_t len; if (cmd->aborted) return 0; ft_dump_cmd(cmd, __func__); ep = fc_seq_exch(cmd->seq); lport = ep->lp; len = sizeof(*fcp) + se_cmd->scsi_sense_length; fp = fc_frame_alloc(lport, len); if (!fp) { /* XXX shouldn't just drop it - requeue and retry? 
*/ return 0; } fcp = fc_frame_payload_get(fp, len); memset(fcp, 0, len); fcp->resp.fr_status = se_cmd->scsi_status; len = se_cmd->scsi_sense_length; if (len) { fcp->resp.fr_flags |= FCP_SNS_LEN_VAL; fcp->ext.fr_sns_len = htonl(len); memcpy((fcp + 1), se_cmd->sense_buffer, len); } /* * Test underflow and overflow with one mask. Usually both are off. * Bidirectional commands are not handled yet. */ if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) { if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) fcp->resp.fr_flags |= FCP_RESID_OVER; else fcp->resp.fr_flags |= FCP_RESID_UNDER; fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count); } /* * Send response. */ cmd->seq = lport->tt.seq_start_next(cmd->seq); fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP, FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0); lport->tt.seq_send(lport, cmd->seq, fp); lport->tt.exch_done(cmd->seq); return 0; } int ft_write_pending_status(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); return cmd->write_data_len != se_cmd->data_length; } /* * Send TX_RDY (transfer ready). */ int ft_write_pending(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); struct fc_frame *fp; struct fcp_txrdy *txrdy; struct fc_lport *lport; struct fc_exch *ep; struct fc_frame_header *fh; u32 f_ctl; ft_dump_cmd(cmd, __func__); if (cmd->aborted) return 0; ep = fc_seq_exch(cmd->seq); lport = ep->lp; fp = fc_frame_alloc(lport, sizeof(*txrdy)); if (!fp) return -ENOMEM; /* Signal QUEUE_FULL */ txrdy = fc_frame_payload_get(fp, sizeof(*txrdy)); memset(txrdy, 0, sizeof(*txrdy)); txrdy->ft_burst_len = htonl(se_cmd->data_length); cmd->seq = lport->tt.seq_start_next(cmd->seq); fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP, FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); fh = fc_frame_header_get(fp); f_ctl = ntoh24(fh->fh_f_ctl); /* Only if it is 'Exchange Responder' */ if (f_ctl & FC_FC_EX_CTX) { /* Target is 'exchange responder' and sending XFER_READY * to 'exchange initiator (initiator)' */ if ((ep->xid <= lport->lro_xid) && (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { /* * cmd may have been broken up into multiple * tasks. Link their sgs together so we can * operate on them all at once. */ transport_do_task_sg_chain(se_cmd); cmd->sg = se_cmd->t_tasks_sg_chained; cmd->sg_cnt = se_cmd->t_tasks_sg_chained_no; } if (cmd->sg && lport->tt.ddp_target(lport, ep->xid, cmd->sg, cmd->sg_cnt)) cmd->was_ddp_setup = 1; } } lport->tt.seq_send(lport, cmd->seq, fp); return 0; } u32 ft_get_task_tag(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); return fc_seq_exch(cmd->seq)->rxid; } int ft_get_cmd_state(struct se_cmd *se_cmd) { return 0; } /* * FC sequence response handler for follow-on sequences (data) and aborts. 
*/ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) { struct ft_cmd *cmd = arg; struct fc_frame_header *fh; if (unlikely(IS_ERR(fp))) { /* XXX need to find cmd if queued */ cmd->seq = NULL; cmd->aborted = true; return; } fh = fc_frame_header_get(fp); switch (fh->fh_r_ctl) { case FC_RCTL_DD_SOL_DATA: /* write data */ ft_recv_write_data(cmd, fp); break; case FC_RCTL_DD_UNSOL_CTL: /* command */ case FC_RCTL_DD_SOL_CTL: /* transfer ready */ case FC_RCTL_DD_DATA_DESC: /* transfer ready */ default: pr_debug("%s: unhandled frame r_ctl %x\n", __func__, fh->fh_r_ctl); ft_invl_hw_context(cmd); fc_frame_free(fp); transport_generic_free_cmd(&cmd->se_cmd, 0); break; } } /* * Send a FCP response including SCSI status and optional FCP rsp_code. * status is SAM_STAT_GOOD (zero) iff code is valid. * This is used in error cases, such as allocation failures. */ static void ft_send_resp_status(struct fc_lport *lport, const struct fc_frame *rx_fp, u32 status, enum fcp_resp_rsp_codes code) { struct fc_frame *fp; struct fc_seq *sp; const struct fc_frame_header *fh; size_t len; struct fcp_resp_with_ext *fcp; struct fcp_resp_rsp_info *info; fh = fc_frame_header_get(rx_fp); pr_debug("FCP error response: did %x oxid %x status %x code %x\n", ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code); len = sizeof(*fcp); if (status == SAM_STAT_GOOD) len += sizeof(*info); fp = fc_frame_alloc(lport, len); if (!fp) return; fcp = fc_frame_payload_get(fp, len); memset(fcp, 0, len); fcp->resp.fr_status = status; if (status == SAM_STAT_GOOD) { fcp->ext.fr_rsp_len = htonl(sizeof(*info)); fcp->resp.fr_flags |= FCP_RSP_LEN_VAL; info = (struct fcp_resp_rsp_info *)(fcp + 1); info->rsp_code = code; } fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0); sp = fr_seq(fp); if (sp) { lport->tt.seq_send(lport, sp, fp); lport->tt.exch_done(sp); } else { lport->tt.frame_send(lport, fp); } } /* * Send error or task management response. */ static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code) { ft_send_resp_status(cmd->sess->tport->lport, cmd->req_frame, SAM_STAT_GOOD, code); } /* * Send error or task management response. * Always frees the cmd and associated state. */ static void ft_send_resp_code_and_free(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code) { ft_send_resp_code(cmd, code); ft_free_cmd(cmd); } /* * Handle Task Management Request. */ static void ft_send_tm(struct ft_cmd *cmd) { struct fcp_cmnd *fcp; int rc; u8 tm_func; fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); switch (fcp->fc_tm_flags) { case FCP_TMF_LUN_RESET: tm_func = TMR_LUN_RESET; break; case FCP_TMF_TGT_RESET: tm_func = TMR_TARGET_WARM_RESET; break; case FCP_TMF_CLR_TASK_SET: tm_func = TMR_CLEAR_TASK_SET; break; case FCP_TMF_ABT_TASK_SET: tm_func = TMR_ABORT_TASK_SET; break; case FCP_TMF_CLR_ACA: tm_func = TMR_CLEAR_ACA; break; default: /* * FCP4r01 indicates having a combination of * tm_flags set is invalid. */ pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); return; } /* FIXME: Add referenced task tag for ABORT_TASK */ rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess, &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), cmd, tm_func, GFP_KERNEL, 0, 0); if (rc < 0) ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); } /* * Send status from completed task management request. 
*/ int ft_queue_tm_resp(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); struct se_tmr_req *tmr = se_cmd->se_tmr_req; enum fcp_resp_rsp_codes code; if (cmd->aborted) return 0; switch (tmr->response) { case TMR_FUNCTION_COMPLETE: code = FCP_TMF_CMPL; break; case TMR_LUN_DOES_NOT_EXIST: code = FCP_TMF_INVALID_LUN; break; case TMR_FUNCTION_REJECTED: code = FCP_TMF_REJECTED; break; case TMR_TASK_DOES_NOT_EXIST: case TMR_TASK_STILL_ALLEGIANT: case TMR_TASK_FAILOVER_NOT_SUPPORTED: case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: case TMR_FUNCTION_AUTHORIZATION_FAILED: default: code = FCP_TMF_FAILED; break; } pr_debug("tmr fn %d resp %d fcp code %d\n", tmr->function, tmr->response, code); ft_send_resp_code(cmd, code); return 0; } static void ft_send_work(struct work_struct *work); /* * Handle incoming FCP command. */ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) { struct ft_cmd *cmd; struct fc_lport *lport = sess->tport->lport; cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) goto busy; cmd->sess = sess; cmd->seq = lport->tt.seq_assign(lport, fp); if (!cmd->seq) { kfree(cmd); goto busy; } cmd->req_frame = fp; /* hold frame during cmd */ INIT_WORK(&cmd->work, ft_send_work); queue_work(sess->tport->tpg->workqueue, &cmd->work); return; busy: pr_debug("cmd or seq allocation failure - sending BUSY\n"); ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0); fc_frame_free(fp); ft_sess_put(sess); /* undo get from lookup */ } /* * Handle incoming FCP frame. * Caller has verified that the frame is type FCP. */ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); switch (fh->fh_r_ctl) { case FC_RCTL_DD_UNSOL_CMD: /* command */ ft_recv_cmd(sess, fp); break; case FC_RCTL_DD_SOL_DATA: /* write data */ case FC_RCTL_DD_UNSOL_CTL: case FC_RCTL_DD_SOL_CTL: case FC_RCTL_DD_DATA_DESC: /* transfer ready */ case FC_RCTL_ELS4_REQ: /* SRR, perhaps */ default: pr_debug("%s: unhandled frame r_ctl %x\n", __func__, fh->fh_r_ctl); fc_frame_free(fp); ft_sess_put(sess); /* undo get from lookup */ break; } } /* * Send new command to target. */ static void ft_send_work(struct work_struct *work) { struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); struct fcp_cmnd *fcp; int data_dir = 0; int task_attr; fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); if (!fcp) goto err; if (fcp->fc_flags & FCP_CFL_LEN_MASK) goto err; /* not handling longer CDBs yet */ /* * Check for FCP task management flags */ if (fcp->fc_tm_flags) { ft_send_tm(cmd); return; } switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) { case 0: data_dir = DMA_NONE; break; case FCP_CFL_RDDATA: data_dir = DMA_FROM_DEVICE; break; case FCP_CFL_WRDATA: data_dir = DMA_TO_DEVICE; break; case FCP_CFL_WRDATA | FCP_CFL_RDDATA: goto err; /* TBD not supported by tcm_fc yet */ } /* * Locate the SAM Task Attr from fc_pri_ta */ switch (fcp->fc_pri_ta & FCP_PTA_MASK) { case FCP_PTA_HEADQ: task_attr = MSG_HEAD_TAG; break; case FCP_PTA_ORDERED: task_attr = MSG_ORDERED_TAG; break; case FCP_PTA_ACA: task_attr = MSG_ACA_TAG; break; case FCP_PTA_SIMPLE: /* Fallthrough */ default: task_attr = MSG_SIMPLE_TAG; } fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); /* * Use a single se_cmd->cmd_kref as we expect to release se_cmd * directly from ft_check_stop_free callback in response path. 
*/ target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl), task_attr, data_dir, 0); pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); return; err: ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); }
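ft_send_work above turns the FCP_CMND flag bits into a DMA direction (and a SAM task attribute) before handing the command to the target core. The sketch below reproduces just the direction decode as a standalone function; the constant values and enum are illustrative stand-ins, not the scsi/fc header definitions.

#include <stdio.h>

/* Illustrative flag values; the real ones come from the FCP headers. */
#define FCP_CFL_WRDATA 0x01
#define FCP_CFL_RDDATA 0x02

enum dma_dir { DIR_NONE, DIR_FROM_DEVICE, DIR_TO_DEVICE, DIR_INVALID };

static enum dma_dir fcp_flags_to_dir(unsigned char fc_flags)
{
        switch (fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
        case 0:
                return DIR_NONE;        /* no data transfer */
        case FCP_CFL_RDDATA:
                return DIR_FROM_DEVICE; /* read: target -> initiator */
        case FCP_CFL_WRDATA:
                return DIR_TO_DEVICE;   /* write: initiator -> target */
        default:
                return DIR_INVALID;     /* bidirectional: the driver rejects this */
        }
}

int main(void)
{
        printf("%d %d %d %d\n",
               fcp_flags_to_dir(0),
               fcp_flags_to_dir(FCP_CFL_RDDATA),
               fcp_flags_to_dir(FCP_CFL_WRDATA),
               fcp_flags_to_dir(FCP_CFL_RDDATA | FCP_CFL_WRDATA));
        return 0;
}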
gpl-2.0
netico-solutions/linux-am335x
drivers/scsi/device_handler/scsi_dh_hp_sw.c
7693
10601
/* * Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be * upgraded. * * Copyright (C) 2006 Red Hat, Inc. All rights reserved. * Copyright (C) 2006 Mike Christie * Copyright (C) 2008 Hannes Reinecke <hare@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/module.h> #include <scsi/scsi.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dh.h> #define HP_SW_NAME "hp_sw" #define HP_SW_TIMEOUT (60 * HZ) #define HP_SW_RETRIES 3 #define HP_SW_PATH_UNINITIALIZED -1 #define HP_SW_PATH_ACTIVE 0 #define HP_SW_PATH_PASSIVE 1 struct hp_sw_dh_data { unsigned char sense[SCSI_SENSE_BUFFERSIZE]; int path_state; int retries; int retry_cnt; struct scsi_device *sdev; activate_complete callback_fn; void *callback_data; }; static int hp_sw_start_stop(struct hp_sw_dh_data *); static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; BUG_ON(scsi_dh_data == NULL); return ((struct hp_sw_dh_data *) scsi_dh_data->buf); } /* * tur_done - Handle TEST UNIT READY return status * @sdev: sdev the command has been sent to * @errors: blk error code * * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path */ static int tur_done(struct scsi_device *sdev, unsigned char *sense) { struct scsi_sense_hdr sshdr; int ret; ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr); if (!ret) { sdev_printk(KERN_WARNING, sdev, "%s: sending tur failed, no sense available\n", HP_SW_NAME); ret = SCSI_DH_IO; goto done; } switch (sshdr.sense_key) { case UNIT_ATTENTION: ret = SCSI_DH_IMM_RETRY; break; case NOT_READY: if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) { /* * LUN not ready - Initialization command required * * This is the passive path */ ret = SCSI_DH_DEV_OFFLINED; break; } /* Fallthrough */ default: sdev_printk(KERN_WARNING, sdev, "%s: sending tur failed, sense %x/%x/%x\n", HP_SW_NAME, sshdr.sense_key, sshdr.asc, sshdr.ascq); break; } done: return ret; } /* * hp_sw_tur - Send TEST UNIT READY * @sdev: sdev command should be sent to * * Use the TEST UNIT READY command to determine * the path state. 
*/ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h) { struct request *req; int ret; retry: req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO); if (!req) return SCSI_DH_RES_TEMP_UNAVAIL; req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY); req->cmd[0] = TEST_UNIT_READY; req->timeout = HP_SW_TIMEOUT; req->sense = h->sense; memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); req->sense_len = 0; ret = blk_execute_rq(req->q, NULL, req, 1); if (ret == -EIO) { if (req->sense_len > 0) { ret = tur_done(sdev, h->sense); } else { sdev_printk(KERN_WARNING, sdev, "%s: sending tur failed with %x\n", HP_SW_NAME, req->errors); ret = SCSI_DH_IO; } } else { h->path_state = HP_SW_PATH_ACTIVE; ret = SCSI_DH_OK; } if (ret == SCSI_DH_IMM_RETRY) { blk_put_request(req); goto retry; } if (ret == SCSI_DH_DEV_OFFLINED) { h->path_state = HP_SW_PATH_PASSIVE; ret = SCSI_DH_OK; } blk_put_request(req); return ret; } /* * start_done - Handle START STOP UNIT return status * @sdev: sdev the command has been sent to * @errors: blk error code */ static int start_done(struct scsi_device *sdev, unsigned char *sense) { struct scsi_sense_hdr sshdr; int rc; rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr); if (!rc) { sdev_printk(KERN_WARNING, sdev, "%s: sending start_stop_unit failed, " "no sense available\n", HP_SW_NAME); return SCSI_DH_IO; } switch (sshdr.sense_key) { case NOT_READY: if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) { /* * LUN not ready - manual intervention required * * Switch-over in progress, retry. */ rc = SCSI_DH_RETRY; break; } /* fall through */ default: sdev_printk(KERN_WARNING, sdev, "%s: sending start_stop_unit failed, sense %x/%x/%x\n", HP_SW_NAME, sshdr.sense_key, sshdr.asc, sshdr.ascq); rc = SCSI_DH_IO; } return rc; } static void start_stop_endio(struct request *req, int error) { struct hp_sw_dh_data *h = req->end_io_data; unsigned err = SCSI_DH_OK; if (error || host_byte(req->errors) != DID_OK || msg_byte(req->errors) != COMMAND_COMPLETE) { sdev_printk(KERN_WARNING, h->sdev, "%s: sending start_stop_unit failed with %x\n", HP_SW_NAME, req->errors); err = SCSI_DH_IO; goto done; } if (req->sense_len > 0) { err = start_done(h->sdev, h->sense); if (err == SCSI_DH_RETRY) { err = SCSI_DH_IO; if (--h->retry_cnt) { blk_put_request(req); err = hp_sw_start_stop(h); if (err == SCSI_DH_OK) return; } } } done: req->end_io_data = NULL; __blk_put_request(req->q, req); if (h->callback_fn) { h->callback_fn(h->callback_data, err); h->callback_fn = h->callback_data = NULL; } return; } /* * hp_sw_start_stop - Send START STOP UNIT command * @sdev: sdev command should be sent to * * Sending START STOP UNIT activates the SP. 
*/ static int hp_sw_start_stop(struct hp_sw_dh_data *h) { struct request *req; req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC); if (!req) return SCSI_DH_RES_TEMP_UNAVAIL; req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; req->cmd_len = COMMAND_SIZE(START_STOP); req->cmd[0] = START_STOP; req->cmd[4] = 1; /* Start spin cycle */ req->timeout = HP_SW_TIMEOUT; req->sense = h->sense; memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); req->sense_len = 0; req->end_io_data = h; blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio); return SCSI_DH_OK; } static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) { struct hp_sw_dh_data *h = get_hp_sw_data(sdev); int ret = BLKPREP_OK; if (h->path_state != HP_SW_PATH_ACTIVE) { ret = BLKPREP_KILL; req->cmd_flags |= REQ_QUIET; } return ret; } /* * hp_sw_activate - Activate a path * @sdev: sdev on the path to be activated * * The HP Active/Passive firmware is pretty simple; * the passive path reports NOT READY with sense codes * 0x04/0x02; a START STOP UNIT command will then * activate the passive path (and deactivate the * previously active one). */ static int hp_sw_activate(struct scsi_device *sdev, activate_complete fn, void *data) { int ret = SCSI_DH_OK; struct hp_sw_dh_data *h = get_hp_sw_data(sdev); ret = hp_sw_tur(sdev, h); if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) { h->retry_cnt = h->retries; h->callback_fn = fn; h->callback_data = data; ret = hp_sw_start_stop(h); if (ret == SCSI_DH_OK) return 0; h->callback_fn = h->callback_data = NULL; } if (fn) fn(data, ret); return 0; } static const struct scsi_dh_devlist hp_sw_dh_data_list[] = { {"COMPAQ", "MSA1000 VOLUME"}, {"COMPAQ", "HSV110"}, {"HP", "HSV100"}, {"DEC", "HSG80"}, {NULL, NULL}, }; static bool hp_sw_match(struct scsi_device *sdev) { int i; if (scsi_device_tpgs(sdev)) return false; for (i = 0; hp_sw_dh_data_list[i].vendor; i++) { if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor, strlen(hp_sw_dh_data_list[i].vendor)) && !strncmp(sdev->model, hp_sw_dh_data_list[i].model, strlen(hp_sw_dh_data_list[i].model))) { return true; } } return false; } static int hp_sw_bus_attach(struct scsi_device *sdev); static void hp_sw_bus_detach(struct scsi_device *sdev); static struct scsi_device_handler hp_sw_dh = { .name = HP_SW_NAME, .module = THIS_MODULE, .devlist = hp_sw_dh_data_list, .attach = hp_sw_bus_attach, .detach = hp_sw_bus_detach, .activate = hp_sw_activate, .prep_fn = hp_sw_prep_fn, .match = hp_sw_match, }; static int hp_sw_bus_attach(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data; struct hp_sw_dh_data *h; unsigned long flags; int ret; scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n", HP_SW_NAME); return 0; } scsi_dh_data->scsi_dh = &hp_sw_dh; h = (struct hp_sw_dh_data *) scsi_dh_data->buf; h->path_state = HP_SW_PATH_UNINITIALIZED; h->retries = HP_SW_RETRIES; h->sdev = sdev; ret = hp_sw_tur(sdev, h); if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED) goto failed; if (!try_module_get(THIS_MODULE)) goto failed; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); sdev->scsi_dh_data = scsi_dh_data; spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n", HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE? 
"active":"passive"); return 0; failed: kfree(scsi_dh_data); sdev_printk(KERN_ERR, sdev, "%s: not attached\n", HP_SW_NAME); return -EINVAL; } static void hp_sw_bus_detach( struct scsi_device *sdev ) { struct scsi_dh_data *scsi_dh_data; unsigned long flags; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); scsi_dh_data = sdev->scsi_dh_data; sdev->scsi_dh_data = NULL; spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); module_put(THIS_MODULE); sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", HP_SW_NAME); kfree(scsi_dh_data); } static int __init hp_sw_init(void) { return scsi_register_device_handler(&hp_sw_dh); } static void __exit hp_sw_exit(void) { scsi_unregister_device_handler(&hp_sw_dh); } module_init(hp_sw_init); module_exit(hp_sw_exit); MODULE_DESCRIPTION("HP Active/Passive driver"); MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu"); MODULE_LICENSE("GPL");
gpl-2.0
Split-Screen/android_kernel_samsung_msm8930-common
net/sctp/primitive.c
9229
7790
/* SCTP kernel implementation * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * * This file is part of the SCTP kernel implementation * * These functions implement the SCTP primitive functions from Section 10. * * Note that the descriptions from the specification are USER level * functions--this file is the functions which populate the struct proto * for SCTP which is the BOTTOM of the sockets interface. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Narasimha Budihal <narasimha@refcode.org> * Karl Knutson <karl@athena.chicago.il.us> * Ardelle Fan <ardelle.fan@intel.com> * Kevin Gao <kevin.gao@intel.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. */ #include <linux/types.h> #include <linux/list.h> /* For struct list_head */ #include <linux/socket.h> #include <linux/ip.h> #include <linux/time.h> /* For struct timeval */ #include <linux/gfp.h> #include <net/sock.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> #define DECLARE_PRIMITIVE(name) \ /* This is called in the code as sctp_primitive_ ## name. */ \ int sctp_primitive_ ## name(struct sctp_association *asoc, \ void *arg) { \ int error = 0; \ sctp_event_t event_type; sctp_subtype_t subtype; \ sctp_state_t state; \ struct sctp_endpoint *ep; \ \ event_type = SCTP_EVENT_T_PRIMITIVE; \ subtype = SCTP_ST_PRIMITIVE(SCTP_PRIMITIVE_ ## name); \ state = asoc ? asoc->state : SCTP_STATE_CLOSED; \ ep = asoc ? asoc->ep : NULL; \ \ error = sctp_do_sm(event_type, subtype, state, ep, asoc, \ arg, GFP_KERNEL); \ return error; \ } /* 10.1 ULP-to-SCTP * B) Associate * * Format: ASSOCIATE(local SCTP instance name, destination transport addr, * outbound stream count) * -> association id [,destination transport addr list] [,outbound stream * count] * * This primitive allows the upper layer to initiate an association to a * specific peer endpoint. * * This version assumes that asoc is fully populated with the initial * parameters. We then return a traditional kernel indicator of * success or failure. */ /* This is called in the code as sctp_primitive_ASSOCIATE. */ DECLARE_PRIMITIVE(ASSOCIATE) /* 10.1 ULP-to-SCTP * C) Shutdown * * Format: SHUTDOWN(association id) * -> result * * Gracefully closes an association. Any locally queued user data * will be delivered to the peer. The association will be terminated only * after the peer acknowledges all the SCTP packets sent. A success code * will be returned on successful termination of the association. 
If * attempting to terminate the association results in a failure, an error * code shall be returned. */ DECLARE_PRIMITIVE(SHUTDOWN); /* 10.1 ULP-to-SCTP * C) Abort * * Format: Abort(association id [, cause code]) * -> result * * Ungracefully closes an association. Any locally queued user data * will be discarded and an ABORT chunk is sent to the peer. A success * code will be returned on successful abortion of the association. If * attempting to abort the association results in a failure, an error * code shall be returned. */ DECLARE_PRIMITIVE(ABORT); /* 10.1 ULP-to-SCTP * E) Send * * Format: SEND(association id, buffer address, byte count [,context] * [,stream id] [,life time] [,destination transport address] * [,unorder flag] [,no-bundle flag] [,payload protocol-id] ) * -> result * * This is the main method to send user data via SCTP. * * Mandatory attributes: * * o association id - local handle to the SCTP association * * o buffer address - the location where the user message to be * transmitted is stored; * * o byte count - The size of the user data in number of bytes; * * Optional attributes: * * o context - an optional 32 bit integer that will be carried in the * sending failure notification to the ULP if the transportation of * this User Message fails. * * o stream id - to indicate which stream to send the data on. If not * specified, stream 0 will be used. * * o life time - specifies the life time of the user data. The user data * will not be sent by SCTP after the life time expires. This * parameter can be used to avoid efforts to transmit stale * user messages. SCTP notifies the ULP if the data cannot be * initiated to transport (i.e. sent to the destination via SCTP's * send primitive) within the life time variable. However, the * user data will be transmitted if SCTP has attempted to transmit a * chunk before the life time expired. * * o destination transport address - specified as one of the destination * transport addresses of the peer endpoint to which this packet * should be sent. Whenever possible, SCTP should use this destination * transport address for sending the packets, instead of the current * primary path. * * o unorder flag - this flag, if present, indicates that the user * would like the data delivered in an unordered fashion to the peer * (i.e., the U flag is set to 1 on all DATA chunks carrying this * message). * * o no-bundle flag - instructs SCTP not to bundle this user data with * other outbound DATA chunks. SCTP MAY still bundle even when * this flag is present, when faced with network congestion. * * o payload protocol-id - A 32 bit unsigned integer that is to be * passed to the peer indicating the type of payload protocol data * being transmitted. This value is passed as opaque data by SCTP. */ DECLARE_PRIMITIVE(SEND); /* 10.1 ULP-to-SCTP * J) Request Heartbeat * * Format: REQUESTHEARTBEAT(association id, destination transport address) * * -> result * * Instructs the local endpoint to perform a HeartBeat on the specified * destination transport address of the given association. The returned * result should indicate whether the transmission of the HEARTBEAT * chunk to the destination address is successful. * * Mandatory attributes: * * o association id - local handle to the SCTP association * * o destination transport address - the transport address of the * association on which a heartbeat should be issued. 
*/ DECLARE_PRIMITIVE(REQUESTHEARTBEAT); /* ADDIP * 3.1.1 Address Configuration Change Chunk (ASCONF) * * This chunk is used to communicate to the remote endpoint one of the * configuration change requests that MUST be acknowledged. The * information carried in the ASCONF Chunk uses the form of a * Type-Length-Value (TLV), as described in "3.2.1 Optional/ * Variable-length Parameter Format" in RFC2960 [5], for all variable * parameters. */ DECLARE_PRIMITIVE(ASCONF);
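DECLARE_PRIMITIVE relies on token pasting so that each expansion generates a real function named sctp_primitive_<NAME> whose body only packages its arguments for the state machine. The standalone sketch below shows the same ## and # preprocessor technique with a simplified dispatcher; the names and the do_sm stub are illustrative, not the SCTP code.

#include <stdio.h>

/* Simplified stand-in for sctp_do_sm(): just report which primitive fired. */
static int do_sm(const char *primitive, void *arg)
{
        printf("state machine: primitive %s, arg %p\n", primitive, arg);
        return 0;
}

/* Same pattern as DECLARE_PRIMITIVE: one expansion per generated function. */
#define DECLARE_PRIMITIVE(name)                                 \
int primitive_ ## name(void *asoc, void *arg)                   \
{                                                               \
        (void)asoc;                                             \
        return do_sm(#name, arg);                               \
}

DECLARE_PRIMITIVE(ASSOCIATE)
DECLARE_PRIMITIVE(SHUTDOWN)
DECLARE_PRIMITIVE(ABORT)

int main(void)
{
        int x = 42;

        primitive_ASSOCIATE(NULL, &x); /* calls the generated function */
        primitive_SHUTDOWN(NULL, &x);
        primitive_ABORT(NULL, &x);
        return 0;
}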
gpl-2.0
mzueger/linux-phycore-mpc5200b
arch/x86/kernel/microcode_amd.c
14
8624
/* * AMD CPU Microcode Update Driver for Linux * Copyright (C) 2008 Advanced Micro Devices Inc. * * Author: Peter Oruba <peter.oruba@amd.com> * * Based on work by: * Tigran Aivazian <tigran@aivazian.fsnet.co.uk> * * This driver allows to upgrade microcode on AMD * family 0x10 and 0x11 processors. * * Licensed under the terms of the GNU General Public * License version 2. See file COPYING for details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/firmware.h> #include <linux/pci_ids.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <asm/microcode.h> #include <asm/processor.h> #include <asm/msr.h> MODULE_DESCRIPTION("AMD Microcode Update Driver"); MODULE_AUTHOR("Peter Oruba"); MODULE_LICENSE("GPL v2"); #define UCODE_MAGIC 0x00414d44 #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 #define UCODE_UCODE_TYPE 0x00000001 struct equiv_cpu_entry { u32 installed_cpu; u32 fixed_errata_mask; u32 fixed_errata_compare; u16 equiv_cpu; u16 res; } __attribute__((packed)); struct microcode_header_amd { u32 data_code; u32 patch_id; u16 mc_patch_data_id; u8 mc_patch_data_len; u8 init_flag; u32 mc_patch_data_checksum; u32 nb_dev_id; u32 sb_dev_id; u16 processor_rev_id; u8 nb_rev_id; u8 sb_rev_id; u8 bios_api_rev; u8 reserved1[3]; u32 match_reg[8]; } __attribute__((packed)); struct microcode_amd { struct microcode_header_amd hdr; unsigned int mpb[0]; }; #define SECTION_HDR_SIZE 8 #define CONTAINER_HDR_SZ 12 static struct equiv_cpu_entry *equiv_cpu_table; static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) { struct cpuinfo_x86 *c = &cpu_data(cpu); if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { pr_warning("CPU%d: family %d not supported\n", cpu, c->x86); return -1; } csig->rev = c->microcode; pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); return 0; } static int get_matching_microcode(int cpu, struct microcode_header_amd *mc_hdr, int rev) { unsigned int current_cpu_id; u16 equiv_cpu_id = 0; unsigned int i = 0; BUG_ON(equiv_cpu_table == NULL); current_cpu_id = cpuid_eax(0x00000001); while (equiv_cpu_table[i].installed_cpu != 0) { if (current_cpu_id == equiv_cpu_table[i].installed_cpu) { equiv_cpu_id = equiv_cpu_table[i].equiv_cpu; break; } i++; } if (!equiv_cpu_id) return 0; if (mc_hdr->processor_rev_id != equiv_cpu_id) return 0; /* ucode might be chipset specific -- currently we don't support this */ if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { pr_err("CPU%d: chipset specific code not yet supported\n", cpu); return 0; } if (mc_hdr->patch_id <= rev) return 0; return 1; } static int apply_microcode_amd(int cpu) { u32 rev, dummy; int cpu_num = raw_smp_processor_id(); struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; struct microcode_amd *mc_amd = uci->mc; struct cpuinfo_x86 *c = &cpu_data(cpu); /* We should bind the task to the CPU */ BUG_ON(cpu_num != cpu); if (mc_amd == NULL) return 0; wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); /* get patch id after patching */ rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); /* check current patch id and patch's id for match */ if (rev != mc_amd->hdr.patch_id) { pr_err("CPU%d: update failed for patch_level=0x%08x\n", cpu, mc_amd->hdr.patch_id); return -1; } pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); uci->cpu_sig.rev = rev; c->microcode = rev; return 0; } static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size) { struct cpuinfo_x86 *c = &cpu_data(cpu); u32 max_size, actual_size; #define 
F1XH_MPB_MAX_SIZE 2048 #define F14H_MPB_MAX_SIZE 1824 #define F15H_MPB_MAX_SIZE 4096 #define F16H_MPB_MAX_SIZE 3458 switch (c->x86) { case 0x14: max_size = F14H_MPB_MAX_SIZE; break; case 0x15: max_size = F15H_MPB_MAX_SIZE; break; case 0x16: max_size = F16H_MPB_MAX_SIZE; break; default: max_size = F1XH_MPB_MAX_SIZE; break; } actual_size = *(u32 *)(buf + 4); if (actual_size + SECTION_HDR_SIZE > size || actual_size > max_size) { pr_err("section size mismatch\n"); return 0; } return actual_size; } static struct microcode_header_amd * get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size) { struct microcode_header_amd *mc = NULL; unsigned int actual_size = 0; if (*(u32 *)buf != UCODE_UCODE_TYPE) { pr_err("invalid type field in container file section header\n"); goto out; } actual_size = verify_ucode_size(cpu, buf, size); if (!actual_size) goto out; mc = vzalloc(actual_size); if (!mc) goto out; get_ucode_data(mc, buf + SECTION_HDR_SIZE, actual_size); *mc_size = actual_size + SECTION_HDR_SIZE; out: return mc; } static int install_equiv_cpu_table(const u8 *buf) { unsigned int *ibuf = (unsigned int *)buf; unsigned int type = ibuf[1]; unsigned int size = ibuf[2]; if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { pr_err("empty section/" "invalid type field in container file section header\n"); return -EINVAL; } equiv_cpu_table = vmalloc(size); if (!equiv_cpu_table) { pr_err("failed to allocate equivalent CPU table\n"); return -ENOMEM; } get_ucode_data(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size); /* add header length */ return size + CONTAINER_HDR_SZ; } static void free_equiv_cpu_table(void) { vfree(equiv_cpu_table); equiv_cpu_table = NULL; } static enum ucode_state generic_load_microcode(int cpu, const u8 *data, size_t size) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct microcode_header_amd *mc_hdr = NULL; unsigned int mc_size, leftover; int offset; const u8 *ucode_ptr = data; void *new_mc = NULL; unsigned int new_rev = uci->cpu_sig.rev; enum ucode_state state = UCODE_OK; offset = install_equiv_cpu_table(ucode_ptr); if (offset < 0) { pr_err("failed to create equivalent cpu table\n"); return UCODE_ERROR; } ucode_ptr += offset; leftover = size - offset; while (leftover) { mc_hdr = get_next_ucode(cpu, ucode_ptr, leftover, &mc_size); if (!mc_hdr) break; if (get_matching_microcode(cpu, mc_hdr, new_rev)) { vfree(new_mc); new_rev = mc_hdr->patch_id; new_mc = mc_hdr; } else vfree(mc_hdr); ucode_ptr += mc_size; leftover -= mc_size; } if (!new_mc) { state = UCODE_NFOUND; goto free_table; } if (!leftover) { vfree(uci->mc); uci->mc = new_mc; pr_debug("CPU%d update ucode (0x%08x -> 0x%08x)\n", cpu, uci->cpu_sig.rev, new_rev); } else { vfree(new_mc); state = UCODE_ERROR; } free_table: free_equiv_cpu_table(); return state; } /* * AMD microcode firmware naming convention, up to family 15h they are in * the legacy file: * * amd-ucode/microcode_amd.bin * * This legacy file is always smaller than 2K in size. * * Starting at family 15h they are in family specific firmware files: * * amd-ucode/microcode_amd_fam15h.bin * amd-ucode/microcode_amd_fam16h.bin * ... * * These might be larger than 2K. 
*/ static enum ucode_state request_microcode_amd(int cpu, struct device *device) { char fw_name[36] = "amd-ucode/microcode_amd.bin"; const struct firmware *fw; enum ucode_state ret = UCODE_NFOUND; struct cpuinfo_x86 *c = &cpu_data(cpu); if (c->x86 >= 0x15) snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); if (request_firmware(&fw, (const char *)fw_name, device)) { pr_debug("failed to load file %s\n", fw_name); goto out; } ret = UCODE_ERROR; if (*(u32 *)fw->data != UCODE_MAGIC) { pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data); goto fw_release; } ret = generic_load_microcode(cpu, fw->data, fw->size); fw_release: release_firmware(fw); out: return ret; } static enum ucode_state request_microcode_user(int cpu, const void __user *buf, size_t size) { pr_info("AMD microcode update via /dev/cpu/microcode not supported\n"); return UCODE_ERROR; } static void microcode_fini_cpu_amd(int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; vfree(uci->mc); uci->mc = NULL; } static struct microcode_ops microcode_amd_ops = { .request_microcode_user = request_microcode_user, .request_microcode_fw = request_microcode_amd, .collect_cpu_info = collect_cpu_info_amd, .apply_microcode = apply_microcode_amd, .microcode_fini_cpu = microcode_fini_cpu_amd, }; struct microcode_ops * __init init_amd_microcode(void) { return &microcode_amd_ops; }
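request_microcode_amd picks between the legacy shared firmware file and a family-specific one by formatting the CPU family into the name: families below 0x15 keep the legacy path, newer ones get microcode_amd_fam<NN>h.bin. The snippet below isolates that selection logic as an illustrative userspace extract; the helper name is made up here, only the format strings and buffer size come from the function above.

#include <stdio.h>

/* Build the firmware path the way request_microcode_amd() does. */
static void ucode_fw_name(unsigned int family, char *buf, size_t len)
{
        snprintf(buf, len, "amd-ucode/microcode_amd.bin");
        if (family >= 0x15)
                snprintf(buf, len, "amd-ucode/microcode_amd_fam%.2xh.bin", family);
}

int main(void)
{
        char fw_name[36];
        unsigned int families[] = { 0x10, 0x14, 0x15, 0x16 };
        size_t i;

        for (i = 0; i < sizeof(families) / sizeof(families[0]); i++) {
                ucode_fw_name(families[i], fw_name, sizeof(fw_name));
                printf("family 0x%x -> %s\n", families[i], fw_name);
        }
        return 0;
}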
gpl-2.0
CyanogenMod/lge-kernel-omap4
arch/arm/mach-omap2/emif.c
14
46938
/* * OMAP4 EMIF platform driver * * Copyright (C) 2010 Texas Instruments, Inc. * * Santosh Shilimkar <santosh.shilimkar@ti.com> * Aneesh V <aneesh@ti.com> * Vibhore Vardhan <vvardhan@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/reboot.h> #include <linux/slab.h> #include <plat/omap_hwmod.h> #include <plat/omap_device.h> #include <mach/emif-44xx.h> #include <mach/emif.h> #include <mach/lpddr2-jedec.h> #include <mach/omap4-common.h> #include "voltage.h" /* Utility macro for masking and setting a field in a register/variable */ #define mask_n_set(reg, shift, msk, val) \ (reg) = (((reg) & ~(msk))|(((val) << (shift)) & msk)) struct emif_instance { void __iomem *base; u16 irq; struct platform_device *pdev; bool ddr_refresh_disabled; }; static struct emif_instance emif[EMIF_NUM_INSTANCES]; static struct emif_regs *emif_curr_regs[EMIF_NUM_INSTANCES]; static struct emif_regs *emif1_regs_cache[EMIF_MAX_NUM_FREQUENCIES]; static struct emif_regs *emif2_regs_cache[EMIF_MAX_NUM_FREQUENCIES]; static struct emif_device_details *emif_devices[2]; static u32 emif_temperature_level[EMIF_NUM_INSTANCES] = { SDRAM_TEMP_NOMINAL, SDRAM_TEMP_NOMINAL }; static u32 emif_notify_pending; static u32 emif_thermal_handling_pending; static u32 T_den, T_num; static struct omap_device_pm_latency omap_emif_latency[] = { [0] = { .deactivate_func = omap_device_idle_hwmods, .activate_func = omap_device_enable_hwmods, .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST, }, }; /* * EMIF Power Management timer for Self Refresh will put the external SDRAM * in Self Refresh mode after the EMIF is idle for number of DDR clock cycles * set with REG_SR_TIM. The minimal value starts at 16 cycles mapped to 1 in * REG_SR_TIM. * However due to Errata i735, the minimal value of REG_SR_TIM is 6. That * corresponds to 512 DDR cycles required for OPP100 */ #define EMIF_ERRATUM_SR_TIMER_i735 BIT(0) #define EMIF_ERRATUM_SR_TIMER_MIN 6 static u32 emif_errata; #define is_emif_erratum(erratum) (emif_errata & EMIF_ERRATUM_##erratum) static void do_cancel_out(u32 *num, u32 *den, u32 factor) { while (1) { if (((*num) / factor * factor == (*num)) && ((*den) / factor * factor == (*den))) { (*num) /= factor; (*den) /= factor; } else break; } } static void cancel_out(u32 *num, u32 *den) { do_cancel_out(num, den, 2); do_cancel_out(num, den, 3); do_cancel_out(num, den, 5); do_cancel_out(num, den, 7); do_cancel_out(num, den, 11); do_cancel_out(num, den, 13); do_cancel_out(num, den, 17); } /* * Get the period in ns (in fraction form) for a given frequency: * Getting it in fraction form is for better accuracy in integer arithmetics * freq_hz - input: frequency in Hertz * den_limit - input: upper limit for denominator. 
see the description of * EMIF_PERIOD_DEN_LIMIT for more details * period_den - output: pointer to denominator of period in ns * period_num - output: pointer to numerator of period in ns */ static void get_period(u32 freq_hz, u32 den_limit, u32 *period_num, u32 *period_den) { *period_num = 1000000000; /* 10^9 to convert the period to 'ns' */ *period_den = freq_hz; cancel_out(period_num, period_den); /* make sure den <= den_limit at the cost of some accuracy */ while ((*period_den) > den_limit) { *period_num /= 2; *period_den /= 2; } } /* * Calculate the period of DDR clock from frequency value and set the * denominator and numerator in global variables for easy access later */ static void set_ddr_clk_period(u32 freq) { get_period(freq, EMIF_PERIOD_DEN_LIMIT, &T_num, &T_den); } /* * Convert time in nanoseconds to number of cycles of DDR clock */ static u32 ns_2_cycles(u32 ns) { return ((ns * T_den) + T_num - 1) / T_num; } /* * ns_2_cycles with the difference that the time passed is 2 times the actual * value (to avoid fractions). The cycles returned are for the original value of * the timing parameter */ static u32 ns_x2_2_cycles(u32 ns) { return ((ns * T_den) + T_num * 2 - 1) / (T_num * 2); } /* * Find addressing table index based on the device's type (S2 or S4) and * density */ static s8 addressing_table_index(u8 type, u8 density, u8 width) { u8 index; if (unlikely((density > LPDDR2_DENSITY_8Gb) || (width == LPDDR2_IO_WIDTH_8))) return -1; /* * Look at the way ADDR_TABLE_INDEX* values have been defined * in emif.h compared to LPDDR2_DENSITY_* values * The table is laid out in the increasing order of density * (ignoring type). The exceptions 1GS2 and 2GS2 have been placed * at the end */ if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_1Gb)) index = ADDR_TABLE_INDEX1GS2; else if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_2Gb)) index = ADDR_TABLE_INDEX2GS2; else index = density; pr_debug("emif: addressing table index %d", index); return index; } /* * Find the right timing table from the array of timing * tables of the device using DDR clock frequency */ static const struct lpddr2_timings *get_timings_table( const struct lpddr2_timings * const *device_timings, u32 freq) { u32 i, temp, freq_nearest; const struct lpddr2_timings *timings = NULL; emif_assert(freq <= MAX_LPDDR2_FREQ); emif_assert(device_timings); /* * Start with the maximum allowed frequency - that is always safe */ freq_nearest = MAX_LPDDR2_FREQ; /* * Find the timings table that has the max frequency value: * i. Above or equal to the DDR frequency - safe * ii. The lowest that satisfies condition (i) - optimal */ for (i = 0; i < MAX_NUM_SPEEDBINS; i++) { if (device_timings[i]) { temp = device_timings[i]->max_freq; if ((temp >= freq) && (temp <= freq_nearest)) { freq_nearest = temp; timings = device_timings[i]; } } } pr_debug("emif: timings table: %d", freq_nearest); return timings; } /* * Finds the value of emif_sdram_config_reg * All parameters are programmed based on the device on CS0. * If there is a device on CS1, it will be the same as that on CS0 or * it will be NVM. We don't support NVM yet.
* If cs1_device pointer is NULL it is assumed that there is no device * on CS1 */ static u32 get_sdram_config_reg(const struct lpddr2_device_info *cs0_device, const struct lpddr2_device_info *cs1_device, const struct lpddr2_addressing *addressing, u8 RL) { u32 config_reg = 0; mask_n_set(config_reg, OMAP44XX_REG_SDRAM_TYPE_SHIFT, OMAP44XX_REG_SDRAM_TYPE_MASK, cs0_device->type + 4); mask_n_set(config_reg, OMAP44XX_REG_IBANK_POS_SHIFT, OMAP44XX_REG_IBANK_POS_MASK, EMIF_INTERLEAVING_POLICY_MAX_INTERLEAVING); mask_n_set(config_reg, OMAP44XX_REG_NARROW_MODE_SHIFT, OMAP44XX_REG_NARROW_MODE_MASK, cs0_device->io_width); mask_n_set(config_reg, OMAP44XX_REG_CL_SHIFT, OMAP44XX_REG_CL_MASK, RL); mask_n_set(config_reg, OMAP44XX_REG_ROWSIZE_SHIFT, OMAP44XX_REG_ROWSIZE_MASK, addressing->row_sz[cs0_device->io_width]); mask_n_set(config_reg, OMAP44XX_REG_IBANK_SHIFT, OMAP44XX_REG_IBANK_MASK, addressing->num_banks); mask_n_set(config_reg, OMAP44XX_REG_EBANK_SHIFT, OMAP44XX_REG_EBANK_MASK, (cs1_device ? EBANK_CS1_EN : EBANK_CS1_DIS)); mask_n_set(config_reg, OMAP44XX_REG_PAGESIZE_SHIFT, OMAP44XX_REG_PAGESIZE_MASK, addressing->col_sz[cs0_device->io_width]); return config_reg; } static u32 get_sdram_ref_ctrl(u32 freq, const struct lpddr2_addressing *addressing) { u32 ref_ctrl = 0, val = 0, freq_khz; freq_khz = freq / 1000; /* * refresh rate to be set is 'tREFI * freq in MHz * division by 10000 to account for khz and x10 in t_REFI_us_x10 */ val = addressing->t_REFI_us_x10 * freq_khz / 10000; mask_n_set(ref_ctrl, OMAP44XX_REG_REFRESH_RATE_SHIFT, OMAP44XX_REG_REFRESH_RATE_MASK, val); /* enable refresh */ mask_n_set(ref_ctrl, OMAP44XX_REG_INITREF_DIS_SHIFT, OMAP44XX_REG_INITREF_DIS_MASK, 1); return ref_ctrl; } static u32 get_sdram_tim_1_reg(const struct lpddr2_timings *timings, const struct lpddr2_min_tck *min_tck, const struct lpddr2_addressing *addressing) { u32 tim1 = 0, val = 0; val = max(min_tck->tWTR, ns_x2_2_cycles(timings->tWTRx2)) - 1; mask_n_set(tim1, OMAP44XX_REG_T_WTR_SHIFT, OMAP44XX_REG_T_WTR_MASK, val); if (addressing->num_banks == BANKS8) val = (timings->tFAW * T_den + 4 * T_num - 1) / (4 * T_num) - 1; else val = max(min_tck->tRRD, ns_2_cycles(timings->tRRD)) - 1; mask_n_set(tim1, OMAP44XX_REG_T_RRD_SHIFT, OMAP44XX_REG_T_RRD_MASK, val); val = ns_2_cycles(timings->tRASmin + timings->tRPab) - 1; mask_n_set(tim1, OMAP44XX_REG_T_RC_SHIFT, OMAP44XX_REG_T_RC_MASK, val); val = max(min_tck->tRAS_MIN, ns_2_cycles(timings->tRASmin)) - 1; mask_n_set(tim1, OMAP44XX_REG_T_RAS_SHIFT, OMAP44XX_REG_T_RAS_MASK, val); val = max(min_tck->tWR, ns_2_cycles(timings->tWR)) - 1; mask_n_set(tim1, OMAP44XX_REG_T_WR_SHIFT, OMAP44XX_REG_T_WR_MASK, val); val = max(min_tck->tRCD, ns_2_cycles(timings->tRCD)) - 1; mask_n_set(tim1, OMAP44XX_REG_T_RCD_SHIFT, OMAP44XX_REG_T_RCD_MASK, val); val = max(min_tck->tRP_AB, ns_2_cycles(timings->tRPab)) - 1; mask_n_set(tim1, OMAP44XX_REG_T_RP_SHIFT, OMAP44XX_REG_T_RP_MASK, val); return tim1; } /* * Finds the de-rated value for EMIF_SDRAM_TIM1 register * All the de-rated timings are limited to this register * Adds 2ns instead of 1.875ns to the affected timings as * we can not use float. */ static u32 get_sdram_tim_1_reg_derated(const struct lpddr2_timings *timings, const struct lpddr2_min_tck *min_tck, const struct lpddr2_addressing *addressing) { u32 tim1 = 0, val = 0; val = max(min_tck->tWTR, ns_x2_2_cycles(timings->tWTRx2)) - 1; mask_n_set(tim1, OMAP44XX_REG_T_WTR_SHIFT, OMAP44XX_REG_T_WTR_MASK, val); if (addressing->num_banks == BANKS8) /* * tFAW is approximately 4 times tRRD. 
So add 1.875*4 = 7.5 ~ 8 * to tFAW for de-rating */ val = ((timings->tFAW + 8) * T_den + 4 * T_num - 1) / (4 * T_num) - 1; else val = max(min_tck->tRRD, ns_2_cycles(timings->tRRD + 2)) - 1; mask_n_set(tim1, OMAP44XX_REG_T_RRD_SHIFT, OMAP44XX_REG_T_RRD_MASK, val); val = ns_2_cycles(timings->tRASmin + timings->tRPab + 2) - 1; mask_n_set(tim1, OMAP44XX_REG_T_RC_SHIFT, OMAP44XX_REG_T_RC_MASK, val); val = max(min_tck->tRAS_MIN, ns_2_cycles(timings->tRASmin + 2)) - 1; mask_n_set(tim1, OMAP44XX_REG_T_RAS_SHIFT, OMAP44XX_REG_T_RAS_MASK, val); val = max(min_tck->tWR, ns_2_cycles(timings->tWR)) - 1; mask_n_set(tim1, OMAP44XX_REG_T_WR_SHIFT, OMAP44XX_REG_T_WR_MASK, val); val = max(min_tck->tRCD, ns_2_cycles(timings->tRCD + 2)) - 1; mask_n_set(tim1, OMAP44XX_REG_T_RCD_SHIFT, OMAP44XX_REG_T_RCD_MASK, val); val = max(min_tck->tRP_AB, ns_2_cycles(timings->tRPab + 2)) - 1; mask_n_set(tim1, OMAP44XX_REG_T_RP_SHIFT, OMAP44XX_REG_T_RP_MASK, val); return tim1; } static u32 get_sdram_tim_2_reg(const struct lpddr2_timings *timings, const struct lpddr2_min_tck *min_tck) { u32 tim2 = 0, val = 0; val = max(min_tck->tCKE, timings->tCKE) - 1; mask_n_set(tim2, OMAP44XX_REG_T_CKE_SHIFT, OMAP44XX_REG_T_CKE_MASK, val); val = max(min_tck->tRTP, ns_x2_2_cycles(timings->tRTPx2)) - 1; mask_n_set(tim2, OMAP44XX_REG_T_RTP_SHIFT, OMAP44XX_REG_T_RTP_MASK, val); /* * tXSRD = tRFCab + 10 ns. XSRD and XSNR should have the * same value */ val = ns_2_cycles(timings->tXSR) - 1; mask_n_set(tim2, OMAP44XX_REG_T_XSRD_SHIFT, OMAP44XX_REG_T_XSRD_MASK, val); mask_n_set(tim2, OMAP44XX_REG_T_XSNR_SHIFT, OMAP44XX_REG_T_XSNR_MASK, val); val = max(min_tck->tXP, ns_x2_2_cycles(timings->tXPx2)) - 1; mask_n_set(tim2, OMAP44XX_REG_T_XP_SHIFT, OMAP44XX_REG_T_XP_MASK, val); return tim2; } static u32 get_sdram_tim_3_reg(const struct lpddr2_timings *timings, const struct lpddr2_min_tck *min_tck, const struct lpddr2_addressing *addressing) { u32 tim3 = 0, val = 0; val = min(timings->tRASmax * 10 / addressing->t_REFI_us_x10 - 1, 0xF); mask_n_set(tim3, OMAP44XX_REG_T_RAS_MAX_SHIFT, OMAP44XX_REG_T_RAS_MAX_MASK, val); val = ns_2_cycles(timings->tRFCab) - 1; mask_n_set(tim3, OMAP44XX_REG_T_RFC_SHIFT, OMAP44XX_REG_T_RFC_MASK, val); val = ns_x2_2_cycles(timings->tDQSCKMAXx2) - 1; mask_n_set(tim3, OMAP44XX_REG_T_TDQSCKMAX_SHIFT, OMAP44XX_REG_T_TDQSCKMAX_MASK, val); val = ns_2_cycles(timings->tZQCS) - 1; mask_n_set(tim3, OMAP44XX_REG_ZQ_ZQCS_SHIFT, OMAP44XX_REG_ZQ_ZQCS_MASK, val); val = max(min_tck->tCKESR, ns_2_cycles(timings->tCKESR)) - 1; mask_n_set(tim3, OMAP44XX_REG_T_CKESR_SHIFT, OMAP44XX_REG_T_CKESR_MASK, val); return tim3; } static u32 get_zq_config_reg(const struct lpddr2_device_info *cs1_device, const struct lpddr2_addressing *addressing, bool volt_ramp) { u32 zq = 0, val = 0; if (volt_ramp) val = EMIF_ZQCS_INTERVAL_DVFS_IN_US * 10 / addressing->t_REFI_us_x10; else val = EMIF_ZQCS_INTERVAL_NORMAL_IN_US * 10 / addressing->t_REFI_us_x10; mask_n_set(zq, OMAP44XX_REG_ZQ_REFINTERVAL_SHIFT, OMAP44XX_REG_ZQ_REFINTERVAL_MASK, val); mask_n_set(zq, OMAP44XX_REG_ZQ_ZQCL_MULT_SHIFT, OMAP44XX_REG_ZQ_ZQCL_MULT_MASK, REG_ZQ_ZQCL_MULT - 1); mask_n_set(zq, OMAP44XX_REG_ZQ_ZQINIT_MULT_SHIFT, OMAP44XX_REG_ZQ_ZQINIT_MULT_MASK, REG_ZQ_ZQINIT_MULT - 1); mask_n_set(zq, OMAP44XX_REG_ZQ_SFEXITEN_SHIFT, OMAP44XX_REG_ZQ_SFEXITEN_MASK, REG_ZQ_SFEXITEN_ENABLE); /* * Assuming that two chipselects have a single calibration resistor * If there are indeed two calibration resistors, then this flag should * be enabled to take advantage of dual calibration feature. 
* This data should ideally come from board files. But considering * that none of the boards today have calibration resistors per CS, * it would be an unnecessary overhead. */ mask_n_set(zq, OMAP44XX_REG_ZQ_DUALCALEN_SHIFT, OMAP44XX_REG_ZQ_DUALCALEN_MASK, REG_ZQ_DUALCALEN_DISABLE); mask_n_set(zq, OMAP44XX_REG_ZQ_CS0EN_SHIFT, OMAP44XX_REG_ZQ_CS0EN_MASK, REG_ZQ_CS0EN_ENABLE); mask_n_set(zq, OMAP44XX_REG_ZQ_CS1EN_SHIFT, OMAP44XX_REG_ZQ_CS1EN_MASK, (cs1_device ? 1 : 0)); return zq; } static u32 get_temp_alert_config(const struct lpddr2_device_info *cs1_device, const struct lpddr2_addressing *addressing, bool is_derated) { u32 alert = 0, interval; interval = TEMP_ALERT_POLL_INTERVAL_MS * 10000 / addressing->t_REFI_us_x10; if (is_derated) interval *= 4; mask_n_set(alert, OMAP44XX_REG_TA_REFINTERVAL_SHIFT, OMAP44XX_REG_TA_REFINTERVAL_MASK, interval); mask_n_set(alert, OMAP44XX_REG_TA_DEVCNT_SHIFT, OMAP44XX_REG_TA_DEVCNT_MASK, TEMP_ALERT_CONFIG_DEVCT_1); mask_n_set(alert, OMAP44XX_REG_TA_DEVWDT_SHIFT, OMAP44XX_REG_TA_DEVWDT_MASK, TEMP_ALERT_CONFIG_DEVWDT_32); mask_n_set(alert, OMAP44XX_REG_TA_SFEXITEN_SHIFT, OMAP44XX_REG_TA_SFEXITEN_MASK, 1); mask_n_set(alert, OMAP44XX_REG_TA_CS0EN_SHIFT, OMAP44XX_REG_TA_CS0EN_MASK, 1); mask_n_set(alert, OMAP44XX_REG_TA_CS1EN_SHIFT, OMAP44XX_REG_TA_CS1EN_MASK, (cs1_device ? 1 : 0)); return alert; } static u32 get_read_idle_ctrl_reg(bool volt_ramp) { u32 idle = 0, val = 0; if (volt_ramp) val = ns_2_cycles(READ_IDLE_INTERVAL_DVFS) / 64 - 1; else /*Maximum value in normal conditions - suggested by hw team */ val = 0x1FF; mask_n_set(idle, OMAP44XX_REG_READ_IDLE_INTERVAL_SHIFT, OMAP44XX_REG_READ_IDLE_INTERVAL_MASK, val); mask_n_set(idle, OMAP44XX_REG_READ_IDLE_LEN_SHIFT, OMAP44XX_REG_READ_IDLE_LEN_MASK, EMIF_REG_READ_IDLE_LEN_VAL); return idle; } static u32 get_ddr_phy_ctrl_1(u32 freq, u8 RL) { u32 phy = 0, val = 0; mask_n_set(phy, OMAP44XX_REG_READ_LATENCY_SHIFT, OMAP44XX_REG_READ_LATENCY_MASK, RL + 2); if (freq <= 100000000) val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS; else if (freq <= 200000000) val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ; else val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ; mask_n_set(phy, OMAP44XX_REG_DLL_SLAVE_DLY_CTRL_SHIFT, OMAP44XX_REG_DLL_SLAVE_DLY_CTRL_MASK, val); /* Other fields are constant magic values. Hardcode them together */ mask_n_set(phy, OMAP44XX_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHIFT, OMAP44XX_EMIF_DDR_PHY_CTRL_1_BASE_VAL_MASK, EMIF_DDR_PHY_CTRL_1_BASE_VAL); phy >>= OMAP44XX_REG_DDR_PHY_CTRL_1_SHIFT; return phy; } /* * get_lp_mode - Get the LP Mode of a EMIF instance. * * It returns the REG_LP_MODE of EMIF_PWR_MGMT_CTRL[10:8] * for a EMIF. * */ static u32 get_lp_mode(u32 emif_nr) { u32 temp, lpmode; void __iomem *base = emif[emif_nr].base; temp = readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL); lpmode = (temp & OMAP44XX_REG_LP_MODE_MASK) >> OMAP44XX_REG_LP_MODE_SHIFT; return lpmode; } /* * set_lp_mode - Set the LP Mode of a EMIF instance. * * It replaces the REG_LP_MODE of EMIF_PWR_MGMT_CTRL[10:8] * with the new value for a EMIF. * */ static void set_lp_mode(u32 emif_nr, u32 lpmode) { u32 temp; void __iomem *base = emif[emif_nr].base; /* Extract current lp mode value */ temp = readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL); /* Write out the new lp mode value */ temp &= ~OMAP44XX_REG_LP_MODE_MASK; temp |= lpmode << OMAP44XX_REG_LP_MODE_SHIFT; writel(temp, base + OMAP44XX_EMIF_PWR_MGMT_CTRL); } /* * Get the temperature level of the EMIF instance: * Reads the MR4 register of attached SDRAM parts to find out the temperature * level. 
If there are two parts attached (one on each CS), then the temperature * level for the EMIF instance is the higher of the two temperatures. */ static u32 get_temperature_level(u32 emif_nr) { u32 temp, tmp_temperature_level; bool cs1_used; void __iomem *base; base = emif[emif_nr].base; temp = __raw_readl(base + OMAP44XX_EMIF_SDRAM_CONFIG); cs1_used = (temp & OMAP44XX_REG_EBANK_MASK) ? true : false; /* Read mode register 4 */ __raw_writel(LPDDR2_MR4, base + OMAP44XX_EMIF_LPDDR2_MODE_REG_CFG); tmp_temperature_level = __raw_readl(base + OMAP44XX_EMIF_LPDDR2_MODE_REG_DATA); tmp_temperature_level = (tmp_temperature_level & MR4_SDRAM_REF_RATE_MASK) >> MR4_SDRAM_REF_RATE_SHIFT; if (cs1_used) { __raw_writel(LPDDR2_MR4 | OMAP44XX_REG_CS_MASK, base + OMAP44XX_EMIF_LPDDR2_MODE_REG_CFG); temp = __raw_readl(base + OMAP44XX_EMIF_LPDDR2_MODE_REG_DATA); temp = (temp & MR4_SDRAM_REF_RATE_MASK) >> MR4_SDRAM_REF_RATE_SHIFT; tmp_temperature_level = max(temp, tmp_temperature_level); } /* treat everything less than nominal (3) in MR4 as nominal */ if (unlikely(tmp_temperature_level < SDRAM_TEMP_NOMINAL)) tmp_temperature_level = SDRAM_TEMP_NOMINAL; /* if we get a reserved value in MR4 persist with the existing value */ if (unlikely(tmp_temperature_level == SDRAM_TEMP_RESERVED_4)) tmp_temperature_level = emif_temperature_level[emif_nr]; return tmp_temperature_level; } /* * Program EMIF shadow registers: * Sets the shadow registers using pre-calculated register values * When volt_state indicates that this function is called just before * a voltage scaling, set only the registers relevant for voltage scaling * Otherwise, set all the registers relevant for a frequency change */ static void setup_registers(u32 emif_nr, struct emif_regs *regs, u32 volt_state) { u32 temp, read_idle; void __iomem *base = emif[emif_nr].base; __raw_writel(regs->ref_ctrl, base + OMAP44XX_EMIF_SDRAM_REF_CTRL_SHDW); __raw_writel(regs->sdram_tim2, base + OMAP44XX_EMIF_SDRAM_TIM_2_SHDW); __raw_writel(regs->sdram_tim3, base + OMAP44XX_EMIF_SDRAM_TIM_3_SHDW); /* * Do not change the RL part in the PHY CTRL register * RL is not changed during DVFS */ temp = __raw_readl(base + OMAP44XX_EMIF_DDR_PHY_CTRL_1_SHDW); mask_n_set(temp, OMAP44XX_REG_DDR_PHY_CTRL_1_SHDW_SHIFT, OMAP44XX_REG_DDR_PHY_CTRL_1_SHDW_MASK, regs->emif_ddr_phy_ctlr_1_final); __raw_writel(temp, base + OMAP44XX_EMIF_DDR_PHY_CTRL_1_SHDW); __raw_writel(regs->temp_alert_config, base + OMAP44XX_EMIF_TEMP_ALERT_CONFIG); /* * When voltage ramps, forced read idle should * happen more often. */ if (volt_state == LPDDR2_VOLTAGE_RAMPING) read_idle = regs->read_idle_ctrl_volt_ramp; else read_idle = regs->read_idle_ctrl_normal; __raw_writel(read_idle, base + OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW); /* * Reading back the last written register to ensure all writes are * complete */ temp = __raw_readl(base + OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW); } /* * setup_temperature_sensitive_regs() - set the timings for temperature * sensitive registers. This happens once at initialization time based * on the temperature at boot time and subsequently based on the temperature * alert interrupt. Temperature alert can happen when the temperature * increases or drops. So this function can have the effect of either * derating the timings or going back to nominal values.
*/ static void setup_temperature_sensitive_regs(u32 emif_nr, struct emif_regs *regs) { u32 tim1, ref_ctrl, temp_alert_cfg; void __iomem *base = emif[emif_nr].base; u32 temperature = emif_temperature_level[emif_nr]; if (unlikely(temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH)) { tim1 = regs->sdram_tim1; ref_ctrl = regs->ref_ctrl_derated; temp_alert_cfg = regs->temp_alert_config_derated; } else if (unlikely(temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS)) { tim1 = regs->sdram_tim1_derated; ref_ctrl = regs->ref_ctrl_derated; temp_alert_cfg = regs->temp_alert_config_derated; } else { /* * Nominal timings - you may switch back to the * nominal timings if the temperature falls */ tim1 = regs->sdram_tim1; ref_ctrl = regs->ref_ctrl; temp_alert_cfg = regs->temp_alert_config; } __raw_writel(tim1, base + OMAP44XX_EMIF_SDRAM_TIM_1_SHDW); __raw_writel(temp_alert_cfg, base + OMAP44XX_EMIF_TEMP_ALERT_CONFIG); __raw_writel(ref_ctrl, base + OMAP44XX_EMIF_SDRAM_REF_CTRL_SHDW); /* read back last written register to ensure write is complete */ __raw_readl(base + OMAP44XX_EMIF_SDRAM_REF_CTRL_SHDW); } static irqreturn_t handle_temp_alert(void __iomem *base, u32 emif_nr) { u32 old_temperature_level; old_temperature_level = emif_temperature_level[emif_nr]; emif_temperature_level[emif_nr] = get_temperature_level(emif_nr); if (unlikely(emif_temperature_level[emif_nr] == old_temperature_level)) return IRQ_HANDLED; emif_notify_pending |= (1 << emif_nr); if (likely(emif_temperature_level[emif_nr] < old_temperature_level)) { /* Temperature coming down - defer handling to thread */ emif_thermal_handling_pending |= (1 << emif_nr); } else if (likely(emif_temperature_level[emif_nr] != SDRAM_TEMP_VERY_HIGH_SHUTDOWN)) { /* Temperature is going up - handle immediately */ setup_temperature_sensitive_regs(emif_nr, emif_curr_regs[emif_nr]); /* * EMIF de-rated timings register needs to be setup using * freq update method only */ omap4_prcm_freq_update(); } return IRQ_WAKE_THREAD; } static void setup_volt_sensitive_registers(u32 emif_nr, struct emif_regs *regs, u32 volt_state) { u32 read_idle; void __iomem *base = emif[emif_nr].base; /* * When voltage ramps forced read idle should * happen more often. 
*/ if (volt_state == LPDDR2_VOLTAGE_RAMPING) read_idle = regs->read_idle_ctrl_volt_ramp; else read_idle = regs->read_idle_ctrl_normal; __raw_writel(read_idle, base + OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW); /* read back last written register to ensure write is complete */ __raw_readl(base + OMAP44XX_EMIF_READ_IDLE_CTRL_SHDW); return; } /* * Interrupt Handler for EMIF1 and EMIF2 */ static irqreturn_t emif_interrupt_handler(int irq, void *dev_id) { void __iomem *base; irqreturn_t ret = IRQ_HANDLED; u32 sys, ll; u8 emif_nr = EMIF1; if (emif[EMIF2].irq == irq) emif_nr = EMIF2; base = emif[emif_nr].base; /* Save the status and clear it */ sys = __raw_readl(base + OMAP44XX_EMIF_IRQSTATUS_SYS); ll = __raw_readl(base + OMAP44XX_EMIF_IRQSTATUS_LL); __raw_writel(sys, base + OMAP44XX_EMIF_IRQSTATUS_SYS); __raw_writel(ll, base + OMAP44XX_EMIF_IRQSTATUS_LL); /* * Handle temperature alert * Temperature alert should be the same for both ports * So, it's enough to process it for only one of the ports */ if (sys & OMAP44XX_REG_TA_SYS_MASK) ret = handle_temp_alert(base, emif_nr); if (sys & OMAP44XX_REG_ERR_SYS_MASK) pr_err("EMIF: Access error from EMIF%d SYS port - %x", emif_nr, sys); if (ll & OMAP44XX_REG_ERR_LL_MASK) pr_err("EMIF Error: Access error from EMIF%d LL port - %x", emif_nr, ll); return ret; } static irqreturn_t emif_threaded_isr(int irq, void *dev_id) { u8 emif_nr = EMIF1; if (emif[EMIF2].irq == irq) emif_nr = EMIF2; if (emif_thermal_handling_pending & (1 << emif_nr)) { setup_temperature_sensitive_regs(emif_nr, emif_curr_regs[emif_nr]); /* * EMIF de-rated timings register needs to be set up using * the freq update method only */ omap4_prcm_freq_update(); /* clear the bit */ emif_thermal_handling_pending &= ~(1 << emif_nr); } if (emif_notify_pending & (1 << emif_nr)) { sysfs_notify(&(emif[emif_nr].pdev->dev.kobj), NULL, "temperature"); kobject_uevent(&(emif[emif_nr].pdev->dev.kobj), KOBJ_CHANGE); /* clear the bit */ emif_notify_pending &= ~(1 << emif_nr); } if (emif_temperature_level[emif_nr] >= SDRAM_TEMP_VERY_HIGH_SHUTDOWN) { pr_emerg("%s %d: SDRAM temperature exceeds operating " "limit. Shutting down system...\n", __func__, emif_nr + 1); kernel_power_off(); } return IRQ_HANDLED; } static int __init setup_emif_interrupts(u32 emif_nr) { u32 temp; void __iomem *base = emif[emif_nr].base; int r; /* Clear any pending interrupts */ __raw_writel(0xFFFFFFFF, base + OMAP44XX_EMIF_IRQSTATUS_SYS); __raw_writel(0xFFFFFFFF, base + OMAP44XX_EMIF_IRQSTATUS_LL); /* Enable the relevant interrupts for both LL and SYS */ /* LGE_CHANGE_START [bk.shin@lge.com] 2012-05-03, the EMIF temperature irq occurs very often, but it is not really necessary */ temp = OMAP44XX_REG_EN_ERR_SYS_MASK;//OMAP44XX_REG_EN_TA_SYS_MASK | OMAP44XX_REG_EN_ERR_SYS_MASK; /* LGE_CHANGE_END [bk.shin@lge.com] */ __raw_writel(temp, base + OMAP44XX_EMIF_IRQENABLE_SET_SYS); __raw_writel(temp, base + OMAP44XX_EMIF_IRQENABLE_SET_LL); /* Dummy read to make sure writes are complete */ __raw_readl(base + OMAP44XX_EMIF_IRQENABLE_SET_LL); /* set up IRQ handlers */ r = request_threaded_irq(emif[emif_nr].irq, emif_interrupt_handler, emif_threaded_isr, IRQF_SHARED, emif[emif_nr].pdev->name, emif[emif_nr].pdev); if (r) { pr_err("%s: Failed: request_irq emif[%d] IRQ%d:%d\n", __func__, emif_nr, emif[emif_nr].irq, r); return r; } /* * Even if we fail to make the irq wakeup capable, we are at risk only * while going to suspend, where the device is cooler; we might lose a * bit of power due to a pending interrupt preventing the core from * hitting a low power state, but we can continue to handle events in * active use cases. So don't free the interrupt on failure to mark it * wakeup capable, just warn and continue. */ if (enable_irq_wake(emif[emif_nr].irq)) pr_err("%s: Failed: wakeupen emif[%d] IRQ%d\n", __func__, emif_nr, emif[emif_nr].irq); return 0; } static ssize_t emif_temperature_show(struct device *dev, struct device_attribute *attr, char *buf) { u32 temperature; if (dev == &(emif[EMIF1].pdev->dev)) temperature = emif_temperature_level[EMIF1]; else if (dev == &(emif[EMIF2].pdev->dev)) temperature = emif_temperature_level[EMIF2]; else return 0; return snprintf(buf, 20, "%u\n", temperature); } static DEVICE_ATTR(temperature, S_IRUGO, emif_temperature_show, NULL); static int __devinit omap_emif_probe(struct platform_device *pdev) { int id; struct resource *res; if (!pdev) return -EINVAL; id = pdev->id; emif[id].pdev = pdev; res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { pr_err("EMIF %i Invalid IRQ resource\n", id); return -ENODEV; } emif[id].irq = res->start; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { pr_err("EMIF%i Invalid mem resource\n", id); return -ENODEV; } emif[id].base = ioremap(res->start, SZ_1M); if (!emif[id].base) { pr_err("Could not ioremap EMIF%i\n", id); return -ENOMEM; } pr_info("EMIF%d is enabled with IRQ%d\n", id, emif[id].irq); emif[id].ddr_refresh_disabled = false; return 0; } static int emif_init(struct omap_hwmod *oh, void *user) { char *name = "omap_emif"; struct omap_device *od; static int id; od = omap_device_build(name, id, oh, NULL, 0, omap_emif_latency, ARRAY_SIZE(omap_emif_latency), false); WARN(IS_ERR(od), "Can't build omap_device for %s:%s.\n", name, oh->name); id++; return 0; } static void emif_calculate_regs(const struct emif_device_details *devices, u32 freq, struct emif_regs *regs) { u32 temp; const struct lpddr2_addressing *addressing; const struct lpddr2_timings *timings; const struct lpddr2_min_tck *min_tck; const struct lpddr2_device_info *cs0_device = devices->cs0_device; const struct lpddr2_device_info *cs1_device = devices->cs1_device; emif_assert(devices); emif_assert(regs);
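	/*
	 * Illustrative note (added; not in the original driver): a worked
	 * example of the refresh-rate math done by get_sdram_ref_ctrl(),
	 * called later in this function. Assuming a part with tREFI = 3.9us
	 * (t_REFI_us_x10 = 39) running at freq = 400 MHz:
	 *   freq_khz = 400000000 / 1000 = 400000
	 *   val      = 39 * 400000 / 10000 = 1560 (0x618)
	 * i.e. one auto-refresh command roughly every 1560 DDR clock cycles.
	 */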
/* * You cannot have a device on CS1 without one on CS0 * So configuring EMIF without a device on CS0 doesn't * make sense */ emif_assert(cs0_device); emif_assert(cs0_device->type != LPDDR2_TYPE_NVM); /* * If there is a device on CS1 it should be the same type as CS0 * (or NVM. But NVM is not supported in this driver yet) */ emif_assert((cs1_device == NULL) || (cs1_device->type == LPDDR2_TYPE_NVM) || (cs0_device->type == cs1_device->type)); emif_assert(freq <= MAX_LPDDR2_FREQ); set_ddr_clk_period(freq); /* * The device on CS0 is used for all timing calculations * There is only one set of registers for timings per EMIF. So, if the * second CS (CS1) has a device, it should have the same timings as the * device on CS0 */ timings = get_timings_table(cs0_device->device_timings, freq); emif_assert(timings); min_tck = cs0_device->min_tck; temp = addressing_table_index(cs0_device->type, cs0_device->density, cs0_device->io_width); /* addressing_table_index() returns s8; cast back so a negative index is caught */ emif_assert(((s8) temp >= 0)); addressing = &(lpddr2_jedec_addressing_table[temp]); emif_assert(addressing); regs->RL_final = timings->RL; /* * Initial value of EMIF_SDRAM_CONFIG corresponds to the base * frequency - 19.2 MHz */ regs->sdram_config_init = get_sdram_config_reg(cs0_device, cs1_device, addressing, RL_19_2_MHZ); regs->sdram_config_final = regs->sdram_config_init; mask_n_set(regs->sdram_config_final, OMAP44XX_REG_CL_SHIFT, OMAP44XX_REG_CL_MASK, timings->RL); regs->ref_ctrl = get_sdram_ref_ctrl(freq, addressing); regs->ref_ctrl_derated = regs->ref_ctrl / 4; regs->sdram_tim1 = get_sdram_tim_1_reg(timings, min_tck, addressing); regs->sdram_tim1_derated = get_sdram_tim_1_reg_derated(timings, min_tck, addressing); regs->sdram_tim2 = get_sdram_tim_2_reg(timings, min_tck); regs->sdram_tim3 = get_sdram_tim_3_reg(timings, min_tck, addressing); regs->read_idle_ctrl_normal = get_read_idle_ctrl_reg(LPDDR2_VOLTAGE_STABLE); regs->read_idle_ctrl_volt_ramp = get_read_idle_ctrl_reg(LPDDR2_VOLTAGE_RAMPING); regs->zq_config_normal = get_zq_config_reg(cs1_device, addressing, LPDDR2_VOLTAGE_STABLE); regs->zq_config_volt_ramp = get_zq_config_reg(cs1_device, addressing, LPDDR2_VOLTAGE_RAMPING); regs->temp_alert_config = get_temp_alert_config(cs1_device, addressing, false); regs->temp_alert_config_derated = get_temp_alert_config(cs1_device, addressing, true); regs->emif_ddr_phy_ctlr_1_init = get_ddr_phy_ctrl_1(EMIF_FREQ_19_2_MHZ, RL_19_2_MHZ); regs->emif_ddr_phy_ctlr_1_final = get_ddr_phy_ctrl_1(freq, regs->RL_final); /* save the frequency in the struct to act as a tag when cached */ regs->freq = freq; pr_debug("Calculated EMIF configuration register values " "for %d MHz", freq / 1000000); pr_debug("sdram_config_init\t\t: 0x%08x\n", regs->sdram_config_init); pr_debug("sdram_config_final\t\t: 0x%08x\n", regs->sdram_config_final); pr_debug("sdram_ref_ctrl\t\t: 0x%08x\n", regs->ref_ctrl); pr_debug("sdram_ref_ctrl_derated\t\t: 0x%08x\n", regs->ref_ctrl_derated); pr_debug("sdram_tim_1_reg\t\t: 0x%08x\n", regs->sdram_tim1); pr_debug("sdram_tim_1_reg_derated\t\t: 0x%08x\n", regs->sdram_tim1_derated); pr_debug("sdram_tim_2_reg\t\t: 0x%08x\n", regs->sdram_tim2); pr_debug("sdram_tim_3_reg\t\t: 0x%08x\n", regs->sdram_tim3); pr_debug("emif_read_idle_ctrl_normal\t: 0x%08x\n", regs->read_idle_ctrl_normal); pr_debug("emif_read_idle_ctrl_dvfs\t: 0x%08x\n", regs->read_idle_ctrl_volt_ramp); pr_debug("zq_config_reg_normal\t: 0x%08x\n", regs->zq_config_normal); pr_debug("zq_config_reg_dvfs\t\t: 0x%08x\n", regs->zq_config_volt_ramp); pr_debug("temp_alert_config\t: 0x%08x\n", regs->temp_alert_config);
pr_debug("emif_ddr_phy_ctlr_1_init\t: 0x%08x\n", regs->emif_ddr_phy_ctlr_1_init); pr_debug("emif_ddr_phy_ctlr_1_final\t: 0x%08x\n", regs->emif_ddr_phy_ctlr_1_final); } /* * get_regs() - gets the cached emif_regs structure for a given EMIF instance * (emif_nr) for a given frequency(freq): * * As an optimization, only one cache array(that of EMIF1) if both EMIF1 and * EMIF2 has identical devices * * If we do not have an entry corresponding to the frequency given, we * allocate a new entry and calculate the values */ static struct emif_regs *get_regs(u32 emif_nr, u32 freq) { int i; struct emif_regs **regs_cache; struct emif_regs *regs = NULL; /* * If EMIF2 has the same devices as EMIF1 use the register * cache of EMIF1 */ if ((emif_nr == EMIF1) || ((emif_nr == EMIF2) && (emif_devices[EMIF1] == emif_devices[EMIF2]))) regs_cache = emif1_regs_cache; else regs_cache = emif2_regs_cache; for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) { if (regs_cache[i]->freq == freq) { regs = regs_cache[i]; break; } } /* * If we don't have an entry for this frequency in the cache create one * and calculate the values */ if (!regs) { regs = kmalloc(sizeof(struct emif_regs), GFP_ATOMIC); if (!regs) return NULL; emif_calculate_regs(emif_devices[emif_nr], freq, regs); /* * Now look for an un-used entry in the cache and save the * newly created struct. If there are no free entries * over-write the last entry */ for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) ; if (i >= EMIF_MAX_NUM_FREQUENCIES) { pr_warning("emif: emif regs_cache full - more number" " of frequencies used than expected!!"); i = EMIF_MAX_NUM_FREQUENCIES - 1; kfree(regs_cache[i]); } regs_cache[i] = regs; } return regs; } static int do_emif_setup_registers(u32 emif_nr, u32 freq, u32 volt_state) { struct emif_regs *regs; regs = get_regs(emif_nr, freq); if (!regs) return -ENOMEM; emif_curr_regs[emif_nr] = regs; setup_registers(emif_nr, regs, volt_state); setup_temperature_sensitive_regs(emif_nr, regs); return 0; } static int do_setup_device_details(u32 emif_nr, const struct emif_device_details *devices) { if (!emif_devices[emif_nr]) { emif_devices[emif_nr] = kmalloc(sizeof(struct emif_device_details), GFP_KERNEL); if (!emif_devices[emif_nr]) return -ENOMEM; *emif_devices[emif_nr] = *devices; } return 0; } /* * Initialize the temperature level and setup the sysfs nodes * and uvent for temperature monitoring */ static void init_temperature(u32 emif_nr) { if (!emif_devices[emif_nr]) return; emif_temperature_level[emif_nr] = get_temperature_level(emif_nr); WARN_ON(device_create_file(&(emif[emif_nr].pdev->dev), &dev_attr_temperature)); kobject_uevent(&(emif[emif_nr].pdev->dev.kobj), KOBJ_ADD); if (emif_temperature_level[emif_nr] >= SDRAM_TEMP_VERY_HIGH_SHUTDOWN) { pr_emerg("EMIF %d: SDRAM temperature exceeds operating" "limit! Powering OFF\n", emif_nr + 1); kernel_power_off(); } } static void __init emif_setup_errata(void) { if (cpu_is_omap44xx()) emif_errata |= EMIF_ERRATUM_SR_TIMER_i735; } /* * omap_emif_device_init needs to be done before * ddr reconfigure function call. * Hence omap_emif_device_init is a postcore_initcall. 
*/ static int __init omap_emif_device_init(void) { /* set up the errata */ emif_setup_errata(); /* * To avoid code running on other OMAPs in * multi-omap builds */ if (!cpu_is_omap44xx()) return -ENODEV; return omap_hwmod_for_each_by_class("emif", emif_init, NULL); } postcore_initcall(omap_emif_device_init); /* We need to disable interrupts of the EMIF * module because, in a warm reboot scenario, there * may be a pending irq that is not serviced and the EMIF * is stuck in transition. On the next boot the HW mod * fails EMIF initialization with a timeout. */ void emif_clear_irq(int emif_id) { u32 irq_mask = 0; u32 base = 0; u32 reg = 0; if (emif_id == 0) base = OMAP44XX_EMIF1_VIRT; else base = OMAP44XX_EMIF2_VIRT; /* Disable the relevant interrupts for both LL and SYS */ irq_mask = OMAP44XX_REG_EN_TA_SYS_MASK | OMAP44XX_REG_EN_ERR_SYS_MASK | OMAP44XX_REG_EN_DNV_SYS_MASK; __raw_writel(irq_mask, base + OMAP44XX_EMIF_IRQENABLE_CLR_SYS); __raw_writel(irq_mask, base + OMAP44XX_EMIF_IRQENABLE_CLR_LL); /* Clear any pending interrupts without overwriting reserved bits */ reg = __raw_readl(base + OMAP44XX_EMIF_IRQSTATUS_SYS); reg |= irq_mask; __raw_writel(reg, base + OMAP44XX_EMIF_IRQSTATUS_SYS); reg = __raw_readl(base + OMAP44XX_EMIF_IRQSTATUS_LL); reg |= irq_mask; __raw_writel(reg, base + OMAP44XX_EMIF_IRQSTATUS_LL); /* Dummy read to make sure writes are complete */ __raw_readl(base + OMAP44XX_EMIF_IRQENABLE_SET_LL); return; } void emif_driver_shutdown(struct platform_device *pdev) { emif_clear_irq(pdev->id); } static struct platform_driver omap_emif_driver = { .probe = omap_emif_probe, .driver = { .name = "omap_emif", }, .shutdown = emif_driver_shutdown, }; static int __init omap_emif_register(void) { return platform_driver_register(&omap_emif_driver); } postcore_initcall(omap_emif_register); /* * omap_emif_notify_voltage - set up the voltage sensitive * registers based on the voltage situation (voltage ramping or stable) * read_idle_ctrl and zq_config are the registers that are voltage sensitive * They need to have a very safe value (more frequent zq calibration and * read idle forcing) when voltage is scaling and can have a more relaxed * nominal value (frequency dependent) when voltage is stable */ int omap_emif_notify_voltage(struct notifier_block *nb, unsigned long val, void *data) { u32 volt_state; if (val == OMAP_VOLTAGE_PRECHANGE) volt_state = LPDDR2_VOLTAGE_RAMPING; else volt_state = LPDDR2_VOLTAGE_STABLE; if (likely(emif_curr_regs[EMIF1])) setup_volt_sensitive_registers(EMIF1, emif_curr_regs[EMIF1], volt_state); if (likely(emif_curr_regs[EMIF2])) setup_volt_sensitive_registers(EMIF2, emif_curr_regs[EMIF2], volt_state); if (unlikely(!emif_curr_regs[EMIF1] && !emif_curr_regs[EMIF2])) { pr_err_once("emif: voltage state notification came before the" " initial setup - ignoring the notification"); return -EINVAL; } /* * EMIF read-idle control needs to be set up using * the freq update method only */ return omap4_prcm_freq_update(); } static struct notifier_block emif_volt_notifier_block = { .notifier_call = omap_emif_notify_voltage, }; static int __init omap_emif_late_init(void) { struct voltagedomain *voltdm = voltdm_lookup("core"); if (!voltdm) { pr_err("CORE voltage domain lookup failed\n"); return -EINVAL; } voltdm_register_notifier(voltdm, &emif_volt_notifier_block); return 0; } late_initcall(omap_emif_late_init); /* * omap_emif_setup_registers - set up the shadow registers for a given * frequency.
This will typically be followed by a FREQ_UPDATE procedure * to lock at the new frequency and this will update the EMIF main registers * with shadow register values */ int omap_emif_setup_registers(u32 freq, u32 volt_state) { int err = 0; if (likely(emif_devices[EMIF1])) err = do_emif_setup_registers(EMIF1, freq, volt_state); if (likely(!err && emif_devices[EMIF2])) err = do_emif_setup_registers(EMIF2, freq, volt_state); return err; } /* * omap_emif_frequency_pre_notify - Disable DDR self refresh of both EMIFs * * It disables the LP mode if the LP mode of the EMIFs was LP_MODE_SELF_REFRESH. * * It should be called before any PRCM frequency update sequence. * After the frequency update sequence, omap_emif_frequency_post_notify * should be called to restore the original LP MODE setting of the EMIFs. * */ void omap_emif_frequency_pre_notify(void) { int emif_num; for (emif_num = EMIF1; emif_num < EMIF_NUM_INSTANCES; emif_num++) { /* * Only disable ddr self-refresh * if ddr self-refresh was enabled */ if (likely(LP_MODE_SELF_REFRESH == get_lp_mode(emif_num))) { set_lp_mode(emif_num, LP_MODE_DISABLE); emif[emif_num].ddr_refresh_disabled = true; } } } /* * omap_emif_frequency_post_notify - Enable DDR self refresh of both EMIFs * * It restores the LP mode of the EMIFs back to LP_MODE_SELF_REFRESH if it * was previously disabled by omap_emif_frequency_pre_notify() * */ void omap_emif_frequency_post_notify(void) { int emif_num; for (emif_num = EMIF1; emif_num < EMIF_NUM_INSTANCES; emif_num++) { /* * Only re-enable ddr self-refresh * if ddr self-refresh was disabled */ if (likely(emif[emif_num].ddr_refresh_disabled)) { set_lp_mode(emif_num, LP_MODE_SELF_REFRESH); emif[emif_num].ddr_refresh_disabled = false; } } } /* * omap_emif_setup_device_details - save the SDRAM device details passed * from the board file */ int omap_emif_setup_device_details(const struct emif_device_details *emif1_devices, const struct emif_device_details *emif2_devices) { if (emif1_devices) BUG_ON(do_setup_device_details(EMIF1, emif1_devices)); /* * If the memory devices connected to both the EMIFs are identical * (which is normally the case), then there is no need to calculate the * registers again for EMIF1 and allocate the structure for registers */ if (emif2_devices && (emif1_devices != emif2_devices)) BUG_ON(do_setup_device_details(EMIF2, emif2_devices)); else if (emif2_devices) { emif_devices[EMIF2] = emif_devices[EMIF1]; /* call for temperature related setup */ BUG_ON(do_setup_device_details(EMIF2, emif2_devices)); } return 0; } static void __init setup_lowpower_regs(u32 emif_nr, struct emif_device_details *emif_dev) { u32 temp; void __iomem *base = emif[emif_nr].base; const struct lpddr2_device_info *dev; if (!emif_dev) { pr_err("%s: no emif %d\n", __func__, emif_nr); return; } /* * All devices on this specific EMIF should have the same self-refresh * timing, so use cs0 */ dev = emif_dev->cs0_device; if (!dev) { pr_err("%s: no CS0 device in emif %d\n", __func__, emif_nr); return; } if (dev->emif_ddr_selfrefresh_cycles >= 0) { u32 num_cycles, ddr_sr_timer; /* Enable self refresh if not already configured */ temp = __raw_readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL) & OMAP44XX_REG_LP_MODE_MASK; /* * Configure the self refresh timing * base value starts at 16 cycles mapped to 1 (__fls(16) = 4) */ num_cycles = dev->emif_ddr_selfrefresh_cycles; if (num_cycles >= 16) ddr_sr_timer = __fls(num_cycles) - 3; else ddr_sr_timer = 0; if (is_emif_erratum(SR_TIMER_i735) && ddr_sr_timer < EMIF_ERRATUM_SR_TIMER_MIN) { /* * Operating with such an SR_TIM value
will cause * instability, so re-adjust to a safe value as stated * by erratum i735 */ ddr_sr_timer = EMIF_ERRATUM_SR_TIMER_MIN; pr_warning("%s: PM timer for self refresh is set to %i" " cycles\n", __func__, 16 << (EMIF_ERRATUM_SR_TIMER_MIN - 1)); } /* Program the idle delay */ temp = __raw_readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW); mask_n_set(temp, OMAP44XX_REG_SR_TIM_SHDW_SHIFT, OMAP44XX_REG_SR_TIM_SHDW_MASK, ddr_sr_timer); /* * A weird magic number in a field which shouldn't have an impact, * but it seems to make this work */ mask_n_set(temp, OMAP44XX_REG_CS_TIM_SHDW_SHIFT, OMAP44XX_REG_CS_TIM_SHDW_MASK, 0xf); __raw_writel(temp, base + OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW); /* Enable Self Refresh */ temp = __raw_readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL); mask_n_set(temp, OMAP44XX_REG_LP_MODE_SHIFT, OMAP44XX_REG_LP_MODE_MASK, LP_MODE_SELF_REFRESH); __raw_writel(temp, base + OMAP44XX_EMIF_PWR_MGMT_CTRL); } else { /* Disable automatic power management if < 0 and not disabled */ temp = __raw_readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL) & OMAP44XX_REG_LP_MODE_MASK; temp = __raw_readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW); mask_n_set(temp, OMAP44XX_REG_SR_TIM_SHDW_SHIFT, OMAP44XX_REG_SR_TIM_SHDW_MASK, 0x0); __raw_writel(temp, base + OMAP44XX_EMIF_PWR_MGMT_CTRL_SHDW); temp = __raw_readl(base + OMAP44XX_EMIF_PWR_MGMT_CTRL); mask_n_set(temp, OMAP44XX_REG_LP_MODE_SHIFT, OMAP44XX_REG_LP_MODE_MASK, LP_MODE_DISABLE); __raw_writel(temp, base + OMAP44XX_EMIF_PWR_MGMT_CTRL); } } /* * omap_init_emif_timings - reprogram EMIF timing parameters * * Sets the CORE DPLL3 M2 divider to the same value that it's at * currently. This has the effect of setting the EMIF DDR AC timing * registers to the values currently defined by the kernel. */ static int __init omap_init_emif_timings(void) { struct clk *dpll_core_m2_clk; int ret; long rate; /* * Set up the initial temperature, sysfs nodes etc. * Subsequent updates to temperature are done through interrupts */ init_temperature(EMIF1); init_temperature(EMIF2); /* FREQ_UPDATE sequence isn't supported on early versions */ if (omap_rev() == OMAP4430_REV_ES1_0) return -EINVAL; dpll_core_m2_clk = clk_get(NULL, "dpll_core_m2_ck"); if (IS_ERR(dpll_core_m2_clk)) { /* clk_get() returns an ERR_PTR, not NULL, on failure */ pr_err("Could not get LPDDR2 clock - dpll_core_m2_ck\n"); return PTR_ERR(dpll_core_m2_clk); } rate = clk_get_rate(dpll_core_m2_clk); pr_info("Reprogramming LPDDR2 timings to %ld Hz\n", rate >> 1); ret = clk_set_rate(dpll_core_m2_clk, rate); if (ret) pr_err("Unable to set LPDDR2 rate to %ld:\n", rate); /* registers are set up correctly - now enable interrupts */ if (emif_devices[EMIF1]) { ret = setup_emif_interrupts(EMIF1); setup_lowpower_regs(EMIF1, emif_devices[EMIF1]); } if (!ret && emif_devices[EMIF2]) { ret = setup_emif_interrupts(EMIF2); setup_lowpower_regs(EMIF2, emif_devices[EMIF2]); } clk_put(dpll_core_m2_clk); return ret; } late_initcall(omap_init_emif_timings);
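/*
 * Illustrative sketch (added; not part of the original driver): how the
 * fixed-point period representation behaves at 400 MHz, assuming
 * EMIF_PERIOD_DEN_LIMIT >= 2. get_period(400000000, ...) reduces
 * 10^9 / (4 * 10^8) to T_num/T_den = 5/2, i.e. a 2.5ns clock period,
 * so ns_2_cycles(15) computes (15 * 2 + 5 - 1) / 5 = 6 cycles -
 * the ceiling of 15ns / 2.5ns.
 */
#if 0
static void __init emif_period_selftest(void)
{
	set_ddr_clk_period(400000000);		/* T_num = 5, T_den = 2 */
	BUG_ON(ns_2_cycles(15) != 6);		/* ceil(15ns / 2.5ns) */
	BUG_ON(ns_x2_2_cycles(30) != 6);	/* 2 * 15ns, avoids fractions */
}
#endif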
gpl-2.0
Shabbypenguin/Kettle_Corn_Kernel
drivers/misc/stmpe811.c
14
8625
/* drivers/misc/stmpe811.c * * Copyright (C) 2011 Samsung Electronics Co, Ltd. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/types.h> #include <linux/irq.h> #include <linux/pm.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/device.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/firmware.h> #include <linux/wakelock.h> #include <linux/blkdev.h> #include <linux/i2c.h> #include <linux/workqueue.h> #include <mach/gpio.h> #include <linux/stmpe811.h> #define STMPE811_CHIP_ID 0x00 #define STMPE811_ID_VER 0x02 #define STMPE811_SYS_CTRL1 0x03 #define STMPE811_SYS_CTRL2 0x04 #define STMPE811_INT_CTRL 0x09 #define STMPE811_INT_EN 0x0A #define STMPE811_INT_STA 0x0B #define STMPE811_ADC_INT_EN 0x0E #define STMPE811_ADC_INT_STA 0x0F #define STMPE811_ADC_CTRL1 0x20 #define STMPE811_ADC_CTRL2 0x21 #define STMPE811_ADC_CAPT 0x22 #define STMPE811_ADC_DATA_CH0 0x30 #define STMPE811_ADC_DATA_CH1 0x32 #define STMPE811_ADC_DATA_CH2 0x34 #define STMPE811_ADC_DATA_CH3 0x36 #define STMPE811_ADC_DATA_CH4 0x38 #define STMPE811_ADC_DATA_CH5 0x3A #define STMPE811_ADC_DATA_CH6 0x3C #define STMPE811_ADC_DATA_CH7 0x3E #define STMPE811_GPIO_AF 0x17 #define STMPE811_TSC_CTRL 0x40 struct stmpe811_adc_data { struct i2c_client *client; struct mutex adc_lock; struct delayed_work init_work; }; static struct stmpe811_adc_data *local_adc_data; u16 stmpe811_adc_get_value(u8 channel) { struct stmpe811_adc_data *adc_data = local_adc_data; struct i2c_client *client = adc_data->client; u16 w_data = 0; int data_channel_addr = 0; s32 ddata; /* i2c_smbus_read_word_data() returns a negative errno in an s32 */ mutex_lock(&adc_data->adc_lock); /* delay for stabilization time */ msleep(30); i2c_smbus_write_byte_data(client, STMPE811_ADC_CAPT, 0xff); msleep(30); data_channel_addr = STMPE811_ADC_DATA_CH0 + (channel * 2); ddata = i2c_smbus_read_word_data(client, data_channel_addr); if (ddata < 0) { printk(KERN_ERR "%s: Failed to read ADC_DATA_CH(%d).\n", __func__, channel); mutex_unlock(&adc_data->adc_lock); return 0; /* the u16 return type cannot carry a negative errno */ } w_data = ((ddata & 0xff) << 8 | (ddata >> 8)) & 0xfff; printk(KERN_INFO "%s: ADC_DATA_CH(%d) = 0x%x, %d\n", __func__, channel, w_data, w_data); mutex_unlock(&adc_data->adc_lock); return w_data; } static ssize_t adc_test_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stmpe811_adc_data *adc_data = dev_get_drvdata(dev); struct i2c_client *client = adc_data->client; u16 ret; ret = stmpe811_adc_get_value(7); if (!ret) { dev_err(&client->dev, "%s: err at read adc %d\n", __func__, ret); return snprintf(buf, 9, "UNKNOWN\n"); } printk(KERN_INFO "%s: accessory adc[ch7]: %x\n", __func__, ret); if (ret > 0xe38 && ret < 0xed8) return snprintf(buf, 3, "1c\n"); return snprintf(buf, 4, "%x\n", ret); } static ssize_t usb_state_show(struct device *dev, struct device_attribute *attr, char *buf) { if (current_cable_type == 3) return snprintf(buf, 22, "USB_STATE_CONFIGURED\n"); return snprintf(buf, 25, "USB_STATE_NOTCONFIGURED\n"); } static DEVICE_ATTR(adc, S_IRUGO, adc_test_show, NULL); static DEVICE_ATTR(usb_state, S_IRUGO, usb_state_show, NULL);
static int stmpe811_adc_i2c_remove(struct i2c_client *client) { struct stmpe811_adc_data *adc = i2c_get_clientdata(client); mutex_destroy(&adc->adc_lock); kfree(adc); return 0; } static void stmpe811_reg_init(struct stmpe811_adc_data *data) { struct stmpe811_adc_data *adc_data = data; struct i2c_client *client = adc_data->client; int ret; /* initialize the stmpe811 control registers */ ret = i2c_smbus_read_word_data(client, STMPE811_CHIP_ID); printk(KERN_INFO "%s: CHIP_ID: %x\n", __func__, ret); if (ret < 0) { printk(KERN_ERR "%s: Failed to read STMPE811_CHIP_ID : 0x%x\n", __func__, ret); return; } /* soft reset */ i2c_smbus_write_byte_data(client, STMPE811_SYS_CTRL1, 0x02); msleep(20); /* clock on: GPIO & ADC, off: TS & TSC */ i2c_smbus_write_byte_data(client, STMPE811_SYS_CTRL2, 0x0a); ret = i2c_smbus_read_byte_data(client, STMPE811_SYS_CTRL2); printk(KERN_INFO "STMPE811_SYS_CTRL2 = 0x%x\n", ret); /* disable interrupts */ i2c_smbus_write_byte_data(client, STMPE811_INT_EN, 0x00); ret = i2c_smbus_read_byte_data(client, STMPE811_INT_EN); printk(KERN_INFO "STMPE811_INT_EN = 0x%x\n", ret); /* ADC conversion time: 64, 12-bit ADC operation, internal reference */ i2c_smbus_write_byte_data(client, STMPE811_ADC_CTRL1, 0x3c); ret = i2c_smbus_read_byte_data(client, STMPE811_ADC_CTRL1); printk(KERN_INFO "STMPE811_ADC_CTRL1 = 0x%x\n", ret); /* clock speed 6.5MHz */ i2c_smbus_write_byte_data(client, STMPE811_ADC_CTRL2, 0x03); ret = i2c_smbus_read_byte_data(client, STMPE811_ADC_CTRL2); printk(KERN_INFO "STMPE811_ADC_CTRL2 = 0x%x\n", ret); /* These should be ADC settings. * So the value should be 0x00 instead of 0xFF * gpio 0-3 -> ADC */ i2c_smbus_write_byte_data(client, STMPE811_GPIO_AF, 0x00); ret = i2c_smbus_read_byte_data(client, STMPE811_GPIO_AF); printk(KERN_INFO "STMPE811_GPIO_AF = 0x%x\n", ret); /* init Ch[0]=ADC_CHECK(battery), ch[3]=Accessory */ i2c_smbus_write_byte_data(client, STMPE811_ADC_CAPT, 0x90); i2c_smbus_write_byte_data(client, STMPE811_TSC_CTRL, 0x00); ret = i2c_smbus_read_byte_data(client, STMPE811_TSC_CTRL); printk(KERN_INFO "STMPE811_TSC_CTRL = 0x%x\n", ret); pr_info("%s success\n", __func__); } static int stmpe811_adc_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct stmpe811_adc_data *adc_data; struct device *sec_adc_dev; int ret; printk(KERN_INFO "%s: probe start!\n", __func__); if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) return -EIO; adc_data = kzalloc(sizeof(struct stmpe811_adc_data), GFP_KERNEL); if (adc_data == NULL) { printk(KERN_ERR "failed to allocate memory\n"); return -ENOMEM; } adc_data->client = client; i2c_set_clientdata(client, adc_data); mutex_init(&adc_data->adc_lock); local_adc_data = adc_data; /* set up sysfs for adc test mode */ sec_adc_dev = device_create(sec_class, NULL, 0, NULL, "switch"); if (IS_ERR(sec_adc_dev)) { printk(KERN_ERR "%s: Failed to create device (switch)!\n", __func__); ret = PTR_ERR(sec_adc_dev); goto err_create_device; } ret = device_create_file(sec_adc_dev, &dev_attr_adc); if (ret < 0) { printk(KERN_ERR "failed to create device file(%s)!\n", dev_attr_adc.attr.name); goto err_create_file_adc; } ret = device_create_file(sec_adc_dev, &dev_attr_usb_state); if (ret < 0) { printk(KERN_ERR "failed to create device file(%s)!\n", dev_attr_usb_state.attr.name); goto err_create_file_usb_state; } dev_set_drvdata(sec_adc_dev, adc_data); stmpe811_reg_init(adc_data); return 0; err_create_file_usb_state:
/* only the adc file was created on this path, so remove just that */ device_remove_file(sec_adc_dev, &dev_attr_adc); err_create_file_adc: err_create_device: /* no IRQ was requested in probe, so there is nothing to free here */ mutex_destroy(&adc_data->adc_lock); i2c_set_clientdata(client, NULL); kfree(adc_data); return ret; } static const struct i2c_device_id stmpe811_id[] = { {"stmpe811", 0}, {} }; MODULE_DEVICE_TABLE(i2c, stmpe811_id); static struct i2c_driver stmpe811_adc_i2c_driver = { .driver = { .name = "stmpe811", .owner = THIS_MODULE, }, .probe = stmpe811_adc_i2c_probe, .remove = stmpe811_adc_i2c_remove, .id_table = stmpe811_id, }; static int __init stmpe811_adc_init(void) { return i2c_add_driver(&stmpe811_adc_i2c_driver); } module_init(stmpe811_adc_init); static void __exit stmpe811_adc_exit(void) { i2c_del_driver(&stmpe811_adc_i2c_driver); } module_exit(stmpe811_adc_exit); MODULE_AUTHOR("Samsung"); MODULE_DESCRIPTION("Samsung STMPE811 ADC driver"); MODULE_LICENSE("GPL");
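/*
 * Illustrative usage sketch (added; not part of the original driver):
 * how a caller might read channel 7 and convert the raw 12-bit result
 * to millivolts. The 3300mV full scale is an assumption - the actual
 * reference voltage depends on board wiring and the ADC_CTRL1 setting.
 */
#if 0
static int stmpe811_read_ch7_mv(void)
{
	u16 raw = stmpe811_adc_get_value(7);	/* 0..0xfff, 0 on read failure */

	return (raw * 3300) / 4095;		/* assumed 3.3V reference */
}
#endif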
gpl-2.0
MattCrystal/yolo-computing-machine
arch/arm/mach-msm/lge/board-8974-g2-spr-gpiomux.c
14
53595
/* Copyright (c) 2012-2013, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/init.h> #include <linux/ioport.h> #include <mach/board.h> #include <mach/gpio.h> #include <mach/gpiomux.h> #include <mach/socinfo.h> #include <mach/board_lge.h> #define KS8851_IRQ_GPIO 94 /* soojung.lim@lge.com, 2013-05-23 * To use 24MHz GP/GCC_GP clock for V2 H/W */ int g_is_tlmm_spare_reg_value = 0; static struct gpiomux_setting ap2mdm_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting mdm2ap_status_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, .dir = GPIOMUX_IN, }; static struct gpiomux_setting mdm2ap_errfatal_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, .dir = GPIOMUX_IN, }; static struct gpiomux_setting mdm2ap_pblrdy = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_IN, }; static struct gpiomux_setting ap2mdm_soft_reset_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting ap2mdm_wakeup = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, }; static struct msm_gpiomux_config mdm_configs[] __initdata = { /* AP2MDM_STATUS */ { .gpio = 105, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_cfg, } }, /* MDM2AP_STATUS */ { .gpio = 46, .settings = { [GPIOMUX_SUSPENDED] = &mdm2ap_status_cfg, } }, /* MDM2AP_ERRFATAL */ { .gpio = 82, .settings = { [GPIOMUX_SUSPENDED] = &mdm2ap_errfatal_cfg, } }, /* AP2MDM_ERRFATAL */ { .gpio = 106, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_cfg, } }, /* AP2MDM_SOFT_RESET, aka AP2MDM_PON_RESET_N */ { .gpio = 24, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_soft_reset_cfg, } }, /* AP2MDM_WAKEUP */ { .gpio = 104, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_wakeup, } }, /* MDM2AP_PBL_READY*/ { .gpio = 80, .settings = { [GPIOMUX_SUSPENDED] = &mdm2ap_pblrdy, } }, }; static struct gpiomux_setting gpio_uart_config = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_16MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_HIGH, }; static struct gpiomux_setting slimbus = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_KEEPER, }; #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE) static struct gpiomux_setting gpio_eth_config = { .pull = GPIOMUX_PULL_UP, .drv = GPIOMUX_DRV_2MA, .func = GPIOMUX_FUNC_GPIO, }; static struct gpiomux_setting gpio_spi_cs2_config = { .func = GPIOMUX_FUNC_4, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting gpio_spi_config = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_12MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting gpio_spi_cs1_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_UP, }; static struct msm_gpiomux_config msm_eth_configs[] = { { .gpio = KS8851_IRQ_GPIO, .settings = { [GPIOMUX_SUSPENDED] = &gpio_eth_config, } }, }; #endif static struct gpiomux_setting gpio_suspend_config[] = { { .func = GPIOMUX_FUNC_GPIO, /* IN-NP */ .drv = 
GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_GPIO, /* O-LOW */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, }, }; static struct gpiomux_setting gpio_epm_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; /* LGE_CHANGE_S, [WiFi][hayun.kim@lge.com], 2013-01-22, Wifi Bring Up */ #if defined (CONFIG_BCMDHD) || defined (CONFIG_BCMDHD_MODULE) #else static struct gpiomux_setting wcnss_5wire_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting wcnss_5wire_active_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_DOWN, }; #endif /* LGE_CHANGE_E, [WiFi][hayun.kim@lge.com], 2013-01-22, Wifi Bring Up */ static struct gpiomux_setting gpio_i2c_config = { .func = GPIOMUX_FUNC_3, /* * Please keep I2C GPIOs drive-strength at minimum (2ma). It is a * workaround for HW issue of glitches caused by rapid GPIO current- * change. */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting lcd_en_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_HIGH, }; static struct gpiomux_setting lcd_en_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, .dir = GPIOMUX_IN, }; #ifdef CONFIG_MACH_LGE #else static struct gpiomux_setting atmel_resout_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting atmel_resout_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting atmel_int_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting atmel_int_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; #endif static struct gpiomux_setting taiko_reset = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, }; static struct gpiomux_setting taiko_int = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; #ifndef CONFIG_LGE_IRRC /* NOT USED: GPIO 86 is used as IRRC_RxD in G2 board */ static struct gpiomux_setting hap_lvl_shft_suspended_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting hap_lvl_shft_active_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct msm_gpiomux_config hap_lvl_shft_config[] __initdata = { { .gpio = 86, .settings = { [GPIOMUX_SUSPENDED] = &hap_lvl_shft_suspended_config, [GPIOMUX_ACTIVE] = &hap_lvl_shft_active_config, }, }, }; #endif #if defined(CONFIG_BACKLIGHT_LM3630) static struct gpiomux_setting lcd_bl_en_active_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_HIGH, }; static struct gpiomux_setting lcd_bl_en_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_IN, }; #endif #ifdef CONFIG_MACH_LGE #ifdef CONFIG_MAX17048_FUELGAUGE static struct gpiomux_setting max17048_i2c_sda_config = { /* GPIO_2 */ .func = GPIOMUX_FUNC_3, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting max17048_i2c_scl_config = { /* GPIO_3 */ .func = GPIOMUX_FUNC_3, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting 
max17048_int_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .dir = GPIOMUX_IN, }; #endif static struct gpiomux_setting touch_id_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .dir = GPIOMUX_IN, }; static struct gpiomux_setting touch_id_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .dir = GPIOMUX_IN, }; static struct gpiomux_setting touch_int_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .dir = GPIOMUX_IN, }; static struct gpiomux_setting touch_int_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .dir = GPIOMUX_IN, }; static struct gpiomux_setting touch_i2c_act_cfg = { .func = GPIOMUX_FUNC_3, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting touch_i2c_sus_cfg = { .func = GPIOMUX_FUNC_3, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, .dir = GPIOMUX_IN, }; static struct gpiomux_setting touch_reset_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .dir = GPIOMUX_IN, }; static struct gpiomux_setting touch_reset_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .dir = GPIOMUX_IN, }; static struct gpiomux_setting touch_ldoen_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .dir = GPIOMUX_IN, }; static struct gpiomux_setting touch_ldoen_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .dir = GPIOMUX_IN, }; #endif static struct msm_gpiomux_config msm_touch_configs[] __initdata = { { .gpio = 8, /* TOUCH RESET */ .settings = { [GPIOMUX_ACTIVE] = &touch_reset_act_cfg, [GPIOMUX_SUSPENDED] = &touch_reset_sus_cfg, }, }, { .gpio = 5, /* TOUCH IRQ */ .settings = { [GPIOMUX_ACTIVE] = &touch_int_act_cfg, [GPIOMUX_SUSPENDED] = &touch_int_sus_cfg, }, }, }; #ifdef CONFIG_MACH_LGE static struct gpiomux_setting hsic_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, .dir = GPIOMUX_OUT_LOW, }; #else /* qmc original */ static struct gpiomux_setting hsic_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; #endif #ifndef CONFIG_MACH_LGE static struct gpiomux_setting hsic_act_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_12MA, .pull = GPIOMUX_PULL_NONE, }; #endif static struct gpiomux_setting hsic_hub_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; #ifndef CONFIG_MACH_LGE static struct msm_gpiomux_config msm_hsic_configs[] = { { .gpio = 144, /*HSIC_STROBE */ .settings = { [GPIOMUX_ACTIVE] = &hsic_act_cfg, [GPIOMUX_SUSPENDED] = &hsic_sus_cfg, }, }, #if 0 /* Camera will use gpio 145 */ /* LGE_CHANGE * Camera bring up - SPRINT * 2013-03-14, jinw.kim@lge.com */ { .gpio = 145, /* HSIC_DATA */ .settings = { [GPIOMUX_ACTIVE] = &hsic_act_cfg, [GPIOMUX_SUSPENDED] = &hsic_sus_cfg, }, }, { .gpio = 80, .settings = { [GPIOMUX_ACTIVE] = &hsic_resume_act_cfg, [GPIOMUX_SUSPENDED] = &hsic_resume_susp_cfg, }, }, #endif }; #endif static struct msm_gpiomux_config msm_hsic_hub_configs[] = { { .gpio = 50, /* HSIC_HUB_INT_N */ .settings = { [GPIOMUX_ACTIVE] = &hsic_hub_act_cfg, [GPIOMUX_SUSPENDED] = &hsic_sus_cfg, }, }, }; /* LGE_CHANGE_S matthew.choi@lge.com 130319 GPIO Setting for Hall IC */ static struct gpiomux_setting hall_ic_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; static struct msm_gpiomux_config msm_hall_ic_configs[] = { { .gpio = 102, .settings = { [GPIOMUX_ACTIVE] = &hall_ic_act_cfg, }, }, { .gpio = 144, .settings = { [GPIOMUX_ACTIVE] = 
&hall_ic_act_cfg, }, }, }; /* LGE_CHANGE_E matthew.choi@lge.com 130319 GPIO Setting for Hall IC */ static struct gpiomux_setting mhl_suspend_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting mhl_active_1_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_OUT_HIGH, }; static struct gpiomux_setting hdmi_suspend_1_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting hdmi_suspend_2_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting hdmi_active_1_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting hdmi_active_2_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_16MA, .pull = GPIOMUX_PULL_DOWN, }; #if defined(CONFIG_MACH_LGE) #ifdef CONFIG_MAX17048_FUELGAUGE static struct msm_gpiomux_config msm_fuel_gauge_configs[] __initdata = { { .gpio = 2, /* BLSP1 QUP1 I2C_DAT */ .settings = { [GPIOMUX_ACTIVE] = &max17048_i2c_sda_config, [GPIOMUX_SUSPENDED] = &max17048_i2c_sda_config, }, }, { .gpio = 3, /* BLSP1 QUP1 I2C_CLK */ .settings = { [GPIOMUX_ACTIVE] = &max17048_i2c_scl_config, [GPIOMUX_SUSPENDED] = &max17048_i2c_scl_config, }, }, { .gpio = 9, /* FUEL_GAUGE_INT_N */ .settings = { [GPIOMUX_ACTIVE] = &max17048_int_config, [GPIOMUX_SUSPENDED] = &max17048_int_config, }, }, }; #endif #endif static struct msm_gpiomux_config msm_mhl_configs[] __initdata = { { /* mhl-sii8334 pwr */ .gpio = 12, .settings = { [GPIOMUX_SUSPENDED] = &mhl_suspend_config, [GPIOMUX_ACTIVE] = &mhl_active_1_cfg, }, }, { /* mhl-sii8334 intr */ .gpio = 82, .settings = { [GPIOMUX_SUSPENDED] = &mhl_suspend_config, [GPIOMUX_ACTIVE] = &mhl_active_1_cfg, }, }, }; static struct msm_gpiomux_config msm_hdmi_configs[] __initdata = { { .gpio = 31, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_1_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_1_cfg, }, }, { .gpio = 32, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_1_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_1_cfg, }, }, { .gpio = 33, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_1_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_1_cfg, }, }, { .gpio = 34, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_2_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_2_cfg, }, }, }; #ifndef CONFIG_MACH_LGE static struct gpiomux_setting gpio_uart7_active_cfg = { .func = GPIOMUX_FUNC_3, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting gpio_uart7_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct msm_gpiomux_config msm_blsp2_uart7_configs[] __initdata = { { .gpio = 41, /* BLSP2 UART7 TX */ .settings = { [GPIOMUX_ACTIVE] = &gpio_uart7_active_cfg, [GPIOMUX_SUSPENDED] = &gpio_uart7_suspend_cfg, }, }, { .gpio = 42, /* BLSP2 UART7 RX */ .settings = { [GPIOMUX_ACTIVE] = &gpio_uart7_active_cfg, [GPIOMUX_SUSPENDED] = &gpio_uart7_suspend_cfg, }, }, { .gpio = 43, /* BLSP2 UART7 CTS */ .settings = { [GPIOMUX_ACTIVE] = &gpio_uart7_active_cfg, [GPIOMUX_SUSPENDED] = &gpio_uart7_suspend_cfg, }, }, { .gpio = 44, /* BLSP2 UART7 RFR */ .settings = { [GPIOMUX_ACTIVE] = &gpio_uart7_active_cfg, [GPIOMUX_SUSPENDED] = &gpio_uart7_suspend_cfg, }, }, }; #endif static struct msm_gpiomux_config msm_rumi_blsp_configs[] __initdata = { { .gpio = 45, /* BLSP2 UART8 TX */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_uart_config, }, }, { .gpio = 46, /* BLSP2 UART8 RX */ 
.settings = { [GPIOMUX_SUSPENDED] = &gpio_uart_config, }, }, }; #if defined (CONFIG_MACH_LGE) static struct msm_gpiomux_config msm_lcd_configs_rev_a[] __initdata = { { .gpio = 58, .settings = { [GPIOMUX_ACTIVE] = &lcd_en_act_cfg, [GPIOMUX_SUSPENDED] = &lcd_en_sus_cfg, }, }, #if defined(CONFIG_BACKLIGHT_LM3630) { .gpio = 91, /* LCD_BL_EN */ .settings = { [GPIOMUX_ACTIVE] = &lcd_bl_en_active_cfg, [GPIOMUX_SUSPENDED] = &lcd_bl_en_suspend_cfg, }, }, #endif }; static struct msm_gpiomux_config msm_lcd_configs_rev_b[] __initdata = { { .gpio = 58, .settings = { [GPIOMUX_ACTIVE] = &lcd_en_act_cfg, [GPIOMUX_SUSPENDED] = &lcd_en_sus_cfg, }, }, #if defined(CONFIG_BACKLIGHT_LM3630) { .gpio = 49, /* LCD_BL_EN */ .settings = { [GPIOMUX_ACTIVE] = &lcd_bl_en_active_cfg, [GPIOMUX_SUSPENDED] = &lcd_bl_en_suspend_cfg, }, }, #endif }; #else static struct msm_gpiomux_config msm_lcd_configs[] __initdata = { { .gpio = 58, .settings = { [GPIOMUX_ACTIVE] = &lcd_en_act_cfg, [GPIOMUX_SUSPENDED] = &lcd_en_sus_cfg, }, }, #if defined(CONFIG_BACKLIGHT_LM3630) { .gpio = 91, /* LCD_BL_EN */ .settings = { [GPIOMUX_ACTIVE] = &lcd_bl_en_active_cfg, [GPIOMUX_SUSPENDED] = &lcd_bl_en_suspend_cfg, }, }, #endif }; #endif static struct msm_gpiomux_config msm_blsp_configs[] __initdata = { #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE) { .gpio = 0, /* BLSP1 QUP SPI_DATA_MOSI */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_spi_config, }, }, { .gpio = 1, /* BLSP1 QUP SPI_DATA_MISO */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_spi_config, }, }, { .gpio = 3, /* BLSP1 QUP SPI_CLK */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_spi_config, }, }, { .gpio = 9, /* BLSP1 QUP SPI_CS2A_N */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_spi_cs2_config, }, }, { .gpio = 8, /* BLSP1 QUP SPI_CS1_N */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_spi_cs1_config, }, }, #endif #ifdef CONFIG_MACH_LGE { .gpio = 6, /* BLSP1 QUP2 I2C_DAT */ .settings = { [GPIOMUX_ACTIVE] = &touch_i2c_act_cfg, [GPIOMUX_SUSPENDED] = &touch_i2c_sus_cfg, }, }, { .gpio = 7, /* BLSP1 QUP2 I2C_CLK */ .settings = { [GPIOMUX_ACTIVE] = &touch_i2c_act_cfg, [GPIOMUX_SUSPENDED] = &touch_i2c_sus_cfg, }, }, #else { .gpio = 6, /* BLSP1 QUP2 I2C_DAT */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, { .gpio = 7, /* BLSP1 QUP2 I2C_CLK */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, #endif { .gpio = 83, /* BLSP11 QUP I2C_DAT */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, { .gpio = 84, /* BLSP11 QUP I2C_CLK */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, #ifdef CONFIG_MACH_LGE { .gpio = 4, /* BLSP2 UART TX */ .settings = { [GPIOMUX_ACTIVE] = &touch_id_act_cfg, [GPIOMUX_SUSPENDED] = &touch_id_sus_cfg, }, }, #else { .gpio = 4, /* BLSP2 UART TX */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_uart_config, }, }, #endif #ifdef CONFIG_MACH_LGE #else { .gpio = 5, /* BLSP2 UART RX */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_uart_config, }, }, #endif #ifdef CONFIG_MACH_LGE { .gpio = 0, /* BLSP2 UART TX */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_uart_config, }, }, { .gpio = 1, /* BLSP2 UART RX */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_uart_config, }, }, #endif #ifdef CONFIG_LGE_IRRC { .gpio = 85, /* BLSP2 UART TX */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_uart_config, }, }, { .gpio = 86, /* BLSP2 UART RX */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_uart_config, }, }, #endif /* LGE_CHANGE_S, [BT][younghyun.kwon@lge.com], 2013-01-29 */ #ifndef CONFIG_LGE_BLUETOOTH { .gpio = 53, /* BLSP2 QUP4 SPI_DATA_MOSI */ .settings = { [GPIOMUX_ACTIVE] = 
&gpio_spi_config, [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 54, /* BLSP2 QUP4 SPI_DATA_MISO */ .settings = { [GPIOMUX_ACTIVE] = &gpio_spi_config, [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 56, /* BLSP2 QUP4 SPI_CLK */ .settings = { [GPIOMUX_ACTIVE] = &gpio_spi_config, [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, { .gpio = 55, /* BLSP2 QUP4 SPI_CS0_N */ .settings = { [GPIOMUX_ACTIVE] = &gpio_spi_config, [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, #endif /* CONFIG_LGE_BLUETOOTH */ /* LGE_CHANGE_S, [BT][younghyun.kwon@lge.com], 2013-01-29 */ { .gpio = 81, /* EPM enable */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_epm_config, }, }, }; static struct msm_gpiomux_config msm8974_slimbus_config[] __initdata = { { .gpio = 70, /* slimbus clk */ .settings = { [GPIOMUX_SUSPENDED] = &slimbus, }, }, { .gpio = 71, /* slimbus data */ .settings = { [GPIOMUX_SUSPENDED] = &slimbus, }, }, }; #ifdef CONFIG_SND_FM_RADIO static struct gpiomux_setting tert_mi2s_act_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting tert_mi2s_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; #if 0 static struct gpiomux_setting fm_radio_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting fm_radio_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; #endif static struct msm_gpiomux_config msm8974_tert_mi2s_configs[] __initdata = { { .gpio = 74, /* mi2s sck */ .settings = { [GPIOMUX_SUSPENDED] = &tert_mi2s_sus_cfg, [GPIOMUX_ACTIVE] = &tert_mi2s_act_cfg, }, }, { .gpio = 75, .settings = { [GPIOMUX_SUSPENDED] = &tert_mi2s_sus_cfg, [GPIOMUX_ACTIVE] = &tert_mi2s_act_cfg, }, }, { .gpio = 76, .settings = { [GPIOMUX_SUSPENDED] = &tert_mi2s_sus_cfg, [GPIOMUX_ACTIVE] = &tert_mi2s_act_cfg, }, }, { .gpio = 77, /* SD1 -Data 1 */ .settings = { [GPIOMUX_SUSPENDED] = &tert_mi2s_sus_cfg, [GPIOMUX_ACTIVE] = &tert_mi2s_act_cfg, }, }, }; #if 0 static struct msm_gpiomux_config fm_radio_configs[]= { { .gpio = 69, /* mi2s sck */ .settings = { [GPIOMUX_SUSPENDED] = &fm_radio_sus_cfg, [GPIOMUX_ACTIVE] = &fm_radio_act_cfg, }, }, }; #endif #endif static struct gpiomux_setting cam_settings[] = { { .func = GPIOMUX_FUNC_1, /*active 1*/ /* 0 */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_1, /*suspend*/ /* 1 */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }, { .func = GPIOMUX_FUNC_1, /*i2c suspend*/ /* 2 */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_KEEPER, }, { .func = GPIOMUX_FUNC_GPIO, /*active 0*/ /* 3 */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_GPIO, /*suspend 0*/ /* 4 */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }, }; static struct gpiomux_setting sd_card_det_active_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_IN, }; static struct gpiomux_setting sd_card_det_sleep_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; static struct msm_gpiomux_config sd_card_det __initdata = { .gpio = 62, .settings = { [GPIOMUX_ACTIVE] = &sd_card_det_active_config, [GPIOMUX_SUSPENDED] = &sd_card_det_sleep_config, }, }; /* LGE_CHANGE_S * Camera bring up - Separate Rev.A and B setting * In case of Rev.B, MAIN_CAM_RESET is changed from GPIO_90 to GPIO_4 * 2013-03-20, youmi.jun@lge.com */ #if 
defined(CONFIG_MACH_LGE) static struct msm_gpiomux_config msm_sensor_configs_rev_a[] __initdata = { { .gpio = 15, /* CAM_MCLK0 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, { .gpio = 16, /* CAM_MCLK1 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, { .gpio = 17, /* CAM_MCLK2 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, { .gpio = 18, /* WEBCAM1_RESET_N / CAM_MCLK3 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, { .gpio = 19, /* CCI_I2C_SDA0 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, { .gpio = 20, /* CCI_I2C_SCL0 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, { .gpio = 21, /* CCI_I2C_SDA1 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, { .gpio = 22, /* CCI_I2C_SCL1 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, #ifdef CONFIG_MACH_LGE { .gpio = 23, /* FLASH_LED_EN */ .settings = { [GPIOMUX_ACTIVE] = &touch_ldoen_act_cfg, [GPIOMUX_SUSPENDED] = &touch_ldoen_sus_cfg, }, }, #else { .gpio = 23, /* FLASH_LED_EN */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, #endif { .gpio = 24, /* FLASH_LED_NOW */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 25, /* WEBCAM2_RESET_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 26, /* CAM_IRQ */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, #ifdef CONFIG_MACH_LGE /* NULL - GPIO_27 : used with Motor PWM pin GPIO_28 : used with SlimPort IRQ pin */ #else /* QCT original */ { .gpio = 27, /* OIS_SYNC */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 28, /* WEBCAM1_STANDBY */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, #endif { .gpio = 89, /* CAM1_STANDBY_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, { .gpio = 90, /* CAM1_RST_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, #if defined(CONFIG_BACKLIGHT_LM3630) #else { .gpio = 91, /* CAM2_STANDBY_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, #endif { .gpio = 92, /* CAM2_RST_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, #if defined(CONFIG_MACH_LGE) /* LGE_CHANGE_S * Camera bring up - SPRINT * 2013-03-14, jinw.kim@lge.com */ { .gpio = 57, /* 13M_VCM_EN */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, { .gpio = 145, /* OIS_LDO_EN */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, { .gpio = 29, /* OIS_RESET */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, { .gpio = 96, /* 13M_VIO */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, { .gpio = 30, /* VT_LDO_EN */ .settings = { [GPIOMUX_ACTIVE] = 
&cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, #endif }; static struct msm_gpiomux_config msm_sensor_configs_rev_b[] __initdata = { { .gpio = 15, /* CAM_MCLK0 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, { .gpio = 16, /* CAM_MCLK1 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, { .gpio = 17, /* CAM_MCLK2 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, { .gpio = 18, /* WEBCAM1_RESET_N / CAM_MCLK3 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, { .gpio = 19, /* CCI_I2C_SDA0 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, { .gpio = 20, /* CCI_I2C_SCL0 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, { .gpio = 21, /* CCI_I2C_SDA1 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, { .gpio = 22, /* CCI_I2C_SCL1 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, #ifdef CONFIG_MACH_LGE { .gpio = 23, /* FLASH_LED_EN */ .settings = { [GPIOMUX_ACTIVE] = &touch_ldoen_act_cfg, [GPIOMUX_SUSPENDED] = &touch_ldoen_sus_cfg, }, }, #else { .gpio = 23, /* FLASH_LED_EN */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, #endif { .gpio = 24, /* FLASH_LED_NOW */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 25, /* WEBCAM2_RESET_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 26, /* CAM_IRQ */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, #ifdef CONFIG_MACH_LGE /* NULL - GPIO_27 : used with Motor PWM pin GPIO_28 : used with SlimPort IRQ pin */ #else /* QCT original */ { .gpio = 27, /* OIS_SYNC */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 28, /* WEBCAM1_STANDBY */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, #endif { .gpio = 89, /* CAM1_STANDBY_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, { .gpio = 4, /* CAM1_RST_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, #if defined(CONFIG_BACKLIGHT_LM3630) #else { .gpio = 91, /* CAM2_STANDBY_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, #endif { .gpio = 92, /* CAM2_RST_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, #if defined(CONFIG_MACH_LGE) /* LGE_CHANGE_S * Camera bring up - SPRINT * 2013-03-14, jinw.kim@lge.com */ { .gpio = 57, /* 13M_VCM_EN */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, { .gpio = 145, /* OIS_LDO_EN */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, { .gpio = 29, /* OIS_RESET */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, { .gpio = 96, /* 13M_VIO */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, { .gpio = 30, /* 
VT_LDO_EN */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[4], }, }, #endif }; #endif /* LGE_CHANGE_E, Camera bring up - Separate Rev.A and B setting */ #ifndef CONFIG_MACH_LGE static struct gpiomux_setting pri_auxpcm_act_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting pri_auxpcm_sus_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct msm_gpiomux_config msm8974_pri_auxpcm_configs[] __initdata = { { .gpio = 65, .settings = { [GPIOMUX_SUSPENDED] = &pri_auxpcm_sus_cfg, [GPIOMUX_ACTIVE] = &pri_auxpcm_act_cfg, }, }, { .gpio = 66, .settings = { [GPIOMUX_SUSPENDED] = &pri_auxpcm_sus_cfg, [GPIOMUX_ACTIVE] = &pri_auxpcm_act_cfg, }, }, { .gpio = 67, .settings = { [GPIOMUX_SUSPENDED] = &pri_auxpcm_sus_cfg, [GPIOMUX_ACTIVE] = &pri_auxpcm_act_cfg, }, }, { .gpio = 68, .settings = { [GPIOMUX_SUSPENDED] = &pri_auxpcm_sus_cfg, [GPIOMUX_ACTIVE] = &pri_auxpcm_act_cfg, }, }, }; #endif /* LGE_CHANGE_S, [WiFi][hayun.kim@lge.com], 2013-01-22, Wifi Bring Up */ #if defined (CONFIG_BCMDHD) || defined (CONFIG_BCMDHD_MODULE) #else static struct msm_gpiomux_config wcnss_5wire_interface[] = { { .gpio = 36, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 37, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 38, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 39, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 40, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, }; #endif /* LGE_CHANGE_E, [WiFi][hayun.kim@lge.com], 2013-01-22, Wifi Bring Up */ static struct msm_gpiomux_config msm_taiko_config[] __initdata = { { .gpio = 63, /* SYS_RST_N */ .settings = { [GPIOMUX_SUSPENDED] = &taiko_reset, }, }, { .gpio = 72, /* CDC_INT */ .settings = { [GPIOMUX_SUSPENDED] = &taiko_int, }, }, }; #ifdef CONFIG_SLIMPORT_ANX7808 static struct gpiomux_setting slimport_reset_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting slimport_int_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; static struct gpiomux_setting slimport_reset_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting slimport_int_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_IN, }; static struct msm_gpiomux_config slimport_configs[] __initdata = { { .gpio = 68, /* SLIMPORT RESET */ .settings = { [GPIOMUX_ACTIVE] = &slimport_reset_act_cfg, [GPIOMUX_SUSPENDED] = &slimport_reset_cfg, }, }, { .gpio = 28, /* SLIMPORT IRQ */ .settings = { [GPIOMUX_ACTIVE] = &slimport_int_act_cfg, [GPIOMUX_SUSPENDED] = &slimport_int_cfg, }, }, }; #endif #ifdef CONFIG_MACH_LGE static struct gpiomux_setting headset_active_cfg_gpio65 ={ .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; static struct gpiomux_setting headset_active_cfg_gpio64 ={ .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; static struct msm_gpiomux_config headset_configs[] ={ { .gpio = 64, 
.settings = { [GPIOMUX_ACTIVE] = &headset_active_cfg_gpio64, }, }, { .gpio = 65, .settings = { [GPIOMUX_ACTIVE] = &headset_active_cfg_gpio65, }, }, }; /* sensor GPIO setting for LGPS11, TODO: Need change APDS config*/ #if 0 /*sensor disable */ static struct gpiomux_setting sensor_int_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_IN, }; static struct gpiomux_setting sensor_en_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_HIGH, }; static struct msm_gpiomux_config sensor_configs[] __initdata = { { .gpio = 87, /* BLSP12 QUP I2C_DAT */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, { .gpio = 88, /* BLSP12 QUP I2C_CLK */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, { .gpio = 10, /* BLSP3 QUP I2C_DAT */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, { .gpio = 11, /* BLSP3 QUP I2C_CLK */ .settings = { [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, { .gpio = 3, /* GYRO_DATA_EN */ .settings = { [GPIOMUX_ACTIVE] = &sensor_en_config, [GPIOMUX_SUSPENDED] = &sensor_en_config, }, }, { .gpio = 65, /*ACCL_INT2 */ .settings = { [GPIOMUX_ACTIVE] = &sensor_int_config, [GPIOMUX_SUSPENDED] = &sensor_int_config, }, }, { .gpio = 66, /* GYRO_INT2*/ .settings = { [GPIOMUX_ACTIVE] = &sensor_int_config, [GPIOMUX_SUSPENDED] = &sensor_int_config, }, }, /* { .gpio = 67, //TODO : COMPASS_DRDY .settings = { [GPIOMUX_ACTIVE] = &sensor_int_config, [GPIOMUX_SUSPENDED] = &sensor_int_config, }, }, */ { .gpio = 73, /* ACCL_INT1*/ .settings = { [GPIOMUX_ACTIVE] = &sensor_int_config, [GPIOMUX_SUSPENDED] = &sensor_int_config, }, }, { .gpio = 74, /* PROXIMITY_INT */ .settings = { [GPIOMUX_ACTIVE] = &sensor_int_config, [GPIOMUX_SUSPENDED] = &sensor_int_config, }, }, { .gpio = 102, /* GYRO_int2 (DRDY)*/ .settings = { [GPIOMUX_ACTIVE] = &sensor_int_config, [GPIOMUX_SUSPENDED] = &sensor_int_config, }, }, }; #endif #endif #if defined(CONFIG_LGE_SM100) || defined(CONFIG_TSPDRV) static struct gpiomux_setting vibrator_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting vibrator_active_cfg_gpio27 = { .func = GPIOMUX_FUNC_6, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting vibrator_active_cfg_gpio60 = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct msm_gpiomux_config vibrator_configs[] = { { .gpio = 27, .settings = { [GPIOMUX_ACTIVE] = &vibrator_active_cfg_gpio27, [GPIOMUX_SUSPENDED] = &vibrator_suspend_cfg, }, }, { .gpio = 60, .settings = { [GPIOMUX_ACTIVE] = &vibrator_active_cfg_gpio60, [GPIOMUX_SUSPENDED] = &vibrator_suspend_cfg, }, }, }; #endif static struct gpiomux_setting sdc3_clk_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting sdc3_cmd_data_0_3_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting sdc3_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting sdc3_data_1_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct msm_gpiomux_config msm8974_sdc3_configs[] __initdata = { { /* DAT3 */ .gpio = 35, .settings = { [GPIOMUX_ACTIVE] = &sdc3_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdc3_suspend_cfg, }, }, { /* DAT2 */ .gpio = 36, .settings = { 
[GPIOMUX_ACTIVE] = &sdc3_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdc3_suspend_cfg, }, }, { /* DAT1 */ .gpio = 37, .settings = { [GPIOMUX_ACTIVE] = &sdc3_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdc3_data_1_suspend_cfg, }, }, { /* DAT0 */ .gpio = 38, .settings = { [GPIOMUX_ACTIVE] = &sdc3_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdc3_suspend_cfg, }, }, { /* CMD */ .gpio = 39, .settings = { [GPIOMUX_ACTIVE] = &sdc3_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdc3_suspend_cfg, }, }, { /* CLK */ .gpio = 40, .settings = { [GPIOMUX_ACTIVE] = &sdc3_clk_actv_cfg, [GPIOMUX_SUSPENDED] = &sdc3_suspend_cfg, }, }, }; static void msm_gpiomux_sdc3_install(void) { msm_gpiomux_install(msm8974_sdc3_configs, ARRAY_SIZE(msm8974_sdc3_configs)); } #ifdef CONFIG_MMC_MSM_SDC4_SUPPORT static struct gpiomux_setting sdc4_clk_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting sdc4_cmd_data_0_3_actv_cfg = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting sdc4_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting sdc4_data_1_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct msm_gpiomux_config msm8974_sdc4_configs[] __initdata = { { /* DAT3 */ .gpio = 92, .settings = { [GPIOMUX_ACTIVE] = &sdc4_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdc4_suspend_cfg, }, }, { /* DAT2 */ .gpio = 94, .settings = { [GPIOMUX_ACTIVE] = &sdc4_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdc4_suspend_cfg, }, }, { /* DAT1 */ .gpio = 95, .settings = { [GPIOMUX_ACTIVE] = &sdc4_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdc4_data_1_suspend_cfg, }, }, #if 0 /* Camera will use gpio 96 */ /* LGE_CHANGE_S * Camera bring up - SPRINT * 2013-03-14, jinw.kim@lge.com */ { /* DAT0 */ .gpio = 96, .settings = { [GPIOMUX_ACTIVE] = &sdc4_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdc4_suspend_cfg, }, }, #endif { /* CMD */ .gpio = 91, .settings = { [GPIOMUX_ACTIVE] = &sdc4_cmd_data_0_3_actv_cfg, [GPIOMUX_SUSPENDED] = &sdc4_suspend_cfg, }, }, { /* CLK */ .gpio = 93, .settings = { [GPIOMUX_ACTIVE] = &sdc4_clk_actv_cfg, [GPIOMUX_SUSPENDED] = &sdc4_suspend_cfg, }, }, }; static void msm_gpiomux_sdc4_install(void) { msm_gpiomux_install(msm8974_sdc4_configs, ARRAY_SIZE(msm8974_sdc4_configs)); } #else static void msm_gpiomux_sdc4_install(void) {} #endif /* CONFIG_MMC_MSM_SDC4_SUPPORT */ /* LGE_CHANGE_S, [BT][younghyun.kwon@lge.com], 2013-01-29 */ #ifdef CONFIG_LGE_BLUETOOTH static struct gpiomux_setting bt_gpio_uart_active_config = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, /* Should be PULL NONE */ }; static struct gpiomux_setting bt_gpio_uart_suspend_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, /* PULL Configuration */ }; static struct gpiomux_setting bt_rfkill_active_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting bt_rfkill_suspend_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting bt_host_wakeup_active_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_IN, }; static struct gpiomux_setting bt_host_wakeup_suspend_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = 
GPIOMUX_IN, }; static struct gpiomux_setting bt_wakeup_active_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting bt_wakeup_suspend_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting bt_pcm_active_config = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting bt_pcm_suspend_config = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct msm_gpiomux_config bt_msm_blsp_configs[] __initdata = { { .gpio = 53, /* BLSP2 UART10 TX */ .settings = { [GPIOMUX_ACTIVE] = &bt_gpio_uart_active_config , [GPIOMUX_SUSPENDED] = &bt_gpio_uart_suspend_config , }, }, { .gpio = 54, /* BLSP2 UART10 RX */ .settings = { [GPIOMUX_ACTIVE] = &bt_gpio_uart_active_config , [GPIOMUX_SUSPENDED] = &bt_gpio_uart_suspend_config , }, }, { .gpio = 55, /* BLSP2 UART10 CTS */ .settings = { [GPIOMUX_ACTIVE] = &bt_gpio_uart_active_config , [GPIOMUX_SUSPENDED] = &bt_gpio_uart_suspend_config , }, }, { .gpio = 56, /* BLSP2 UART10 RFR */ .settings = { [GPIOMUX_ACTIVE] = &bt_gpio_uart_active_config , [GPIOMUX_SUSPENDED] = &bt_gpio_uart_suspend_config , }, }, }; static struct msm_gpiomux_config bt_rfkill_configs[] = { { .gpio = 41, .settings = { [GPIOMUX_ACTIVE] = &bt_rfkill_active_config, [GPIOMUX_SUSPENDED] = &bt_rfkill_suspend_config, }, }, }; static struct msm_gpiomux_config bt_host_wakeup_configs[] __initdata = { { .gpio = 42, .settings = { [GPIOMUX_ACTIVE] = &bt_host_wakeup_active_config, [GPIOMUX_SUSPENDED] = &bt_host_wakeup_suspend_config, }, }, }; static struct msm_gpiomux_config bt_wakeup_configs[] __initdata = { { .gpio = 62, .settings = { [GPIOMUX_ACTIVE] = &bt_wakeup_active_config, [GPIOMUX_SUSPENDED] = &bt_wakeup_suspend_config, }, }, }; static struct msm_gpiomux_config bt_pcm_configs[] __initdata = { { .gpio = 79, /* BT_PCM_CLK */ .settings = { [GPIOMUX_ACTIVE] = &bt_pcm_active_config, [GPIOMUX_SUSPENDED] = &bt_pcm_suspend_config, }, }, { .gpio = 80, /* BT_PCM_SYNC */ .settings = { [GPIOMUX_ACTIVE] = &bt_pcm_active_config, [GPIOMUX_SUSPENDED] = &bt_pcm_suspend_config, }, }, { .gpio = 81, /* BT_PCM_DIN */ .settings = { [GPIOMUX_ACTIVE] = &bt_pcm_active_config, [GPIOMUX_SUSPENDED] = &bt_pcm_suspend_config, }, }, { .gpio = 82, /* BT_PCM_DOUT */ .settings = { [GPIOMUX_ACTIVE] = &bt_pcm_active_config, [GPIOMUX_SUSPENDED] = &bt_pcm_suspend_config, }, } }; static void bluetooth_msm_gpiomux_install(void) { /* UART */ msm_gpiomux_install(bt_msm_blsp_configs, ARRAY_SIZE(bt_msm_blsp_configs)); /* RFKILL */ msm_gpiomux_install(bt_rfkill_configs, ARRAY_SIZE(bt_rfkill_configs)); /* HOST WAKE-UP */ msm_gpiomux_install(bt_host_wakeup_configs, ARRAY_SIZE(bt_host_wakeup_configs)); /* BT WAKE-UP */ msm_gpiomux_install(bt_wakeup_configs, ARRAY_SIZE(bt_wakeup_configs)); /* PCM I/F */ msm_gpiomux_install(bt_pcm_configs, ARRAY_SIZE(bt_pcm_configs)); } #endif /* CONFIG_LGE_BLUETOOTH */ /* LGE_CHANGE_E, [BT][younghyun.kwon@lge.com], 2013-01-29 */ /* LGE_CHANGE_S, [NFC][minwoo.kwon@lge.com], 2013-03-07, NFC Bring up */ #ifdef CONFIG_LGE_NFC_PN544_C3 static struct gpiomux_setting nfc_pn544_sda_cfg = { .func = GPIOMUX_FUNC_3, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting nfc_pn544_scl_cfg = { .func = GPIOMUX_FUNC_3, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting nfc_pn544_ven_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = 
GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, }; static struct gpiomux_setting nfc_pn544_irq_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, .dir = GPIOMUX_IN, }; static struct gpiomux_setting nfc_pn544_mode_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_HIGH, }; static struct msm_gpiomux_config msm8974_nfc_configs[] __initdata = { { /* I2C SDA */ .gpio = 83, .settings = { [GPIOMUX_ACTIVE] = &nfc_pn544_sda_cfg, [GPIOMUX_SUSPENDED] = &nfc_pn544_sda_cfg, }, }, { /* I2C SCL */ .gpio = 84, .settings = { [GPIOMUX_ACTIVE] = &nfc_pn544_scl_cfg, [GPIOMUX_SUSPENDED] = &nfc_pn544_scl_cfg, }, }, { /* VEN */ .gpio = 94, .settings = { [GPIOMUX_ACTIVE] = &nfc_pn544_ven_cfg, [GPIOMUX_SUSPENDED] = &nfc_pn544_ven_cfg, }, }, { /* IRQ */ .gpio = 59, .settings = { [GPIOMUX_ACTIVE] = &nfc_pn544_irq_cfg, [GPIOMUX_SUSPENDED] = &nfc_pn544_irq_cfg, }, }, { /* MODE *//* WAKE */ .gpio = 95, .settings = { [GPIOMUX_ACTIVE] = &nfc_pn544_mode_cfg, [GPIOMUX_SUSPENDED] = &nfc_pn544_mode_cfg, }, }, }; #endif /* LGE_CHANGE_E, [NFC][minwoo.kwon@lge.com], 2013-03-07, NFC Bring up */ #if defined(CONFIG_USB_LGE_USB3_REDRIVER) static struct gpiomux_setting usb3_rd_en_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct msm_gpiomux_config usb3_rd_en_configs[] = { { .gpio = 89, /* USB3_RD_EN */ .settings = { [GPIOMUX_ACTIVE] = &usb3_rd_en_cfg, [GPIOMUX_SUSPENDED] = &usb3_rd_en_cfg, }, }, }; #endif static struct msm_gpiomux_config apq8074_dragonboard_ts_config[] __initdata = { { /* BLSP1 QUP I2C_DATA */ .gpio = 2, .settings = { [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, { /* BLSP1 QUP I2C_CLK */ .gpio = 3, .settings = { [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, }; void __init msm_8974_init_gpiomux(void) { int rc; rc = msm_gpiomux_init_dt(); if (rc) { pr_err("%s failed %d\n", __func__, rc); return; } /* soojung.lim@lge.com, 2013-05-23 * To use 24MHz GP/GCC_GP clock for V2 H/W */ if (socinfo_get_version() >= 0x20000) { g_is_tlmm_spare_reg_value = 0x7; msm_tlmm_misc_reg_write(TLMM_SPARE_REG, 0x7); } #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE) msm_gpiomux_install(msm_eth_configs, ARRAY_SIZE(msm_eth_configs)); #endif msm_gpiomux_install(msm_blsp_configs, ARRAY_SIZE(msm_blsp_configs)); #ifndef CONFIG_MACH_LGE msm_gpiomux_install(msm_blsp2_uart7_configs, ARRAY_SIZE(msm_blsp2_uart7_configs)); #endif #if defined(CONFIG_MACH_LGE) #ifdef CONFIG_MAX17048_FUELGAUGE /* [Power] yeonhwa.so@lge.com * If MAX17048 is removed, we modify and use it*/ if (HW_REV_A <= lge_get_board_revno()) { msm_gpiomux_install(msm_fuel_gauge_configs, ARRAY_SIZE(msm_fuel_gauge_configs)); } #endif #endif /* LGE_CHANGE_S, [WiFi][hayun.kim@lge.com], 2013-01-22, Wifi Bring Up */ #if defined (CONFIG_BCMDHD) || defined (CONFIG_BCMDHD_MODULE) #else msm_gpiomux_install(wcnss_5wire_interface, ARRAY_SIZE(wcnss_5wire_interface)); #endif /* LGE_CHANGE_E, [WiFi][hayun.kim@lge.com], 2013-01-22, Wifi Bring Up */ msm_gpiomux_install(msm8974_slimbus_config, ARRAY_SIZE(msm8974_slimbus_config)); msm_gpiomux_install(msm_touch_configs, ARRAY_SIZE(msm_touch_configs)); #ifndef CONFIG_LGE_IRRC msm_gpiomux_install(hap_lvl_shft_config, ARRAY_SIZE(hap_lvl_shft_config)); #endif #ifndef CONFIG_MACH_LGE msm_gpiomux_install(msm_sensor_configs, ARRAY_SIZE(msm_sensor_configs)); #endif msm_gpiomux_install(&sd_card_det, 1); if (machine_is_apq8074() && (of_board_is_liquid() || \ of_board_is_dragonboard())) msm_gpiomux_sdc3_install(); 
msm_gpiomux_sdc4_install(); msm_gpiomux_install(msm_taiko_config, ARRAY_SIZE(msm_taiko_config)); #ifndef CONFIG_MACH_LGE msm_gpiomux_install(msm_hsic_configs, ARRAY_SIZE(msm_hsic_configs)); #endif msm_gpiomux_install(msm_hsic_hub_configs, ARRAY_SIZE(msm_hsic_hub_configs)); msm_gpiomux_install(msm_hdmi_configs, ARRAY_SIZE(msm_hdmi_configs)); if (of_board_is_fluid()) msm_gpiomux_install(msm_mhl_configs, ARRAY_SIZE(msm_mhl_configs)); #ifndef CONFIG_MACH_LGE msm_gpiomux_install(msm8974_pri_auxpcm_configs, ARRAY_SIZE(msm8974_pri_auxpcm_configs)); #endif #if defined(CONFIG_MACH_LGE) if (lge_get_board_revno() < HW_REV_B) msm_gpiomux_install_nowrite(msm_lcd_configs_rev_a,ARRAY_SIZE(msm_lcd_configs_rev_a)); else msm_gpiomux_install_nowrite(msm_lcd_configs_rev_b,ARRAY_SIZE(msm_lcd_configs_rev_b)); #else msm_gpiomux_install_nowrite(msm_lcd_configs,ARRAY_SIZE(msm_lcd_configs)); #endif if (of_board_is_rumi()) msm_gpiomux_install(msm_rumi_blsp_configs, ARRAY_SIZE(msm_rumi_blsp_configs)); if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_MDM) msm_gpiomux_install(mdm_configs, ARRAY_SIZE(mdm_configs)); #ifdef CONFIG_SLIMPORT_ANX7808 msm_gpiomux_install(slimport_configs, ARRAY_SIZE(slimport_configs)); #endif #if defined(CONFIG_MACH_LGE) //msm_gpiomux_install(msm_display_configs, ARRAY_SIZE(msm_display_configs)); if (lge_get_board_revno() > HW_REV_A) msm_gpiomux_install(headset_configs,ARRAY_SIZE(headset_configs)); #endif #if 0 /* disable sensor GPIO setting (enable ADSP)*/ msm_gpiomux_install(sensor_configs, ARRAY_SIZE(sensor_configs)); #endif #if defined(CONFIG_LGE_SM100) || defined(CONFIG_TSPDRV) msm_gpiomux_install(vibrator_configs, ARRAY_SIZE(vibrator_configs)); #endif /* LGE_CHANGE_S, [BT][younghyun.kwon@lge.com], 2013-01-29 */ #ifdef CONFIG_LGE_BLUETOOTH bluetooth_msm_gpiomux_install(); #endif /* CONFIG_LGE_BLUETOOTH */ /* LGE_CHANGE_E, [BT][younghyun.kwon@lge.com], 2013-01-29 */ /* LGE_CHANGE_S, [NFC][minwoo.kwon@lge.com], 2013-03-07, NFC Bring up */ #ifdef CONFIG_LGE_NFC_PN544_C3 msm_gpiomux_install(msm8974_nfc_configs, ARRAY_SIZE(msm8974_nfc_configs)); #endif /* LGE_CHANGE_E, [NFC][minwoo.kwon@lge.com], 2013-03-07, NFC Bring up */ /* LGE_CHANGE_S matthew.choi@lge.com 130319 GPIO Setting for Hall IC */ msm_gpiomux_install(msm_hall_ic_configs, ARRAY_SIZE(msm_hall_ic_configs)); /* LGE_CHANGE_E matthew.choi@lge.com 130319 GPIO Setting for Hall IC */ #if defined(CONFIG_MACH_LGE) /* LGE_CHANGE_S * Camera bring up - Separate Rev.A and B setting * 2013-03-20, youmi.jun@lge.com */ switch(lge_get_board_revno()) { case HW_REV_A: msm_gpiomux_install(msm_sensor_configs_rev_a, ARRAY_SIZE(msm_sensor_configs_rev_a)); break; case HW_REV_B: default: msm_gpiomux_install(msm_sensor_configs_rev_b, ARRAY_SIZE(msm_sensor_configs_rev_b)); break; } /* LGE_CHANGE_E, Camera bring up - Separate Rev.A and B setting */ #endif //#if defined(CONFIG_MACH_LGE) #if defined(CONFIG_USB_LGE_USB3_REDRIVER) if(lge_get_board_revno() < HW_REV_B) { msm_gpiomux_install(usb3_rd_en_configs, ARRAY_SIZE(usb3_rd_en_configs)); } #endif #ifdef CONFIG_SND_FM_RADIO if (HW_REV_A < lge_get_board_revno()) { msm_gpiomux_install(msm8974_tert_mi2s_configs,ARRAY_SIZE(msm8974_tert_mi2s_configs)); // msm_gpiomux_install(fm_radio_configs,ARRAY_SIZE(fm_radio_configs)); } #endif if (of_board_is_dragonboard() && machine_is_apq8074()) msm_gpiomux_install(apq8074_dragonboard_ts_config, ARRAY_SIZE(apq8074_dragonboard_ts_config)); }
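
/*
 * Illustrative sketch, not part of the original board file: every block
 * above follows the same pattern -- one gpiomux_setting for the ACTIVE
 * state, one for SUSPENDED, bound to a pin by an msm_gpiomux_config and
 * registered through msm_gpiomux_install().  The GPIO number (99) and the
 * example_* names below are hypothetical.
 */
#if 0 /* example only */
static struct gpiomux_setting example_int_cfg = {
	.func = GPIOMUX_FUNC_GPIO,	/* plain GPIO, no alternate function */
	.drv = GPIOMUX_DRV_2MA,		/* lowest drive strength, fine for an input */
	.pull = GPIOMUX_PULL_UP,	/* keep the line at a defined level while idle */
	.dir = GPIOMUX_IN,
};

static struct msm_gpiomux_config example_config __initdata = {
	.gpio = 99,			/* hypothetical pin */
	.settings = {
		[GPIOMUX_ACTIVE] = &example_int_cfg,
		[GPIOMUX_SUSPENDED] = &example_int_cfg,
	},
};

/* Registered the same way sd_card_det is installed above: */
/* msm_gpiomux_install(&example_config, 1); */
#endif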
gpl-2.0
fmertz/hd44780-ezio
clients/lcdproc/machine_SunOS.c
14
9346
/** \file clients/lcdproc/machine_SunOS.c
 * Collects system information on Solaris.
 */

/*-
 * This file is part of lcdproc, the lcdproc client.
 *
 * This file is released under the GNU General Public License.
 * Refer to the COPYING file distributed with this package.
 */

#ifdef sun

#include <sys/types.h>
#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <strings.h>
#include <fcntl.h>
#include <dirent.h>
#include <utmpx.h>
#include <procfs.h>
#include <kstat.h>
#include <errno.h>
#include <sys/utsname.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/loadavg.h>
#include <sys/mount.h>
#include <sys/cpuvar.h>
#include <sys/swap.h>
#include <sys/statvfs.h>

#include "machine.h"
#include "main.h"
#include "config.h"
#include "shared/LL.h"

static kstat_ctl_t *kc;

int
machine_init(void)
{
	kc = NULL;

	kc = kstat_open();
	if (kc == NULL) {
		perror("kstat_open");
		return (FALSE);
	}

	return (TRUE);
}

int
machine_close(void)
{
	if (kc != NULL) {
		kstat_close(kc);
		kc = NULL;
	}
	return (TRUE);
}

int
machine_get_battstat(int *acstat, int *battflag, int *percent)
{
	*acstat = LCDP_AC_ON;
	*battflag = LCDP_BATT_ABSENT;
	*percent = 100;
	return (TRUE);
}

int
machine_get_fs(mounts_type fs[], int *cnt)
{
	FILE *mtab_fd;
	char line[256];
	int x = 0, y;
#ifdef STAT_STATVFS
	struct statvfs fsinfo;
#else
	struct statfs fsinfo;
#endif

#ifdef MTAB_FILE
	mtab_fd = fopen(MTAB_FILE, "r");
#else
#error "Can't find your mounted filesystem table file."
#endif

	/* Get rid of old, unmounted filesystems... */
	memset(fs, 0, sizeof(mounts_type) * 256);

	while (x < 256) {
		if (fgets(line, 256, mtab_fd) == NULL)
			break;	/* EOF: the file is closed and *cnt set once, below */

		sscanf(line, "%s %s %s", fs[x].dev, fs[x].mpoint, fs[x].type);

		if (strcmp(fs[x].type, "proc")
#ifndef STAT_NFS
		    && strcmp(fs[x].type, "nfs")
#endif
#ifndef STAT_SMBFS
		    && strcmp(fs[x].type, "smbfs")
#endif
		) {
#ifdef STAT_STATVFS
			y = statvfs(fs[x].mpoint, &fsinfo);
#elif STAT_STATFS2_BSIZE
			y = statfs(fs[x].mpoint, &fsinfo);
#elif STAT_STATFS4
			y = statfs(fs[x].mpoint, &fsinfo, sizeof(fsinfo), 0);
#else
#error "statfs for this system not yet supported"
#endif
			fs[x].blocks = fsinfo.f_blocks;
			if (fs[x].blocks > 0) {
				fs[x].bsize = fsinfo.f_bsize;
				fs[x].bfree = fsinfo.f_bfree;
				fs[x].files = fsinfo.f_files;
				fs[x].ffree = fsinfo.f_ffree;
				x++;
			}
		}
	}
	fclose(mtab_fd);
	*cnt = x;

	return (TRUE);
}

int
machine_get_load(load_type * curr_load)
{
	static load_type last_load = {0, 0, 0, 0, 0};
	load_type load;
	kstat_t *k_space;
	struct cpu_stat cinfo;

	k_space = kstat_lookup(kc, "cpu_stat", 0, "cpu_stat0");
	if (k_space == NULL) {
		fprintf(stderr, "kstat lookup error\n");
		return (FALSE);
	}
	if (kstat_read(kc, k_space, NULL) == -1) {
		fprintf(stderr, "kstat read error\n");
		return (FALSE);
	}
	k_space = kstat_lookup(kc, "cpu_stat", -1, "cpu_stat0");
	if (k_space == NULL) {
		fprintf(stderr, "kstat lookup error\n");
		return (FALSE);
	}
	if (kstat_read(kc, k_space, &cinfo) == -1) {
		fprintf(stderr, "kstat read error\n");
		return (FALSE);
	}

	load.idle = cinfo.cpu_sysinfo.cpu[CPU_IDLE];
	load.user = cinfo.cpu_sysinfo.cpu[CPU_USER];
	load.system = cinfo.cpu_sysinfo.cpu[CPU_KERNEL];
	load.nice = cinfo.cpu_sysinfo.cpu[CPU_WAIT];
	load.total = load.user + load.nice + load.system + load.idle;

	curr_load->user = load.user - last_load.user;
	curr_load->nice = load.nice - last_load.nice;
	curr_load->system = load.system - last_load.system;
	curr_load->idle = load.idle - last_load.idle;
	curr_load->total = load.total - last_load.total;

	/* struct assignment is legal in C89 */
	last_load = load;

	return (TRUE);
}

int
machine_get_loadavg(double *load)
{
	double loadavg[LOADAVG_NSTATS];

	if (getloadavg(loadavg, LOADAVG_NSTATS) <= LOADAVG_1MIN)
		return (FALSE);

	*load = loadavg[LOADAVG_1MIN];

	return (TRUE);
}

int
machine_get_meminfo(meminfo_type * result)
{
#define MAXSTRSIZE 80
	swaptbl_t *s;
	int i, n, num;
	char *strtab;	/* string table for path names */

	s = NULL;

	result[0].total = sysconf(_SC_PHYS_PAGES) * sysconf(_SC_PAGESIZE) / 1024;
	result[0].free = sysconf(_SC_AVPHYS_PAGES) * sysconf(_SC_PAGESIZE) / 1024;
	result[0].shared = 0;
	result[0].buffers = 0;
	result[0].cache = 0;

again:
	if ((num = swapctl(SC_GETNSWP, 0)) == -1) {
		perror("swapctl: GETNSWP");
		return (FALSE);
	}
	if (num == 0) {
		fprintf(stderr, "No Swap Devices Configured\n");
		return (FALSE);
	}
	/* allocate swaptable for num+1 entries */
	if ((s = (swaptbl_t *) malloc(num * sizeof(swapent_t) +
				      sizeof(struct swaptable))) == NULL) {
		perror("malloc swap");
		return (FALSE);
	}
	/* allocate num+1 string holders */
	if ((strtab = (char *)malloc((num + 1) * MAXSTRSIZE)) == NULL) {
		perror("malloc string holder");
		return (FALSE);
	}
	/* initialize string pointers */
	for (i = 0; i < (num + 1); i++) {
		s->swt_ent[i].ste_path = strtab + (i * MAXSTRSIZE);
	}

	s->swt_n = num + 1;
	if ((n = swapctl(SC_LIST, s)) < 0) {
		perror("swapctl");
		return (FALSE);
	}
	/* more were added */
	if (n > num) {
		free(s);
		free(strtab);
		goto again;
	}

	result[1].total = 0;
	result[1].free = 0;

	for (i = 0; i < n; i++) {
		result[1].total = result[1].total + s->swt_ent[i].ste_pages * sysconf(_SC_PAGESIZE) / 1024;
		result[1].free = result[1].free + s->swt_ent[i].ste_free * sysconf(_SC_PAGESIZE) / 1024;
	}

	return (TRUE);
}

int
machine_get_procs(LinkedList * procs)
{
	char buf[128];
	DIR *proc;
	FILE *StatusFile;
	struct dirent *procdir;
	procinfo_type *p;

	char procName[16];
	int procSize, procRSS, procData, procStk, procExe;
	int threshold = 400, unique;

	if ((proc = opendir("/proc")) == NULL) {
		perror("open /proc");
		return (FALSE);
	}

	while ((procdir = readdir(proc))) {
		psinfo_t psinfo;

		if (!strchr("1234567890", procdir->d_name[0]))
			continue;

		sprintf(buf, "/proc/%s/psinfo", procdir->d_name);
		if ((StatusFile = fopen(buf, "r")) == NULL) {
			/*
			 * Not a serious error; process has finished before
			 * we could examine it:
			 */
			continue;
		}

		procRSS = procSize = procData = procStk = procExe = 0;

		fread(&psinfo, sizeof(psinfo), 1, StatusFile);
		strcpy(procName, psinfo.pr_fname);
		procSize = psinfo.pr_size;
		procRSS = psinfo.pr_rssize;
		/*
		 * Following values not accurate, not sure what needs to be
		 * set to
		 */
		procData = psinfo.pr_size;
		procStk = 0;
		procExe = 0;
		fclose(StatusFile);

		if (procSize > threshold) {
			/* Figure out if it's sharing any memory... */
			unique = 1;
			LL_Rewind(procs);
			do {
				p = LL_Get(procs);
				if (p) {
					if (0 == strcmp(p->name, procName)) {
						unique = 0;
						p->number++;
						p->totl += procData + procStk + procExe;
					}
				}
			} while (LL_Next(procs) == 0);

			/* If this is the first one by this name... */
			if (unique) {
				p = malloc(sizeof(procinfo_type));
				if (p == NULL) {
					perror("allocating process entry");
					goto end;
				}
				strcpy(p->name, procName);
				p->totl = procData + procStk + procExe;
				p->number = 1;

				/* TODO: Check for errors here? */
				LL_Push(procs, (void *)p);
			}
		}
	}

end:
	closedir(proc);

	return (TRUE);
}

int
machine_get_smpload(load_type * result, int *numcpus)
{
	static load_type last_load[MAX_CPUS];
	load_type curr_load[MAX_CPUS];
	int ncpu = 0;
	int max_cpu, count;

	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	for (count = 0; count < max_cpu; count++) {
		kstat_t *k_space;
		char buffer[16];

		sprintf(buffer, "cpu_stat%d", count);
		k_space = kstat_lookup(kc, "cpu_stat", count, buffer);

		if ((k_space != NULL) && (kstat_read(kc, k_space, NULL) != -1)) {
			struct cpu_stat cinfo;

			k_space = kstat_lookup(kc, "cpu_stat", -1, buffer);
			kstat_read(kc, k_space, &cinfo);

			curr_load[ncpu].idle = cinfo.cpu_sysinfo.cpu[CPU_IDLE];
			curr_load[ncpu].user = cinfo.cpu_sysinfo.cpu[CPU_USER];
			curr_load[ncpu].system = cinfo.cpu_sysinfo.cpu[CPU_KERNEL];
			curr_load[ncpu].nice = cinfo.cpu_sysinfo.cpu[CPU_WAIT];
			curr_load[ncpu].total = curr_load[ncpu].user + curr_load[ncpu].nice +
						curr_load[ncpu].system + curr_load[ncpu].idle;

			result[ncpu].total = curr_load[ncpu].total - last_load[ncpu].total;
			result[ncpu].user = curr_load[ncpu].user - last_load[ncpu].user;
			result[ncpu].nice = curr_load[ncpu].nice - last_load[ncpu].nice;
			result[ncpu].system = curr_load[ncpu].system - last_load[ncpu].system;
			result[ncpu].idle = curr_load[ncpu].idle - last_load[ncpu].idle;

			/* struct assignment is legal in C89 */
			last_load[ncpu] = curr_load[ncpu];

			/* restrict # CPUs to min(*numcpus, MAX_CPUS) */
			ncpu++;
			if ((ncpu >= *numcpus) || (ncpu >= MAX_CPUS))
				break;
		}
	}

	*numcpus = ncpu;

	return (TRUE);
}

/* TODO get idle time! */
int
machine_get_uptime(double *up, double *idle)
{
	struct utmpx *u, id;
	load_type curr_load;

	*up = 0;
	*idle = 0;

	id.ut_type = BOOT_TIME;
	u = getutxid(&id);
	if (u == NULL)
		return (FALSE);
	*up = time(0) - u->ut_xtime;

	if (machine_get_load(&curr_load) == FALSE)
		*idle = 100.;
	else
		*idle = 100. * curr_load.idle / curr_load.total;

	return (TRUE);
}

/* Get network statistics */
int
machine_get_iface_stats(IfaceInfo * interface)
{
	/* Implementation missing */
	return 0;
}

#endif /* sun */
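
/*
 * Illustrative sketch, not part of the original file: machine_get_load()
 * and machine_get_smpload() both read the cumulative per-CPU tick counters
 * through libkstat and report deltas against a static snapshot.  A minimal
 * standalone reader of the same counters (error handling elided) would be:
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "cpu_stat", 0, "cpu_stat0");
 *	struct cpu_stat cinfo;
 *
 *	if (ksp != NULL && kstat_read(kc, ksp, &cinfo) != -1) {
 *		// cpu[CPU_IDLE], cpu[CPU_USER], cpu[CPU_KERNEL], cpu[CPU_WAIT]
 *		// are raw counters since boot; percentages come from the
 *		// difference between two snapshots, as done above.
 *	}
 *	kstat_close(kc);
 */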
gpl-2.0
vinriviere/m68k-atari-mint-binutils-gdb
gdb/features/tic6x-gp.c
14
2566
/* THIS FILE IS GENERATED.  -*- buffer-read-only: t -*- vi:set ro:
  Original: tic6x-gp.xml */

#include "gdbsupport/tdesc.h"

static int
create_feature_tic6x_gp (struct target_desc *result, long regnum)
{
  struct tdesc_feature *feature;

  feature = tdesc_create_feature (result, "org.gnu.gdb.tic6x.gp");
  tdesc_create_reg (feature, "A16", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A17", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A18", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A19", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A20", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A21", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A22", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A23", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A24", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A25", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A26", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A27", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A28", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A29", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A30", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "A31", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B16", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B17", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B18", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B19", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B20", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B21", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B22", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B23", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B24", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B25", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B26", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B27", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B28", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B29", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B30", regnum++, 1, NULL, 32, "uint32");
  tdesc_create_reg (feature, "B31", regnum++, 1, NULL, 32, "uint32");
  return regnum;
}
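
/* Illustrative note, not part of the generated file: creators like this
   are chained by a generated initialize routine, each one registering its
   registers starting at the regnum it was handed and returning the next
   free number.  A hypothetical caller would look like:

	struct target_desc *tdesc = ...;	// built by the tic6x tdep code
	long regnum = 0;
	regnum = create_feature_tic6x_core (tdesc, regnum);
	regnum = create_feature_tic6x_gp (tdesc, regnum);

   so the A16..A31/B16..B31 registers here land immediately after the core
   feature's registers in the target description.  */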
gpl-2.0
surkovalex/xbmc
xbmc/dialogs/GUIDialogMediaFilter.cpp
14
34768
/* * Copyright (C) 2012-2015 Team Kodi * http://kodi.tv * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Kodi; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #include "GUIDialogMediaFilter.h" #include "DbUrl.h" #include "FileItem.h" #include "GUIUserMessages.h" #include "XBDateTime.h" #include "guilib/GUIWindowManager.h" #include "guilib/LocalizeStrings.h" #include "music/MusicDatabase.h" #include "music/MusicDbUrl.h" #include "playlists/SmartPlayList.h" #include "settings/SettingUtils.h" #include "settings/lib/Setting.h" #include "settings/windows/GUIControlSettings.h" #include "utils/log.h" #include "utils/SortUtils.h" #include "utils/StringUtils.h" #include "utils/Variant.h" #include "video/VideoDatabase.h" #include "video/VideoDbUrl.h" #define CONTROL_HEADING 2 #define CONTROL_OKAY_BUTTON 28 #define CONTROL_CANCEL_BUTTON 29 #define CONTROL_CLEAR_BUTTON 30 #define CHECK_ALL -1 #define CHECK_NO 0 #define CHECK_YES 1 #define CHECK_LABEL_ALL 593 #define CHECK_LABEL_NO 106 #define CHECK_LABEL_YES 107 static const CGUIDialogMediaFilter::Filter filterList[] = { { "movies", FieldTitle, 556, SettingTypeString, "edit", "string", CDatabaseQueryRule::OPERATOR_CONTAINS }, { "movies", FieldRating, 563, SettingTypeNumber, "range", "number", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "movies", FieldUserRating, 38018, SettingTypeInteger, "range", "integer", CDatabaseQueryRule::OPERATOR_BETWEEN }, //{ "movies", FieldTime, 180, SettingTypeInteger, "range", "time", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "movies", FieldInProgress, 575, SettingTypeInteger, "toggle", "", CDatabaseQueryRule::OPERATOR_FALSE }, { "movies", FieldYear, 562, SettingTypeInteger, "range", "integer", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "movies", FieldTag, 20459, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "movies", FieldGenre, 515, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "movies", FieldActor, 20337, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "movies", FieldDirector, 20339, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "movies", FieldStudio, 572, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "tvshows", FieldTitle, 556, SettingTypeString, "edit", "string", CDatabaseQueryRule::OPERATOR_CONTAINS }, //{ "tvshows", FieldTvShowStatus, 126, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "tvshows", FieldRating, 563, SettingTypeNumber, "range", "number", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "tvshows", FieldUserRating, 38018, SettingTypeInteger, "range", "integer", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "tvshows", FieldInProgress, 575, SettingTypeInteger, "toggle", "", CDatabaseQueryRule::OPERATOR_FALSE }, { "tvshows", FieldYear, 562, SettingTypeInteger, "range", "integer", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "tvshows", FieldTag, 20459, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, 
{ "tvshows", FieldGenre, 515, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "tvshows", FieldActor, 20337, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "tvshows", FieldDirector, 20339, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "tvshows", FieldStudio, 572, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "episodes", FieldTitle, 556, SettingTypeString, "edit", "string", CDatabaseQueryRule::OPERATOR_CONTAINS }, { "episodes", FieldRating, 563, SettingTypeNumber, "range", "number", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "episodes", FieldUserRating, 38018, SettingTypeInteger, "range", "integer", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "episodes", FieldAirDate, 20416, SettingTypeInteger, "range", "date", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "episodes", FieldInProgress, 575, SettingTypeInteger, "toggle", "", CDatabaseQueryRule::OPERATOR_FALSE }, { "episodes", FieldActor, 20337, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "episodes", FieldDirector, 20339, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "musicvideos", FieldTitle, 556, SettingTypeString, "edit", "string", CDatabaseQueryRule::OPERATOR_CONTAINS }, { "musicvideos", FieldRating, 563, SettingTypeNumber, "range", "number", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "musicvideos", FieldUserRating, 38018, SettingTypeInteger, "range", "integer", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "musicvideos", FieldArtist, 557, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "musicvideos", FieldAlbum, 558, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, //{ "musicvideos", FieldTime, 180, SettingTypeInteger, "range", "time", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "musicvideos", FieldYear, 562, SettingTypeInteger, "range", "integer", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "musicvideos", FieldTag, 20459, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "musicvideos", FieldGenre, 515, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "musicvideos", FieldDirector, 20339, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "musicvideos", FieldStudio, 572, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "artists", FieldArtist, 557, SettingTypeString, "edit", "string", CDatabaseQueryRule::OPERATOR_CONTAINS }, { "artists", FieldGenre, 515, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "albums", FieldAlbum, 556, SettingTypeString, "edit", "string", CDatabaseQueryRule::OPERATOR_CONTAINS }, { "albums", FieldArtist, 557, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "albums", FieldRating, 563, SettingTypeNumber, "range", "number", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "albums", FieldUserRating, 38018, SettingTypeInteger, "range", "integer", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "albums", FieldAlbumType, 564, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "albums", FieldYear, 562, SettingTypeInteger, "range", "integer", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "albums", FieldGenre, 515, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "albums", FieldMusicLabel, 21899, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "songs", FieldTitle, 556, SettingTypeString, "edit", 
"string", CDatabaseQueryRule::OPERATOR_CONTAINS }, { "songs", FieldAlbum, 558, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "songs", FieldArtist, 557, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "songs", FieldTime, 180, SettingTypeInteger, "range", "time", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "songs", FieldRating, 563, SettingTypeNumber, "range", "number", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "songs", FieldUserRating, 38018, SettingTypeInteger, "range", "integer", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "songs", FieldYear, 562, SettingTypeInteger, "range", "integer", CDatabaseQueryRule::OPERATOR_BETWEEN }, { "songs", FieldGenre, 515, SettingTypeList, "list", "string", CDatabaseQueryRule::OPERATOR_EQUALS }, { "songs", FieldPlaycount, 567, SettingTypeInteger, "range", "integer", CDatabaseQueryRule::OPERATOR_BETWEEN }, }; #define NUM_FILTERS sizeof(filterList) / sizeof(CGUIDialogMediaFilter::Filter) CGUIDialogMediaFilter::CGUIDialogMediaFilter() : CGUIDialogSettingsManualBase(WINDOW_DIALOG_MEDIA_FILTER, "DialogSettings.xml"), m_dbUrl(NULL), m_filter(NULL) { } CGUIDialogMediaFilter::~CGUIDialogMediaFilter() { Reset(); } bool CGUIDialogMediaFilter::OnMessage(CGUIMessage& message) { switch (message.GetMessage()) { case GUI_MSG_CLICKED: { if (message.GetSenderId()== CONTROL_CLEAR_BUTTON) { m_filter->Reset(); m_filter->SetType(m_mediaType); for (std::map<std::string, Filter>::iterator filter = m_filters.begin(); filter != m_filters.end(); filter++) { filter->second.rule = NULL; filter->second.setting->Reset(); } TriggerFilter(); return true; } break; } case GUI_MSG_REFRESH_LIST: { TriggerFilter(); UpdateControls(); break; } case GUI_MSG_WINDOW_DEINIT: { Reset(); break; } default: break; } return CGUIDialogSettingsManualBase::OnMessage(message); } void CGUIDialogMediaFilter::ShowAndEditMediaFilter(const std::string &path, CSmartPlaylist &filter) { CGUIDialogMediaFilter *dialog = (CGUIDialogMediaFilter *)g_windowManager.GetWindow(WINDOW_DIALOG_MEDIA_FILTER); if (dialog == NULL) return; // initialize and show the dialog dialog->Initialize(); dialog->m_filter = &filter; // must be called after setting the filter/smartplaylist if (!dialog->SetPath(path)) return; dialog->Open(); } void CGUIDialogMediaFilter::OnWindowLoaded() { CGUIDialogSettingsManualBase::OnWindowLoaded(); // we don't need the cancel button so let's hide it SET_CONTROL_HIDDEN(CONTROL_CANCEL_BUTTON); } void CGUIDialogMediaFilter::OnInitWindow() { CGUIDialogSettingsManualBase::OnInitWindow(); UpdateControls(); } void CGUIDialogMediaFilter::OnSettingChanged(const CSetting *setting) { CGUIDialogSettingsManualBase::OnSettingChanged(setting); std::map<std::string, Filter>::iterator it = m_filters.find(setting->GetId()); if (it == m_filters.end()) return; bool remove = false; Filter& filter = it->second; if (filter.controlType == "edit") { std::string value = setting->ToString(); if (!value.empty()) { if (filter.rule == NULL) filter.rule = AddRule(filter.field, filter.ruleOperator); filter.rule->m_parameter.clear(); filter.rule->m_parameter.push_back(value); } else remove = true; } else if (filter.controlType == "toggle") { int choice = static_cast<const CSettingInt*>(setting)->GetValue(); if (choice > CHECK_ALL) { CDatabaseQueryRule::SEARCH_OPERATOR ruleOperator = choice == CHECK_YES ? 
CDatabaseQueryRule::OPERATOR_TRUE : CDatabaseQueryRule::OPERATOR_FALSE; if (filter.rule == NULL) filter.rule = AddRule(filter.field, ruleOperator); else filter.rule->m_operator = ruleOperator; } else remove = true; } else if (filter.controlType == "list") { std::vector<CVariant> values = CSettingUtils::GetList(static_cast<const CSettingList*>(setting)); if (!values.empty()) { if (filter.rule == NULL) filter.rule = AddRule(filter.field, filter.ruleOperator); filter.rule->m_parameter.clear(); for (std::vector<CVariant>::const_iterator itValue = values.begin(); itValue != values.end(); ++itValue) filter.rule->m_parameter.push_back(itValue->asString()); } else remove = true; } else if (filter.controlType == "range") { const CSettingList *settingList = static_cast<const CSettingList*>(setting); std::vector<CVariant> values = CSettingUtils::GetList(settingList); if (values.size() != 2) return; std::string strValueLower, strValueUpper; const CSetting *definition = settingList->GetDefinition(); if (definition->GetType() == SettingTypeInteger) { const CSettingInt *definitionInt = static_cast<const CSettingInt*>(definition); int valueLower = static_cast<int>(values.at(0).asInteger()); int valueUpper = static_cast<int>(values.at(1).asInteger()); if (valueLower > definitionInt->GetMinimum() || valueUpper < definitionInt->GetMaximum()) { if (filter.controlFormat == "date") { strValueLower = CDateTime(static_cast<time_t>(valueLower)).GetAsDBDate(); strValueUpper = CDateTime(static_cast<time_t>(valueUpper)).GetAsDBDate(); } else if (filter.controlFormat == "time") { strValueLower = CDateTime(static_cast<time_t>(valueLower)).GetAsLocalizedTime("mm:ss"); strValueUpper = CDateTime(static_cast<time_t>(valueUpper)).GetAsLocalizedTime("mm:ss"); } else { strValueLower = values.at(0).asString(); strValueUpper = values.at(1).asString(); } } } else if (definition->GetType() == SettingTypeNumber) { const CSettingNumber *definitionNumber = static_cast<const CSettingNumber*>(definition); float valueLower = values.at(0).asFloat(); float valueUpper = values.at(1).asFloat(); if (valueLower > definitionNumber->GetMinimum() || valueUpper < definitionNumber->GetMaximum()) { strValueLower = values.at(0).asString(); strValueUpper = values.at(1).asString(); } } else return; if (!strValueLower.empty() && !strValueUpper.empty()) { // prepare the filter rule if (filter.rule == NULL) filter.rule = AddRule(filter.field, filter.ruleOperator); filter.rule->m_parameter.clear(); filter.rule->m_parameter.push_back(strValueLower); filter.rule->m_parameter.push_back(strValueUpper); } else remove = true; } else return; // we need to remove the existing rule for the title if (remove && filter.rule != NULL) { DeleteRule(filter.field); filter.rule = NULL; } CGUIMessage msg(GUI_MSG_REFRESH_LIST, GetID(), 0); g_windowManager.SendThreadMessage(msg, WINDOW_DIALOG_MEDIA_FILTER); } void CGUIDialogMediaFilter::SetupView() { CGUIDialogSettingsManualBase::SetupView(); // set the heading label based on the media type uint32_t localizedMediaId = 0; if (m_mediaType == "movies") localizedMediaId = 20342; else if (m_mediaType == "tvshows") localizedMediaId = 20343; else if (m_mediaType == "episodes") localizedMediaId = 20360; else if (m_mediaType == "musicvideos") localizedMediaId = 20389; else if (m_mediaType == "artists") localizedMediaId = 133; else if (m_mediaType == "albums") localizedMediaId = 132; else if (m_mediaType == "songs") localizedMediaId = 134; // set the heading SET_CONTROL_LABEL(CONTROL_HEADING, 
StringUtils::Format(g_localizeStrings.Get(1275).c_str(), g_localizeStrings.Get(localizedMediaId).c_str())); SET_CONTROL_LABEL(CONTROL_OKAY_BUTTON, 186); SET_CONTROL_LABEL(CONTROL_CLEAR_BUTTON, 192); } void CGUIDialogMediaFilter::InitializeSettings() { CGUIDialogSettingsManualBase::InitializeSettings(); if (m_filter == NULL) return; Reset(true); int handledRules = 0; CSettingCategory *category = AddCategory("filter", -1); if (category == NULL) { CLog::Log(LOGERROR, "CGUIDialogMediaFilter: unable to setup filters"); return; } CSettingGroup *group = AddGroup(category); if (group == NULL) { CLog::Log(LOGERROR, "CGUIDialogMediaFilter: unable to setup filters"); return; } for (unsigned int index = 0; index < NUM_FILTERS; index++) { if (filterList[index].mediaType != m_mediaType) continue; Filter filter = filterList[index]; // check the smartplaylist if it contains a matching rule for (CDatabaseQueryRules::iterator rule = m_filter->m_ruleCombination.m_rules.begin(); rule != m_filter->m_ruleCombination.m_rules.end(); rule++) { if ((*rule)->m_field == filter.field) { filter.rule = (CSmartPlaylistRule *)rule->get(); handledRules++; break; } } std::string settingId = StringUtils::Format("filter.%s.%d", filter.mediaType.c_str(), filter.field); if (filter.controlType == "edit") { CVariant data; if (filter.rule != NULL && filter.rule->m_parameter.size() == 1) data = filter.rule->m_parameter.at(0); if (filter.settingType == SettingTypeString) filter.setting = AddEdit(group, settingId, filter.label, 0, data.asString(), true, false, filter.label, true); else if (filter.settingType == SettingTypeInteger) filter.setting = AddEdit(group, settingId, filter.label, 0, static_cast<int>(data.asInteger()), 0, 1, 0, false, static_cast<int>(filter.label), true); else if (filter.settingType == SettingTypeNumber) filter.setting = AddEdit(group, settingId, filter.label, 0, data.asFloat(), 0.0f, 1.0f, 0.0f, false, filter.label, true); } else if (filter.controlType == "toggle") { int value = CHECK_ALL; if (filter.rule != NULL) value = filter.rule->m_operator == CDatabaseQueryRule::OPERATOR_TRUE ? CHECK_YES : CHECK_NO; StaticIntegerSettingOptions entries; entries.push_back(std::pair<int, int>(CHECK_LABEL_ALL, CHECK_ALL)); entries.push_back(std::pair<int, int>(CHECK_LABEL_NO, CHECK_NO)); entries.push_back(std::pair<int, int>(CHECK_LABEL_YES, CHECK_YES)); filter.setting = AddSpinner(group, settingId, filter.label, 0, value, entries, true); } else if (filter.controlType == "list") { std::vector<std::string> values; if (filter.rule != NULL && !filter.rule->m_parameter.empty()) { values = StringUtils::Split(filter.rule->GetParameter(), DATABASEQUERY_RULE_VALUE_SEPARATOR); if (values.size() == 1 && values.at(0).empty()) values.erase(values.begin()); } filter.setting = AddList(group, settingId, filter.label, 0, values, GetStringListOptions, filter.label); } else if (filter.controlType == "range") { CVariant valueLower, valueUpper; if (filter.rule != NULL) { if (filter.rule->m_parameter.size() == 2) { valueLower = filter.rule->m_parameter.at(0); valueUpper = filter.rule->m_parameter.at(1); } else { DeleteRule(filter.field); filter.rule = NULL; } } if (filter.settingType == SettingTypeInteger) { int min, interval, max; GetRange(filter, min, interval, max); // don't create the filter if there's no real range if (min == max) break; int iValueLower = valueLower.isNull() ? min : static_cast<int>(valueLower.asInteger()); int iValueUpper = valueUpper.isNull() ? 
max : static_cast<int>(valueUpper.asInteger()); if (filter.controlFormat == "integer") filter.setting = AddRange(group, settingId, filter.label, 0, iValueLower, iValueUpper, min, interval, max, -1, 21469, true); else if (filter.controlFormat == "percentage") filter.setting = AddPercentageRange(group, settingId, filter.label, 0, iValueLower, iValueUpper, -1, 1, 21469, true); else if (filter.controlFormat == "date") filter.setting = AddDateRange(group, settingId, filter.label, 0, iValueLower, iValueUpper, min, interval, max, -1, 21469, true); else if (filter.controlFormat == "time") filter.setting = AddTimeRange(group, settingId, filter.label, 0, iValueLower, iValueUpper, min, interval, max, -1, 21469, true); } else if (filter.settingType == SettingTypeNumber) { float min, interval, max; GetRange(filter, min, interval, max); // don't create the filter if there's no real range if (min == max) break; float fValueLower = valueLower.isNull() ? min : valueLower.asFloat(); float fValueUpper = valueUpper.isNull() ? max : valueUpper.asFloat(); filter.setting = AddRange(group, settingId, filter.label, 0, fValueLower, fValueUpper, min, interval, max, -1, 21469, true); } } else { if (filter.rule != NULL) handledRules--; CLog::Log(LOGWARNING, "CGUIDialogMediaFilter: filter %d of media type %s with unknown control type '%s'", filter.field, filter.mediaType.c_str(), filter.controlType.c_str()); continue; } if (filter.setting == NULL) { if (filter.rule != NULL) handledRules--; CLog::Log(LOGWARNING, "CGUIDialogMediaFilter: failed to create filter %d of media type %s with control type '%s'", filter.field, filter.mediaType.c_str(), filter.controlType.c_str()); continue; } m_filters.insert(make_pair(settingId, filter)); } // make sure that no change in capacity size is needed when adding new rules // which would copy around the rules and our pointers in the Filter struct // wouldn't work anymore m_filter->m_ruleCombination.m_rules.reserve(m_filters.size() + (m_filter->m_ruleCombination.m_rules.size() - handledRules)); } bool CGUIDialogMediaFilter::SetPath(const std::string &path) { if (path.empty() || m_filter == NULL) { CLog::Log(LOGWARNING, "CGUIDialogMediaFilter::SetPath(%s): invalid path or filter", path.c_str()); return false; } delete m_dbUrl; bool video = false; if (path.find("videodb://") == 0) { m_dbUrl = new CVideoDbUrl(); video = true; } else if (path.find("musicdb://") == 0) m_dbUrl = new CMusicDbUrl(); else { CLog::Log(LOGWARNING, "CGUIDialogMediaFilter::SetPath(%s): invalid path (neither videodb:// nor musicdb://)", path.c_str()); return false; } if (!m_dbUrl->FromString(path) || (video && m_dbUrl->GetType() != "movies" && m_dbUrl->GetType() != "tvshows" && m_dbUrl->GetType() != "episodes" && m_dbUrl->GetType() != "musicvideos") || (!video && m_dbUrl->GetType() != "artists" && m_dbUrl->GetType() != "albums" && m_dbUrl->GetType() != "songs")) { CLog::Log(LOGWARNING, "CGUIDialogMediaFilter::SetPath(%s): invalid media type", path.c_str()); return false; } // remove "filter" option if (m_dbUrl->HasOption("filter")) m_dbUrl->RemoveOption("filter"); if (video) m_mediaType = ((CVideoDbUrl*)m_dbUrl)->GetItemType(); else m_mediaType = m_dbUrl->GetType(); m_filter->SetType(m_mediaType); return true; } void CGUIDialogMediaFilter::UpdateControls() { for (std::map<std::string, Filter>::iterator itFilter = m_filters.begin(); itFilter != m_filters.end(); itFilter++) { if (itFilter->second.controlType != "list") continue; std::vector<std::string> items; int size = GetItems(itFilter->second, items, true); 
std::string label = g_localizeStrings.Get(itFilter->second.label); BaseSettingControlPtr control = GetSettingControl(itFilter->second.setting->GetId()); if (control == NULL) continue; if (size <= 0 || (size == 1 && itFilter->second.field != FieldSet && itFilter->second.field != FieldTag)) CONTROL_DISABLE(control->GetID()); else { CONTROL_ENABLE(control->GetID()); label = StringUtils::Format(g_localizeStrings.Get(21470).c_str(), label.c_str(), size); } SET_CONTROL_LABEL(control->GetID(), label); } } void CGUIDialogMediaFilter::TriggerFilter() const { if (m_filter == NULL) return; CGUIMessage message(GUI_MSG_NOTIFY_ALL, GetID(), 0, GUI_MSG_FILTER_ITEMS, 10); // 10 for advanced g_windowManager.SendThreadMessage(message); } void CGUIDialogMediaFilter::Reset(bool filtersOnly /* = false */) { if (!filtersOnly) { delete m_dbUrl; m_dbUrl = NULL; } m_filters.clear(); } int CGUIDialogMediaFilter::GetItems(const Filter &filter, std::vector<std::string> &items, bool countOnly /* = false */) { CFileItemList selectItems; // remove the rule for the field of the filter we want to retrieve items for CSmartPlaylist tmpFilter = *m_filter; for (CDatabaseQueryRules::iterator rule = tmpFilter.m_ruleCombination.m_rules.begin(); rule != tmpFilter.m_ruleCombination.m_rules.end(); rule++) { if ((*rule)->m_field == filter.field) { tmpFilter.m_ruleCombination.m_rules.erase(rule); break; } } if (m_mediaType == "movies" || m_mediaType == "tvshows" || m_mediaType == "episodes" || m_mediaType == "musicvideos") { CVideoDatabase videodb; if (!videodb.Open()) return -1; std::set<std::string> playlists; CDatabase::Filter dbfilter; dbfilter.where = tmpFilter.GetWhereClause(videodb, playlists); VIDEODB_CONTENT_TYPE type = VIDEODB_CONTENT_MOVIES; if (m_mediaType == "tvshows") type = VIDEODB_CONTENT_TVSHOWS; else if (m_mediaType == "episodes") type = VIDEODB_CONTENT_EPISODES; else if (m_mediaType == "musicvideos") type = VIDEODB_CONTENT_MUSICVIDEOS; if (filter.field == FieldGenre) videodb.GetGenresNav(m_dbUrl->ToString(), selectItems, type, dbfilter, countOnly); else if (filter.field == FieldActor || filter.field == FieldArtist) videodb.GetActorsNav(m_dbUrl->ToString(), selectItems, type, dbfilter, countOnly); else if (filter.field == FieldDirector) videodb.GetDirectorsNav(m_dbUrl->ToString(), selectItems, type, dbfilter, countOnly); else if (filter.field == FieldStudio) videodb.GetStudiosNav(m_dbUrl->ToString(), selectItems, type, dbfilter, countOnly); else if (filter.field == FieldAlbum) videodb.GetMusicVideoAlbumsNav(m_dbUrl->ToString(), selectItems, -1, dbfilter, countOnly); else if (filter.field == FieldTag) videodb.GetTagsNav(m_dbUrl->ToString(), selectItems, type, dbfilter, countOnly); } else if (m_mediaType == "artists" || m_mediaType == "albums" || m_mediaType == "songs") { CMusicDatabase musicdb; if (!musicdb.Open()) return -1; std::set<std::string> playlists; CDatabase::Filter dbfilter; dbfilter.where = tmpFilter.GetWhereClause(musicdb, playlists); if (filter.field == FieldGenre) musicdb.GetGenresNav(m_dbUrl->ToString(), selectItems, dbfilter, countOnly); else if (filter.field == FieldArtist) musicdb.GetArtistsNav(m_dbUrl->ToString(), selectItems, m_mediaType == "albums", -1, -1, -1, dbfilter, SortDescription(), countOnly); else if (filter.field == FieldAlbum) musicdb.GetAlbumsNav(m_dbUrl->ToString(), selectItems, -1, -1, dbfilter, SortDescription(), countOnly); else if (filter.field == FieldAlbumType) musicdb.GetAlbumTypesNav(m_dbUrl->ToString(), selectItems, dbfilter, countOnly); else if (filter.field == 
FieldMusicLabel) musicdb.GetMusicLabelsNav(m_dbUrl->ToString(), selectItems, dbfilter, countOnly); } int size = selectItems.Size(); if (size <= 0) return 0; if (countOnly) { if (size == 1 && selectItems.Get(0)->HasProperty("total")) return (int)selectItems.Get(0)->GetProperty("total").asInteger(); return 0; } // sort the items selectItems.Sort(SortByLabel, SortOrderAscending); for (int index = 0; index < size; ++index) items.push_back(selectItems.Get(index)->GetLabel()); return items.size(); } CSmartPlaylistRule* CGUIDialogMediaFilter::AddRule(Field field, CDatabaseQueryRule::SEARCH_OPERATOR ruleOperator /* = CDatabaseQueryRule::OPERATOR_CONTAINS */) { CSmartPlaylistRule rule; rule.m_field = field; rule.m_operator = ruleOperator; m_filter->m_ruleCombination.AddRule(rule); return (CSmartPlaylistRule *)m_filter->m_ruleCombination.m_rules.back().get(); } void CGUIDialogMediaFilter::DeleteRule(Field field) { for (CDatabaseQueryRules::iterator rule = m_filter->m_ruleCombination.m_rules.begin(); rule != m_filter->m_ruleCombination.m_rules.end(); rule++) { if ((*rule)->m_field == field) { m_filter->m_ruleCombination.m_rules.erase(rule); break; } } } void CGUIDialogMediaFilter::GetStringListOptions(const CSetting *setting, std::vector< std::pair<std::string, std::string> > &list, std::string &current, void *data) { if (setting == NULL || data == NULL) return; CGUIDialogMediaFilter *mediaFilter = static_cast<CGUIDialogMediaFilter*>(data); std::map<std::string, Filter>::const_iterator itFilter = mediaFilter->m_filters.find(setting->GetId()); if (itFilter == mediaFilter->m_filters.end()) return; std::vector<std::string> items; if (mediaFilter->GetItems(itFilter->second, items, false) <= 0) return; for (std::vector<std::string>::const_iterator item = items.begin(); item != items.end(); ++item) list.push_back(make_pair(*item, *item)); } void CGUIDialogMediaFilter::GetRange(const Filter &filter, int &min, int &interval, int &max) { if (filter.field == FieldUserRating && (m_mediaType == "movies" || m_mediaType == "tvshows" || m_mediaType == "episodes"|| m_mediaType == "musicvideos" || m_mediaType == "albums" || m_mediaType == "songs")) { min = 0; interval = 1; max = 10; } else if (filter.field == FieldYear) { min = 0; interval = 1; max = 0; if (m_mediaType == "movies" || m_mediaType == "tvshows" || m_mediaType == "musicvideos") { std::string table; std::string year; if (m_mediaType == "movies") { table = "movie_view"; year = DatabaseUtils::GetField(FieldYear, MediaTypeMovie, DatabaseQueryPartWhere); } else if (m_mediaType == "tvshows") { table = "tvshow_view"; year = StringUtils::Format("strftime(\"%%Y\", %s)", DatabaseUtils::GetField(FieldYear, MediaTypeTvShow, DatabaseQueryPartWhere).c_str()); } else if (m_mediaType == "musicvideos") { table = "musicvideo_view"; year = DatabaseUtils::GetField(FieldYear, MediaTypeMusicVideo, DatabaseQueryPartWhere); } CDatabase::Filter filter; filter.where = year + " > 0"; GetMinMax(table, year, min, max, filter); } else if (m_mediaType == "albums" || m_mediaType == "songs") { std::string table; if (m_mediaType == "albums") table = "albumview"; else if (m_mediaType == "songs") table = "songview"; else return; CDatabase::Filter filter; filter.where = DatabaseUtils::GetField(FieldYear, m_mediaType, DatabaseQueryPartWhere) + " > 0"; GetMinMax(table, DatabaseUtils::GetField(FieldYear, m_mediaType, DatabaseQueryPartSelect), min, max, filter); } } else if (filter.field == FieldAirDate) { min = 0; interval = 1; max = 0; if (m_mediaType == "episodes") { std::string field = 
StringUtils::Format("CAST(strftime(\"%%s\", c%02d) AS INTEGER)", VIDEODB_ID_EPISODE_AIRED); GetMinMax("episode_view", field, min, max); interval = 60 * 60 * 24 * 7; // 1 week } } else if (filter.field == FieldTime) { min = 0; interval = 10; max = 0; if (m_mediaType == "songs") GetMinMax("songview", "iDuration", min, max); } else if (filter.field == FieldPlaycount) { min = 0; interval = 1; max = 0; if (m_mediaType == "songs") GetMinMax("songview", "iTimesPlayed", min, max); } } void CGUIDialogMediaFilter::GetRange(const Filter &filter, float &min, float &interval, float &max) { if (filter.field == FieldRating && (m_mediaType == "movies" || m_mediaType == "tvshows" || m_mediaType == "episodes" || m_mediaType == "musicvideos" || m_mediaType == "albums" || m_mediaType == "songs")) { min = 0.0f; interval = 0.1f; max = 10.0f; } } bool CGUIDialogMediaFilter::GetMinMax(const std::string &table, const std::string &field, int &min, int &max, const CDatabase::Filter &filter /* = CDatabase::Filter() */) { if (table.empty() || field.empty()) return false; CDatabase *db = NULL; CDbUrl *dbUrl = NULL; if (m_mediaType == "movies" || m_mediaType == "tvshows" || m_mediaType == "episodes" || m_mediaType == "musicvideos") { CVideoDatabase *videodb = new CVideoDatabase(); if (!videodb->Open()) { delete videodb; return false; } db = videodb; dbUrl = new CVideoDbUrl(); } else if (m_mediaType == "artists" || m_mediaType == "albums" || m_mediaType == "songs") { CMusicDatabase *musicdb = new CMusicDatabase(); if (!musicdb->Open()) { delete musicdb; return false; } db = musicdb; dbUrl = new CMusicDbUrl(); } if (db == NULL || !db->IsOpen() || dbUrl == NULL) { delete db; delete dbUrl; return false; } CDatabase::Filter extFilter = filter; std::string strSQLExtra; if (!db->BuildSQL(m_dbUrl->ToString(), strSQLExtra, extFilter, strSQLExtra, *dbUrl)) { delete db; delete dbUrl; return false; } std::string strSQL = "SELECT %s FROM %s "; min = static_cast<int>(strtol(db->GetSingleValue(db->PrepareSQL(strSQL, std::string("MIN(" + field + ")").c_str(), table.c_str()) + strSQLExtra).c_str(), NULL, 0)); max = static_cast<int>(strtol(db->GetSingleValue(db->PrepareSQL(strSQL, std::string("MAX(" + field + ")").c_str(), table.c_str()) + strSQLExtra).c_str(), NULL, 0)); db->Close(); delete db; delete dbUrl; return true; }
gpl-2.0
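Aside: the OnSettingChanged() handler in the record above only turns a range control into a query rule when the chosen bounds are strictly inside the control's declared span (valueLower > minimum || valueUpper < maximum); otherwise the rule is removed. A minimal C sketch of that predicate; the helper name and the sample year range are illustrative, not taken from the Kodi sources:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the range check in
 * CGUIDialogMediaFilter::OnSettingChanged(): a rule is only worth
 * creating when the user actually narrowed the range from its full span. */
static bool range_rule_needed(int lower, int upper, int min, int max)
{
	return lower > min || upper < max;
}

int main(void)
{
	/* a year control offering 1970..2024 */
	printf("%d\n", range_rule_needed(1990, 2000, 1970, 2024)); /* 1: add rule */
	printf("%d\n", range_rule_needed(1970, 2024, 1970, 2024)); /* 0: no rule */
	return 0;
}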
uoaerg/linux-ecn
drivers/media/pci/cx25821/cx25821-video-upstream.c
526
18596
/* * Driver for the Conexant CX25821 PCIe bridge * * Copyright (C) 2009 Conexant Systems Inc. * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "cx25821-video.h" #include "cx25821-video-upstream.h" #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards"); MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>"); MODULE_LICENSE("GPL"); static int _intr_msk = FLD_VID_SRC_RISC1 | FLD_VID_SRC_UF | FLD_VID_SRC_SYNC | FLD_VID_SRC_OPC_ERR; int cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev, const struct sram_channel *ch, unsigned int bpl, u32 risc) { unsigned int i, lines; u32 cdt; if (ch->cmds_start == 0) { cx_write(ch->ptr1_reg, 0); cx_write(ch->ptr2_reg, 0); cx_write(ch->cnt2_reg, 0); cx_write(ch->cnt1_reg, 0); return 0; } bpl = (bpl + 7) & ~7; /* alignment */ cdt = ch->cdt; lines = ch->fifo_size / bpl; if (lines > 4) lines = 4; BUG_ON(lines < 2); /* write CDT */ for (i = 0; i < lines; i++) { cx_write(cdt + 16 * i, ch->fifo_start + bpl * i); cx_write(cdt + 16 * i + 4, 0); cx_write(cdt + 16 * i + 8, 0); cx_write(cdt + 16 * i + 12, 0); } /* write CMDS */ cx_write(ch->cmds_start + 0, risc); cx_write(ch->cmds_start + 4, 0); cx_write(ch->cmds_start + 8, cdt); cx_write(ch->cmds_start + 12, (lines * 16) >> 3); cx_write(ch->cmds_start + 16, ch->ctrl_start); cx_write(ch->cmds_start + 20, VID_IQ_SIZE_DW); for (i = 24; i < 80; i += 4) cx_write(ch->cmds_start + i, 0); /* fill registers */ cx_write(ch->ptr1_reg, ch->fifo_start); cx_write(ch->ptr2_reg, cdt); cx_write(ch->cnt2_reg, (lines * 16) >> 3); cx_write(ch->cnt1_reg, (bpl >> 3) - 1); return 0; } static __le32 *cx25821_update_riscprogram(struct cx25821_channel *chan, __le32 *rp, unsigned int offset, unsigned int bpl, u32 sync_line, unsigned int lines, int fifo_enable, int field_type) { struct cx25821_video_out_data *out = chan->out; unsigned int line, i; int dist_betwn_starts = bpl * 2; *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line); if (USE_RISC_NOOP_VIDEO) { for (i = 0; i < NUM_NO_OPS; i++) *(rp++) = cpu_to_le32(RISC_NOOP); } /* scan lines */ for (line = 0; line < lines; line++) { *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | bpl); *(rp++) = cpu_to_le32(out->_data_buf_phys_addr + offset); *(rp++) = cpu_to_le32(0); /* bits 63-32 */ if ((lines <= NTSC_FIELD_HEIGHT) || (line < (NTSC_FIELD_HEIGHT - 1)) || !(out->is_60hz)) { offset += dist_betwn_starts; } } return rp; } static __le32 *cx25821_risc_field_upstream(struct cx25821_channel *chan, __le32 *rp, dma_addr_t databuf_phys_addr, unsigned int offset, u32 sync_line, unsigned int bpl, unsigned int lines, int fifo_enable, int field_type) { struct cx25821_video_out_data *out = chan->out; unsigned int line, i; const 
struct sram_channel *sram_ch = chan->sram_channels; int dist_betwn_starts = bpl * 2; /* sync instruction */ if (sync_line != NO_SYNC_LINE) *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line); if (USE_RISC_NOOP_VIDEO) { for (i = 0; i < NUM_NO_OPS; i++) *(rp++) = cpu_to_le32(RISC_NOOP); } /* scan lines */ for (line = 0; line < lines; line++) { *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | bpl); *(rp++) = cpu_to_le32(databuf_phys_addr + offset); *(rp++) = cpu_to_le32(0); /* bits 63-32 */ if ((lines <= NTSC_FIELD_HEIGHT) || (line < (NTSC_FIELD_HEIGHT - 1)) || !(out->is_60hz)) /* to skip the other field line */ offset += dist_betwn_starts; /* check if we need to enable the FIFO after the first 4 lines * For the upstream video channel, the risc engine will enable * the FIFO. */ if (fifo_enable && line == 3) { *(rp++) = cpu_to_le32(RISC_WRITECR); *(rp++) = cpu_to_le32(sram_ch->dma_ctl); *(rp++) = cpu_to_le32(FLD_VID_FIFO_EN); *(rp++) = cpu_to_le32(0x00000001); } } return rp; } static int cx25821_risc_buffer_upstream(struct cx25821_channel *chan, struct pci_dev *pci, unsigned int top_offset, unsigned int bpl, unsigned int lines) { struct cx25821_video_out_data *out = chan->out; __le32 *rp; int fifo_enable = 0; /* get line count for single field */ int singlefield_lines = lines >> 1; int odd_num_lines = singlefield_lines; int frame = 0; int frame_size = 0; int databuf_offset = 0; int risc_program_size = 0; int risc_flag = RISC_CNT_RESET; unsigned int bottom_offset = bpl; dma_addr_t risc_phys_jump_addr; if (out->is_60hz) { odd_num_lines = singlefield_lines + 1; risc_program_size = FRAME1_VID_PROG_SIZE; frame_size = (bpl == Y411_LINE_SZ) ? FRAME_SIZE_NTSC_Y411 : FRAME_SIZE_NTSC_Y422; } else { risc_program_size = PAL_VID_PROG_SIZE; frame_size = (bpl == Y411_LINE_SZ) ? FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422; } /* Virtual address of Risc buffer program */ rp = out->_dma_virt_addr; for (frame = 0; frame < NUM_FRAMES; frame++) { databuf_offset = frame_size * frame; if (UNSET != top_offset) { fifo_enable = (frame == 0) ? FIFO_ENABLE : FIFO_DISABLE; rp = cx25821_risc_field_upstream(chan, rp, out->_data_buf_phys_addr + databuf_offset, top_offset, 0, bpl, odd_num_lines, fifo_enable, ODD_FIELD); } fifo_enable = FIFO_DISABLE; /* Even Field */ rp = cx25821_risc_field_upstream(chan, rp, out->_data_buf_phys_addr + databuf_offset, bottom_offset, 0x200, bpl, singlefield_lines, fifo_enable, EVEN_FIELD); if (frame == 0) { risc_flag = RISC_CNT_RESET; risc_phys_jump_addr = out->_dma_phys_start_addr + risc_program_size; } else { risc_phys_jump_addr = out->_dma_phys_start_addr; risc_flag = RISC_CNT_INC; } /* Loop to 2ndFrameRISC or to Start of Risc * program & generate IRQ */ *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag); *(rp++) = cpu_to_le32(risc_phys_jump_addr); *(rp++) = cpu_to_le32(0); } return 0; } void cx25821_stop_upstream_video(struct cx25821_channel *chan) { struct cx25821_video_out_data *out = chan->out; struct cx25821_dev *dev = chan->dev; const struct sram_channel *sram_ch = chan->sram_channels; u32 tmp = 0; if (!out->_is_running) { pr_info("No video file is currently running so return!\n"); return; } /* Set the interrupt mask register, disable irq. 
*/ cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) & ~(1 << sram_ch->irq_bit)); /* Disable RISC interrupts */ tmp = cx_read(sram_ch->int_msk); cx_write(sram_ch->int_msk, tmp & ~_intr_msk); /* Turn OFF risc and fifo enable */ tmp = cx_read(sram_ch->dma_ctl); cx_write(sram_ch->dma_ctl, tmp & ~(FLD_VID_FIFO_EN | FLD_VID_RISC_EN)); free_irq(dev->pci->irq, chan); /* Clear data buffer memory */ if (out->_data_buf_virt_addr) memset(out->_data_buf_virt_addr, 0, out->_data_buf_size); out->_is_running = 0; out->_is_first_frame = 0; out->_frame_count = 0; out->_file_status = END_OF_FILE; tmp = cx_read(VID_CH_MODE_SEL); cx_write(VID_CH_MODE_SEL, tmp & 0xFFFFFE00); } void cx25821_free_mem_upstream(struct cx25821_channel *chan) { struct cx25821_video_out_data *out = chan->out; struct cx25821_dev *dev = chan->dev; if (out->_is_running) cx25821_stop_upstream_video(chan); if (out->_dma_virt_addr) { pci_free_consistent(dev->pci, out->_risc_size, out->_dma_virt_addr, out->_dma_phys_addr); out->_dma_virt_addr = NULL; } if (out->_data_buf_virt_addr) { pci_free_consistent(dev->pci, out->_data_buf_size, out->_data_buf_virt_addr, out->_data_buf_phys_addr); out->_data_buf_virt_addr = NULL; } } int cx25821_write_frame(struct cx25821_channel *chan, const char __user *data, size_t count) { struct cx25821_video_out_data *out = chan->out; int line_size = (out->_pixel_format == PIXEL_FRMT_411) ? Y411_LINE_SZ : Y422_LINE_SZ; int frame_size = 0; int frame_offset = 0; int curpos = out->curpos; if (out->is_60hz) frame_size = (line_size == Y411_LINE_SZ) ? FRAME_SIZE_NTSC_Y411 : FRAME_SIZE_NTSC_Y422; else frame_size = (line_size == Y411_LINE_SZ) ? FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422; if (curpos == 0) { out->cur_frame_index = out->_frame_index; if (wait_event_interruptible(out->waitq, out->cur_frame_index != out->_frame_index)) return -EINTR; out->cur_frame_index = out->_frame_index; } frame_offset = out->cur_frame_index ? frame_size : 0; if (frame_size - curpos < count) count = frame_size - curpos; memcpy((char *)out->_data_buf_virt_addr + frame_offset + curpos, data, count); curpos += count; if (curpos == frame_size) { out->_frame_count++; curpos = 0; } out->curpos = curpos; return count; } static int cx25821_upstream_buffer_prepare(struct cx25821_channel *chan, const struct sram_channel *sram_ch, int bpl) { struct cx25821_video_out_data *out = chan->out; struct cx25821_dev *dev = chan->dev; int ret = 0; dma_addr_t dma_addr; dma_addr_t data_dma_addr; if (out->_dma_virt_addr != NULL) pci_free_consistent(dev->pci, out->upstream_riscbuf_size, out->_dma_virt_addr, out->_dma_phys_addr); out->_dma_virt_addr = pci_alloc_consistent(dev->pci, out->upstream_riscbuf_size, &dma_addr); out->_dma_virt_start_addr = out->_dma_virt_addr; out->_dma_phys_start_addr = dma_addr; out->_dma_phys_addr = dma_addr; out->_risc_size = out->upstream_riscbuf_size; if (!out->_dma_virt_addr) { pr_err("FAILED to allocate memory for Risc buffer! Returning\n"); return -ENOMEM; } /* Clear memory at address */ memset(out->_dma_virt_addr, 0, out->_risc_size); if (out->_data_buf_virt_addr != NULL) pci_free_consistent(dev->pci, out->upstream_databuf_size, out->_data_buf_virt_addr, out->_data_buf_phys_addr); /* For Video Data buffer allocation */ out->_data_buf_virt_addr = pci_alloc_consistent(dev->pci, out->upstream_databuf_size, &data_dma_addr); out->_data_buf_phys_addr = data_dma_addr; out->_data_buf_size = out->upstream_databuf_size; if (!out->_data_buf_virt_addr) { pr_err("FAILED to allocate memory for data buffer! 
Returning\n"); return -ENOMEM; } /* Clear memory at address */ memset(out->_data_buf_virt_addr, 0, out->_data_buf_size); /* Create RISC programs */ ret = cx25821_risc_buffer_upstream(chan, dev->pci, 0, bpl, out->_lines_count); if (ret < 0) { pr_info("Failed creating Video Upstream Risc programs!\n"); goto error; } return 0; error: return ret; } static int cx25821_video_upstream_irq(struct cx25821_channel *chan, u32 status) { struct cx25821_video_out_data *out = chan->out; struct cx25821_dev *dev = chan->dev; u32 int_msk_tmp; const struct sram_channel *channel = chan->sram_channels; int singlefield_lines = NTSC_FIELD_HEIGHT; int line_size_in_bytes = Y422_LINE_SZ; int odd_risc_prog_size = 0; dma_addr_t risc_phys_jump_addr; __le32 *rp; if (status & FLD_VID_SRC_RISC1) { /* We should only process one program per call */ u32 prog_cnt = cx_read(channel->gpcnt); /* Since we've identified our IRQ, clear our bits from the * interrupt mask and interrupt status registers */ int_msk_tmp = cx_read(channel->int_msk); cx_write(channel->int_msk, int_msk_tmp & ~_intr_msk); cx_write(channel->int_stat, _intr_msk); wake_up(&out->waitq); spin_lock(&dev->slock); out->_frame_index = prog_cnt; if (out->_is_first_frame) { out->_is_first_frame = 0; if (out->is_60hz) { singlefield_lines += 1; odd_risc_prog_size = ODD_FLD_NTSC_PROG_SIZE; } else { singlefield_lines = PAL_FIELD_HEIGHT; odd_risc_prog_size = ODD_FLD_PAL_PROG_SIZE; } if (out->_dma_virt_start_addr != NULL) { line_size_in_bytes = (out->_pixel_format == PIXEL_FRMT_411) ? Y411_LINE_SZ : Y422_LINE_SZ; risc_phys_jump_addr = out->_dma_phys_start_addr + odd_risc_prog_size; rp = cx25821_update_riscprogram(chan, out->_dma_virt_start_addr, TOP_OFFSET, line_size_in_bytes, 0x0, singlefield_lines, FIFO_DISABLE, ODD_FIELD); /* Jump to Even Risc program of 1st Frame */ *(rp++) = cpu_to_le32(RISC_JUMP); *(rp++) = cpu_to_le32(risc_phys_jump_addr); *(rp++) = cpu_to_le32(0); } } spin_unlock(&dev->slock); } else { if (status & FLD_VID_SRC_UF) pr_err("%s(): Video Received Underflow Error Interrupt!\n", __func__); if (status & FLD_VID_SRC_SYNC) pr_err("%s(): Video Received Sync Error Interrupt!\n", __func__); if (status & FLD_VID_SRC_OPC_ERR) pr_err("%s(): Video Received OpCode Error Interrupt!\n", __func__); } if (out->_file_status == END_OF_FILE) { pr_err("EOF Channel 1 Framecount = %d\n", out->_frame_count); return -1; } /* ElSE, set the interrupt mask register, re-enable irq. */ int_msk_tmp = cx_read(channel->int_msk); cx_write(channel->int_msk, int_msk_tmp |= _intr_msk); return 0; } static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id) { struct cx25821_channel *chan = dev_id; struct cx25821_dev *dev = chan->dev; u32 vid_status; int handled = 0; const struct sram_channel *sram_ch; if (!dev) return -1; sram_ch = chan->sram_channels; vid_status = cx_read(sram_ch->int_stat); /* Only deal with our interrupt */ if (vid_status) handled = cx25821_video_upstream_irq(chan, vid_status); return IRQ_RETVAL(handled); } static void cx25821_set_pixelengine(struct cx25821_channel *chan, const struct sram_channel *ch, int pix_format) { struct cx25821_video_out_data *out = chan->out; struct cx25821_dev *dev = chan->dev; int width = WIDTH_D1; int height = out->_lines_count; int num_lines, odd_num_lines; u32 value; int vip_mode = OUTPUT_FRMT_656; value = ((pix_format & 0x3) << 12) | (vip_mode & 0x7); value &= 0xFFFFFFEF; value |= out->is_60hz ? 0 : 0x10; cx_write(ch->vid_fmt_ctl, value); /* set number of active pixels in each line. 
* Default is 720 pixels in both NTSC and PAL format */ cx_write(ch->vid_active_ctl1, width); num_lines = (height / 2) & 0x3FF; odd_num_lines = num_lines; if (out->is_60hz) odd_num_lines += 1; value = (num_lines << 16) | odd_num_lines; /* set number of active lines in field 0 (top) and field 1 (bottom) */ cx_write(ch->vid_active_ctl2, value); cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3); } static int cx25821_start_video_dma_upstream(struct cx25821_channel *chan, const struct sram_channel *sram_ch) { struct cx25821_video_out_data *out = chan->out; struct cx25821_dev *dev = chan->dev; u32 tmp = 0; int err = 0; /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for * channel A-C */ tmp = cx_read(VID_CH_MODE_SEL); cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF); /* Set the physical start address of the RISC program in the initial * program counter(IPC) member of the cmds. */ cx_write(sram_ch->cmds_start + 0, out->_dma_phys_addr); /* Risc IPC High 64 bits 63-32 */ cx_write(sram_ch->cmds_start + 4, 0); /* reset counter */ cx_write(sram_ch->gpcnt_ctl, 3); /* Clear our bits from the interrupt status register. */ cx_write(sram_ch->int_stat, _intr_msk); /* Set the interrupt mask register, enable irq. */ cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit)); tmp = cx_read(sram_ch->int_msk); cx_write(sram_ch->int_msk, tmp |= _intr_msk); err = request_irq(dev->pci->irq, cx25821_upstream_irq, IRQF_SHARED, dev->name, chan); if (err < 0) { pr_err("%s: can't get upstream IRQ %d\n", dev->name, dev->pci->irq); goto fail_irq; } /* Start the DMA engine */ tmp = cx_read(sram_ch->dma_ctl); cx_set(sram_ch->dma_ctl, tmp | FLD_VID_RISC_EN); out->_is_running = 1; out->_is_first_frame = 1; return 0; fail_irq: cx25821_dev_unregister(dev); return err; } int cx25821_vidupstream_init(struct cx25821_channel *chan, int pixel_format) { struct cx25821_video_out_data *out = chan->out; struct cx25821_dev *dev = chan->dev; const struct sram_channel *sram_ch; u32 tmp; int err = 0; int data_frame_size = 0; int risc_buffer_size = 0; if (out->_is_running) { pr_info("Video Channel is still running so return!\n"); return 0; } sram_ch = chan->sram_channels; out->is_60hz = dev->tvnorm & V4L2_STD_525_60; /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for * channel A-C */ tmp = cx_read(VID_CH_MODE_SEL); cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF); out->_is_running = 0; out->_frame_count = 0; out->_file_status = RESET_STATUS; out->_lines_count = out->is_60hz ? 480 : 576; out->_pixel_format = pixel_format; out->_line_size = (out->_pixel_format == PIXEL_FRMT_422) ?
(WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2; data_frame_size = out->is_60hz ? NTSC_DATA_BUF_SZ : PAL_DATA_BUF_SZ; risc_buffer_size = out->is_60hz ? NTSC_RISC_BUF_SIZE : PAL_RISC_BUF_SIZE; out->curpos = 0; init_waitqueue_head(&out->waitq); err = cx25821_sram_channel_setup_upstream(dev, sram_ch, out->_line_size, 0); /* setup fifo + format */ cx25821_set_pixelengine(chan, sram_ch, out->_pixel_format); out->upstream_riscbuf_size = risc_buffer_size * 2; out->upstream_databuf_size = data_frame_size * 2; /* Allocate buffers and prepare the RISC program */ err = cx25821_upstream_buffer_prepare(chan, sram_ch, out->_line_size); if (err < 0) { pr_err("%s: Failed to set up Video upstream buffers!\n", dev->name); goto error; } cx25821_start_video_dma_upstream(chan, sram_ch); return 0; error: cx25821_dev_unregister(dev); return err; }
gpl-2.0
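Aside: the RISC routines in the record above walk an interleaved frame buffer: consecutive lines of one field sit 2*bpl apart (dist_betwn_starts), the bottom field starts bpl into the frame, and the NTSC top field carries one extra line. A small compile-and-run C sketch of that offset arithmetic, assuming a Y422 line size of 720 pixels at 2 bytes each; it models the layout and is not driver code:

#include <stdio.h>

/* Byte offset of scan line `line` of one field in an interleaved frame.
 * field_start is 0 for the top field and bpl for the bottom field,
 * matching top_offset/bottom_offset in cx25821_risc_buffer_upstream(). */
static unsigned int field_line_offset(unsigned int field_start,
				      unsigned int bpl, unsigned int line)
{
	return field_start + line * bpl * 2; /* skip the other field's line */
}

int main(void)
{
	unsigned int bpl = 720 * 2; /* assumed Y422 line size */

	printf("top    line 0: %u\n", field_line_offset(0, bpl, 0));   /* 0 */
	printf("bottom line 0: %u\n", field_line_offset(bpl, bpl, 0)); /* 1440 */
	printf("top    line 1: %u\n", field_line_offset(0, bpl, 1));   /* 2880 */
	return 0;
}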
gautamMalu/linux-samsung-arndale-xen
drivers/watchdog/jz4740_wdt.c
526
6141
/* * Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net> * JZ4740 Watchdog driver * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/watchdog.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/device.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/err.h> #include <asm/mach-jz4740/timer.h> #define JZ_REG_WDT_TIMER_DATA 0x0 #define JZ_REG_WDT_COUNTER_ENABLE 0x4 #define JZ_REG_WDT_TIMER_COUNTER 0x8 #define JZ_REG_WDT_TIMER_CONTROL 0xC #define JZ_WDT_CLOCK_PCLK 0x1 #define JZ_WDT_CLOCK_RTC 0x2 #define JZ_WDT_CLOCK_EXT 0x4 #define JZ_WDT_CLOCK_DIV_SHIFT 3 #define JZ_WDT_CLOCK_DIV_1 (0 << JZ_WDT_CLOCK_DIV_SHIFT) #define JZ_WDT_CLOCK_DIV_4 (1 << JZ_WDT_CLOCK_DIV_SHIFT) #define JZ_WDT_CLOCK_DIV_16 (2 << JZ_WDT_CLOCK_DIV_SHIFT) #define JZ_WDT_CLOCK_DIV_64 (3 << JZ_WDT_CLOCK_DIV_SHIFT) #define JZ_WDT_CLOCK_DIV_256 (4 << JZ_WDT_CLOCK_DIV_SHIFT) #define JZ_WDT_CLOCK_DIV_1024 (5 << JZ_WDT_CLOCK_DIV_SHIFT) #define DEFAULT_HEARTBEAT 5 #define MAX_HEARTBEAT 2048 static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); static unsigned int heartbeat = DEFAULT_HEARTBEAT; module_param(heartbeat, uint, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat period in seconds from 1 to " __MODULE_STRING(MAX_HEARTBEAT) ", default " __MODULE_STRING(DEFAULT_HEARTBEAT)); struct jz4740_wdt_drvdata { struct watchdog_device wdt; void __iomem *base; struct clk *rtc_clk; }; static int jz4740_wdt_ping(struct watchdog_device *wdt_dev) { struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev); writew(0x0, drvdata->base + JZ_REG_WDT_TIMER_COUNTER); return 0; } static int jz4740_wdt_set_timeout(struct watchdog_device *wdt_dev, unsigned int new_timeout) { struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev); unsigned int rtc_clk_rate; unsigned int timeout_value; unsigned short clock_div = JZ_WDT_CLOCK_DIV_1; rtc_clk_rate = clk_get_rate(drvdata->rtc_clk); timeout_value = rtc_clk_rate * new_timeout; while (timeout_value > 0xffff) { if (clock_div == JZ_WDT_CLOCK_DIV_1024) { /* Requested timeout too high; * use highest possible value. 
*/ timeout_value = 0xffff; break; } timeout_value >>= 2; clock_div += (1 << JZ_WDT_CLOCK_DIV_SHIFT); } writeb(0x0, drvdata->base + JZ_REG_WDT_COUNTER_ENABLE); writew(clock_div, drvdata->base + JZ_REG_WDT_TIMER_CONTROL); writew((u16)timeout_value, drvdata->base + JZ_REG_WDT_TIMER_DATA); writew(0x0, drvdata->base + JZ_REG_WDT_TIMER_COUNTER); writew(clock_div | JZ_WDT_CLOCK_RTC, drvdata->base + JZ_REG_WDT_TIMER_CONTROL); writeb(0x1, drvdata->base + JZ_REG_WDT_COUNTER_ENABLE); wdt_dev->timeout = new_timeout; return 0; } static int jz4740_wdt_start(struct watchdog_device *wdt_dev) { jz4740_timer_enable_watchdog(); jz4740_wdt_set_timeout(wdt_dev, wdt_dev->timeout); return 0; } static int jz4740_wdt_stop(struct watchdog_device *wdt_dev) { struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev); jz4740_timer_disable_watchdog(); writeb(0x0, drvdata->base + JZ_REG_WDT_COUNTER_ENABLE); return 0; } static const struct watchdog_info jz4740_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .identity = "jz4740 Watchdog", }; static const struct watchdog_ops jz4740_wdt_ops = { .owner = THIS_MODULE, .start = jz4740_wdt_start, .stop = jz4740_wdt_stop, .ping = jz4740_wdt_ping, .set_timeout = jz4740_wdt_set_timeout, }; static int jz4740_wdt_probe(struct platform_device *pdev) { struct jz4740_wdt_drvdata *drvdata; struct watchdog_device *jz4740_wdt; struct resource *res; int ret; drvdata = devm_kzalloc(&pdev->dev, sizeof(struct jz4740_wdt_drvdata), GFP_KERNEL); if (!drvdata) { dev_err(&pdev->dev, "Unable to allocate watchdog device\n"); return -ENOMEM; } if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) heartbeat = DEFAULT_HEARTBEAT; jz4740_wdt = &drvdata->wdt; jz4740_wdt->info = &jz4740_wdt_info; jz4740_wdt->ops = &jz4740_wdt_ops; jz4740_wdt->timeout = heartbeat; jz4740_wdt->min_timeout = 1; jz4740_wdt->max_timeout = MAX_HEARTBEAT; watchdog_set_nowayout(jz4740_wdt, nowayout); watchdog_set_drvdata(jz4740_wdt, drvdata); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); drvdata->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(drvdata->base)) { ret = PTR_ERR(drvdata->base); goto err_out; } drvdata->rtc_clk = clk_get(&pdev->dev, "rtc"); if (IS_ERR(drvdata->rtc_clk)) { dev_err(&pdev->dev, "cannot find RTC clock\n"); ret = PTR_ERR(drvdata->rtc_clk); goto err_out; } ret = watchdog_register_device(&drvdata->wdt); if (ret < 0) goto err_disable_clk; platform_set_drvdata(pdev, drvdata); return 0; err_disable_clk: clk_put(drvdata->rtc_clk); err_out: return ret; } static int jz4740_wdt_remove(struct platform_device *pdev) { struct jz4740_wdt_drvdata *drvdata = platform_get_drvdata(pdev); jz4740_wdt_stop(&drvdata->wdt); watchdog_unregister_device(&drvdata->wdt); clk_put(drvdata->rtc_clk); return 0; } static struct platform_driver jz4740_wdt_driver = { .probe = jz4740_wdt_probe, .remove = jz4740_wdt_remove, .driver = { .name = "jz4740-wdt", .owner = THIS_MODULE, }, }; module_platform_driver(jz4740_wdt_driver); MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>"); MODULE_DESCRIPTION("jz4740 Watchdog Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:jz4740-wdt");
gpl-2.0
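Aside: jz4740_wdt_set_timeout() in the record above has to squeeze rtc_clk_rate * timeout into a 16-bit counter, stepping the clock divider through 1, 4, 16, 64, 256, 1024 (each step divides the count by 4) and clamping once the largest divider is reached. A standalone C sketch of that search; the 32768 Hz rate and the helper name are assumptions for illustration, not the driver's API:

#include <stdio.h>

/* Model of the divider loop: shift the tick count right by 2 (divide by 4)
 * and move to the next divider until the count fits in 16 bits, clamping
 * to 0xffff once the largest divider (1024) is reached. */
static void pick_divider(unsigned int rtc_clk_rate, unsigned int timeout_s)
{
	unsigned int count = rtc_clk_rate * timeout_s;
	unsigned int div = 1;

	while (count > 0xffff) {
		if (div == 1024) {
			count = 0xffff; /* requested timeout too high */
			break;
		}
		count >>= 2;
		div <<= 2;
	}
	printf("rate=%u Hz timeout=%us -> divider=%u count=%u\n",
	       rtc_clk_rate, timeout_s, div, count);
}

int main(void)
{
	pick_divider(32768, 5);    /* 163840 ticks -> divider 4, count 40960 */
	pick_divider(32768, 2048); /* MAX_HEARTBEAT -> divider 1024, clamped */
	return 0;
}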
tgraf/net-next
drivers/rtc/rtc-88pm80x.c
782
10691
/* * Real Time Clock driver for Marvell 88PM80x PMIC * * Copyright (c) 2012 Marvell International Ltd. * Wenzeng Chen<wzch@marvell.com> * Qiao Zhou <zhouqiao@marvell.com> * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of this * archive for more details. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/regmap.h> #include <linux/mfd/core.h> #include <linux/mfd/88pm80x.h> #include <linux/rtc.h> #define PM800_RTC_COUNTER1 (0xD1) #define PM800_RTC_COUNTER2 (0xD2) #define PM800_RTC_COUNTER3 (0xD3) #define PM800_RTC_COUNTER4 (0xD4) #define PM800_RTC_EXPIRE1_1 (0xD5) #define PM800_RTC_EXPIRE1_2 (0xD6) #define PM800_RTC_EXPIRE1_3 (0xD7) #define PM800_RTC_EXPIRE1_4 (0xD8) #define PM800_RTC_TRIM1 (0xD9) #define PM800_RTC_TRIM2 (0xDA) #define PM800_RTC_TRIM3 (0xDB) #define PM800_RTC_TRIM4 (0xDC) #define PM800_RTC_EXPIRE2_1 (0xDD) #define PM800_RTC_EXPIRE2_2 (0xDE) #define PM800_RTC_EXPIRE2_3 (0xDF) #define PM800_RTC_EXPIRE2_4 (0xE0) #define PM800_POWER_DOWN_LOG1 (0xE5) #define PM800_POWER_DOWN_LOG2 (0xE6) struct pm80x_rtc_info { struct pm80x_chip *chip; struct regmap *map; struct rtc_device *rtc_dev; struct device *dev; struct delayed_work calib_work; int irq; int vrtc; }; static irqreturn_t rtc_update_handler(int irq, void *data) { struct pm80x_rtc_info *info = (struct pm80x_rtc_info *)data; int mask; mask = PM800_ALARM | PM800_ALARM_WAKEUP; regmap_update_bits(info->map, PM800_RTC_CONTROL, mask | PM800_ALARM1_EN, mask); rtc_update_irq(info->rtc_dev, 1, RTC_AF); return IRQ_HANDLED; } static int pm80x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct pm80x_rtc_info *info = dev_get_drvdata(dev); if (enabled) regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_ALARM1_EN, PM800_ALARM1_EN); else regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_ALARM1_EN, 0); return 0; } /* * Calculate the next alarm time given the requested alarm time mask * and the current time. 
*/ static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc_time *alrm) { unsigned long next_time; unsigned long now_time; next->tm_year = now->tm_year; next->tm_mon = now->tm_mon; next->tm_mday = now->tm_mday; next->tm_hour = alrm->tm_hour; next->tm_min = alrm->tm_min; next->tm_sec = alrm->tm_sec; rtc_tm_to_time(now, &now_time); rtc_tm_to_time(next, &next_time); if (next_time < now_time) { /* Advance one day */ next_time += 60 * 60 * 24; rtc_time_to_tm(next_time, next); } } static int pm80x_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct pm80x_rtc_info *info = dev_get_drvdata(dev); unsigned char buf[4]; unsigned long ticks, base, data; regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4); base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]); /* load 32-bit read-only counter */ regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4); data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; ticks = base + data; dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); rtc_time_to_tm(ticks, tm); return 0; } static int pm80x_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct pm80x_rtc_info *info = dev_get_drvdata(dev); unsigned char buf[4]; unsigned long ticks, base, data; if ((tm->tm_year < 70) || (tm->tm_year > 138)) { dev_dbg(info->dev, "Set time %d out of range. Please set time between 1970 and 2038.\n", 1900 + tm->tm_year); return -EINVAL; } rtc_tm_to_time(tm, &ticks); /* load 32-bit read-only counter */ regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4); data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; base = ticks - data; dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); buf[0] = base & 0xFF; buf[1] = (base >> 8) & 0xFF; buf[2] = (base >> 16) & 0xFF; buf[3] = (base >> 24) & 0xFF; regmap_raw_write(info->map, PM800_RTC_EXPIRE2_1, buf, 4); return 0; } static int pm80x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pm80x_rtc_info *info = dev_get_drvdata(dev); unsigned char buf[4]; unsigned long ticks, base, data; int ret; regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4); base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]); regmap_raw_read(info->map, PM800_RTC_EXPIRE1_1, buf, 4); data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; ticks = base + data; dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); rtc_time_to_tm(ticks, &alrm->time); regmap_read(info->map, PM800_RTC_CONTROL, &ret); alrm->enabled = (ret & PM800_ALARM1_EN) ? 1 : 0; alrm->pending = (ret & (PM800_ALARM | PM800_ALARM_WAKEUP)) ? 
1 : 0; return 0; } static int pm80x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pm80x_rtc_info *info = dev_get_drvdata(dev); struct rtc_time now_tm, alarm_tm; unsigned long ticks, base, data; unsigned char buf[4]; int mask; regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_ALARM1_EN, 0); regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4); base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]); /* load 32-bit read-only counter */ regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4); data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; ticks = base + data; dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); rtc_time_to_tm(ticks, &now_tm); dev_dbg(info->dev, "%s, now time : %lu\n", __func__, ticks); rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time); /* get new ticks for alarm in 24 hours */ rtc_tm_to_time(&alarm_tm, &ticks); dev_dbg(info->dev, "%s, alarm time: %lu\n", __func__, ticks); data = ticks - base; buf[0] = data & 0xff; buf[1] = (data >> 8) & 0xff; buf[2] = (data >> 16) & 0xff; buf[3] = (data >> 24) & 0xff; regmap_raw_write(info->map, PM800_RTC_EXPIRE1_1, buf, 4); if (alrm->enabled) { mask = PM800_ALARM | PM800_ALARM_WAKEUP | PM800_ALARM1_EN; regmap_update_bits(info->map, PM800_RTC_CONTROL, mask, mask); } else { mask = PM800_ALARM | PM800_ALARM_WAKEUP | PM800_ALARM1_EN; regmap_update_bits(info->map, PM800_RTC_CONTROL, mask, PM800_ALARM | PM800_ALARM_WAKEUP); } return 0; } static const struct rtc_class_ops pm80x_rtc_ops = { .read_time = pm80x_rtc_read_time, .set_time = pm80x_rtc_set_time, .read_alarm = pm80x_rtc_read_alarm, .set_alarm = pm80x_rtc_set_alarm, .alarm_irq_enable = pm80x_rtc_alarm_irq_enable, }; #ifdef CONFIG_PM_SLEEP static int pm80x_rtc_suspend(struct device *dev) { return pm80x_dev_suspend(dev); } static int pm80x_rtc_resume(struct device *dev) { return pm80x_dev_resume(dev); } #endif static SIMPLE_DEV_PM_OPS(pm80x_rtc_pm_ops, pm80x_rtc_suspend, pm80x_rtc_resume); static int pm80x_rtc_probe(struct platform_device *pdev) { struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent); struct pm80x_rtc_pdata *pdata = dev_get_platdata(&pdev->dev); struct pm80x_rtc_info *info; struct device_node *node = pdev->dev.of_node; struct rtc_time tm; unsigned long ticks = 0; int ret; if (!pdata && !node) { dev_err(&pdev->dev, "pm80x-rtc requires platform data or of_node\n"); return -EINVAL; } if (!pdata) { pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(&pdev->dev, "failed to allocate memory\n"); return -ENOMEM; } } info = devm_kzalloc(&pdev->dev, sizeof(struct pm80x_rtc_info), GFP_KERNEL); if (!info) return -ENOMEM; info->irq = platform_get_irq(pdev, 0); if (info->irq < 0) { dev_err(&pdev->dev, "No IRQ resource!\n"); ret = -EINVAL; goto out; } info->chip = chip; info->map = chip->regmap; if (!info->map) { dev_err(&pdev->dev, "no regmap!\n"); ret = -EINVAL; goto out; } info->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, info); ret = pm80x_request_irq(chip, info->irq, rtc_update_handler, IRQF_ONESHOT, "rtc", info); if (ret < 0) { dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n", info->irq, ret); goto out; } ret = pm80x_rtc_read_time(&pdev->dev, &tm); if (ret < 0) { dev_err(&pdev->dev, "Failed to read initial time.\n"); goto out_rtc; } if ((tm.tm_year < 70) || (tm.tm_year > 138)) { tm.tm_year = 70; tm.tm_mon = 0; tm.tm_mday = 1; tm.tm_hour = 0; tm.tm_min = 0; tm.tm_sec = 0; ret = 
pm80x_rtc_set_time(&pdev->dev, &tm); if (ret < 0) { dev_err(&pdev->dev, "Failed to set initial time.\n"); goto out_rtc; } } rtc_tm_to_time(&tm, &ticks); info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm80x-rtc", &pm80x_rtc_ops, THIS_MODULE); if (IS_ERR(info->rtc_dev)) { ret = PTR_ERR(info->rtc_dev); dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); goto out_rtc; } /* * enable internal XO instead of internal 3.25MHz clock since it can * keep running in PMIC power-down state. */ regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_RTC1_USE_XO, PM800_RTC1_USE_XO); /* remember whether this power-up was caused by PMIC RTC or not */ info->rtc_dev->dev.platform_data = &pdata->rtc_wakeup; device_init_wakeup(&pdev->dev, 1); return 0; out_rtc: pm80x_free_irq(chip, info->irq, info); out: return ret; } static int pm80x_rtc_remove(struct platform_device *pdev) { struct pm80x_rtc_info *info = platform_get_drvdata(pdev); pm80x_free_irq(info->chip, info->irq, info); return 0; } static struct platform_driver pm80x_rtc_driver = { .driver = { .name = "88pm80x-rtc", .pm = &pm80x_rtc_pm_ops, }, .probe = pm80x_rtc_probe, .remove = pm80x_rtc_remove, }; module_platform_driver(pm80x_rtc_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Marvell 88PM80x RTC driver"); MODULE_AUTHOR("Qiao Zhou <zhouqiao@marvell.com>"); MODULE_ALIAS("platform:88pm80x-rtc");
gpl-2.0
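Aside: the 88pm80x driver above never writes the hardware counter. Wall-clock time is base + counter, with base kept in the expire scratch registers, so setting the clock reduces to base = target - counter. A self-contained C sketch of that bookkeeping; the stand-in variables are illustrative, not the PMIC register interface:

#include <stdio.h>

static unsigned long rtc_base;   /* stands in for PM800_RTC_EXPIRE2_1..4 */
static unsigned long hw_counter; /* stands in for the read-only counter */

static unsigned long read_time(void)
{
	return rtc_base + hw_counter; /* as in pm80x_rtc_read_time() */
}

static void set_time(unsigned long ticks)
{
	rtc_base = ticks - hw_counter; /* the counter itself is never written */
}

int main(void)
{
	hw_counter = 123456;  /* some value of the free-running counter */
	set_time(1500000000UL);
	hw_counter += 60;     /* a minute passes */
	printf("now: %lu\n", read_time()); /* prints 1500000060 */
	return 0;
}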
MartinsAD/xperia-kernel-msm7x30
fs/jfs/jfs_logmgr.c
782
60774
/* * Copyright (C) International Business Machines Corp., 2000-2004 * Portions Copyright (C) Christoph Hellwig, 2001-2002 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * jfs_logmgr.c: log manager * * for related information, see transaction manager (jfs_txnmgr.c), and * recovery manager (jfs_logredo.c). * * note: for detail, RTFS. * * log buffer manager: * special purpose buffer manager supporting log i/o requirements. * per log serial pageout of logpage * queuing i/o requests and redrive i/o at iodone * maintain current logpage buffer * no caching since append only * appropriate jfs buffer cache buffers as needed * * group commit: * transactions which wrote COMMIT records in the same in-memory * log page during the pageout of previous/current log page(s) are * committed together by the pageout of the page. * * TBD lazy commit: * transactions are committed asynchronously when the log page * containing its COMMIT is paged out when it becomes full; * * serialization: * . a per log lock serializes log write. * . a per log lock serializes group commit. * . a per log lock serializes log open/close; * * TBD log integrity: * careful-write (ping-pong) of last logpage to recover from crash * in overwrite. * detection of split (out-of-order) write of physical sectors * of last logpage via timestamp at end of each sector * (with its mirror data array at trailer). * * alternatives: * lsn - 64-bit monotonically increasing integer vs * 32-bit lspn and page eor. */ #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/kthread.h> #include <linux/buffer_head.h> /* for sync_blockdev() */ #include <linux/bio.h> #include <linux/freezer.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/seq_file.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_metapage.h" #include "jfs_superblock.h" #include "jfs_txnmgr.h" #include "jfs_debug.h" /* * lbuf's ready to be redriven. 
Protected by log_redrive_lock (jfsIO thread) */ static struct lbuf *log_redrive_list; static DEFINE_SPINLOCK(log_redrive_lock); /* * log read/write serialization (per log) */ #define LOG_LOCK_INIT(log) mutex_init(&(log)->loglock) #define LOG_LOCK(log) mutex_lock(&((log)->loglock)) #define LOG_UNLOCK(log) mutex_unlock(&((log)->loglock)) /* * log group commit serialization (per log) */ #define LOGGC_LOCK_INIT(log) spin_lock_init(&(log)->gclock) #define LOGGC_LOCK(log) spin_lock_irq(&(log)->gclock) #define LOGGC_UNLOCK(log) spin_unlock_irq(&(log)->gclock) #define LOGGC_WAKEUP(tblk) wake_up_all(&(tblk)->gcwait) /* * log sync serialization (per log) */ #define LOGSYNC_DELTA(logsize) min((logsize)/8, 128*LOGPSIZE) #define LOGSYNC_BARRIER(logsize) ((logsize)/4) /* #define LOGSYNC_DELTA(logsize) min((logsize)/4, 256*LOGPSIZE) #define LOGSYNC_BARRIER(logsize) ((logsize)/2) */ /* * log buffer cache synchronization */ static DEFINE_SPINLOCK(jfsLCacheLock); #define LCACHE_LOCK(flags) spin_lock_irqsave(&jfsLCacheLock, flags) #define LCACHE_UNLOCK(flags) spin_unlock_irqrestore(&jfsLCacheLock, flags) /* * See __SLEEP_COND in jfs_locks.h */ #define LCACHE_SLEEP_COND(wq, cond, flags) \ do { \ if (cond) \ break; \ __SLEEP_COND(wq, cond, LCACHE_LOCK(flags), LCACHE_UNLOCK(flags)); \ } while (0) #define LCACHE_WAKEUP(event) wake_up(event) /* * lbuf buffer cache (lCache) control */ /* log buffer manager pageout control (cumulative, inclusive) */ #define lbmREAD 0x0001 #define lbmWRITE 0x0002 /* enqueue at tail of write queue; * init pageout if at head of queue; */ #define lbmRELEASE 0x0004 /* remove from write queue * at completion of pageout; * do not free/recycle it yet: * caller will free it; */ #define lbmSYNC 0x0008 /* do not return to freelist * when removed from write queue; */ #define lbmFREE 0x0010 /* return to freelist * at completion of pageout; * the buffer may be recycled; */ #define lbmDONE 0x0020 #define lbmERROR 0x0040 #define lbmGC 0x0080 /* lbmIODone to perform post-GC processing * of log page */ #define lbmDIRECT 0x0100 /* * Global list of active external journals */ static LIST_HEAD(jfs_external_logs); static struct jfs_log *dummy_log = NULL; static DEFINE_MUTEX(jfs_log_mutex); /* * forward references */ static int lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, struct tlock * tlck); static int lmNextPage(struct jfs_log * log); static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi, int activate); static int open_inline_log(struct super_block *sb); static int open_dummy_log(struct super_block *sb); static int lbmLogInit(struct jfs_log * log); static void lbmLogShutdown(struct jfs_log * log); static struct lbuf *lbmAllocate(struct jfs_log * log, int); static void lbmFree(struct lbuf * bp); static void lbmfree(struct lbuf * bp); static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp); static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag, int cant_block); static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag); static int lbmIOWait(struct lbuf * bp, int flag); static bio_end_io_t lbmIODone; static void lbmStartIO(struct lbuf * bp); static void lmGCwrite(struct jfs_log * log, int cant_block); static int lmLogSync(struct jfs_log * log, int hard_sync); /* * statistics */ #ifdef CONFIG_JFS_STATISTICS static struct lmStat { uint commit; /* # of commit */ uint pagedone; /* # of page written */ uint submitted; /* # of pages submitted */ uint full_page; /* # of full pages submitted */ uint partial_page; /* 
# of partial pages submitted */ } lmStat; #endif static void write_special_inodes(struct jfs_log *log, int (*writer)(struct address_space *)) { struct jfs_sb_info *sbi; list_for_each_entry(sbi, &log->sb_list, log_list) { writer(sbi->ipbmap->i_mapping); writer(sbi->ipimap->i_mapping); writer(sbi->direct_inode->i_mapping); } } /* * NAME: lmLog() * * FUNCTION: write a log record; * * PARAMETER: * * RETURN: lsn - offset to the next log record to write (end-of-log); * -1 - error; * * note: todo: log error handler */ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, struct tlock * tlck) { int lsn; int diffp, difft; struct metapage *mp = NULL; unsigned long flags; jfs_info("lmLog: log:0x%p tblk:0x%p, lrd:0x%p tlck:0x%p", log, tblk, lrd, tlck); LOG_LOCK(log); /* log by (out-of-transaction) JFS ? */ if (tblk == NULL) goto writeRecord; /* log from page ? */ if (tlck == NULL || tlck->type & tlckBTROOT || (mp = tlck->mp) == NULL) goto writeRecord; /* * initialize/update page/transaction recovery lsn */ lsn = log->lsn; LOGSYNC_LOCK(log, flags); /* * initialize page lsn if first log write of the page */ if (mp->lsn == 0) { mp->log = log; mp->lsn = lsn; log->count++; /* insert page at tail of logsynclist */ list_add_tail(&mp->synclist, &log->synclist); } /* * initialize/update lsn of tblock of the page * * transaction inherits oldest lsn of pages associated * with allocation/deallocation of resources (their * log records are used to reconstruct allocation map * at recovery time: inode for inode allocation map, * B+-tree index of extent descriptors for block * allocation map); * allocation map pages inherit transaction lsn at * commit time to allow forwarding log syncpt past log * records associated with allocation/deallocation of * resources only after persistent map of these map pages * have been updated and propagated to home. 
*/ /* * initialize transaction lsn: */ if (tblk->lsn == 0) { /* inherit lsn of its first page logged */ tblk->lsn = mp->lsn; log->count++; /* insert tblock after the page on logsynclist */ list_add(&tblk->synclist, &mp->synclist); } /* * update transaction lsn: */ else { /* inherit oldest/smallest lsn of page */ logdiff(diffp, mp->lsn, log); logdiff(difft, tblk->lsn, log); if (diffp < difft) { /* update tblock lsn with page lsn */ tblk->lsn = mp->lsn; /* move tblock after page on logsynclist */ list_move(&tblk->synclist, &mp->synclist); } } LOGSYNC_UNLOCK(log, flags); /* * write the log record */ writeRecord: lsn = lmWriteRecord(log, tblk, lrd, tlck); /* * forward log syncpt if log reached next syncpt trigger */ logdiff(diffp, lsn, log); if (diffp >= log->nextsync) lsn = lmLogSync(log, 0); /* update end-of-log lsn */ log->lsn = lsn; LOG_UNLOCK(log); /* return end-of-log address */ return lsn; } /* * NAME: lmWriteRecord() * * FUNCTION: move the log record to current log page * * PARAMETER: cd - commit descriptor * * RETURN: end-of-log address * * serialization: LOG_LOCK() held on entry/exit */ static int lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, struct tlock * tlck) { int lsn = 0; /* end-of-log address */ struct lbuf *bp; /* dst log page buffer */ struct logpage *lp; /* dst log page */ caddr_t dst; /* destination address in log page */ int dstoffset; /* end-of-log offset in log page */ int freespace; /* free space in log page */ caddr_t p; /* src meta-data page */ caddr_t src; int srclen; int nbytes; /* number of bytes to move */ int i; int len; struct linelock *linelock; struct lv *lv; struct lvd *lvd; int l2linesize; len = 0; /* retrieve destination log page to write */ bp = (struct lbuf *) log->bp; lp = (struct logpage *) bp->l_ldata; dstoffset = log->eor; /* any log data to write ? */ if (tlck == NULL) goto moveLrd; /* * move log record data */ /* retrieve source meta-data page to log */ if (tlck->flag & tlckPAGELOCK) { p = (caddr_t) (tlck->mp->data); linelock = (struct linelock *) & tlck->lock; } /* retrieve source in-memory inode to log */ else if (tlck->flag & tlckINODELOCK) { if (tlck->type & tlckDTREE) p = (caddr_t) &JFS_IP(tlck->ip)->i_dtroot; else p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot; linelock = (struct linelock *) & tlck->lock; } #ifdef _JFS_WIP else if (tlck->flag & tlckINLINELOCK) { inlinelock = (struct inlinelock *) & tlck; p = (caddr_t) & inlinelock->pxd; linelock = (struct linelock *) & tlck; } #endif /* _JFS_WIP */ else { jfs_err("lmWriteRecord: UFO tlck:0x%p", tlck); return 0; /* Probably should trap */ } l2linesize = linelock->l2linesize; moveData: ASSERT(linelock->index <= linelock->maxcnt); lv = linelock->lv; for (i = 0; i < linelock->index; i++, lv++) { if (lv->length == 0) continue; /* is page full ? */ if (dstoffset >= LOGPSIZE - LOGPTLRSIZE) { /* page become full: move on to next page */ lmNextPage(log); bp = log->bp; lp = (struct logpage *) bp->l_ldata; dstoffset = LOGPHDRSIZE; } /* * move log vector data */ src = (u8 *) p + (lv->offset << l2linesize); srclen = lv->length << l2linesize; len += srclen; while (srclen > 0) { freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset; nbytes = min(freespace, srclen); dst = (caddr_t) lp + dstoffset; memcpy(dst, src, nbytes); dstoffset += nbytes; /* is page not full ? 
*/ if (dstoffset < LOGPSIZE - LOGPTLRSIZE) break; /* page become full: move on to next page */ lmNextPage(log); bp = (struct lbuf *) log->bp; lp = (struct logpage *) bp->l_ldata; dstoffset = LOGPHDRSIZE; srclen -= nbytes; src += nbytes; } /* * move log vector descriptor */ len += 4; lvd = (struct lvd *) ((caddr_t) lp + dstoffset); lvd->offset = cpu_to_le16(lv->offset); lvd->length = cpu_to_le16(lv->length); dstoffset += 4; jfs_info("lmWriteRecord: lv offset:%d length:%d", lv->offset, lv->length); } if ((i = linelock->next)) { linelock = (struct linelock *) lid_to_tlock(i); goto moveData; } /* * move log record descriptor */ moveLrd: lrd->length = cpu_to_le16(len); src = (caddr_t) lrd; srclen = LOGRDSIZE; while (srclen > 0) { freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset; nbytes = min(freespace, srclen); dst = (caddr_t) lp + dstoffset; memcpy(dst, src, nbytes); dstoffset += nbytes; srclen -= nbytes; /* are there more to move than freespace of page ? */ if (srclen) goto pageFull; /* * end of log record descriptor */ /* update last log record eor */ log->eor = dstoffset; bp->l_eor = dstoffset; lsn = (log->page << L2LOGPSIZE) + dstoffset; if (lrd->type & cpu_to_le16(LOG_COMMIT)) { tblk->clsn = lsn; jfs_info("wr: tclsn:0x%x, beor:0x%x", tblk->clsn, bp->l_eor); INCREMENT(lmStat.commit); /* # of commit */ /* * enqueue tblock for group commit: * * enqueue tblock of non-trivial/synchronous COMMIT * at tail of group commit queue * (trivial/asynchronous COMMITs are ignored by * group commit.) */ LOGGC_LOCK(log); /* init tblock gc state */ tblk->flag = tblkGC_QUEUE; tblk->bp = log->bp; tblk->pn = log->page; tblk->eor = log->eor; /* enqueue transaction to commit queue */ list_add_tail(&tblk->cqueue, &log->cqueue); LOGGC_UNLOCK(log); } jfs_info("lmWriteRecord: lrd:0x%04x bp:0x%p pn:%d eor:0x%x", le16_to_cpu(lrd->type), log->bp, log->page, dstoffset); /* page not full ? */ if (dstoffset < LOGPSIZE - LOGPTLRSIZE) return lsn; pageFull: /* page become full: move on to next page */ lmNextPage(log); bp = (struct lbuf *) log->bp; lp = (struct logpage *) bp->l_ldata; dstoffset = LOGPHDRSIZE; src += nbytes; } return lsn; } /* * NAME: lmNextPage() * * FUNCTION: write current page and allocate next page. * * PARAMETER: log * * RETURN: 0 * * serialization: LOG_LOCK() held on entry/exit */ static int lmNextPage(struct jfs_log * log) { struct logpage *lp; int lspn; /* log sequence page number */ int pn; /* current page number */ struct lbuf *bp; struct lbuf *nextbp; struct tblock *tblk; /* get current log page number and log sequence page number */ pn = log->page; bp = log->bp; lp = (struct logpage *) bp->l_ldata; lspn = le32_to_cpu(lp->h.page); LOGGC_LOCK(log); /* * write or queue the full page at the tail of write queue */ /* get the tail tblk on commit queue */ if (list_empty(&log->cqueue)) tblk = NULL; else tblk = list_entry(log->cqueue.prev, struct tblock, cqueue); /* every tblk who has COMMIT record on the current page, * and has not been committed, must be on commit queue * since tblk is queued at commit queueu at the time * of writing its COMMIT record on the page before * page becomes full (even though the tblk thread * who wrote COMMIT record may have been suspended * currently); */ /* is page bound with outstanding tail tblk ? 
*/ if (tblk && tblk->pn == pn) { /* mark tblk for end-of-page */ tblk->flag |= tblkGC_EOP; if (log->cflag & logGC_PAGEOUT) { /* if page is not already on write queue, * just enqueue (no lbmWRITE to prevent redrive) * buffer to wqueue to ensure correct serial order * of the pages since log pages will be added * continuously */ if (bp->l_wqnext == NULL) lbmWrite(log, bp, 0, 0); } else { /* * No current GC leader, initiate group commit */ log->cflag |= logGC_PAGEOUT; lmGCwrite(log, 0); } } /* page is not bound with outstanding tblk: * init write or mark it to be redriven (lbmWRITE) */ else { /* finalize the page */ bp->l_ceor = bp->l_eor; lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, 0); } LOGGC_UNLOCK(log); /* * allocate/initialize next page */ /* if log wraps, the first data page of log is 2 * (0 never used, 1 is superblock). */ log->page = (pn == log->size - 1) ? 2 : pn + 1; log->eor = LOGPHDRSIZE; /* ? valid page empty/full at logRedo() */ /* allocate/initialize next log page buffer */ nextbp = lbmAllocate(log, log->page); nextbp->l_eor = log->eor; log->bp = nextbp; /* initialize next log page */ lp = (struct logpage *) nextbp->l_ldata; lp->h.page = lp->t.page = cpu_to_le32(lspn + 1); lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE); return 0; } /* * NAME: lmGroupCommit() * * FUNCTION: group commit * initiate pageout of the pages with COMMIT in the order of * page number - redrive pageout of the page at the head of * pageout queue until full page has been written. * * RETURN: * * NOTE: * LOGGC_LOCK serializes log group commit queue, and * transaction blocks on the commit queue. * N.B. LOG_LOCK is NOT held during lmGroupCommit(). */ int lmGroupCommit(struct jfs_log * log, struct tblock * tblk) { int rc = 0; LOGGC_LOCK(log); /* group committed already ? */ if (tblk->flag & tblkGC_COMMITTED) { if (tblk->flag & tblkGC_ERROR) rc = -EIO; LOGGC_UNLOCK(log); return rc; } jfs_info("lmGroup Commit: tblk = 0x%p, gcrtc = %d", tblk, log->gcrtc); if (tblk->xflag & COMMIT_LAZY) tblk->flag |= tblkGC_LAZY; if ((!(log->cflag & logGC_PAGEOUT)) && (!list_empty(&log->cqueue)) && (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag) || jfs_tlocks_low)) { /* * No pageout in progress * * start group commit as its group leader. */ log->cflag |= logGC_PAGEOUT; lmGCwrite(log, 0); } if (tblk->xflag & COMMIT_LAZY) { /* * Lazy transactions can leave now */ LOGGC_UNLOCK(log); return 0; } /* lmGCwrite gives up LOGGC_LOCK, check again */ if (tblk->flag & tblkGC_COMMITTED) { if (tblk->flag & tblkGC_ERROR) rc = -EIO; LOGGC_UNLOCK(log); return rc; } /* upcount transaction waiting for completion */ log->gcrtc++; tblk->flag |= tblkGC_READY; __SLEEP_COND(tblk->gcwait, (tblk->flag & tblkGC_COMMITTED), LOGGC_LOCK(log), LOGGC_UNLOCK(log)); /* removed from commit queue */ if (tblk->flag & tblkGC_ERROR) rc = -EIO; LOGGC_UNLOCK(log); return rc; } /* * NAME: lmGCwrite() * * FUNCTION: group commit write * initiate write of log page, building a group of all transactions * with commit records on that page. * * RETURN: None * * NOTE: * LOGGC_LOCK must be held by caller. * N.B. LOG_LOCK is NOT held during lmGroupCommit(). */ static void lmGCwrite(struct jfs_log * log, int cant_write) { struct lbuf *bp; struct logpage *lp; int gcpn; /* group commit page number */ struct tblock *tblk; struct tblock *xtblk = NULL; /* * build the commit group of a log page * * scan commit queue and make a commit group of all * transactions with COMMIT records on the same log page. 
*/ /* get the head tblk on the commit queue */ gcpn = list_entry(log->cqueue.next, struct tblock, cqueue)->pn; list_for_each_entry(tblk, &log->cqueue, cqueue) { if (tblk->pn != gcpn) break; xtblk = tblk; /* state transition: (QUEUE, READY) -> COMMIT */ tblk->flag |= tblkGC_COMMIT; } tblk = xtblk; /* last tblk of the page */ /* * pageout to commit transactions on the log page. */ bp = (struct lbuf *) tblk->bp; lp = (struct logpage *) bp->l_ldata; /* is page already full ? */ if (tblk->flag & tblkGC_EOP) { /* mark page to free at end of group commit of the page */ tblk->flag &= ~tblkGC_EOP; tblk->flag |= tblkGC_FREE; bp->l_ceor = bp->l_eor; lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmGC, cant_write); INCREMENT(lmStat.full_page); } /* page is not yet full */ else { bp->l_ceor = tblk->eor; /* ? bp->l_ceor = bp->l_eor; */ lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); lbmWrite(log, bp, lbmWRITE | lbmGC, cant_write); INCREMENT(lmStat.partial_page); } } /* * NAME: lmPostGC() * * FUNCTION: group commit post-processing * Processes transactions after their commit records have been written * to disk, redriving log I/O if necessary. * * RETURN: None * * NOTE: * This routine is called a interrupt time by lbmIODone */ static void lmPostGC(struct lbuf * bp) { unsigned long flags; struct jfs_log *log = bp->l_log; struct logpage *lp; struct tblock *tblk, *temp; //LOGGC_LOCK(log); spin_lock_irqsave(&log->gclock, flags); /* * current pageout of group commit completed. * * remove/wakeup transactions from commit queue who were * group committed with the current log page */ list_for_each_entry_safe(tblk, temp, &log->cqueue, cqueue) { if (!(tblk->flag & tblkGC_COMMIT)) break; /* if transaction was marked GC_COMMIT then * it has been shipped in the current pageout * and made it to disk - it is committed. */ if (bp->l_flag & lbmERROR) tblk->flag |= tblkGC_ERROR; /* remove it from the commit queue */ list_del(&tblk->cqueue); tblk->flag &= ~tblkGC_QUEUE; if (tblk == log->flush_tblk) { /* we can stop flushing the log now */ clear_bit(log_FLUSH, &log->flag); log->flush_tblk = NULL; } jfs_info("lmPostGC: tblk = 0x%p, flag = 0x%x", tblk, tblk->flag); if (!(tblk->xflag & COMMIT_FORCE)) /* * Hand tblk over to lazy commit thread */ txLazyUnlock(tblk); else { /* state transition: COMMIT -> COMMITTED */ tblk->flag |= tblkGC_COMMITTED; if (tblk->flag & tblkGC_READY) log->gcrtc--; LOGGC_WAKEUP(tblk); } /* was page full before pageout ? * (and this is the last tblk bound with the page) */ if (tblk->flag & tblkGC_FREE) lbmFree(bp); /* did page become full after pageout ? * (and this is the last tblk bound with the page) */ else if (tblk->flag & tblkGC_EOP) { /* finalize the page */ lp = (struct logpage *) bp->l_ldata; bp->l_ceor = bp->l_eor; lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); jfs_info("lmPostGC: calling lbmWrite"); lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, 1); } } /* are there any transactions who have entered lnGroupCommit() * (whose COMMITs are after that of the last log page written. * They are waiting for new group commit (above at (SLEEP 1)) * or lazy transactions are on a full (queued) log page, * select the latest ready transaction as new group leader and * wake her up to lead her group. 
*/ if ((!list_empty(&log->cqueue)) && ((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) || test_bit(log_FLUSH, &log->flag) || jfs_tlocks_low)) /* * Call lmGCwrite with new group leader */ lmGCwrite(log, 1); /* no transaction are ready yet (transactions are only just * queued (GC_QUEUE) and not entered for group commit yet). * the first transaction entering group commit * will elect herself as new group leader. */ else log->cflag &= ~logGC_PAGEOUT; //LOGGC_UNLOCK(log); spin_unlock_irqrestore(&log->gclock, flags); return; } /* * NAME: lmLogSync() * * FUNCTION: write log SYNCPT record for specified log * if new sync address is available * (normally the case if sync() is executed by back-ground * process). * calculate new value of i_nextsync which determines when * this code is called again. * * PARAMETERS: log - log structure * hard_sync - 1 to force all metadata to be written * * RETURN: 0 * * serialization: LOG_LOCK() held on entry/exit */ static int lmLogSync(struct jfs_log * log, int hard_sync) { int logsize; int written; /* written since last syncpt */ int free; /* free space left available */ int delta; /* additional delta to write normally */ int more; /* additional write granted */ struct lrd lrd; int lsn; struct logsyncblk *lp; unsigned long flags; /* push dirty metapages out to disk */ if (hard_sync) write_special_inodes(log, filemap_fdatawrite); else write_special_inodes(log, filemap_flush); /* * forward syncpt */ /* if last sync is same as last syncpt, * invoke sync point forward processing to update sync. */ if (log->sync == log->syncpt) { LOGSYNC_LOCK(log, flags); if (list_empty(&log->synclist)) log->sync = log->lsn; else { lp = list_entry(log->synclist.next, struct logsyncblk, synclist); log->sync = lp->lsn; } LOGSYNC_UNLOCK(log, flags); } /* if sync is different from last syncpt, * write a SYNCPT record with syncpt = sync. * reset syncpt = sync */ if (log->sync != log->syncpt) { lrd.logtid = 0; lrd.backchain = 0; lrd.type = cpu_to_le16(LOG_SYNCPT); lrd.length = 0; lrd.log.syncpt.sync = cpu_to_le32(log->sync); lsn = lmWriteRecord(log, NULL, &lrd, NULL); log->syncpt = log->sync; } else lsn = log->lsn; /* * setup next syncpt trigger (SWAG) */ logsize = log->logsize; logdiff(written, lsn, log); free = logsize - written; delta = LOGSYNC_DELTA(logsize); more = min(free / 2, delta); if (more < 2 * LOGPSIZE) { jfs_warn("\n ... Log Wrap ... Log Wrap ... Log Wrap ...\n"); /* * log wrapping * * option 1 - panic ? No.! * option 2 - shutdown file systems * associated with log ? * option 3 - extend log ? */ /* * option 4 - second chance * * mark log wrapped, and continue. * when all active transactions are completed, * mark log vaild for recovery. * if crashed during invalid state, log state * implies invald log, forcing fsck(). */ /* mark log state log wrap in log superblock */ /* log->state = LOGWRAP; */ /* reset sync point computation */ log->syncpt = log->sync = lsn; log->nextsync = delta; } else /* next syncpt trigger = written + more */ log->nextsync = written + more; /* if number of bytes written from last sync point is more * than 1/4 of the log size, stop new transactions from * starting until all current transactions are completed * by setting syncbarrier flag. 
*/ if (!test_bit(log_SYNCBARRIER, &log->flag) && (written > LOGSYNC_BARRIER(logsize)) && log->active) { set_bit(log_SYNCBARRIER, &log->flag); jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn, log->syncpt); /* * We may have to initiate group commit */ jfs_flush_journal(log, 0); } return lsn; } /* * NAME: jfs_syncpt * * FUNCTION: write log SYNCPT record for specified log * * PARAMETERS: log - log structure * hard_sync - set to 1 to force metadata to be written */ void jfs_syncpt(struct jfs_log *log, int hard_sync) { LOG_LOCK(log); lmLogSync(log, hard_sync); LOG_UNLOCK(log); } /* * NAME: lmLogOpen() * * FUNCTION: open the log on first open; * insert filesystem in the active list of the log. * * PARAMETER: ipmnt - file system mount inode * iplog - log inode (out) * * RETURN: * * serialization: */ int lmLogOpen(struct super_block *sb) { int rc; struct block_device *bdev; struct jfs_log *log; struct jfs_sb_info *sbi = JFS_SBI(sb); if (sbi->flag & JFS_NOINTEGRITY) return open_dummy_log(sb); if (sbi->mntflag & JFS_INLINELOG) return open_inline_log(sb); mutex_lock(&jfs_log_mutex); list_for_each_entry(log, &jfs_external_logs, journal_list) { if (log->bdev->bd_dev == sbi->logdev) { if (memcmp(log->uuid, sbi->loguuid, sizeof(log->uuid))) { jfs_warn("wrong uuid on JFS journal\n"); mutex_unlock(&jfs_log_mutex); return -EINVAL; } /* * add file system to log active file system list */ if ((rc = lmLogFileSystem(log, sbi, 1))) { mutex_unlock(&jfs_log_mutex); return rc; } goto journal_found; } } if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) { mutex_unlock(&jfs_log_mutex); return -ENOMEM; } INIT_LIST_HEAD(&log->sb_list); init_waitqueue_head(&log->syncwait); /* * external log as separate logical volume * * file systems to log may have n-to-1 relationship; */ bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE); if (IS_ERR(bdev)) { rc = -PTR_ERR(bdev); goto free; } if ((rc = bd_claim(bdev, log))) { goto close; } log->bdev = bdev; memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid)); /* * initialize log: */ if ((rc = lmLogInit(log))) goto unclaim; list_add(&log->journal_list, &jfs_external_logs); /* * add file system to log active file system list */ if ((rc = lmLogFileSystem(log, sbi, 1))) goto shutdown; journal_found: LOG_LOCK(log); list_add(&sbi->log_list, &log->sb_list); sbi->log = log; LOG_UNLOCK(log); mutex_unlock(&jfs_log_mutex); return 0; /* * unwind on error */ shutdown: /* unwind lbmLogInit() */ list_del(&log->journal_list); lbmLogShutdown(log); unclaim: bd_release(bdev); close: /* close external log device */ blkdev_put(bdev, FMODE_READ|FMODE_WRITE); free: /* free log descriptor */ mutex_unlock(&jfs_log_mutex); kfree(log); jfs_warn("lmLogOpen: exit(%d)", rc); return rc; } static int open_inline_log(struct super_block *sb) { struct jfs_log *log; int rc; if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) return -ENOMEM; INIT_LIST_HEAD(&log->sb_list); init_waitqueue_head(&log->syncwait); set_bit(log_INLINELOG, &log->flag); log->bdev = sb->s_bdev; log->base = addressPXD(&JFS_SBI(sb)->logpxd); log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >> (L2LOGPSIZE - sb->s_blocksize_bits); log->l2bsize = sb->s_blocksize_bits; ASSERT(L2LOGPSIZE >= sb->s_blocksize_bits); /* * initialize log. 
*/ if ((rc = lmLogInit(log))) { kfree(log); jfs_warn("lmLogOpen: exit(%d)", rc); return rc; } list_add(&JFS_SBI(sb)->log_list, &log->sb_list); JFS_SBI(sb)->log = log; return rc; } static int open_dummy_log(struct super_block *sb) { int rc; mutex_lock(&jfs_log_mutex); if (!dummy_log) { dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL); if (!dummy_log) { mutex_unlock(&jfs_log_mutex); return -ENOMEM; } INIT_LIST_HEAD(&dummy_log->sb_list); init_waitqueue_head(&dummy_log->syncwait); dummy_log->no_integrity = 1; /* Make up some stuff */ dummy_log->base = 0; dummy_log->size = 1024; rc = lmLogInit(dummy_log); if (rc) { kfree(dummy_log); dummy_log = NULL; mutex_unlock(&jfs_log_mutex); return rc; } } LOG_LOCK(dummy_log); list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list); JFS_SBI(sb)->log = dummy_log; LOG_UNLOCK(dummy_log); mutex_unlock(&jfs_log_mutex); return 0; } /* * NAME: lmLogInit() * * FUNCTION: log initialization at first log open. * * logredo() (or logformat()) should have been run previously. * initialize the log from log superblock. * set the log state in the superblock to LOGMOUNT and * write SYNCPT log record. * * PARAMETER: log - log structure * * RETURN: 0 - if ok * -EINVAL - bad log magic number or superblock dirty * error returned from logwait() * * serialization: single first open thread */ int lmLogInit(struct jfs_log * log) { int rc = 0; struct lrd lrd; struct logsuper *logsuper; struct lbuf *bpsuper; struct lbuf *bp; struct logpage *lp; int lsn = 0; jfs_info("lmLogInit: log:0x%p", log); /* initialize the group commit serialization lock */ LOGGC_LOCK_INIT(log); /* allocate/initialize the log write serialization lock */ LOG_LOCK_INIT(log); LOGSYNC_LOCK_INIT(log); INIT_LIST_HEAD(&log->synclist); INIT_LIST_HEAD(&log->cqueue); log->flush_tblk = NULL; log->count = 0; /* * initialize log i/o */ if ((rc = lbmLogInit(log))) return rc; if (!test_bit(log_INLINELOG, &log->flag)) log->l2bsize = L2LOGPSIZE; /* check for disabled journaling to disk */ if (log->no_integrity) { /* * Journal pages will still be filled. When the time comes * to actually do the I/O, the write is not done, and the * endio routine is called directly. */ bp = lbmAllocate(log , 0); log->bp = bp; bp->l_pn = bp->l_eor = 0; } else { /* * validate log superblock */ if ((rc = lbmRead(log, 1, &bpsuper))) goto errout10; logsuper = (struct logsuper *) bpsuper->l_ldata; if (logsuper->magic != cpu_to_le32(LOGMAGIC)) { jfs_warn("*** Log Format Error ! ***"); rc = -EINVAL; goto errout20; } /* logredo() should have been run successfully. */ if (logsuper->state != cpu_to_le32(LOGREDONE)) { jfs_warn("*** Log Is Dirty ! 
***"); rc = -EINVAL; goto errout20; } /* initialize log from log superblock */ if (test_bit(log_INLINELOG,&log->flag)) { if (log->size != le32_to_cpu(logsuper->size)) { rc = -EINVAL; goto errout20; } jfs_info("lmLogInit: inline log:0x%p base:0x%Lx " "size:0x%x", log, (unsigned long long) log->base, log->size); } else { if (memcmp(logsuper->uuid, log->uuid, 16)) { jfs_warn("wrong uuid on JFS log device"); goto errout20; } log->size = le32_to_cpu(logsuper->size); log->l2bsize = le32_to_cpu(logsuper->l2bsize); jfs_info("lmLogInit: external log:0x%p base:0x%Lx " "size:0x%x", log, (unsigned long long) log->base, log->size); } log->page = le32_to_cpu(logsuper->end) / LOGPSIZE; log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page); /* * initialize for log append write mode */ /* establish current/end-of-log page/buffer */ if ((rc = lbmRead(log, log->page, &bp))) goto errout20; lp = (struct logpage *) bp->l_ldata; jfs_info("lmLogInit: lsn:0x%x page:%d eor:%d:%d", le32_to_cpu(logsuper->end), log->page, log->eor, le16_to_cpu(lp->h.eor)); log->bp = bp; bp->l_pn = log->page; bp->l_eor = log->eor; /* if current page is full, move on to next page */ if (log->eor >= LOGPSIZE - LOGPTLRSIZE) lmNextPage(log); /* * initialize log syncpoint */ /* * write the first SYNCPT record with syncpoint = 0 * (i.e., log redo up to HERE !); * remove current page from lbm write queue at end of pageout * (to write log superblock update), but do not release to * freelist; */ lrd.logtid = 0; lrd.backchain = 0; lrd.type = cpu_to_le16(LOG_SYNCPT); lrd.length = 0; lrd.log.syncpt.sync = 0; lsn = lmWriteRecord(log, NULL, &lrd, NULL); bp = log->bp; bp->l_ceor = bp->l_eor; lp = (struct logpage *) bp->l_ldata; lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); lbmWrite(log, bp, lbmWRITE | lbmSYNC, 0); if ((rc = lbmIOWait(bp, 0))) goto errout30; /* * update/write superblock */ logsuper->state = cpu_to_le32(LOGMOUNT); log->serial = le32_to_cpu(logsuper->serial) + 1; logsuper->serial = cpu_to_le32(log->serial); lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC); if ((rc = lbmIOWait(bpsuper, lbmFREE))) goto errout30; } /* initialize logsync parameters */ log->logsize = (log->size - 2) << L2LOGPSIZE; log->lsn = lsn; log->syncpt = lsn; log->sync = log->syncpt; log->nextsync = LOGSYNC_DELTA(log->logsize); jfs_info("lmLogInit: lsn:0x%x syncpt:0x%x sync:0x%x", log->lsn, log->syncpt, log->sync); /* * initialize for lazy/group commit */ log->clsn = lsn; return 0; /* * unwind on error */ errout30: /* release log page */ log->wqueue = NULL; bp->l_wqnext = NULL; lbmFree(bp); errout20: /* release log superblock */ lbmFree(bpsuper); errout10: /* unwind lbmLogInit() */ lbmLogShutdown(log); jfs_warn("lmLogInit: exit(%d)", rc); return rc; } /* * NAME: lmLogClose() * * FUNCTION: remove file system <ipmnt> from active list of log <iplog> * and close it on last close. 
* * PARAMETER: sb - superblock * * RETURN: errors from subroutines * * serialization: */ int lmLogClose(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); struct jfs_log *log = sbi->log; struct block_device *bdev; int rc = 0; jfs_info("lmLogClose: log:0x%p", log); mutex_lock(&jfs_log_mutex); LOG_LOCK(log); list_del(&sbi->log_list); LOG_UNLOCK(log); sbi->log = NULL; /* * We need to make sure all of the "written" metapages * actually make it to disk */ sync_blockdev(sb->s_bdev); if (test_bit(log_INLINELOG, &log->flag)) { /* * in-line log in host file system */ rc = lmLogShutdown(log); kfree(log); goto out; } if (!log->no_integrity) lmLogFileSystem(log, sbi, 0); if (!list_empty(&log->sb_list)) goto out; /* * TODO: ensure that the dummy_log is in a state to allow * lbmLogShutdown to deallocate all the buffers and call * kfree against dummy_log. For now, leave dummy_log & its * buffers in memory, and resuse if another no-integrity mount * is requested. */ if (log->no_integrity) goto out; /* * external log as separate logical volume */ list_del(&log->journal_list); bdev = log->bdev; rc = lmLogShutdown(log); bd_release(bdev); blkdev_put(bdev, FMODE_READ|FMODE_WRITE); kfree(log); out: mutex_unlock(&jfs_log_mutex); jfs_info("lmLogClose: exit(%d)", rc); return rc; } /* * NAME: jfs_flush_journal() * * FUNCTION: initiate write of any outstanding transactions to the journal * and optionally wait until they are all written to disk * * wait == 0 flush until latest txn is committed, don't wait * wait == 1 flush until latest txn is committed, wait * wait > 1 flush until all txn's are complete, wait */ void jfs_flush_journal(struct jfs_log *log, int wait) { int i; struct tblock *target = NULL; /* jfs_write_inode may call us during read-only mount */ if (!log) return; jfs_info("jfs_flush_journal: log:0x%p wait=%d", log, wait); LOGGC_LOCK(log); if (!list_empty(&log->cqueue)) { /* * This ensures that we will keep writing to the journal as long * as there are unwritten commit records */ target = list_entry(log->cqueue.prev, struct tblock, cqueue); if (test_bit(log_FLUSH, &log->flag)) { /* * We're already flushing. * if flush_tblk is NULL, we are flushing everything, * so leave it that way. Otherwise, update it to the * latest transaction */ if (log->flush_tblk) log->flush_tblk = target; } else { /* Only flush until latest transaction is committed */ log->flush_tblk = target; set_bit(log_FLUSH, &log->flag); /* * Initiate I/O on outstanding transactions */ if (!(log->cflag & logGC_PAGEOUT)) { log->cflag |= logGC_PAGEOUT; lmGCwrite(log, 0); } } } if ((wait > 1) || test_bit(log_SYNCBARRIER, &log->flag)) { /* Flush until all activity complete */ set_bit(log_FLUSH, &log->flag); log->flush_tblk = NULL; } if (wait && target && !(target->flag & tblkGC_COMMITTED)) { DECLARE_WAITQUEUE(__wait, current); add_wait_queue(&target->gcwait, &__wait); set_current_state(TASK_UNINTERRUPTIBLE); LOGGC_UNLOCK(log); schedule(); __set_current_state(TASK_RUNNING); LOGGC_LOCK(log); remove_wait_queue(&target->gcwait, &__wait); } LOGGC_UNLOCK(log); if (wait < 2) return; write_special_inodes(log, filemap_fdatawrite); /* * If there was recent activity, we may need to wait * for the lazycommit thread to catch up */ if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) { for (i = 0; i < 200; i++) { /* Too much? 
*/ msleep(250); write_special_inodes(log, filemap_fdatawrite); if (list_empty(&log->cqueue) && list_empty(&log->synclist)) break; } } assert(list_empty(&log->cqueue)); #ifdef CONFIG_JFS_DEBUG if (!list_empty(&log->synclist)) { struct logsyncblk *lp; printk(KERN_ERR "jfs_flush_journal: synclist not empty\n"); list_for_each_entry(lp, &log->synclist, synclist) { if (lp->xflag & COMMIT_PAGE) { struct metapage *mp = (struct metapage *)lp; print_hex_dump(KERN_ERR, "metapage: ", DUMP_PREFIX_ADDRESS, 16, 4, mp, sizeof(struct metapage), 0); print_hex_dump(KERN_ERR, "page: ", DUMP_PREFIX_ADDRESS, 16, sizeof(long), mp->page, sizeof(struct page), 0); } else print_hex_dump(KERN_ERR, "tblock:", DUMP_PREFIX_ADDRESS, 16, 4, lp, sizeof(struct tblock), 0); } } #else WARN_ON(!list_empty(&log->synclist)); #endif clear_bit(log_FLUSH, &log->flag); } /* * NAME: lmLogShutdown() * * FUNCTION: log shutdown at last LogClose(). * * write log syncpt record. * update super block to set redone flag to 0. * * PARAMETER: log - log inode * * RETURN: 0 - success * * serialization: single last close thread */ int lmLogShutdown(struct jfs_log * log) { int rc; struct lrd lrd; int lsn; struct logsuper *logsuper; struct lbuf *bpsuper; struct lbuf *bp; struct logpage *lp; jfs_info("lmLogShutdown: log:0x%p", log); jfs_flush_journal(log, 2); /* * write the last SYNCPT record with syncpoint = 0 * (i.e., log redo up to HERE !) */ lrd.logtid = 0; lrd.backchain = 0; lrd.type = cpu_to_le16(LOG_SYNCPT); lrd.length = 0; lrd.log.syncpt.sync = 0; lsn = lmWriteRecord(log, NULL, &lrd, NULL); bp = log->bp; lp = (struct logpage *) bp->l_ldata; lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0); lbmIOWait(log->bp, lbmFREE); log->bp = NULL; /* * synchronous update log superblock * mark log state as shutdown cleanly * (i.e., Log does not need to be replayed). */ if ((rc = lbmRead(log, 1, &bpsuper))) goto out; logsuper = (struct logsuper *) bpsuper->l_ldata; logsuper->state = cpu_to_le32(LOGREDONE); logsuper->end = cpu_to_le32(lsn); lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC); rc = lbmIOWait(bpsuper, lbmFREE); jfs_info("lmLogShutdown: lsn:0x%x page:%d eor:%d", lsn, log->page, log->eor); out: /* * shutdown per log i/o */ lbmLogShutdown(log); if (rc) { jfs_warn("lmLogShutdown: exit(%d)", rc); } return rc; } /* * NAME: lmLogFileSystem() * * FUNCTION: insert (<activate> = true)/remove (<activate> = false) * file system into/from log active file system list. * * PARAMETE: log - pointer to logs inode. * fsdev - kdev_t of filesystem. * serial - pointer to returned log serial number * activate - insert/remove device from active list. * * RETURN: 0 - success * errors returned by vms_iowait(). */ static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi, int activate) { int rc = 0; int i; struct logsuper *logsuper; struct lbuf *bpsuper; char *uuid = sbi->uuid; /* * insert/remove file system device to log active file system list. */ if ((rc = lbmRead(log, 1, &bpsuper))) return rc; logsuper = (struct logsuper *) bpsuper->l_ldata; if (activate) { for (i = 0; i < MAX_ACTIVE; i++) if (!memcmp(logsuper->active[i].uuid, NULL_UUID, 16)) { memcpy(logsuper->active[i].uuid, uuid, 16); sbi->aggregate = i; break; } if (i == MAX_ACTIVE) { jfs_warn("Too many file systems sharing journal!"); lbmFree(bpsuper); return -EMFILE; /* Is there a better rc? 
*/ } } else { for (i = 0; i < MAX_ACTIVE; i++) if (!memcmp(logsuper->active[i].uuid, uuid, 16)) { memcpy(logsuper->active[i].uuid, NULL_UUID, 16); break; } if (i == MAX_ACTIVE) { jfs_warn("Somebody stomped on the journal!"); lbmFree(bpsuper); return -EIO; } } /* * synchronous write log superblock: * * write sidestream bypassing write queue: * at file system mount, log super block is updated for * activation of the file system before any log record * (MOUNT record) of the file system, and at file system * unmount, all meta data for the file system has been * flushed before log super block is updated for deactivation * of the file system. */ lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC); rc = lbmIOWait(bpsuper, lbmFREE); return rc; } /* * log buffer manager (lbm) * ------------------------ * * special purpose buffer manager supporting log i/o requirements. * * per log write queue: * log pageout occurs in serial order by fifo write queue and * restricting to a single i/o in pregress at any one time. * a circular singly-linked list * (log->wrqueue points to the tail, and buffers are linked via * bp->wrqueue field), and * maintains log page in pageout ot waiting for pageout in serial pageout. */ /* * lbmLogInit() * * initialize per log I/O setup at lmLogInit() */ static int lbmLogInit(struct jfs_log * log) { /* log inode */ int i; struct lbuf *lbuf; jfs_info("lbmLogInit: log:0x%p", log); /* initialize current buffer cursor */ log->bp = NULL; /* initialize log device write queue */ log->wqueue = NULL; /* * Each log has its own buffer pages allocated to it. These are * not managed by the page cache. This ensures that a transaction * writing to the log does not block trying to allocate a page from * the page cache (for the log). This would be bad, since page * allocation waits on the kswapd thread that may be committing inodes * which would cause log activity. Was that clear? I'm trying to * avoid deadlock here. 
*/ init_waitqueue_head(&log->free_wait); log->lbuf_free = NULL; for (i = 0; i < LOGPAGES;) { char *buffer; uint offset; struct page *page; buffer = (char *) get_zeroed_page(GFP_KERNEL); if (buffer == NULL) goto error; page = virt_to_page(buffer); for (offset = 0; offset < PAGE_SIZE; offset += LOGPSIZE) { lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL); if (lbuf == NULL) { if (offset == 0) free_page((unsigned long) buffer); goto error; } if (offset) /* we already have one reference */ get_page(page); lbuf->l_offset = offset; lbuf->l_ldata = buffer + offset; lbuf->l_page = page; lbuf->l_log = log; init_waitqueue_head(&lbuf->l_ioevent); lbuf->l_freelist = log->lbuf_free; log->lbuf_free = lbuf; i++; } } return (0); error: lbmLogShutdown(log); return -ENOMEM; } /* * lbmLogShutdown() * * finalize per log I/O setup at lmLogShutdown() */ static void lbmLogShutdown(struct jfs_log * log) { struct lbuf *lbuf; jfs_info("lbmLogShutdown: log:0x%p", log); lbuf = log->lbuf_free; while (lbuf) { struct lbuf *next = lbuf->l_freelist; __free_page(lbuf->l_page); kfree(lbuf); lbuf = next; } } /* * lbmAllocate() * * allocate an empty log buffer */ static struct lbuf *lbmAllocate(struct jfs_log * log, int pn) { struct lbuf *bp; unsigned long flags; /* * recycle from log buffer freelist if any */ LCACHE_LOCK(flags); LCACHE_SLEEP_COND(log->free_wait, (bp = log->lbuf_free), flags); log->lbuf_free = bp->l_freelist; LCACHE_UNLOCK(flags); bp->l_flag = 0; bp->l_wqnext = NULL; bp->l_freelist = NULL; bp->l_pn = pn; bp->l_blkno = log->base + (pn << (L2LOGPSIZE - log->l2bsize)); bp->l_ceor = 0; return bp; } /* * lbmFree() * * release a log buffer to freelist */ static void lbmFree(struct lbuf * bp) { unsigned long flags; LCACHE_LOCK(flags); lbmfree(bp); LCACHE_UNLOCK(flags); } static void lbmfree(struct lbuf * bp) { struct jfs_log *log = bp->l_log; assert(bp->l_wqnext == NULL); /* * return the buffer to head of freelist */ bp->l_freelist = log->lbuf_free; log->lbuf_free = bp; wake_up(&log->free_wait); return; } /* * NAME: lbmRedrive * * FUNCTION: add a log buffer to the log redrive list * * PARAMETER: * bp - log buffer * * NOTES: * Takes log_redrive_lock. */ static inline void lbmRedrive(struct lbuf *bp) { unsigned long flags; spin_lock_irqsave(&log_redrive_lock, flags); bp->l_redrive_next = log_redrive_list; log_redrive_list = bp; spin_unlock_irqrestore(&log_redrive_lock, flags); wake_up_process(jfsIOthread); } /* * lbmRead() */ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp) { struct bio *bio; struct lbuf *bp; /* * allocate a log buffer */ *bpp = bp = lbmAllocate(log, pn); jfs_info("lbmRead: bp:0x%p pn:0x%x", bp, pn); bp->l_flag |= lbmREAD; bio = bio_alloc(GFP_NOFS, 1); bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); bio->bi_bdev = log->bdev; bio->bi_io_vec[0].bv_page = bp->l_page; bio->bi_io_vec[0].bv_len = LOGPSIZE; bio->bi_io_vec[0].bv_offset = bp->l_offset; bio->bi_vcnt = 1; bio->bi_idx = 0; bio->bi_size = LOGPSIZE; bio->bi_end_io = lbmIODone; bio->bi_private = bp; submit_bio(READ_SYNC, bio); wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD)); return 0; } /* * lbmWrite() * * buffer at head of pageout queue stays after completion of * partial-page pageout and redriven by explicit initiation of * pageout by caller until full-page pageout is completed and * released. * * device driver i/o done redrives pageout of new buffer at * head of pageout queue when current buffer at head of pageout * queue is released at the completion of its full-page pageout. 
* * LOGGC_LOCK() serializes lbmWrite() by lmNextPage() and lmGroupCommit(). * LCACHE_LOCK() serializes xflag between lbmWrite() and lbmIODone() */ static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag, int cant_block) { struct lbuf *tail; unsigned long flags; jfs_info("lbmWrite: bp:0x%p flag:0x%x pn:0x%x", bp, flag, bp->l_pn); /* map the logical block address to physical block address */ bp->l_blkno = log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)); LCACHE_LOCK(flags); /* disable+lock */ /* * initialize buffer for device driver */ bp->l_flag = flag; /* * insert bp at tail of write queue associated with log * * (request is either for bp already/currently at head of queue * or new bp to be inserted at tail) */ tail = log->wqueue; /* is buffer not already on write queue ? */ if (bp->l_wqnext == NULL) { /* insert at tail of wqueue */ if (tail == NULL) { log->wqueue = bp; bp->l_wqnext = bp; } else { log->wqueue = bp; bp->l_wqnext = tail->l_wqnext; tail->l_wqnext = bp; } tail = bp; } /* is buffer at head of wqueue and for write ? */ if ((bp != tail->l_wqnext) || !(flag & lbmWRITE)) { LCACHE_UNLOCK(flags); /* unlock+enable */ return; } LCACHE_UNLOCK(flags); /* unlock+enable */ if (cant_block) lbmRedrive(bp); else if (flag & lbmSYNC) lbmStartIO(bp); else { LOGGC_UNLOCK(log); lbmStartIO(bp); LOGGC_LOCK(log); } } /* * lbmDirectWrite() * * initiate pageout bypassing write queue for sidestream * (e.g., log superblock) write; */ static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag) { jfs_info("lbmDirectWrite: bp:0x%p flag:0x%x pn:0x%x", bp, flag, bp->l_pn); /* * initialize buffer for device driver */ bp->l_flag = flag | lbmDIRECT; /* map the logical block address to physical block address */ bp->l_blkno = log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)); /* * initiate pageout of the page */ lbmStartIO(bp); } /* * NAME: lbmStartIO() * * FUNCTION: Interface to DD strategy routine * * RETURN: none * * serialization: LCACHE_LOCK() is NOT held during log i/o; */ static void lbmStartIO(struct lbuf * bp) { struct bio *bio; struct jfs_log *log = bp->l_log; jfs_info("lbmStartIO\n"); bio = bio_alloc(GFP_NOFS, 1); bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); bio->bi_bdev = log->bdev; bio->bi_io_vec[0].bv_page = bp->l_page; bio->bi_io_vec[0].bv_len = LOGPSIZE; bio->bi_io_vec[0].bv_offset = bp->l_offset; bio->bi_vcnt = 1; bio->bi_idx = 0; bio->bi_size = LOGPSIZE; bio->bi_end_io = lbmIODone; bio->bi_private = bp; /* check if journaling to disk has been disabled */ if (log->no_integrity) { bio->bi_size = 0; lbmIODone(bio, 0); } else { submit_bio(WRITE_SYNC, bio); INCREMENT(lmStat.submitted); } } /* * lbmIOWait() */ static int lbmIOWait(struct lbuf * bp, int flag) { unsigned long flags; int rc = 0; jfs_info("lbmIOWait1: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag); LCACHE_LOCK(flags); /* disable+lock */ LCACHE_SLEEP_COND(bp->l_ioevent, (bp->l_flag & lbmDONE), flags); rc = (bp->l_flag & lbmERROR) ? 
-EIO : 0; if (flag & lbmFREE) lbmfree(bp); LCACHE_UNLOCK(flags); /* unlock+enable */ jfs_info("lbmIOWait2: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag); return rc; } /* * lbmIODone() * * executed at INTIODONE level */ static void lbmIODone(struct bio *bio, int error) { struct lbuf *bp = bio->bi_private; struct lbuf *nextbp, *tail; struct jfs_log *log; unsigned long flags; /* * get back jfs buffer bound to the i/o buffer */ jfs_info("lbmIODone: bp:0x%p flag:0x%x", bp, bp->l_flag); LCACHE_LOCK(flags); /* disable+lock */ bp->l_flag |= lbmDONE; if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { bp->l_flag |= lbmERROR; jfs_err("lbmIODone: I/O error in JFS log"); } bio_put(bio); /* * pagein completion */ if (bp->l_flag & lbmREAD) { bp->l_flag &= ~lbmREAD; LCACHE_UNLOCK(flags); /* unlock+enable */ /* wakeup I/O initiator */ LCACHE_WAKEUP(&bp->l_ioevent); return; } /* * pageout completion * * the bp at the head of write queue has completed pageout. * * if single-commit/full-page pageout, remove the current buffer * from head of pageout queue, and redrive pageout with * the new buffer at head of pageout queue; * otherwise, the partial-page pageout buffer stays at * the head of pageout queue to be redriven for pageout * by lmGroupCommit() until full-page pageout is completed. */ bp->l_flag &= ~lbmWRITE; INCREMENT(lmStat.pagedone); /* update committed lsn */ log = bp->l_log; log->clsn = (bp->l_pn << L2LOGPSIZE) + bp->l_ceor; if (bp->l_flag & lbmDIRECT) { LCACHE_WAKEUP(&bp->l_ioevent); LCACHE_UNLOCK(flags); return; } tail = log->wqueue; /* single element queue */ if (bp == tail) { /* remove head buffer of full-page pageout * from log device write queue */ if (bp->l_flag & lbmRELEASE) { log->wqueue = NULL; bp->l_wqnext = NULL; } } /* multi element queue */ else { /* remove head buffer of full-page pageout * from log device write queue */ if (bp->l_flag & lbmRELEASE) { nextbp = tail->l_wqnext = bp->l_wqnext; bp->l_wqnext = NULL; /* * redrive pageout of next page at head of write queue: * redrive next page without any bound tblk * (i.e., page w/o any COMMIT records), or * first page of new group commit which has been * queued after current page (subsequent pageout * is performed synchronously, except page without * any COMMITs) by lmGroupCommit() as indicated * by lbmWRITE flag; */ if (nextbp->l_flag & lbmWRITE) { /* * We can't do the I/O at interrupt time. 
* The jfsIO thread can do it */ lbmRedrive(nextbp); } } } /* * synchronous pageout: * * buffer has not necessarily been removed from write queue * (e.g., synchronous write of partial-page with COMMIT): * leave buffer for i/o initiator to dispose */ if (bp->l_flag & lbmSYNC) { LCACHE_UNLOCK(flags); /* unlock+enable */ /* wakeup I/O initiator */ LCACHE_WAKEUP(&bp->l_ioevent); } /* * Group Commit pageout: */ else if (bp->l_flag & lbmGC) { LCACHE_UNLOCK(flags); lmPostGC(bp); } /* * asynchronous pageout: * * buffer must have been removed from write queue: * insert buffer at head of freelist where it can be recycled */ else { assert(bp->l_flag & lbmRELEASE); assert(bp->l_flag & lbmFREE); lbmfree(bp); LCACHE_UNLOCK(flags); /* unlock+enable */ } } int jfsIOWait(void *arg) { struct lbuf *bp; do { spin_lock_irq(&log_redrive_lock); while ((bp = log_redrive_list)) { log_redrive_list = bp->l_redrive_next; bp->l_redrive_next = NULL; spin_unlock_irq(&log_redrive_lock); lbmStartIO(bp); spin_lock_irq(&log_redrive_lock); } if (freezing(current)) { spin_unlock_irq(&log_redrive_lock); refrigerator(); } else { set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irq(&log_redrive_lock); schedule(); __set_current_state(TASK_RUNNING); } } while (!kthread_should_stop()); jfs_info("jfsIOWait being killed!"); return 0; } /* * NAME: lmLogFormat()/jfs_logform() * * FUNCTION: format file system log * * PARAMETERS: * log - volume log * logAddress - start address of log space in FS block * logSize - length of log space in FS block; * * RETURN: 0 - success * -EIO - i/o error * * XXX: We're synchronously writing one page at a time. This needs to * be improved by writing multiple pages at once. */ int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize) { int rc = -EIO; struct jfs_sb_info *sbi; struct logsuper *logsuper; struct logpage *lp; int lspn; /* log sequence page number */ struct lrd *lrd_ptr; int npages = 0; struct lbuf *bp; jfs_info("lmLogFormat: logAddress:%Ld logSize:%d", (long long)logAddress, logSize); sbi = list_entry(log->sb_list.next, struct jfs_sb_info, log_list); /* allocate a log buffer */ bp = lbmAllocate(log, 1); npages = logSize >> sbi->l2nbperpage; /* * log space: * * page 0 - reserved; * page 1 - log superblock; * page 2 - log data page: A SYNC log record is written * into this page at logform time; * pages 3-N - log data page: set to empty log data pages; */ /* * init log superblock: log page 1 */ logsuper = (struct logsuper *) bp->l_ldata; logsuper->magic = cpu_to_le32(LOGMAGIC); logsuper->version = cpu_to_le32(LOGVERSION); logsuper->state = cpu_to_le32(LOGREDONE); logsuper->flag = cpu_to_le32(sbi->mntflag); /* ? */ logsuper->size = cpu_to_le32(npages); logsuper->bsize = cpu_to_le32(sbi->bsize); logsuper->l2bsize = cpu_to_le32(sbi->l2bsize); logsuper->end = cpu_to_le32(2 * LOGPSIZE + LOGPHDRSIZE + LOGRDSIZE); bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; bp->l_blkno = logAddress + sbi->nbperpage; lbmStartIO(bp); if ((rc = lbmIOWait(bp, 0))) goto exit; /* * init pages 2 to npages-1 as log data pages: * * log page sequence number (lpsn) initialization: * * pn: 0 1 2 3 n-1 * +-----+-----+=====+=====+===.....===+=====+ * lspn: N-1 0 1 N-2 * <--- N page circular file ----> * * the N (= npages-2) data pages of the log is maintained as * a circular file for the log records; * lpsn grows by 1 monotonically as each log page is written * to the circular file of the log; * and setLogpage() will not reset the page number even if * the eor is equal to LOGPHDRSIZE. 
In order for binary search * still work in find log end process, we have to simulate the * log wrap situation at the log format time. * The 1st log page written will have the highest lpsn. Then * the succeeding log pages will have ascending order of * the lspn starting from 0, ... (N-2) */ lp = (struct logpage *) bp->l_ldata; /* * initialize 1st log page to be written: lpsn = N - 1, * write a SYNCPT log record is written to this page */ lp->h.page = lp->t.page = cpu_to_le32(npages - 3); lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE); lrd_ptr = (struct lrd *) &lp->data; lrd_ptr->logtid = 0; lrd_ptr->backchain = 0; lrd_ptr->type = cpu_to_le16(LOG_SYNCPT); lrd_ptr->length = 0; lrd_ptr->log.syncpt.sync = 0; bp->l_blkno += sbi->nbperpage; bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; lbmStartIO(bp); if ((rc = lbmIOWait(bp, 0))) goto exit; /* * initialize succeeding log pages: lpsn = 0, 1, ..., (N-2) */ for (lspn = 0; lspn < npages - 3; lspn++) { lp->h.page = lp->t.page = cpu_to_le32(lspn); lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE); bp->l_blkno += sbi->nbperpage; bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; lbmStartIO(bp); if ((rc = lbmIOWait(bp, 0))) goto exit; } rc = 0; exit: /* * finalize log */ /* release the buffer */ lbmFree(bp); return rc; } #ifdef CONFIG_JFS_STATISTICS static int jfs_lmstats_proc_show(struct seq_file *m, void *v) { seq_printf(m, "JFS Logmgr stats\n" "================\n" "commits = %d\n" "writes submitted = %d\n" "writes completed = %d\n" "full pages submitted = %d\n" "partial pages submitted = %d\n", lmStat.commit, lmStat.submitted, lmStat.pagedone, lmStat.full_page, lmStat.partial_page); return 0; } static int jfs_lmstats_proc_open(struct inode *inode, struct file *file) { return single_open(file, jfs_lmstats_proc_show, NULL); } const struct file_operations jfs_lmstats_proc_fops = { .owner = THIS_MODULE, .open = jfs_lmstats_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif /* CONFIG_JFS_STATISTICS */
gpl-2.0
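The jfs_logmgr.c source above leans on the logdiff() macro (defined in jfs_logmgr.h, not shown here) for every wraparound-safe comparison of log sequence numbers: forwarding the sync point, checking the syncpt trigger in lmLogSync(), and ordering tblock lsns against page lsns in lmLog(). As a minimal illustration of that arithmetic, here is a hedged, standalone user-space sketch; log_distance, the sample log size, and the sample lsn values are hypothetical stand-ins for demonstration, not kernel code.

/*
 * Standalone sketch (not part of jfs_logmgr.c): models the logdiff()
 * wraparound arithmetic used by the log manager above. All names and
 * values here are illustrative stand-ins.
 */
#include <stdio.h>

/* distance from a reference lsn to another lsn on a circular log */
static int log_distance(int lsn, int ref, int log_size)
{
	int diff = lsn - ref;

	if (diff < 0)		/* lsn wrapped past the end of the log */
		diff += log_size;
	return diff;
}

int main(void)
{
	int log_size = 0x100000;	/* hypothetical 1 MiB of log data pages */

	/* no wrap: lsn is ahead of the reference point */
	printf("%#x\n", log_distance(0x9000, 0x4000, log_size));	/* 0x5000 */

	/* wrap: lsn restarted near the head of the circular file */
	printf("%#x\n", log_distance(0x2000, 0xff000, log_size));	/* 0x3000 */
	return 0;
}

The property the sketch demonstrates is that distances stay meaningful after the log wraps back to its first data page, which is what lets lmLogSync() compare the bytes written since the last sync point against its trigger without ever resetting lsn values.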
VanirAOSP/kernel_samsung_smdk4412
arch/arm/kernel/irq.c
1038
4529
/* * linux/arch/arm/kernel/irq.c * * Copyright (C) 1992 Linus Torvalds * Modifications for ARM processor Copyright (C) 1995-2000 Russell King. * * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation. * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This file contains the code used by various IRQ handling routines: * asking for different IRQ's should be done through these routines * instead of just grabbing them. Thus setups with different IRQ numbers * shouldn't result in any weird surprises, and installing new handlers * should be easier. * * IRQ's are in fact implemented a bit like signal handlers for the kernel. * Naturally it's not a 1:1 relation, but there are similarities. */ #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/signal.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/random.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/kallsyms.h> #include <linux/proc_fs.h> #include <linux/ftrace.h> #include <asm/system.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> /* * No architecture-specific irq_finish function defined in arm/arch/irqs.h. */ #ifndef irq_finish #define irq_finish(irq) do { } while (0) #endif unsigned long irq_err_count; int arch_show_interrupts(struct seq_file *p, int prec) { #ifdef CONFIG_FIQ show_fiq_list(p, prec); #endif #ifdef CONFIG_SMP show_ipi_list(p, prec); #endif #ifdef CONFIG_LOCAL_TIMERS show_local_irqs(p, prec); #endif seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); return 0; } /* * do_IRQ handles all hardware IRQ's. Decoded IRQs should not * come via this function. Instead, they should provide their * own 'handler' */ asmlinkage void __exception_irq_entry asm_do_IRQ(unsigned int irq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); /* * Some hardware gives randomly wrong interrupts. Rather * than crashing, do something sensible. */ if (unlikely(irq >= nr_irqs)) { if (printk_ratelimit()) printk(KERN_WARNING "Bad IRQ%u\n", irq); ack_bad_irq(irq); } else { generic_handle_irq(irq); } /* AT91 specific workaround */ irq_finish(irq); irq_exit(); set_irq_regs(old_regs); } void set_irq_flags(unsigned int irq, unsigned int iflags) { unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; if (irq >= nr_irqs) { printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); return; } if (iflags & IRQF_VALID) clr |= IRQ_NOREQUEST; if (iflags & IRQF_PROBE) clr |= IRQ_NOPROBE; if (!(iflags & IRQF_NOAUTOEN)) clr |= IRQ_NOAUTOEN; /* Order is clear bits in "clr" then set bits in "set" */ irq_modify_status(irq, clr, set & ~clr); } void __init init_IRQ(void) { machine_desc->init_irq(); } #ifdef CONFIG_SPARSE_IRQ int __init arch_probe_nr_irqs(void) { nr_irqs = machine_desc->nr_irqs ? 
machine_desc->nr_irqs : NR_IRQS; return nr_irqs; } #endif #ifdef CONFIG_HOTPLUG_CPU static bool migrate_one_irq(struct irq_data *d) { unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask); bool ret = false; if (cpu >= nr_cpu_ids) { cpu = cpumask_any(cpu_online_mask); ret = true; } pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu); d->chip->irq_set_affinity(d, cpumask_of(cpu), true); return ret; } /* * The CPU has been marked offline. Migrate IRQs off this CPU. If * the affinity settings do not allow other CPUs, force them onto any * available CPU. */ void migrate_irqs(void) { unsigned int i, cpu = smp_processor_id(); struct irq_desc *desc; unsigned long flags; local_irq_save(flags); for_each_irq_desc(i, desc) { struct irq_data *d = &desc->irq_data; bool affinity_broken = false; raw_spin_lock(&desc->lock); do { if (desc->action == NULL) break; if (d->node != cpu) break; affinity_broken = migrate_one_irq(d); } while (0); raw_spin_unlock(&desc->lock); if (affinity_broken && printk_ratelimit()) pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu); } local_irq_restore(flags); } #endif /* CONFIG_HOTPLUG_CPU */
gpl-2.0
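In the arch/arm/kernel/irq.c source above, set_irq_flags() builds a clear mask and a set mask and hands both to irq_modify_status(), with the comment noting that clear bits are applied before set bits. The sketch below models that two-mask update in user space; the bit values and the modify_status() helper are illustrative stand-ins (the real IRQ_* / IRQF_* constants live in the kernel headers and differ from these), so treat it as a sketch under those assumptions rather than the kernel's implementation.

/*
 * Standalone sketch (not kernel code): models the clr/set computation
 * in set_irq_flags() above. Bit values are illustrative stand-ins.
 */
#include <stdio.h>

#define IRQ_NOREQUEST	(1u << 0)
#define IRQ_NOPROBE	(1u << 1)
#define IRQ_NOAUTOEN	(1u << 2)

#define IRQF_VALID	(1u << 0)
#define IRQF_PROBE	(1u << 1)
#define IRQF_NOAUTOEN	(1u << 2)

/* simplified model of irq_modify_status(): clear 'clr', then set 'set' */
static unsigned int modify_status(unsigned int status,
				  unsigned int clr, unsigned int set)
{
	status &= ~clr;
	status |= set;
	return status;
}

int main(void)
{
	/* a fresh descriptor starts with every restriction bit set */
	unsigned int status = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	unsigned int iflags = IRQF_VALID | IRQF_PROBE;	/* requestable + probeable */
	unsigned int clr = 0;
	unsigned int set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;

	/* same decision tree as set_irq_flags() above */
	if (iflags & IRQF_VALID)
		clr |= IRQ_NOREQUEST;
	if (iflags & IRQF_PROBE)
		clr |= IRQ_NOPROBE;
	if (!(iflags & IRQF_NOAUTOEN))
		clr |= IRQ_NOAUTOEN;

	status = modify_status(status, clr, set & ~clr);
	printf("status = %#x\n", status);	/* 0: every restriction lifted */
	return 0;
}

Masking set with ~clr is what makes the clear bits win whenever a flag lands on both sides, which is exactly the ordering the "Order is clear bits in clr then set bits in set" comment in the source spells out.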
abanerj/linux-mac
drivers/md/persistent-data/dm-bitset.c
1806
4052
/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-bitset.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "bitset"
#define BITS_PER_ARRAY_ENTRY 64

/*----------------------------------------------------------------*/

static struct dm_btree_value_type bitset_bvt = {
	.context = NULL,
	.size = sizeof(__le64),
	.inc = NULL,
	.dec = NULL,
	.equal = NULL,
};

/*----------------------------------------------------------------*/

void dm_disk_bitset_init(struct dm_transaction_manager *tm,
			 struct dm_disk_bitset *info)
{
	dm_array_info_init(&info->array_info, tm, &bitset_bvt);
	info->current_index_set = false;
}
EXPORT_SYMBOL_GPL(dm_disk_bitset_init);

int dm_bitset_empty(struct dm_disk_bitset *info, dm_block_t *root)
{
	return dm_array_empty(&info->array_info, root);
}
EXPORT_SYMBOL_GPL(dm_bitset_empty);

int dm_bitset_resize(struct dm_disk_bitset *info, dm_block_t root,
		     uint32_t old_nr_entries, uint32_t new_nr_entries,
		     bool default_value, dm_block_t *new_root)
{
	uint32_t old_blocks = dm_div_up(old_nr_entries, BITS_PER_ARRAY_ENTRY);
	uint32_t new_blocks = dm_div_up(new_nr_entries, BITS_PER_ARRAY_ENTRY);
	__le64 value = default_value ? cpu_to_le64(~0) : cpu_to_le64(0);

	__dm_bless_for_disk(&value);
	return dm_array_resize(&info->array_info, root, old_blocks, new_blocks,
			       &value, new_root);
}
EXPORT_SYMBOL_GPL(dm_bitset_resize);

int dm_bitset_del(struct dm_disk_bitset *info, dm_block_t root)
{
	return dm_array_del(&info->array_info, root);
}
EXPORT_SYMBOL_GPL(dm_bitset_del);

int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
		    dm_block_t *new_root)
{
	int r;
	__le64 value;

	if (!info->current_index_set || !info->dirty)
		return 0;

	value = cpu_to_le64(info->current_bits);

	__dm_bless_for_disk(&value);
	r = dm_array_set_value(&info->array_info, root, info->current_index,
			       &value, new_root);
	if (r)
		return r;

	info->current_index_set = false;
	info->dirty = false;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bitset_flush);

static int read_bits(struct dm_disk_bitset *info, dm_block_t root,
		     uint32_t array_index)
{
	int r;
	__le64 value;

	r = dm_array_get_value(&info->array_info, root, array_index, &value);
	if (r)
		return r;

	info->current_bits = le64_to_cpu(value);
	info->current_index_set = true;
	info->current_index = array_index;
	info->dirty = false;

	return 0;
}

static int get_array_entry(struct dm_disk_bitset *info, dm_block_t root,
			   uint32_t index, dm_block_t *new_root)
{
	int r;
	unsigned array_index = index / BITS_PER_ARRAY_ENTRY;

	if (info->current_index_set) {
		if (info->current_index == array_index)
			return 0;

		r = dm_bitset_flush(info, root, new_root);
		if (r)
			return r;
	}

	return read_bits(info, root, array_index);
}

int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
		      uint32_t index, dm_block_t *new_root)
{
	int r;
	unsigned b = index % BITS_PER_ARRAY_ENTRY;

	r = get_array_entry(info, root, index, new_root);
	if (r)
		return r;

	set_bit(b, (unsigned long *) &info->current_bits);
	info->dirty = true;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bitset_set_bit);

int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
			uint32_t index, dm_block_t *new_root)
{
	int r;
	unsigned b = index % BITS_PER_ARRAY_ENTRY;

	r = get_array_entry(info, root, index, new_root);
	if (r)
		return r;

	clear_bit(b, (unsigned long *) &info->current_bits);
	info->dirty = true;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bitset_clear_bit);

int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
		       uint32_t index, dm_block_t *new_root, bool *result)
{
	int r;
	unsigned b = index % BITS_PER_ARRAY_ENTRY;

	r = get_array_entry(info, root, index, new_root);
	if (r)
		return r;

	*result = test_bit(b, (unsigned long *) &info->current_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_bitset_test_bit);

/*----------------------------------------------------------------*/
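The read_bits()/dm_bitset_flush()/get_array_entry() trio above amounts to a one-entry write-back cache: a single 64-bit word of the on-disk array is held in memory, and touching a bit that lives in a different word first flushes the cached word if it is dirty. Below is a minimal, self-contained userspace sketch of that same caching idea, assuming a plain in-memory array in place of the dm-array and hypothetical names (cached_bitset, bs_set_bit, and so on); it illustrates the pattern, not the dm-bitset implementation itself:

/*
 * Sketch only: backing store is a plain uint64_t array, all names are
 * hypothetical, and no error paths exist because nothing can fail here.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 64

struct cached_bitset {
	uint64_t *words;	/* stand-in for the on-disk array */
	uint64_t current_bits;	/* cached copy of one word */
	unsigned current_index;	/* which word is cached */
	bool current_index_set;
	bool dirty;
};

/* Write the cached word back, mirroring dm_bitset_flush(). */
static void bs_flush(struct cached_bitset *bs)
{
	if (!bs->current_index_set || !bs->dirty)
		return;
	bs->words[bs->current_index] = bs->current_bits;
	bs->current_index_set = false;
	bs->dirty = false;
}

/* Load the word holding @index, flushing any other cached word first. */
static void bs_load(struct cached_bitset *bs, unsigned index)
{
	unsigned word = index / BITS_PER_WORD;

	if (bs->current_index_set && bs->current_index == word)
		return;
	bs_flush(bs);
	bs->current_bits = bs->words[word];
	bs->current_index = word;
	bs->current_index_set = true;
}

static void bs_set_bit(struct cached_bitset *bs, unsigned index)
{
	bs_load(bs, index);
	bs->current_bits |= 1ULL << (index % BITS_PER_WORD);
	bs->dirty = true;
}

static bool bs_test_bit(struct cached_bitset *bs, unsigned index)
{
	bs_load(bs, index);
	return bs->current_bits & (1ULL << (index % BITS_PER_WORD));
}

int main(void)
{
	uint64_t backing[2] = { 0, 0 };
	struct cached_bitset bs = { .words = backing };

	bs_set_bit(&bs, 3);
	bs_set_bit(&bs, 70);	/* crosses into word 1: flushes word 0 first */
	bs_flush(&bs);
	printf("bit 3: %d, bit 70: %d\n",
	       (int)bs_test_bit(&bs, 3), (int)bs_test_bit(&bs, 70));
	return 0;
}

The trade-off is the same in both settings: a burst of operations within one 64-bit word costs a single load and a single store of that word.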
gpl-2.0
defconoi/nexusplayer
drivers/gpu/drm/exynos/exynos_hdmi.c
2062
62943
/* * Copyright (C) 2011 Samsung Electronics Co.Ltd * Authors: * Seung-Woo Kim <sw0312.kim@samsung.com> * Inki Dae <inki.dae@samsung.com> * Joonyoung Shim <jy0922.shim@samsung.com> * * Based on drivers/media/video/s5p-tv/hdmi_drv.c * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <drm/drmP.h> #include <drm/drm_edid.h> #include <drm/drm_crtc_helper.h> #include "regs-hdmi.h" #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/regulator/consumer.h> #include <linux/io.h> #include <linux/of_gpio.h> #include <drm/exynos_drm.h> #include "exynos_drm_drv.h" #include "exynos_drm_hdmi.h" #include "exynos_hdmi.h" #include <linux/gpio.h> #include <media/s5p_hdmi.h> #define MAX_WIDTH 1920 #define MAX_HEIGHT 1080 #define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) /* AVI header and aspect ratio */ #define HDMI_AVI_VERSION 0x02 #define HDMI_AVI_LENGTH 0x0D #define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4) #define AVI_SAME_AS_PIC_ASPECT_RATIO 8 /* AUI header info */ #define HDMI_AUI_VERSION 0x01 #define HDMI_AUI_LENGTH 0x0A /* HDMI infoframe to configure HDMI out packet header, AUI and AVI */ enum HDMI_PACKET_TYPE { /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */ /* InfoFrame packet type */ HDMI_PACKET_TYPE_INFOFRAME = 0x80, /* Vendor-Specific InfoFrame */ HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1, /* Auxiliary Video information InfoFrame */ HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2, /* Audio information InfoFrame */ HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4 }; enum hdmi_type { HDMI_TYPE13, HDMI_TYPE14, }; struct hdmi_resources { struct clk *hdmi; struct clk *sclk_hdmi; struct clk *sclk_pixel; struct clk *sclk_hdmiphy; struct clk *hdmiphy; struct regulator_bulk_data *regul_bulk; int regul_count; }; struct hdmi_tg_regs { u8 cmd[1]; u8 h_fsz[2]; u8 hact_st[2]; u8 hact_sz[2]; u8 v_fsz[2]; u8 vsync[2]; u8 vsync2[2]; u8 vact_st[2]; u8 vact_sz[2]; u8 field_chg[2]; u8 vact_st2[2]; u8 vact_st3[2]; u8 vact_st4[2]; u8 vsync_top_hdmi[2]; u8 vsync_bot_hdmi[2]; u8 field_top_hdmi[2]; u8 field_bot_hdmi[2]; u8 tg_3d[1]; }; struct hdmi_v13_core_regs { u8 h_blank[2]; u8 v_blank[3]; u8 h_v_line[3]; u8 vsync_pol[1]; u8 int_pro_mode[1]; u8 v_blank_f[3]; u8 h_sync_gen[3]; u8 v_sync_gen1[3]; u8 v_sync_gen2[3]; u8 v_sync_gen3[3]; }; struct hdmi_v14_core_regs { u8 h_blank[2]; u8 v2_blank[2]; u8 v1_blank[2]; u8 v_line[2]; u8 h_line[2]; u8 hsync_pol[1]; u8 vsync_pol[1]; u8 int_pro_mode[1]; u8 v_blank_f0[2]; u8 v_blank_f1[2]; u8 h_sync_start[2]; u8 h_sync_end[2]; u8 v_sync_line_bef_2[2]; u8 v_sync_line_bef_1[2]; u8 v_sync_line_aft_2[2]; u8 v_sync_line_aft_1[2]; u8 v_sync_line_aft_pxl_2[2]; u8 v_sync_line_aft_pxl_1[2]; u8 v_blank_f2[2]; /* for 3D mode */ u8 v_blank_f3[2]; /* for 3D mode */ u8 v_blank_f4[2]; /* for 3D mode */ u8 v_blank_f5[2]; /* for 3D mode */ u8 v_sync_line_aft_3[2]; u8 v_sync_line_aft_4[2]; u8 v_sync_line_aft_5[2]; u8 v_sync_line_aft_6[2]; u8 v_sync_line_aft_pxl_3[2]; u8 v_sync_line_aft_pxl_4[2]; u8 v_sync_line_aft_pxl_5[2]; u8 v_sync_line_aft_pxl_6[2]; u8 vact_space_1[2]; u8 
vact_space_2[2]; u8 vact_space_3[2]; u8 vact_space_4[2]; u8 vact_space_5[2]; u8 vact_space_6[2]; }; struct hdmi_v13_conf { struct hdmi_v13_core_regs core; struct hdmi_tg_regs tg; }; struct hdmi_v14_conf { struct hdmi_v14_core_regs core; struct hdmi_tg_regs tg; }; struct hdmi_conf_regs { int pixel_clock; int cea_video_id; union { struct hdmi_v13_conf v13_conf; struct hdmi_v14_conf v14_conf; } conf; }; struct hdmi_context { struct device *dev; struct drm_device *drm_dev; bool hpd; bool powered; bool dvi_mode; struct mutex hdmi_mutex; void __iomem *regs; void *parent_ctx; int irq; struct i2c_client *ddc_port; struct i2c_client *hdmiphy_port; /* current hdmiphy conf regs */ struct hdmi_conf_regs mode_conf; struct hdmi_resources res; int hpd_gpio; enum hdmi_type type; }; struct hdmiphy_config { int pixel_clock; u8 conf[32]; }; /* list of phy config settings */ static const struct hdmiphy_config hdmiphy_v13_configs[] = { { .pixel_clock = 27000000, .conf = { 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40, 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }, }, { .pixel_clock = 27027000, .conf = { 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64, 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }, }, { .pixel_clock = 74176000, .conf = { 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B, 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9, 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00, }, }, { .pixel_clock = 74250000, .conf = { 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40, 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba, 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0, 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00, }, }, { .pixel_clock = 148500000, .conf = { 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40, 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba, 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0, 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00, }, }, }; static const struct hdmiphy_config hdmiphy_v14_configs[] = { { .pixel_clock = 25200000, .conf = { 0x01, 0x51, 0x2A, 0x75, 0x40, 0x01, 0x00, 0x08, 0x82, 0x80, 0xfc, 0xd8, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xf4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 27000000, .conf = { 0x01, 0xd1, 0x22, 0x51, 0x40, 0x08, 0xfc, 0x20, 0x98, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80, 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xe4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 27027000, .conf = { 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08, 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }, }, { .pixel_clock = 36000000, .conf = { 0x01, 0x51, 0x2d, 0x55, 0x40, 0x01, 0x00, 0x08, 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xab, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 40000000, .conf = { 0x01, 0x51, 0x32, 0x55, 0x40, 0x01, 0x00, 0x08, 0x82, 0x80, 0x2c, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0x9a, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 65000000, .conf = { 0x01, 0xd1, 0x36, 0x34, 0x40, 0x1e, 0x0a, 0x08, 0x82, 0xa0, 0x45, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xbd, 0x24, 0x01, 0x00, 
0x00, 0x01, 0x80, }, }, { .pixel_clock = 74176000, .conf = { 0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 0x08, 0x82, 0xa0, 0x73, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x56, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 74250000, .conf = { 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08, 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }, }, { .pixel_clock = 83500000, .conf = { 0x01, 0xd1, 0x23, 0x11, 0x40, 0x0c, 0xfb, 0x08, 0x85, 0xa0, 0xd1, 0xd8, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0x93, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 106500000, .conf = { 0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08, 0x84, 0xa0, 0x0a, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 108000000, .conf = { 0x01, 0x51, 0x2d, 0x15, 0x40, 0x01, 0x00, 0x08, 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 146250000, .conf = { 0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08, 0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0x50, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 148500000, .conf = { 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08, 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }, }, }; struct hdmi_infoframe { enum HDMI_PACKET_TYPE type; u8 ver; u8 len; }; static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) { return readl(hdata->regs + reg_id); } static inline void hdmi_reg_writeb(struct hdmi_context *hdata, u32 reg_id, u8 value) { writeb(value, hdata->regs + reg_id); } static inline void hdmi_reg_writemask(struct hdmi_context *hdata, u32 reg_id, u32 value, u32 mask) { u32 old = readl(hdata->regs + reg_id); value = (value & mask) | (old & ~mask); writel(value, hdata->regs + reg_id); } static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix) { #define DUMPREG(reg_id) \ DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \ readl(hdata->regs + reg_id)) DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix); DUMPREG(HDMI_INTC_FLAG); DUMPREG(HDMI_INTC_CON); DUMPREG(HDMI_HPD_STATUS); DUMPREG(HDMI_V13_PHY_RSTOUT); DUMPREG(HDMI_V13_PHY_VPLL); DUMPREG(HDMI_V13_PHY_CMU); DUMPREG(HDMI_V13_CORE_RSTOUT); DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix); DUMPREG(HDMI_CON_0); DUMPREG(HDMI_CON_1); DUMPREG(HDMI_CON_2); DUMPREG(HDMI_SYS_STATUS); DUMPREG(HDMI_V13_PHY_STATUS); DUMPREG(HDMI_STATUS_EN); DUMPREG(HDMI_HPD); DUMPREG(HDMI_MODE_SEL); DUMPREG(HDMI_V13_HPD_GEN); DUMPREG(HDMI_V13_DC_CONTROL); DUMPREG(HDMI_V13_VIDEO_PATTERN_GEN); DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix); DUMPREG(HDMI_H_BLANK_0); DUMPREG(HDMI_H_BLANK_1); DUMPREG(HDMI_V13_V_BLANK_0); DUMPREG(HDMI_V13_V_BLANK_1); DUMPREG(HDMI_V13_V_BLANK_2); DUMPREG(HDMI_V13_H_V_LINE_0); DUMPREG(HDMI_V13_H_V_LINE_1); DUMPREG(HDMI_V13_H_V_LINE_2); DUMPREG(HDMI_VSYNC_POL); DUMPREG(HDMI_INT_PRO_MODE); DUMPREG(HDMI_V13_V_BLANK_F_0); DUMPREG(HDMI_V13_V_BLANK_F_1); DUMPREG(HDMI_V13_V_BLANK_F_2); DUMPREG(HDMI_V13_H_SYNC_GEN_0); DUMPREG(HDMI_V13_H_SYNC_GEN_1); DUMPREG(HDMI_V13_H_SYNC_GEN_2); DUMPREG(HDMI_V13_V_SYNC_GEN_1_0); 
DUMPREG(HDMI_V13_V_SYNC_GEN_1_1); DUMPREG(HDMI_V13_V_SYNC_GEN_1_2); DUMPREG(HDMI_V13_V_SYNC_GEN_2_0); DUMPREG(HDMI_V13_V_SYNC_GEN_2_1); DUMPREG(HDMI_V13_V_SYNC_GEN_2_2); DUMPREG(HDMI_V13_V_SYNC_GEN_3_0); DUMPREG(HDMI_V13_V_SYNC_GEN_3_1); DUMPREG(HDMI_V13_V_SYNC_GEN_3_2); DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix); DUMPREG(HDMI_TG_CMD); DUMPREG(HDMI_TG_H_FSZ_L); DUMPREG(HDMI_TG_H_FSZ_H); DUMPREG(HDMI_TG_HACT_ST_L); DUMPREG(HDMI_TG_HACT_ST_H); DUMPREG(HDMI_TG_HACT_SZ_L); DUMPREG(HDMI_TG_HACT_SZ_H); DUMPREG(HDMI_TG_V_FSZ_L); DUMPREG(HDMI_TG_V_FSZ_H); DUMPREG(HDMI_TG_VSYNC_L); DUMPREG(HDMI_TG_VSYNC_H); DUMPREG(HDMI_TG_VSYNC2_L); DUMPREG(HDMI_TG_VSYNC2_H); DUMPREG(HDMI_TG_VACT_ST_L); DUMPREG(HDMI_TG_VACT_ST_H); DUMPREG(HDMI_TG_VACT_SZ_L); DUMPREG(HDMI_TG_VACT_SZ_H); DUMPREG(HDMI_TG_FIELD_CHG_L); DUMPREG(HDMI_TG_FIELD_CHG_H); DUMPREG(HDMI_TG_VACT_ST2_L); DUMPREG(HDMI_TG_VACT_ST2_H); DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L); DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H); DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L); DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H); DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L); DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H); DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L); DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H); #undef DUMPREG } static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix) { int i; #define DUMPREG(reg_id) \ DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \ readl(hdata->regs + reg_id)) DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix); DUMPREG(HDMI_INTC_CON); DUMPREG(HDMI_INTC_FLAG); DUMPREG(HDMI_HPD_STATUS); DUMPREG(HDMI_INTC_CON_1); DUMPREG(HDMI_INTC_FLAG_1); DUMPREG(HDMI_PHY_STATUS_0); DUMPREG(HDMI_PHY_STATUS_PLL); DUMPREG(HDMI_PHY_CON_0); DUMPREG(HDMI_PHY_RSTOUT); DUMPREG(HDMI_PHY_VPLL); DUMPREG(HDMI_PHY_CMU); DUMPREG(HDMI_CORE_RSTOUT); DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix); DUMPREG(HDMI_CON_0); DUMPREG(HDMI_CON_1); DUMPREG(HDMI_CON_2); DUMPREG(HDMI_SYS_STATUS); DUMPREG(HDMI_PHY_STATUS_0); DUMPREG(HDMI_STATUS_EN); DUMPREG(HDMI_HPD); DUMPREG(HDMI_MODE_SEL); DUMPREG(HDMI_ENC_EN); DUMPREG(HDMI_DC_CONTROL); DUMPREG(HDMI_VIDEO_PATTERN_GEN); DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix); DUMPREG(HDMI_H_BLANK_0); DUMPREG(HDMI_H_BLANK_1); DUMPREG(HDMI_V2_BLANK_0); DUMPREG(HDMI_V2_BLANK_1); DUMPREG(HDMI_V1_BLANK_0); DUMPREG(HDMI_V1_BLANK_1); DUMPREG(HDMI_V_LINE_0); DUMPREG(HDMI_V_LINE_1); DUMPREG(HDMI_H_LINE_0); DUMPREG(HDMI_H_LINE_1); DUMPREG(HDMI_HSYNC_POL); DUMPREG(HDMI_VSYNC_POL); DUMPREG(HDMI_INT_PRO_MODE); DUMPREG(HDMI_V_BLANK_F0_0); DUMPREG(HDMI_V_BLANK_F0_1); DUMPREG(HDMI_V_BLANK_F1_0); DUMPREG(HDMI_V_BLANK_F1_1); DUMPREG(HDMI_H_SYNC_START_0); DUMPREG(HDMI_H_SYNC_START_1); DUMPREG(HDMI_H_SYNC_END_0); DUMPREG(HDMI_H_SYNC_END_1); DUMPREG(HDMI_V_SYNC_LINE_BEF_2_0); DUMPREG(HDMI_V_SYNC_LINE_BEF_2_1); DUMPREG(HDMI_V_SYNC_LINE_BEF_1_0); DUMPREG(HDMI_V_SYNC_LINE_BEF_1_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_2_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_2_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_1_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_1_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_1); DUMPREG(HDMI_V_BLANK_F2_0); DUMPREG(HDMI_V_BLANK_F2_1); DUMPREG(HDMI_V_BLANK_F3_0); DUMPREG(HDMI_V_BLANK_F3_1); DUMPREG(HDMI_V_BLANK_F4_0); DUMPREG(HDMI_V_BLANK_F4_1); DUMPREG(HDMI_V_BLANK_F5_0); DUMPREG(HDMI_V_BLANK_F5_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_3_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_3_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_4_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_4_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_5_0); 
DUMPREG(HDMI_V_SYNC_LINE_AFT_5_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_6_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_6_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_1); DUMPREG(HDMI_VACT_SPACE_1_0); DUMPREG(HDMI_VACT_SPACE_1_1); DUMPREG(HDMI_VACT_SPACE_2_0); DUMPREG(HDMI_VACT_SPACE_2_1); DUMPREG(HDMI_VACT_SPACE_3_0); DUMPREG(HDMI_VACT_SPACE_3_1); DUMPREG(HDMI_VACT_SPACE_4_0); DUMPREG(HDMI_VACT_SPACE_4_1); DUMPREG(HDMI_VACT_SPACE_5_0); DUMPREG(HDMI_VACT_SPACE_5_1); DUMPREG(HDMI_VACT_SPACE_6_0); DUMPREG(HDMI_VACT_SPACE_6_1); DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix); DUMPREG(HDMI_TG_CMD); DUMPREG(HDMI_TG_H_FSZ_L); DUMPREG(HDMI_TG_H_FSZ_H); DUMPREG(HDMI_TG_HACT_ST_L); DUMPREG(HDMI_TG_HACT_ST_H); DUMPREG(HDMI_TG_HACT_SZ_L); DUMPREG(HDMI_TG_HACT_SZ_H); DUMPREG(HDMI_TG_V_FSZ_L); DUMPREG(HDMI_TG_V_FSZ_H); DUMPREG(HDMI_TG_VSYNC_L); DUMPREG(HDMI_TG_VSYNC_H); DUMPREG(HDMI_TG_VSYNC2_L); DUMPREG(HDMI_TG_VSYNC2_H); DUMPREG(HDMI_TG_VACT_ST_L); DUMPREG(HDMI_TG_VACT_ST_H); DUMPREG(HDMI_TG_VACT_SZ_L); DUMPREG(HDMI_TG_VACT_SZ_H); DUMPREG(HDMI_TG_FIELD_CHG_L); DUMPREG(HDMI_TG_FIELD_CHG_H); DUMPREG(HDMI_TG_VACT_ST2_L); DUMPREG(HDMI_TG_VACT_ST2_H); DUMPREG(HDMI_TG_VACT_ST3_L); DUMPREG(HDMI_TG_VACT_ST3_H); DUMPREG(HDMI_TG_VACT_ST4_L); DUMPREG(HDMI_TG_VACT_ST4_H); DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L); DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H); DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L); DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H); DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L); DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H); DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L); DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H); DUMPREG(HDMI_TG_3D); DRM_DEBUG_KMS("%s: ---- PACKET REGISTERS ----\n", prefix); DUMPREG(HDMI_AVI_CON); DUMPREG(HDMI_AVI_HEADER0); DUMPREG(HDMI_AVI_HEADER1); DUMPREG(HDMI_AVI_HEADER2); DUMPREG(HDMI_AVI_CHECK_SUM); DUMPREG(HDMI_VSI_CON); DUMPREG(HDMI_VSI_HEADER0); DUMPREG(HDMI_VSI_HEADER1); DUMPREG(HDMI_VSI_HEADER2); for (i = 0; i < 7; ++i) DUMPREG(HDMI_VSI_DATA(i)); #undef DUMPREG } static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) { if (hdata->type == HDMI_TYPE13) hdmi_v13_regs_dump(hdata, prefix); else hdmi_v14_regs_dump(hdata, prefix); } static u8 hdmi_chksum(struct hdmi_context *hdata, u32 start, u8 len, u32 hdr_sum) { int i; /* hdr_sum : header0 + header1 + header2 * start : start address of packet byte1 * len : packet bytes - 1 */ for (i = 0; i < len; ++i) hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4); /* return 2's complement of 8 bit hdr_sum */ return (u8)(~(hdr_sum & 0xff) + 1); } static void hdmi_reg_infoframe(struct hdmi_context *hdata, struct hdmi_infoframe *infoframe) { u32 hdr_sum; u8 chksum; u32 aspect_ratio; u32 mod; u32 vic; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); mod = hdmi_reg_read(hdata, HDMI_MODE_SEL); if (hdata->dvi_mode) { hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_DO_NOT_TRANSMIT); hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_DO_NOT_TRANSMIT); hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN); return; } switch (infoframe->type) { case HDMI_PACKET_TYPE_AVI: hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC); hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type); hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver); hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len); hdr_sum = infoframe->type + infoframe->ver + 
infoframe->len; /* Output format zero hardcoded ,RGB YBCR selection */ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 | AVI_ACTIVE_FORMAT_VALID | AVI_UNDERSCANNED_DISPLAY_VALID); aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9; hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio | AVI_SAME_AS_PIC_ASPECT_RATIO); vic = hdata->mode_conf.cea_video_id; hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), infoframe->len, hdr_sum); DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum); hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum); break; case HDMI_PACKET_TYPE_AUI: hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02); hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type); hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver); hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len); hdr_sum = infoframe->type + infoframe->ver + infoframe->len; chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1), infoframe->len, hdr_sum); DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum); hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum); break; default: break; } } static bool hdmi_is_connected(void *ctx) { struct hdmi_context *hdata = ctx; return hdata->hpd; } static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector) { struct edid *raw_edid; struct hdmi_context *hdata = ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); if (!hdata->ddc_port) return ERR_PTR(-ENODEV); raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter); if (!raw_edid) return ERR_PTR(-ENODEV); hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid); DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), raw_edid->width_cm, raw_edid->height_cm); return raw_edid; } static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) { const struct hdmiphy_config *confs; int count, i; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); if (hdata->type == HDMI_TYPE13) { confs = hdmiphy_v13_configs; count = ARRAY_SIZE(hdmiphy_v13_configs); } else if (hdata->type == HDMI_TYPE14) { confs = hdmiphy_v14_configs; count = ARRAY_SIZE(hdmiphy_v14_configs); } else return -EINVAL; for (i = 0; i < count; i++) if (confs[i].pixel_clock == pixel_clock) return i; DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock); return -EINVAL; } static int hdmi_check_timing(void *ctx, struct fb_videomode *timing) { struct hdmi_context *hdata = ctx; int ret; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", timing->xres, timing->yres, timing->refresh, timing->vmode); ret = hdmi_find_phy_conf(hdata, timing->pixclock); if (ret < 0) return ret; return 0; } static void hdmi_set_acr(u32 freq, u8 *acr) { u32 n, cts; switch (freq) { case 32000: n = 4096; cts = 27000; break; case 44100: n = 6272; cts = 30000; break; case 88200: n = 12544; cts = 30000; break; case 176400: n = 25088; cts = 30000; break; case 48000: n = 6144; cts = 27000; break; case 96000: n = 12288; cts = 27000; break; case 192000: n = 24576; cts = 27000; break; default: n = 0; cts = 0; break; } acr[1] = cts >> 16; acr[2] = cts >> 8 & 0xff; acr[3] = cts & 0xff; acr[4] = n >> 16; acr[5] = n >> 8 & 0xff; acr[6] = n & 0xff; } static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr) { hdmi_reg_writeb(hdata, HDMI_ACR_N0, acr[6]); hdmi_reg_writeb(hdata, HDMI_ACR_N1, acr[5]); hdmi_reg_writeb(hdata, HDMI_ACR_N2, acr[4]); hdmi_reg_writeb(hdata, HDMI_ACR_MCTS0, acr[3]); hdmi_reg_writeb(hdata, HDMI_ACR_MCTS1, acr[2]); hdmi_reg_writeb(hdata, HDMI_ACR_MCTS2, acr[1]); 
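	/*
	 * The N/CTS pairs chosen by hdmi_set_acr() above follow the HDMI
	 * audio clock regeneration relation 128 * fs = N * f_pixel / CTS;
	 * e.g. for 44.1 kHz audio on a 27 MHz pixel clock, N = 6272 gives
	 * CTS = 27000000 * 6272 / (128 * 44100) = 30000, matching the table.
	 */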
hdmi_reg_writeb(hdata, HDMI_ACR_CTS0, acr[3]); hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]); hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]); if (hdata->type == HDMI_TYPE13) hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4); else hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4); } static void hdmi_audio_init(struct hdmi_context *hdata) { u32 sample_rate, bits_per_sample, frame_size_code; u32 data_num, bit_ch, sample_frq; u32 val; u8 acr[7]; sample_rate = 44100; bits_per_sample = 16; frame_size_code = 0; switch (bits_per_sample) { case 20: data_num = 2; bit_ch = 1; break; case 24: data_num = 3; bit_ch = 1; break; default: data_num = 1; bit_ch = 0; break; } hdmi_set_acr(sample_rate, acr); hdmi_reg_acr(hdata, acr); hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE | HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE | HDMI_I2S_MUX_ENABLE); hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CH, HDMI_I2S_CH0_EN | HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN); hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN); sample_frq = (sample_rate == 44100) ? 0 : (sample_rate == 48000) ? 2 : (sample_rate == 32000) ? 3 : (sample_rate == 96000) ? 0xa : 0x0; hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS); hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN); val = hdmi_reg_read(hdata, HDMI_I2S_DSD_CON) | 0x01; hdmi_reg_writeb(hdata, HDMI_I2S_DSD_CON, val); /* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5) | HDMI_I2S_SEL_LRCK(6)); hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1) | HDMI_I2S_SEL_SDATA2(4)); hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1) | HDMI_I2S_SEL_SDATA2(2)); hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0)); /* I2S_CON_1 & 2 */ hdmi_reg_writeb(hdata, HDMI_I2S_CON_1, HDMI_I2S_SCLK_FALLING_EDGE | HDMI_I2S_L_CH_LOW_POL); hdmi_reg_writeb(hdata, HDMI_I2S_CON_2, HDMI_I2S_MSB_FIRST_MODE | HDMI_I2S_SET_BIT_CH(bit_ch) | HDMI_I2S_SET_SDATA_BIT(data_num) | HDMI_I2S_BASIC_FORMAT); /* Configure register related to CUV information */ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_0, HDMI_I2S_CH_STATUS_MODE_0 | HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH | HDMI_I2S_COPYRIGHT | HDMI_I2S_LINEAR_PCM | HDMI_I2S_CONSUMER_FORMAT); hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_1, HDMI_I2S_CD_PLAYER); hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_2, HDMI_I2S_SET_SOURCE_NUM(0)); hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_3, HDMI_I2S_CLK_ACCUR_LEVEL_2 | HDMI_I2S_SET_SMP_FREQ(sample_frq)); hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_4, HDMI_I2S_ORG_SMP_FREQ_44_1 | HDMI_I2S_WORD_LEN_MAX24_24BITS | HDMI_I2S_WORD_LEN_MAX_24BITS); hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD); } static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff) { if (hdata->dvi_mode) return; hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0); hdmi_reg_writemask(hdata, HDMI_CON_0, onoff ? 
HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK); } static void hdmi_conf_reset(struct hdmi_context *hdata) { u32 reg; if (hdata->type == HDMI_TYPE13) reg = HDMI_V13_CORE_RSTOUT; else reg = HDMI_CORE_RSTOUT; /* resetting HDMI core */ hdmi_reg_writemask(hdata, reg, 0, HDMI_CORE_SW_RSTOUT); usleep_range(10000, 12000); hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT); usleep_range(10000, 12000); } static void hdmi_conf_init(struct hdmi_context *hdata) { struct hdmi_infoframe infoframe; /* disable HPD interrupts from HDMI IP block, use GPIO instead */ hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); /* choose HDMI mode */ hdmi_reg_writemask(hdata, HDMI_MODE_SEL, HDMI_MODE_HDMI_EN, HDMI_MODE_MASK); /* disable bluescreen */ hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN); if (hdata->dvi_mode) { /* choose DVI mode */ hdmi_reg_writemask(hdata, HDMI_MODE_SEL, HDMI_MODE_DVI_EN, HDMI_MODE_MASK); hdmi_reg_writeb(hdata, HDMI_CON_2, HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS); } if (hdata->type == HDMI_TYPE13) { /* choose bluescreen (fecal) color */ hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12); hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34); hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_2, 0x56); /* enable AVI packet every vsync, fixes purple line problem */ hdmi_reg_writeb(hdata, HDMI_V13_AVI_CON, 0x02); /* force RGB, look to CEA-861-D, table 7 for more detail */ hdmi_reg_writeb(hdata, HDMI_V13_AVI_BYTE(0), 0 << 5); hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5); hdmi_reg_writeb(hdata, HDMI_V13_SPD_CON, 0x02); hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); } else { infoframe.type = HDMI_PACKET_TYPE_AVI; infoframe.ver = HDMI_AVI_VERSION; infoframe.len = HDMI_AVI_LENGTH; hdmi_reg_infoframe(hdata, &infoframe); infoframe.type = HDMI_PACKET_TYPE_AUI; infoframe.ver = HDMI_AUI_VERSION; infoframe.len = HDMI_AUI_LENGTH; hdmi_reg_infoframe(hdata, &infoframe); /* enable AVI packet every vsync, fixes purple line problem */ hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5); } } static void hdmi_v13_timing_apply(struct hdmi_context *hdata) { const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg; const struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core; int tries; /* setting core registers */ hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]); hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]); hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]); hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]); hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]); hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]); hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]); hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]); 
hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]); /* Timing generator registers */ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]); hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]); hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]); hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]); /* waiting for HDMIPHY's PLL to get to steady state */ for (tries = 100; tries; --tries) { u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS); if (val & HDMI_PHY_STATUS_READY) break; usleep_range(1000, 2000); } /* steady state not achieved */ if (tries == 0) { DRM_ERROR("hdmiphy's pll could not reach steady state.\n"); hdmi_regs_dump(hdata, "timing apply"); } clk_disable(hdata->res.sclk_hdmi); clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy); clk_enable(hdata->res.sclk_hdmi); /* enable HDMI and timing generator */ hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN); if (core->int_pro_mode[0]) hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN | HDMI_FIELD_EN); else hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN); } static void hdmi_v14_timing_apply(struct hdmi_context *hdata) { const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg; const struct hdmi_v14_core_regs *core = &hdata->mode_conf.conf.v14_conf.core; int tries; /* setting core registers */ hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]); hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]); hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, 
core->v1_blank[0]); hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]); hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]); hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]); hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]); hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]); hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]); hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]); hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]); hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]); hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]); hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0, core->v_sync_line_bef_2[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1, core->v_sync_line_bef_2[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0, core->v_sync_line_bef_1[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1, core->v_sync_line_bef_1[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0, core->v_sync_line_aft_2[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1, core->v_sync_line_aft_2[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0, core->v_sync_line_aft_1[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1, core->v_sync_line_aft_1[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, core->v_sync_line_aft_pxl_2[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1, core->v_sync_line_aft_pxl_2[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, core->v_sync_line_aft_pxl_1[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1, core->v_sync_line_aft_pxl_1[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0, core->v_sync_line_aft_3[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1, core->v_sync_line_aft_3[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0, core->v_sync_line_aft_4[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1, core->v_sync_line_aft_4[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0, core->v_sync_line_aft_5[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1, core->v_sync_line_aft_5[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0, core->v_sync_line_aft_6[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1, core->v_sync_line_aft_6[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, core->v_sync_line_aft_pxl_3[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1, core->v_sync_line_aft_pxl_3[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, core->v_sync_line_aft_pxl_4[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1, core->v_sync_line_aft_pxl_4[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, core->v_sync_line_aft_pxl_5[0]); 
hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1, core->v_sync_line_aft_pxl_5[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, core->v_sync_line_aft_pxl_6[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1, core->v_sync_line_aft_pxl_6[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]); /* Timing generator registers */ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]); hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]); hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]); hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]); /* waiting for HDMIPHY's PLL to get to steady state */ for (tries = 100; tries; --tries) { u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0); if (val & HDMI_PHY_STATUS_READY) break; usleep_range(1000, 2000); } /* steady state not achieved */ if (tries == 0) { DRM_ERROR("hdmiphy's pll could not reach steady state.\n"); hdmi_regs_dump(hdata, "timing apply"); } clk_disable(hdata->res.sclk_hdmi); 
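	/*
	 * With the PHY PLL confirmed locked, re-parent sclk_hdmi from the
	 * pixel clock to the PHY-generated clock; the surrounding
	 * clk_disable()/clk_enable() pair brackets the switch.
	 */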
clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy); clk_enable(hdata->res.sclk_hdmi); /* enable HDMI and timing generator */ hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN); if (core->int_pro_mode[0]) hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN | HDMI_FIELD_EN); else hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN); } static void hdmi_timing_apply(struct hdmi_context *hdata) { if (hdata->type == HDMI_TYPE13) hdmi_v13_timing_apply(hdata); else hdmi_v14_timing_apply(hdata); } static void hdmiphy_conf_reset(struct hdmi_context *hdata) { u8 buffer[2]; u32 reg; clk_disable(hdata->res.sclk_hdmi); clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel); clk_enable(hdata->res.sclk_hdmi); /* operation mode */ buffer[0] = 0x1f; buffer[1] = 0x00; if (hdata->hdmiphy_port) i2c_master_send(hdata->hdmiphy_port, buffer, 2); if (hdata->type == HDMI_TYPE13) reg = HDMI_V13_PHY_RSTOUT; else reg = HDMI_PHY_RSTOUT; /* reset hdmiphy */ hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT); usleep_range(10000, 12000); hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT); usleep_range(10000, 12000); } static void hdmiphy_poweron(struct hdmi_context *hdata) { DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); if (hdata->type == HDMI_TYPE14) hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0, HDMI_PHY_POWER_OFF_EN); } static void hdmiphy_poweroff(struct hdmi_context *hdata) { DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); if (hdata->type == HDMI_TYPE14) hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0, HDMI_PHY_POWER_OFF_EN); } static void hdmiphy_conf_apply(struct hdmi_context *hdata) { const u8 *hdmiphy_data; u8 buffer[32]; u8 operation[2]; u8 read_buffer[32] = {0, }; int ret; int i; if (!hdata->hdmiphy_port) { DRM_ERROR("hdmiphy is not attached\n"); return; } /* pixel clock */ i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock); if (i < 0) { DRM_ERROR("failed to find hdmiphy conf\n"); return; } if (hdata->type == HDMI_TYPE13) hdmiphy_data = hdmiphy_v13_configs[i].conf; else hdmiphy_data = hdmiphy_v14_configs[i].conf; memcpy(buffer, hdmiphy_data, 32); ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32); if (ret != 32) { DRM_ERROR("failed to configure HDMIPHY via I2C\n"); return; } usleep_range(10000, 12000); /* operation mode */ operation[0] = 0x1f; operation[1] = 0x80; ret = i2c_master_send(hdata->hdmiphy_port, operation, 2); if (ret != 2) { DRM_ERROR("failed to enable hdmiphy\n"); return; } ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32); if (ret < 0) { DRM_ERROR("failed to read hdmiphy config\n"); return; } for (i = 0; i < ret; i++) DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - " "recv [0x%02x]\n", i, buffer[i], read_buffer[i]); } static void hdmi_conf_apply(struct hdmi_context *hdata) { DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); hdmiphy_conf_reset(hdata); hdmiphy_conf_apply(hdata); mutex_lock(&hdata->hdmi_mutex); hdmi_conf_reset(hdata); hdmi_conf_init(hdata); mutex_unlock(&hdata->hdmi_mutex); hdmi_audio_init(hdata); /* setting core registers */ hdmi_timing_apply(hdata); hdmi_audio_control(hdata, true); hdmi_regs_dump(hdata, "start"); } static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value) { int i; BUG_ON(num_bytes > 4); for (i = 0; i < num_bytes; i++) reg_pair[i] = (value >> (8 * i)) & 0xff; } static void hdmi_v13_mode_set(struct hdmi_context *hdata, struct drm_display_mode *m) { struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core; struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg; unsigned int val; 
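	/*
	 * hdmi_set_reg() above packs a value into little-endian bytes:
	 * hdmi_set_reg(core->h_blank, 2, 0x1234) stores reg_pair[0] = 0x34
	 * and reg_pair[1] = 0x12, matching the _0/_1 register writes in the
	 * timing-apply functions.
	 */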
hdata->mode_conf.cea_video_id = drm_match_cea_mode((struct drm_display_mode *)m); hdata->mode_conf.pixel_clock = m->clock * 1000; hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay); hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal); val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0; hdmi_set_reg(core->vsync_pol, 1, val); val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0; hdmi_set_reg(core->int_pro_mode, 1, val); val = (m->hsync_start - m->hdisplay - 2); val |= ((m->hsync_end - m->hdisplay - 2) << 10); val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20; hdmi_set_reg(core->h_sync_gen, 3, val); /* * Quirk requirement for exynos HDMI IP design, * 2 pixels less than the actual calculation for hsync_start * and end. */ /* Following values & calculations differ for different type of modes */ if (m->flags & DRM_MODE_FLAG_INTERLACE) { /* Interlaced Mode */ val = ((m->vsync_end - m->vdisplay) / 2); val |= ((m->vsync_start - m->vdisplay) / 2) << 12; hdmi_set_reg(core->v_sync_gen1, 3, val); val = m->vtotal / 2; val |= ((m->vtotal - m->vdisplay) / 2) << 11; hdmi_set_reg(core->v_blank, 3, val); val = (m->vtotal + ((m->vsync_end - m->vsync_start) * 4) + 5) / 2; val |= m->vtotal << 11; hdmi_set_reg(core->v_blank_f, 3, val); val = ((m->vtotal / 2) + 7); val |= ((m->vtotal / 2) + 2) << 12; hdmi_set_reg(core->v_sync_gen2, 3, val); val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay)); val |= ((m->htotal / 2) + (m->hsync_start - m->hdisplay)) << 12; hdmi_set_reg(core->v_sync_gen3, 3, val); hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2); hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2); hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/ } else { /* Progressive Mode */ val = m->vtotal; val |= (m->vtotal - m->vdisplay) << 11; hdmi_set_reg(core->v_blank, 3, val); hdmi_set_reg(core->v_blank_f, 3, 0); val = (m->vsync_end - m->vdisplay); val |= ((m->vsync_start - m->vdisplay) << 12); hdmi_set_reg(core->v_sync_gen1, 3, val); hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value */ hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value */ hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay); hdmi_set_reg(tg->vact_sz, 2, m->vdisplay); hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */ } /* Timing generator registers */ hdmi_set_reg(tg->cmd, 1, 0x0); hdmi_set_reg(tg->h_fsz, 2, m->htotal); hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay); hdmi_set_reg(tg->hact_sz, 2, m->hdisplay); hdmi_set_reg(tg->v_fsz, 2, m->vtotal); hdmi_set_reg(tg->vsync, 2, 0x1); hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */ hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */ hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */ } static void hdmi_v14_mode_set(struct hdmi_context *hdata, struct drm_display_mode *m) { struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg; struct hdmi_v14_core_regs *core = &hdata->mode_conf.conf.v14_conf.core; hdata->mode_conf.cea_video_id = drm_match_cea_mode((struct drm_display_mode *)m); hdata->mode_conf.pixel_clock = m->clock * 1000; hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay); hdmi_set_reg(core->v_line, 2, m->vtotal); hdmi_set_reg(core->h_line, 2, m->htotal); hdmi_set_reg(core->hsync_pol, 1, (m->flags & DRM_MODE_FLAG_NHSYNC) ? 
1 : 0); hdmi_set_reg(core->vsync_pol, 1, (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0); hdmi_set_reg(core->int_pro_mode, 1, (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0); /* * Quirk requirement for exynos 5 HDMI IP design, * 2 pixels less than the actual calculation for hsync_start * and end. */ /* Following values & calculations differ for different type of modes */ if (m->flags & DRM_MODE_FLAG_INTERLACE) { /* Interlaced Mode */ hdmi_set_reg(core->v_sync_line_bef_2, 2, (m->vsync_end - m->vdisplay) / 2); hdmi_set_reg(core->v_sync_line_bef_1, 2, (m->vsync_start - m->vdisplay) / 2); hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2); hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2); hdmi_set_reg(core->v_blank_f0, 2, (m->vtotal + ((m->vsync_end - m->vsync_start) * 4) + 5) / 2); hdmi_set_reg(core->v_blank_f1, 2, m->vtotal); hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7); hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2); hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, (m->htotal / 2) + (m->hsync_start - m->hdisplay)); hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, (m->htotal / 2) + (m->hsync_start - m->hdisplay)); hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2); hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2); hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/ hdmi_set_reg(tg->vact_st3, 2, 0x0); hdmi_set_reg(tg->vact_st4, 2, 0x0); } else { /* Progressive Mode */ hdmi_set_reg(core->v_sync_line_bef_2, 2, m->vsync_end - m->vdisplay); hdmi_set_reg(core->v_sync_line_bef_1, 2, m->vsync_start - m->vdisplay); hdmi_set_reg(core->v2_blank, 2, m->vtotal); hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay); hdmi_set_reg(core->v_blank_f0, 2, 0xffff); hdmi_set_reg(core->v_blank_f1, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff); hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay); hdmi_set_reg(tg->vact_sz, 2, m->vdisplay); hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */ hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */ hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */ } /* Following values & calculations are same irrespective of mode type */ hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2); hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2); hdmi_set_reg(core->vact_space_1, 2, 0xffff); hdmi_set_reg(core->vact_space_2, 2, 0xffff); hdmi_set_reg(core->vact_space_3, 2, 0xffff); hdmi_set_reg(core->vact_space_4, 2, 0xffff); hdmi_set_reg(core->vact_space_5, 2, 0xffff); hdmi_set_reg(core->vact_space_6, 2, 0xffff); hdmi_set_reg(core->v_blank_f2, 2, 0xffff); hdmi_set_reg(core->v_blank_f3, 2, 0xffff); hdmi_set_reg(core->v_blank_f4, 2, 0xffff); hdmi_set_reg(core->v_blank_f5, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff); /* Timing generator registers */ hdmi_set_reg(tg->cmd, 1, 0x0); hdmi_set_reg(tg->h_fsz, 2, m->htotal); hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay); hdmi_set_reg(tg->hact_sz, 2, m->hdisplay); hdmi_set_reg(tg->v_fsz, 2, m->vtotal); 
hdmi_set_reg(tg->vsync, 2, 0x1); hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */ hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */ hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->tg_3d, 1, 0x0); } static void hdmi_mode_set(void *ctx, void *mode) { struct hdmi_context *hdata = ctx; struct drm_display_mode *m = mode; DRM_DEBUG_KMS("[%s]: xres=%d, yres=%d, refresh=%d, intl=%s\n", __func__, m->hdisplay, m->vdisplay, m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ? "INTERLACED" : "PROGRESSIVE"); if (hdata->type == HDMI_TYPE13) hdmi_v13_mode_set(hdata, mode); else hdmi_v14_mode_set(hdata, mode); } static void hdmi_get_max_resol(void *ctx, unsigned int *width, unsigned int *height) { DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); *width = MAX_WIDTH; *height = MAX_HEIGHT; } static void hdmi_commit(void *ctx) { struct hdmi_context *hdata = ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); mutex_lock(&hdata->hdmi_mutex); if (!hdata->powered) { mutex_unlock(&hdata->hdmi_mutex); return; } mutex_unlock(&hdata->hdmi_mutex); hdmi_conf_apply(hdata); } static void hdmi_poweron(struct hdmi_context *hdata) { struct hdmi_resources *res = &hdata->res; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); mutex_lock(&hdata->hdmi_mutex); if (hdata->powered) { mutex_unlock(&hdata->hdmi_mutex); return; } hdata->powered = true; mutex_unlock(&hdata->hdmi_mutex); regulator_bulk_enable(res->regul_count, res->regul_bulk); clk_enable(res->hdmiphy); clk_enable(res->hdmi); clk_enable(res->sclk_hdmi); hdmiphy_poweron(hdata); } static void hdmi_poweroff(struct hdmi_context *hdata) { struct hdmi_resources *res = &hdata->res; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); mutex_lock(&hdata->hdmi_mutex); if (!hdata->powered) goto out; mutex_unlock(&hdata->hdmi_mutex); /* * The TV power domain needs any condition of hdmiphy to turn off and * its reset state seems to meet the condition.
*/ hdmiphy_conf_reset(hdata); hdmiphy_poweroff(hdata); clk_disable(res->sclk_hdmi); clk_disable(res->hdmi); clk_disable(res->hdmiphy); regulator_bulk_disable(res->regul_count, res->regul_bulk); mutex_lock(&hdata->hdmi_mutex); hdata->powered = false; out: mutex_unlock(&hdata->hdmi_mutex); } static void hdmi_dpms(void *ctx, int mode) { struct hdmi_context *hdata = ctx; DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode); switch (mode) { case DRM_MODE_DPMS_ON: if (pm_runtime_suspended(hdata->dev)) pm_runtime_get_sync(hdata->dev); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (!pm_runtime_suspended(hdata->dev)) pm_runtime_put_sync(hdata->dev); break; default: DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode); break; } } static struct exynos_hdmi_ops hdmi_ops = { /* display */ .is_connected = hdmi_is_connected, .get_edid = hdmi_get_edid, .check_timing = hdmi_check_timing, /* manager */ .mode_set = hdmi_mode_set, .get_max_resol = hdmi_get_max_resol, .commit = hdmi_commit, .dpms = hdmi_dpms, }; static irqreturn_t hdmi_irq_thread(int irq, void *arg) { struct exynos_drm_hdmi_context *ctx = arg; struct hdmi_context *hdata = ctx->ctx; mutex_lock(&hdata->hdmi_mutex); hdata->hpd = gpio_get_value(hdata->hpd_gpio); mutex_unlock(&hdata->hdmi_mutex); if (ctx->drm_dev) drm_helper_hpd_irq_event(ctx->drm_dev); return IRQ_HANDLED; } static int hdmi_resources_init(struct hdmi_context *hdata) { struct device *dev = hdata->dev; struct hdmi_resources *res = &hdata->res; static char *supply[] = { "hdmi-en", "vdd", "vdd_osc", "vdd_pll", }; int i, ret; DRM_DEBUG_KMS("HDMI resource init\n"); memset(res, 0, sizeof(*res)); /* get clocks, power */ res->hdmi = devm_clk_get(dev, "hdmi"); if (IS_ERR(res->hdmi)) { DRM_ERROR("failed to get clock 'hdmi'\n"); goto fail; } res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); if (IS_ERR(res->sclk_hdmi)) { DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); goto fail; } res->sclk_pixel = devm_clk_get(dev, "sclk_pixel"); if (IS_ERR(res->sclk_pixel)) { DRM_ERROR("failed to get clock 'sclk_pixel'\n"); goto fail; } res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy"); if (IS_ERR(res->sclk_hdmiphy)) { DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); goto fail; } res->hdmiphy = devm_clk_get(dev, "hdmiphy"); if (IS_ERR(res->hdmiphy)) { DRM_ERROR("failed to get clock 'hdmiphy'\n"); goto fail; } clk_set_parent(res->sclk_hdmi, res->sclk_pixel); res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) * sizeof(res->regul_bulk[0]), GFP_KERNEL); if (!res->regul_bulk) { DRM_ERROR("failed to get memory for regulators\n"); goto fail; } for (i = 0; i < ARRAY_SIZE(supply); ++i) { res->regul_bulk[i].supply = supply[i]; res->regul_bulk[i].consumer = NULL; } ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); if (ret) { DRM_ERROR("failed to get regulators\n"); goto fail; } res->regul_count = ARRAY_SIZE(supply); return 0; fail: DRM_ERROR("HDMI resource init - failed\n"); return -ENODEV; } static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy; void hdmi_attach_ddc_client(struct i2c_client *ddc) { if (ddc) hdmi_ddc = ddc; } void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy) { if (hdmiphy) hdmi_hdmiphy = hdmiphy; } #ifdef CONFIG_OF static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata (struct device *dev) { struct device_node *np = dev->of_node; struct s5p_hdmi_platform_data *pd; enum of_gpio_flags flags; u32 value; pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); if (!pd) { DRM_ERROR("memory allocation for pdata failed\n"); goto 
err_data; } if (!of_find_property(np, "hpd-gpio", &value)) { DRM_ERROR("no hpd gpio property found\n"); goto err_data; } pd->hpd_gpio = of_get_named_gpio_flags(np, "hpd-gpio", 0, &flags); return pd; err_data: return NULL; } #else static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata (struct device *dev) { return NULL; } #endif static struct platform_device_id hdmi_driver_types[] = { { .name = "s5pv210-hdmi", .driver_data = HDMI_TYPE13, }, { .name = "exynos4-hdmi", .driver_data = HDMI_TYPE13, }, { .name = "exynos4-hdmi14", .driver_data = HDMI_TYPE14, }, { .name = "exynos5-hdmi", .driver_data = HDMI_TYPE14, }, { /* end node */ } }; #ifdef CONFIG_OF static struct of_device_id hdmi_match_types[] = { { .compatible = "samsung,exynos5-hdmi", .data = (void *)HDMI_TYPE14, }, { /* end node */ } }; #endif static int hdmi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct exynos_drm_hdmi_context *drm_hdmi_ctx; struct hdmi_context *hdata; struct s5p_hdmi_platform_data *pdata; struct resource *res; int ret; DRM_DEBUG_KMS("[%d]\n", __LINE__); if (dev->of_node) { pdata = drm_hdmi_dt_parse_pdata(dev); if (IS_ERR(pdata)) { DRM_ERROR("failed to parse dt\n"); return PTR_ERR(pdata); } } else { pdata = dev->platform_data; } if (!pdata) { DRM_ERROR("no platform data specified\n"); return -EINVAL; } drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL); if (!drm_hdmi_ctx) { DRM_ERROR("failed to allocate common hdmi context.\n"); return -ENOMEM; } hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); if (!hdata) { DRM_ERROR("out of memory\n"); return -ENOMEM; } mutex_init(&hdata->hdmi_mutex); drm_hdmi_ctx->ctx = (void *)hdata; hdata->parent_ctx = (void *)drm_hdmi_ctx; platform_set_drvdata(pdev, drm_hdmi_ctx); if (dev->of_node) { const struct of_device_id *match; match = of_match_node(of_match_ptr(hdmi_match_types), dev->of_node); if (match == NULL) return -ENODEV; hdata->type = (enum hdmi_type)match->data; } else { hdata->type = (enum hdmi_type)platform_get_device_id (pdev)->driver_data; } hdata->hpd_gpio = pdata->hpd_gpio; hdata->dev = dev; ret = hdmi_resources_init(hdata); if (ret) { DRM_ERROR("hdmi_resources_init failed\n"); return -EINVAL; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdata->regs = devm_ioremap_resource(dev, res); if (IS_ERR(hdata->regs)) return PTR_ERR(hdata->regs); ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD"); if (ret) { DRM_ERROR("failed to request HPD gpio\n"); return ret; } /* DDC i2c driver */ if (i2c_add_driver(&ddc_driver)) { DRM_ERROR("failed to register ddc i2c driver\n"); return -ENOENT; } hdata->ddc_port = hdmi_ddc; /* hdmiphy i2c driver */ if (i2c_add_driver(&hdmiphy_driver)) { DRM_ERROR("failed to register hdmiphy i2c driver\n"); ret = -ENOENT; goto err_ddc; } hdata->hdmiphy_port = hdmi_hdmiphy; hdata->irq = gpio_to_irq(hdata->hpd_gpio); if (hdata->irq < 0) { DRM_ERROR("failed to get GPIO irq\n"); ret = hdata->irq; goto err_hdmiphy; } hdata->hpd = gpio_get_value(hdata->hpd_gpio); ret = devm_request_threaded_irq(dev, hdata->irq, NULL, hdmi_irq_thread, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "hdmi", drm_hdmi_ctx); if (ret) { DRM_ERROR("failed to register hdmi interrupt\n"); goto err_hdmiphy; } /* Attach HDMI Driver to common hdmi. */ exynos_hdmi_drv_attach(drm_hdmi_ctx); /* register specific callbacks to common hdmi. 
*/ exynos_hdmi_ops_register(&hdmi_ops); pm_runtime_enable(dev); return 0; err_hdmiphy: i2c_del_driver(&hdmiphy_driver); err_ddc: i2c_del_driver(&ddc_driver); return ret; } static int hdmi_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); pm_runtime_disable(dev); /* hdmiphy i2c driver */ i2c_del_driver(&hdmiphy_driver); /* DDC i2c driver */ i2c_del_driver(&ddc_driver); return 0; } #ifdef CONFIG_PM_SLEEP static int hdmi_suspend(struct device *dev) { struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); struct hdmi_context *hdata = ctx->ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); disable_irq(hdata->irq); hdata->hpd = false; if (ctx->drm_dev) drm_helper_hpd_irq_event(ctx->drm_dev); if (pm_runtime_suspended(dev)) { DRM_DEBUG_KMS("%s : Already suspended\n", __func__); return 0; } hdmi_poweroff(hdata); return 0; } static int hdmi_resume(struct device *dev) { struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); struct hdmi_context *hdata = ctx->ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); hdata->hpd = gpio_get_value(hdata->hpd_gpio); enable_irq(hdata->irq); if (!pm_runtime_suspended(dev)) { DRM_DEBUG_KMS("%s : Already resumed\n", __func__); return 0; } hdmi_poweron(hdata); return 0; } #endif #ifdef CONFIG_PM_RUNTIME static int hdmi_runtime_suspend(struct device *dev) { struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); struct hdmi_context *hdata = ctx->ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); hdmi_poweroff(hdata); return 0; } static int hdmi_runtime_resume(struct device *dev) { struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); struct hdmi_context *hdata = ctx->ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); hdmi_poweron(hdata); return 0; } #endif static const struct dev_pm_ops hdmi_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume) SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL) }; struct platform_driver hdmi_driver = { .probe = hdmi_probe, .remove = hdmi_remove, .id_table = hdmi_driver_types, .driver = { .name = "exynos-hdmi", .owner = THIS_MODULE, .pm = &hdmi_pm_ops, .of_match_table = of_match_ptr(hdmi_match_types), }, };
gpl-2.0
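/*
 * A minimal sketch of the hot-plug-detect pattern the exynos HDMI probe
 * above relies on: the HPD GPIO becomes a threaded IRQ on both edges,
 * and the thread simply samples the pin and notifies DRM. This is a
 * hedged illustration, not the driver itself; names prefixed "my_" are
 * hypothetical, while gpio_to_irq(), devm_request_threaded_irq(),
 * gpio_get_value() and drm_helper_hpd_irq_event() are the real kernel
 * APIs the driver calls.
 */
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <drm/drm_crtc_helper.h>

struct my_hpd_ctx {
	struct drm_device *drm_dev;	/* set once the DRM core has bound */
	unsigned int hpd_gpio;
	bool hpd;
};

static irqreturn_t my_hpd_thread(int irq, void *arg)
{
	struct my_hpd_ctx *ctx = arg;

	ctx->hpd = gpio_get_value(ctx->hpd_gpio);
	if (ctx->drm_dev)
		drm_helper_hpd_irq_event(ctx->drm_dev); /* re-probe connectors */
	return IRQ_HANDLED;
}

static int my_hpd_init(struct device *dev, struct my_hpd_ctx *ctx)
{
	int irq = gpio_to_irq(ctx->hpd_gpio);

	if (irq < 0)
		return irq;
	/* hard handler is NULL: all work runs in the sleepable thread */
	return devm_request_threaded_irq(dev, irq, NULL, my_hpd_thread,
					 IRQF_TRIGGER_RISING |
					 IRQF_TRIGGER_FALLING |
					 IRQF_ONESHOT, "my-hpd", ctx);
}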
peterzhu0503/kernel_rk3168_86v_yk
arch/arm/mach-s3c64xx/mach-real6410.c
2062
7760
/* linux/arch/arm/mach-s3c64xx/mach-real6410.c * * Copyright 2010 Darius Augulis <augulis.darius@gmail.com> * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/dm9000.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/platform_device.h> #include <linux/serial_core.h> #include <linux/types.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/map.h> #include <mach/regs-fb.h> #include <mach/regs-gpio.h> #include <mach/regs-modem.h> #include <mach/regs-srom.h> #include <mach/s3c6410.h> #include <plat/adc.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/fb.h> #include <plat/nand.h> #include <plat/regs-serial.h> #include <plat/ts.h> #include <video/platform_lcd.h> #define UCON S3C2410_UCON_DEFAULT #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB) #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE) static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [1] = { .hwport = 1, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [2] = { .hwport = 2, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [3] = { .hwport = 3, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, }; /* DM9000AEP 10/100 ethernet controller */ static struct resource real6410_dm9k_resource[] = { [0] = { .start = S3C64XX_PA_XM0CSN1, .end = S3C64XX_PA_XM0CSN1 + 1, .flags = IORESOURCE_MEM }, [1] = { .start = S3C64XX_PA_XM0CSN1 + 4, .end = S3C64XX_PA_XM0CSN1 + 5, .flags = IORESOURCE_MEM }, [2] = { .start = S3C_EINT(7), .end = S3C_EINT(7), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL } }; static struct dm9000_plat_data real6410_dm9k_pdata = { .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM), }; static struct platform_device real6410_device_eth = { .name = "dm9000", .id = -1, .num_resources = ARRAY_SIZE(real6410_dm9k_resource), .resource = real6410_dm9k_resource, .dev = { .platform_data = &real6410_dm9k_pdata, }, }; static struct s3c_fb_pd_win real6410_fb_win[] = { { .win_mode = { /* 4.3" 480x272 */ .left_margin = 3, .right_margin = 2, .upper_margin = 1, .lower_margin = 1, .hsync_len = 40, .vsync_len = 1, .xres = 480, .yres = 272, }, .max_bpp = 32, .default_bpp = 16, }, { .win_mode = { /* 7.0" 800x480 */ .left_margin = 8, .right_margin = 13, .upper_margin = 7, .lower_margin = 5, .hsync_len = 3, .vsync_len = 1, .xres = 800, .yres = 480, }, .max_bpp = 32, .default_bpp = 16, }, }; static struct s3c_fb_platdata real6410_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, .win[0] = &real6410_fb_win[0], .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, }; static struct mtd_partition real6410_nand_part[] = { [0] = { .name = "uboot", .size = SZ_1M, .offset = 0, }, [1] = { .name = "kernel", .size = SZ_2M, .offset = SZ_1M, }, [2] = { .name = "rootfs", .size = MTDPART_SIZ_FULL, .offset = SZ_1M + SZ_2M, }, }; static struct s3c2410_nand_set real6410_nand_sets[] = { [0] = { .name = "nand", .nr_chips 
= 1, .nr_partitions = ARRAY_SIZE(real6410_nand_part), .partitions = real6410_nand_part, }, }; static struct s3c2410_platform_nand real6410_nand_info = { .tacls = 25, .twrph0 = 55, .twrph1 = 40, .nr_sets = ARRAY_SIZE(real6410_nand_sets), .sets = real6410_nand_sets, }; static struct platform_device *real6410_devices[] __initdata = { &real6410_device_eth, &s3c_device_hsmmc0, &s3c_device_hsmmc1, &s3c_device_fb, &s3c_device_nand, &s3c_device_adc, &s3c_device_ts, &s3c_device_ohci, }; static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = { .delay = 10000, .presc = 49, .oversampling_shift = 2, }; static void __init real6410_map_io(void) { u32 tmp; s3c64xx_init_io(NULL, 0); s3c24xx_init_clocks(12000000); s3c24xx_init_uarts(real6410_uartcfgs, ARRAY_SIZE(real6410_uartcfgs)); /* set the LCD type */ tmp = __raw_readl(S3C64XX_SPCON); tmp &= ~S3C64XX_SPCON_LCD_SEL_MASK; tmp |= S3C64XX_SPCON_LCD_SEL_RGB; __raw_writel(tmp, S3C64XX_SPCON); /* remove the LCD bypass */ tmp = __raw_readl(S3C64XX_MODEM_MIFPCON); tmp &= ~MIFPCON_LCD_BYPASS; __raw_writel(tmp, S3C64XX_MODEM_MIFPCON); } /* * real6410_features string * * 0-9 LCD configuration * */ static char real6410_features_str[12] __initdata = "0"; static int __init real6410_features_setup(char *str) { if (str) strlcpy(real6410_features_str, str, sizeof(real6410_features_str)); return 1; } __setup("real6410=", real6410_features_setup); #define FEATURE_SCREEN (1 << 0) struct real6410_features_t { int done; int lcd_index; }; static void real6410_parse_features( struct real6410_features_t *features, const char *features_str) { const char *fp = features_str; features->done = 0; features->lcd_index = 0; while (*fp) { char f = *fp++; switch (f) { case '0'...'9': /* tft screen */ if (features->done & FEATURE_SCREEN) { printk(KERN_INFO "REAL6410: '%c' ignored, " "screen type already set\n", f); } else { int li = f - '0'; if (li >= ARRAY_SIZE(real6410_fb_win)) printk(KERN_INFO "REAL6410: '%c' out " "of range LCD mode\n", f); else { features->lcd_index = li; } } features->done |= FEATURE_SCREEN; break; } } } static void __init real6410_machine_init(void) { u32 cs1; struct real6410_features_t features = { 0 }; printk(KERN_INFO "REAL6410: Option string real6410=%s\n", real6410_features_str); /* Parse the feature string */ real6410_parse_features(&features, real6410_features_str); real6410_lcd_pdata.win[0] = &real6410_fb_win[features.lcd_index]; printk(KERN_INFO "REAL6410: selected LCD display is %dx%d\n", real6410_lcd_pdata.win[0]->win_mode.xres, real6410_lcd_pdata.win[0]->win_mode.yres); s3c_fb_set_platdata(&real6410_lcd_pdata); s3c_nand_set_platdata(&real6410_nand_info); s3c24xx_ts_set_platdata(&s3c_ts_platform); /* configure nCS1 width to 16 bits */ cs1 = __raw_readl(S3C64XX_SROM_BW) & ~(S3C64XX_SROM_BW__CS_MASK << S3C64XX_SROM_BW__NCS1__SHIFT); cs1 |= ((1 << S3C64XX_SROM_BW__DATAWIDTH__SHIFT) | (1 << S3C64XX_SROM_BW__WAITENABLE__SHIFT) | (1 << S3C64XX_SROM_BW__BYTEENABLE__SHIFT)) << S3C64XX_SROM_BW__NCS1__SHIFT; __raw_writel(cs1, S3C64XX_SROM_BW); /* set timing for nCS1 suitable for ethernet chip */ __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) | (6 << S3C64XX_SROM_BCX__TACP__SHIFT) | (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) | (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) | (13 << S3C64XX_SROM_BCX__TACC__SHIFT) | (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) | (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1); gpio_request(S3C64XX_GPF(15), "LCD power"); platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices)); } MACHINE_START(REAL6410, "REAL6410") /* 
Maintainer: Darius Augulis <augulis.darius@gmail.com> */ .boot_params = S3C64XX_PA_SDRAM + 0x100, .init_irq = s3c6410_init_irq, .map_io = real6410_map_io, .init_machine = real6410_machine_init, .timer = &s3c24xx_timer, MACHINE_END
gpl-2.0
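/*
 * The real6410_parse_features() logic above is easy to check in user
 * space. This is a hedged, standalone re-implementation for
 * experimenting with the "real6410=" option string; NWIN mirrors
 * ARRAY_SIZE(real6410_fb_win) and everything else is hypothetical
 * scaffolding, not kernel code.
 */
#include <stdio.h>

#define NWIN 2			/* two LCD modes: 480x272 and 800x480 */
#define FEATURE_SCREEN (1 << 0)

struct features { int done; int lcd_index; };

static void parse_features(struct features *f, const char *s)
{
	f->done = 0;
	f->lcd_index = 0;
	while (*s) {
		char c = *s++;

		if (c < '0' || c > '9')
			continue;
		if (f->done & FEATURE_SCREEN)
			fprintf(stderr, "'%c' ignored, screen type already set\n", c);
		else if (c - '0' >= NWIN)
			fprintf(stderr, "'%c' out of range LCD mode\n", c);
		else
			f->lcd_index = c - '0';
		/* set even on a bad digit, matching the kernel code */
		f->done |= FEATURE_SCREEN;
	}
}

int main(void)
{
	struct features f;

	parse_features(&f, "1");
	printf("selected LCD mode %d\n", f.lcd_index);	/* 1 -> 800x480 */
	return 0;
}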
jetonbacaj/SomeKernel_G920P_PB6
drivers/uio/uio_pdrv.c
2318
2371
/*
 * drivers/uio/uio_pdrv.c
 *
 * Copyright (C) 2008 by Digi International Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/stringify.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DRIVER_NAME "uio_pdrv"

struct uio_platdata {
	struct uio_info *uioinfo;
};

static int uio_pdrv_probe(struct platform_device *pdev)
{
	struct uio_info *uioinfo = pdev->dev.platform_data;
	struct uio_platdata *pdata;
	struct uio_mem *uiomem;
	int ret = -ENODEV;
	int i;

	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
		dev_dbg(&pdev->dev, "%s: err_uioinfo\n", __func__);
		goto err_uioinfo;
	}

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		ret = -ENOMEM;
		dev_dbg(&pdev->dev, "%s: err_alloc_pdata\n", __func__);
		goto err_alloc_pdata;
	}

	pdata->uioinfo = uioinfo;

	uiomem = &uioinfo->mem[0];

	for (i = 0; i < pdev->num_resources; ++i) {
		struct resource *r = &pdev->resource[i];

		if (r->flags != IORESOURCE_MEM)
			continue;

		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" I/O memory resources.\n");
			break;
		}

		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = r->start;
		uiomem->size = resource_size(r);
		uiomem->name = r->name;
		++uiomem;
	}

	while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
		uiomem->size = 0;
		++uiomem;
	}

	pdata->uioinfo->priv = pdata;

	ret = uio_register_device(&pdev->dev, pdata->uioinfo);

	if (ret) {
		kfree(pdata);
err_alloc_pdata:
err_uioinfo:
		return ret;
	}

	platform_set_drvdata(pdev, pdata);

	return 0;
}

static int uio_pdrv_remove(struct platform_device *pdev)
{
	struct uio_platdata *pdata = platform_get_drvdata(pdev);

	uio_unregister_device(pdata->uioinfo);

	kfree(pdata);

	return 0;
}

static struct platform_driver uio_pdrv = {
	.probe = uio_pdrv_probe,
	.remove = uio_pdrv_remove,
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
	},
};

module_platform_driver(uio_pdrv);

MODULE_AUTHOR("Uwe Kleine-Koenig");
MODULE_DESCRIPTION("Userspace I/O platform driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
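/*
 * What uio_pdrv exports to user space: each IORESOURCE_MEM resource
 * becomes /sys/class/uio/uioX/maps/mapN and is reachable by mmap()ing
 * /dev/uioX at offset N * page_size; a blocking read() returns the
 * interrupt count if the uio_info supplied an IRQ. A hedged consumer
 * sketch, assuming the device is uio0 and map0 is at least one page:
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/uio0");
		return 1;
	}

	/* the offset selects the map index, not a byte offset */
	void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0 * getpagesize());
	if (regs == MAP_FAILED) {
		perror("mmap map0");
		return 1;
	}

	uint32_t count;
	/* blocks until the next interrupt; yields the total event count */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("interrupt count: %u\n", count);

	munmap(regs, 4096);
	close(fd);
	return 0;
}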
danil39/Alto45
drivers/leds/leds-clevo-mail.c
2318
5509
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/leds.h> #include <linux/io.h> #include <linux/dmi.h> #include <linux/i8042.h> #define CLEVO_MAIL_LED_OFF 0x0084 #define CLEVO_MAIL_LED_BLINK_1HZ 0x008A #define CLEVO_MAIL_LED_BLINK_0_5HZ 0x0083 MODULE_AUTHOR("Márton Németh <nm127@freemail.hu>"); MODULE_DESCRIPTION("Clevo mail LED driver"); MODULE_LICENSE("GPL"); static bool __initdata nodetect; module_param_named(nodetect, nodetect, bool, 0); MODULE_PARM_DESC(nodetect, "Skip DMI hardware detection"); static struct platform_device *pdev; static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id) { pr_info("'%s' found\n", id->ident); return 1; } /* * struct clevo_mail_led_dmi_table - List of known good models * * Contains the known good models this driver is compatible with. * When adding a new model try to be as strict as possible. This * makes it possible to keep the false positives (the model is * detected as working, but in reality it is not) as low as * possible. */ static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = { { .callback = clevo_mail_led_dmi_callback, .ident = "Clevo D410J", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "VIA"), DMI_MATCH(DMI_PRODUCT_NAME, "K8N800"), DMI_MATCH(DMI_PRODUCT_VERSION, "VT8204B") } }, { .callback = clevo_mail_led_dmi_callback, .ident = "Clevo M5x0N", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), DMI_MATCH(DMI_PRODUCT_NAME, "M5x0N") } }, { .callback = clevo_mail_led_dmi_callback, .ident = "Clevo M5x0V", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "CLEVO Co. "), DMI_MATCH(DMI_BOARD_NAME, "M5X0V "), DMI_MATCH(DMI_PRODUCT_VERSION, "VT6198") } }, { .callback = clevo_mail_led_dmi_callback, .ident = "Clevo D400P", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Clevo"), DMI_MATCH(DMI_BOARD_NAME, "D400P"), DMI_MATCH(DMI_BOARD_VERSION, "Rev.A"), DMI_MATCH(DMI_PRODUCT_VERSION, "0106") } }, { .callback = clevo_mail_led_dmi_callback, .ident = "Clevo D410V", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Clevo, Co."), DMI_MATCH(DMI_BOARD_NAME, "D400V/D470V"), DMI_MATCH(DMI_BOARD_VERSION, "SS78B"), DMI_MATCH(DMI_PRODUCT_VERSION, "Rev. A1") } }, { } }; MODULE_DEVICE_TABLE(dmi, clevo_mail_led_dmi_table); static void clevo_mail_led_set(struct led_classdev *led_cdev, enum led_brightness value) { i8042_lock_chip(); if (value == LED_OFF) i8042_command(NULL, CLEVO_MAIL_LED_OFF); else if (value <= LED_HALF) i8042_command(NULL, CLEVO_MAIL_LED_BLINK_0_5HZ); else i8042_command(NULL, CLEVO_MAIL_LED_BLINK_1HZ); i8042_unlock_chip(); } static int clevo_mail_led_blink(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { int status = -EINVAL; i8042_lock_chip(); if (*delay_on == 0 /* ms */ && *delay_off == 0 /* ms */) { /* Special case: the leds subsystem requested us to * chose one user friendly blinking of the LED, and * start it. Let's blink the led slowly (0.5Hz). 
*/ *delay_on = 1000; /* ms */ *delay_off = 1000; /* ms */ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_0_5HZ); status = 0; } else if (*delay_on == 500 /* ms */ && *delay_off == 500 /* ms */) { /* blink the led with 1Hz */ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_1HZ); status = 0; } else if (*delay_on == 1000 /* ms */ && *delay_off == 1000 /* ms */) { /* blink the led with 0.5Hz */ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_0_5HZ); status = 0; } else { pr_debug("clevo_mail_led_blink(..., %lu, %lu)," " returning -EINVAL (unsupported)\n", *delay_on, *delay_off); } i8042_unlock_chip(); return status; } static struct led_classdev clevo_mail_led = { .name = "clevo::mail", .brightness_set = clevo_mail_led_set, .blink_set = clevo_mail_led_blink, .flags = LED_CORE_SUSPENDRESUME, }; static int clevo_mail_led_probe(struct platform_device *pdev) { return led_classdev_register(&pdev->dev, &clevo_mail_led); } static int clevo_mail_led_remove(struct platform_device *pdev) { led_classdev_unregister(&clevo_mail_led); return 0; } static struct platform_driver clevo_mail_led_driver = { .probe = clevo_mail_led_probe, .remove = clevo_mail_led_remove, .driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, }, }; static int __init clevo_mail_led_init(void) { int error = 0; int count = 0; /* Check with the help of DMI if we are running on supported hardware */ if (!nodetect) { count = dmi_check_system(clevo_mail_led_dmi_table); } else { count = 1; pr_err("Skipping DMI detection. " "If the driver works on your hardware please " "report model and the output of dmidecode in tracker " "at http://sourceforge.net/projects/clevo-mailled/\n"); } if (!count) return -ENODEV; pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0); if (!IS_ERR(pdev)) { error = platform_driver_probe(&clevo_mail_led_driver, clevo_mail_led_probe); if (error) { pr_err("Can't probe platform driver\n"); platform_device_unregister(pdev); } } else error = PTR_ERR(pdev); return error; } static void __exit clevo_mail_led_exit(void) { platform_device_unregister(pdev); platform_driver_unregister(&clevo_mail_led_driver); clevo_mail_led_set(NULL, LED_OFF); } module_init(clevo_mail_led_init); module_exit(clevo_mail_led_exit);
gpl-2.0
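/*
 * Driving the LED above from user space goes through the LED class
 * sysfs interface. With the "timer" trigger attached, the LED core
 * calls clevo_mail_led_blink() with the delay_on/delay_off pair, so
 * writing 1000/1000 selects the firmware's fixed 0.5 Hz blink. A
 * hedged sketch (assumes ledtrig-timer is available and the process
 * may write to sysfs):
 */
#include <stdio.h>

/* write a string to a sysfs attribute; returns 0 on success */
static int sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fputs(val, f) < 0) {
		fclose(f);
		return -1;
	}
	return fclose(f) == 0 ? 0 : -1;
}

int main(void)
{
	const char *led = "/sys/class/leds/clevo::mail";
	char path[128];

	snprintf(path, sizeof(path), "%s/trigger", led);
	if (sysfs_write(path, "timer"))
		return 1;

	snprintf(path, sizeof(path), "%s/delay_on", led);
	sysfs_write(path, "1000");
	snprintf(path, sizeof(path), "%s/delay_off", led);
	sysfs_write(path, "1000");	/* 1000/1000 -> 0.5 Hz command */
	return 0;
}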
EPDCenter/android_kernel_rikomagic_mk808
drivers/char/genrtc.c
3342
13127
/* * Real Time Clock interface for * - q40 and other m68k machines, * - HP PARISC machines * - PowerPC machines * emulate some RTC irq capabilities in software * * Copyright (C) 1999 Richard Zidlicky * * based on Paul Gortmaker's rtc.c device and * Sam Creasey Generic rtc driver * * This driver allows use of the real time clock (built into * nearly all computers) from user space. It exports the /dev/rtc * interface supporting various ioctl() and also the /proc/driver/rtc * pseudo-file for status information. * * The ioctls can be used to set the interrupt behaviour where * supported. * * The /dev/rtc interface will block on reads until an interrupt * has been received. If a RTC interrupt has already happened, * it will output an unsigned long and then block. The output value * contains the interrupt status in the low byte and the number of * interrupts since the last read in the remaining high bytes. The * /dev/rtc interface can also be used with the select(2) call. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * 1.01 fix for 2.3.X rz@linux-m68k.org * 1.02 merged with code from genrtc.c rz@linux-m68k.org * 1.03 make it more portable zippel@linux-m68k.org * 1.04 removed useless timer code rz@linux-m68k.org * 1.05 portable RTC_UIE emulation rz@linux-m68k.org * 1.06 set_rtc_time can return an error trini@kernel.crashing.org * 1.07 ported to HP PARISC (hppa) Helge Deller <deller@gmx.de> */ #define RTC_VERSION "1.07" #include <linux/module.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/miscdevice.h> #include <linux/fcntl.h> #include <linux/rtc.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/rtc.h> /* * We sponge a minor off of the misc major. No need slurping * up another valuable major dev number for this. If you add * an ioctl, make sure you don't conflict with SPARC's RTC * ioctls. */ static DEFINE_MUTEX(gen_rtc_mutex); static DECLARE_WAIT_QUEUE_HEAD(gen_rtc_wait); /* * Bits in gen_rtc_status. */ #define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ static unsigned char gen_rtc_status; /* bitmapped status byte. 
*/ static unsigned long gen_rtc_irq_data; /* our output to the world */ /* months start at 0 now */ static unsigned char days_in_mo[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; static int irq_active; #ifdef CONFIG_GEN_RTC_X static struct work_struct genrtc_task; static struct timer_list timer_task; static unsigned int oldsecs; static int lostint; static unsigned long tt_exp; static void gen_rtc_timer(unsigned long data); static volatile int stask_active; /* schedule_work */ static volatile int ttask_active; /* timer_task */ static int stop_rtc_timers; /* don't requeue tasks */ static DEFINE_SPINLOCK(gen_rtc_lock); static void gen_rtc_interrupt(unsigned long arg); /* * Routine to poll RTC seconds field for change as often as possible, * after first RTC_UIE use timer to reduce polling */ static void genrtc_troutine(struct work_struct *work) { unsigned int tmp = get_rtc_ss(); if (stop_rtc_timers) { stask_active = 0; return; } if (oldsecs != tmp){ oldsecs = tmp; timer_task.function = gen_rtc_timer; timer_task.expires = jiffies + HZ - (HZ/10); tt_exp=timer_task.expires; ttask_active=1; stask_active=0; add_timer(&timer_task); gen_rtc_interrupt(0); } else if (schedule_work(&genrtc_task) == 0) stask_active = 0; } static void gen_rtc_timer(unsigned long data) { lostint = get_rtc_ss() - oldsecs ; if (lostint<0) lostint = 60 - lostint; if (time_after(jiffies, tt_exp)) printk(KERN_INFO "genrtc: timer task delayed by %ld jiffies\n", jiffies-tt_exp); ttask_active=0; stask_active=1; if ((schedule_work(&genrtc_task) == 0)) stask_active = 0; } /* * call gen_rtc_interrupt function to signal an RTC_UIE, * arg is unused. * Could be invoked either from a real interrupt handler or * from some routine that periodically (eg 100HZ) monitors * whether RTC_SECS changed */ static void gen_rtc_interrupt(unsigned long arg) { /* We store the status in the low byte and the number of * interrupts received since the last read in the remainder * of rtc_irq_data. */ gen_rtc_irq_data += 0x100; gen_rtc_irq_data &= ~0xff; gen_rtc_irq_data |= RTC_UIE; if (lostint){ printk("genrtc: system delaying clock ticks?\n"); /* increment count so that userspace knows something is wrong */ gen_rtc_irq_data += ((lostint-1)<<8); lostint = 0; } wake_up_interruptible(&gen_rtc_wait); } /* * Now all the various file operations that we export. */ static ssize_t gen_rtc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long data; ssize_t retval; if (count != sizeof (unsigned int) && count != sizeof (unsigned long)) return -EINVAL; if (file->f_flags & O_NONBLOCK && !gen_rtc_irq_data) return -EAGAIN; retval = wait_event_interruptible(gen_rtc_wait, (data = xchg(&gen_rtc_irq_data, 0))); if (retval) goto out; /* first test allows optimizer to nuke this case for 32-bit machines */ if (sizeof (int) != sizeof (long) && count == sizeof (unsigned int)) { unsigned int uidata = data; retval = put_user(uidata, (unsigned int __user *)buf) ?: sizeof(unsigned int); } else { retval = put_user(data, (unsigned long __user *)buf) ?: sizeof(unsigned long); } out: return retval; } static unsigned int gen_rtc_poll(struct file *file, struct poll_table_struct *wait) { poll_wait(file, &gen_rtc_wait, wait); if (gen_rtc_irq_data != 0) return POLLIN | POLLRDNORM; return 0; } #endif /* * Used to disable/enable interrupts, only RTC_UIE supported * We also clear out any old irq data after an ioctl() that * meddles with the interrupt enable/disable bits. 
*/ static inline void gen_clear_rtc_irq_bit(unsigned char bit) { #ifdef CONFIG_GEN_RTC_X stop_rtc_timers = 1; if (ttask_active){ del_timer_sync(&timer_task); ttask_active = 0; } while (stask_active) schedule(); spin_lock(&gen_rtc_lock); irq_active = 0; spin_unlock(&gen_rtc_lock); #endif } static inline int gen_set_rtc_irq_bit(unsigned char bit) { #ifdef CONFIG_GEN_RTC_X spin_lock(&gen_rtc_lock); if ( !irq_active ) { irq_active = 1; stop_rtc_timers = 0; lostint = 0; INIT_WORK(&genrtc_task, genrtc_troutine); oldsecs = get_rtc_ss(); init_timer(&timer_task); stask_active = 1; if (schedule_work(&genrtc_task) == 0){ stask_active = 0; } } spin_unlock(&gen_rtc_lock); gen_rtc_irq_data = 0; return 0; #else return -EINVAL; #endif } static int gen_rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct rtc_time wtime; struct rtc_pll_info pll; void __user *argp = (void __user *)arg; switch (cmd) { case RTC_PLL_GET: if (get_rtc_pll(&pll)) return -EINVAL; else return copy_to_user(argp, &pll, sizeof pll) ? -EFAULT : 0; case RTC_PLL_SET: if (!capable(CAP_SYS_TIME)) return -EACCES; if (copy_from_user(&pll, argp, sizeof(pll))) return -EFAULT; return set_rtc_pll(&pll); case RTC_UIE_OFF: /* disable ints from RTC updates. */ gen_clear_rtc_irq_bit(RTC_UIE); return 0; case RTC_UIE_ON: /* enable ints for RTC updates. */ return gen_set_rtc_irq_bit(RTC_UIE); case RTC_RD_TIME: /* Read the time/date from RTC */ /* this doesn't get week-day, who cares */ memset(&wtime, 0, sizeof(wtime)); get_rtc_time(&wtime); return copy_to_user(argp, &wtime, sizeof(wtime)) ? -EFAULT : 0; case RTC_SET_TIME: /* Set the RTC */ { int year; unsigned char leap_yr; if (!capable(CAP_SYS_TIME)) return -EACCES; if (copy_from_user(&wtime, argp, sizeof(wtime))) return -EFAULT; year = wtime.tm_year + 1900; leap_yr = ((!(year % 4) && (year % 100)) || !(year % 400)); if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1)) return -EINVAL; if (wtime.tm_mday < 0 || wtime.tm_mday > (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr))) return -EINVAL; if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 || wtime.tm_min < 0 || wtime.tm_min >= 60 || wtime.tm_sec < 0 || wtime.tm_sec >= 60) return -EINVAL; return set_rtc_time(&wtime); } } return -EINVAL; } static long gen_rtc_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&gen_rtc_mutex); ret = gen_rtc_ioctl(file, cmd, arg); mutex_unlock(&gen_rtc_mutex); return ret; } /* * We enforce only one user at a time here with the open/close. * Also clear the previous interrupt data on an open, and clean * up things on a close. */ static int gen_rtc_open(struct inode *inode, struct file *file) { mutex_lock(&gen_rtc_mutex); if (gen_rtc_status & RTC_IS_OPEN) { mutex_unlock(&gen_rtc_mutex); return -EBUSY; } gen_rtc_status |= RTC_IS_OPEN; gen_rtc_irq_data = 0; irq_active = 0; mutex_unlock(&gen_rtc_mutex); return 0; } static int gen_rtc_release(struct inode *inode, struct file *file) { /* * Turn off all interrupts once the device is no longer * in use and clear the data. */ gen_clear_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE); gen_rtc_status &= ~RTC_IS_OPEN; return 0; } #ifdef CONFIG_PROC_FS /* * Info exported via "/proc/driver/rtc". 
*/ static int gen_rtc_proc_output(char *buf) { char *p; struct rtc_time tm; unsigned int flags; struct rtc_pll_info pll; p = buf; flags = get_rtc_time(&tm); p += sprintf(p, "rtc_time\t: %02d:%02d:%02d\n" "rtc_date\t: %04d-%02d-%02d\n" "rtc_epoch\t: %04u\n", tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 1900); tm.tm_hour = tm.tm_min = tm.tm_sec = 0; p += sprintf(p, "alarm\t\t: "); if (tm.tm_hour <= 24) p += sprintf(p, "%02d:", tm.tm_hour); else p += sprintf(p, "**:"); if (tm.tm_min <= 59) p += sprintf(p, "%02d:", tm.tm_min); else p += sprintf(p, "**:"); if (tm.tm_sec <= 59) p += sprintf(p, "%02d\n", tm.tm_sec); else p += sprintf(p, "**\n"); p += sprintf(p, "DST_enable\t: %s\n" "BCD\t\t: %s\n" "24hr\t\t: %s\n" "square_wave\t: %s\n" "alarm_IRQ\t: %s\n" "update_IRQ\t: %s\n" "periodic_IRQ\t: %s\n" "periodic_freq\t: %ld\n" "batt_status\t: %s\n", (flags & RTC_DST_EN) ? "yes" : "no", (flags & RTC_DM_BINARY) ? "no" : "yes", (flags & RTC_24H) ? "yes" : "no", (flags & RTC_SQWE) ? "yes" : "no", (flags & RTC_AIE) ? "yes" : "no", irq_active ? "yes" : "no", (flags & RTC_PIE) ? "yes" : "no", 0L /* freq */, (flags & RTC_BATT_BAD) ? "bad" : "okay"); if (!get_rtc_pll(&pll)) p += sprintf(p, "PLL adjustment\t: %d\n" "PLL max +ve adjustment\t: %d\n" "PLL max -ve adjustment\t: %d\n" "PLL +ve adjustment factor\t: %d\n" "PLL -ve adjustment factor\t: %d\n" "PLL frequency\t: %ld\n", pll.pll_value, pll.pll_max, pll.pll_min, pll.pll_posmult, pll.pll_negmult, pll.pll_clock); return p - buf; } static int gen_rtc_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int len = gen_rtc_proc_output (page); if (len <= off+count) *eof = 1; *start = page + off; len -= off; if (len>count) len = count; if (len<0) len = 0; return len; } static int __init gen_rtc_proc_init(void) { struct proc_dir_entry *r; r = create_proc_read_entry("driver/rtc", 0, NULL, gen_rtc_read_proc, NULL); if (!r) return -ENOMEM; return 0; } #else static inline int gen_rtc_proc_init(void) { return 0; } #endif /* CONFIG_PROC_FS */ /* * The various file operations we support. */ static const struct file_operations gen_rtc_fops = { .owner = THIS_MODULE, #ifdef CONFIG_GEN_RTC_X .read = gen_rtc_read, .poll = gen_rtc_poll, #endif .unlocked_ioctl = gen_rtc_unlocked_ioctl, .open = gen_rtc_open, .release = gen_rtc_release, .llseek = noop_llseek, }; static struct miscdevice rtc_gen_dev = { .minor = RTC_MINOR, .name = "rtc", .fops = &gen_rtc_fops, }; static int __init rtc_generic_init(void) { int retval; printk(KERN_INFO "Generic RTC Driver v%s\n", RTC_VERSION); retval = misc_register(&rtc_gen_dev); if (retval < 0) return retval; retval = gen_rtc_proc_init(); if (retval) { misc_deregister(&rtc_gen_dev); return retval; } return 0; } static void __exit rtc_generic_exit(void) { remove_proc_entry ("driver/rtc", NULL); misc_deregister(&rtc_gen_dev); } module_init(rtc_generic_init); module_exit(rtc_generic_exit); MODULE_AUTHOR("Richard Zidlicky"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(RTC_MINOR);
gpl-2.0
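/*
 * The interface described in the genrtc header comment above - read()
 * blocks until an update interrupt and returns the IRQ status in the
 * low byte plus the tick count in the remaining bytes - can be
 * exercised from user space like this. A hedged sketch; RTC_UIE_ON
 * may fail on builds without CONFIG_GEN_RTC_X.
 */
#include <fcntl.h>
#include <linux/rtc.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/rtc", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/rtc");
		return 1;
	}

	if (ioctl(fd, RTC_UIE_ON, 0) < 0) {
		perror("RTC_UIE_ON");
		return 1;
	}

	for (int i = 0; i < 5; i++) {
		unsigned long data;

		if (read(fd, &data, sizeof(data)) != sizeof(data))
			break;
		/* low byte: status (RTC_UIE); high bytes: ticks since last read */
		printf("status %#04lx, %lu tick(s)\n", data & 0xff, data >> 8);
	}

	ioctl(fd, RTC_UIE_OFF, 0);
	close(fd);
	return 0;
}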
Pesach85/lge-kernel-omap4
arch/arm/mach-netx/pfifo.c
14606
1670
/*
 * arch/arm/mach-netx/pfifo.c
 *
 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <mach/netx-regs.h>
#include <mach/pfifo.h>

static DEFINE_MUTEX(pfifo_lock);

static unsigned int pfifo_used = 0;

int pfifo_request(unsigned int pfifo_mask)
{
	int err = 0;
	unsigned int val;

	mutex_lock(&pfifo_lock);

	if (pfifo_mask & pfifo_used) {
		err = -EBUSY;
		goto out;
	}

	pfifo_used |= pfifo_mask;

	val = readl(NETX_PFIFO_RESET);
	writel(val | pfifo_mask, NETX_PFIFO_RESET);
	writel(val, NETX_PFIFO_RESET);

out:
	mutex_unlock(&pfifo_lock);
	return err;
}

void pfifo_free(unsigned int pfifo_mask)
{
	mutex_lock(&pfifo_lock);
	pfifo_used &= ~pfifo_mask;
	mutex_unlock(&pfifo_lock);
}

EXPORT_SYMBOL(pfifo_push);
EXPORT_SYMBOL(pfifo_pop);
EXPORT_SYMBOL(pfifo_fill_level);
EXPORT_SYMBOL(pfifo_empty);
EXPORT_SYMBOL(pfifo_request);
EXPORT_SYMBOL(pfifo_free);
gpl-2.0
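/*
 * pfifo_request()/pfifo_free() above implement an all-or-nothing
 * bitmask allocator under a mutex: a request fails with -EBUSY unless
 * every fifo in the mask is free, and a successful request claims and
 * resets exactly the requested fifos. The same pattern, modelled in
 * portable user-space C with pthreads (the hardware reset writes are
 * omitted):
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int used;

static int fifo_request(unsigned int mask)
{
	int err = 0;

	pthread_mutex_lock(&lock);
	if (mask & used)
		err = -EBUSY;	/* some requested fifo is already taken */
	else
		used |= mask;	/* claim the whole mask in one step */
	pthread_mutex_unlock(&lock);
	return err;
}

static void fifo_free(unsigned int mask)
{
	pthread_mutex_lock(&lock);
	used &= ~mask;
	pthread_mutex_unlock(&lock);
}

int main(void)		/* build with: cc -pthread */
{
	printf("%d\n", fifo_request(0x3));	/* 0: fifos 0 and 1 granted */
	printf("%d\n", fifo_request(0x2));	/* -EBUSY: fifo 1 still held */
	fifo_free(0x3);
	printf("%d\n", fifo_request(0x2));	/* 0: free again */
	return 0;
}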
glewarne/S6-UniKernel
arch/arm/mach-netx/pfifo.c
14606
1670
/*
 * arch/arm/mach-netx/pfifo.c
 *
 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <mach/netx-regs.h>
#include <mach/pfifo.h>

static DEFINE_MUTEX(pfifo_lock);

static unsigned int pfifo_used = 0;

int pfifo_request(unsigned int pfifo_mask)
{
	int err = 0;
	unsigned int val;

	mutex_lock(&pfifo_lock);

	if (pfifo_mask & pfifo_used) {
		err = -EBUSY;
		goto out;
	}

	pfifo_used |= pfifo_mask;

	val = readl(NETX_PFIFO_RESET);
	writel(val | pfifo_mask, NETX_PFIFO_RESET);
	writel(val, NETX_PFIFO_RESET);

out:
	mutex_unlock(&pfifo_lock);
	return err;
}

void pfifo_free(unsigned int pfifo_mask)
{
	mutex_lock(&pfifo_lock);
	pfifo_used &= ~pfifo_mask;
	mutex_unlock(&pfifo_lock);
}

EXPORT_SYMBOL(pfifo_push);
EXPORT_SYMBOL(pfifo_pop);
EXPORT_SYMBOL(pfifo_fill_level);
EXPORT_SYMBOL(pfifo_empty);
EXPORT_SYMBOL(pfifo_request);
EXPORT_SYMBOL(pfifo_free);
gpl-2.0
cooks8/android_kernel_samsung_smdk4x12
net/decnet/af_decnet.c
15
55103
/* * DECnet An implementation of the DECnet protocol suite for the LINUX * operating system. DECnet is implemented using the BSD Socket * interface as the means of communication with the user level. * * DECnet Socket Layer Interface * * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com> * Patrick Caulfield <patrick@pandh.demon.co.uk> * * Changes: * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's * version of the code. Original copyright preserved * below. * Steve Whitehouse: Some bug fixes, cleaning up some code to make it * compatible with my routing layer. * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick * Caulfield. * Steve Whitehouse: Further bug fixes, checking module code still works * with new routing layer. * Steve Whitehouse: Additional set/get_sockopt() calls. * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new * code. * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like * way. Didn't manage it entirely, but its better. * Steve Whitehouse: ditto for sendmsg(). * Steve Whitehouse: A selection of bug fixes to various things. * Steve Whitehouse: Added TIOCOUTQ ioctl. * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username. * Steve Whitehouse: Fixes to connect() error returns. * Patrick Caulfield: Fixes to delayed acceptance logic. * David S. Miller: New socket locking * Steve Whitehouse: Socket list hashing/locking * Arnaldo C. Melo: use capable, not suser * Steve Whitehouse: Removed unused code. Fix to use sk->allocation * when required. * Patrick Caulfield: /proc/net/decnet now has object name/number * Steve Whitehouse: Fixed local port allocation, hashed sk list * Matthew Wilcox: Fixes for dn_ioctl() * Steve Whitehouse: New connect/accept logic to allow timeouts and * prepare for sendpage etc. */ /****************************************************************************** (c) 1995-1998 E.M. Serrat emserrat@geocities.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. HISTORY: Version Kernel Date Author/Comments ------- ------ ---- --------------- Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat (emserrat@geocities.com) First Development of DECnet Socket La- yer for Linux. Only supports outgoing connections. Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield (patrick@pandh.demon.co.uk) Port to new kernel development version. Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat (emserrat@geocities.com) _ Added support for incoming connections so we can start developing server apps on Linux. - Module Support Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat (emserrat@geocities.com) _ Added support for X11R6.4. Now we can use DECnet transport for X on Linux!!! - Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat (emserrat@geocities.com) Removed bugs on flow control Removed bugs on incoming accessdata order - Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat dn_recvmsg fixes Patrick J. 
Caulfield dn_bind fixes *******************************************************************************/ #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/inet.h> #include <linux/route.h> #include <linux/netfilter.h> #include <linux/seq_file.h> #include <net/sock.h> #include <net/tcp_states.h> #include <net/flow.h> #include <asm/system.h> #include <asm/ioctls.h> #include <linux/capability.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/poll.h> #include <net/net_namespace.h> #include <net/neighbour.h> #include <net/dst.h> #include <net/fib_rules.h> #include <net/dn.h> #include <net/dn_nsp.h> #include <net/dn_dev.h> #include <net/dn_route.h> #include <net/dn_fib.h> #include <net/dn_neigh.h> struct dn_sock { struct sock sk; struct dn_scp scp; }; static void dn_keepalive(struct sock *sk); #define DN_SK_HASH_SHIFT 8 #define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT) #define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1) static const struct proto_ops dn_proto_ops; static DEFINE_RWLOCK(dn_hash_lock); static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; static struct hlist_head dn_wild_sk; static atomic_long_t decnet_memory_allocated; static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags); static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); static struct hlist_head *dn_find_list(struct sock *sk) { struct dn_scp *scp = DN_SK(sk); if (scp->addr.sdn_flags & SDF_WILD) return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL; return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK]; } /* * Valid ports are those greater than zero and not already in use. */ static int check_port(__le16 port) { struct sock *sk; struct hlist_node *node; if (port == 0) return -1; sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) { struct dn_scp *scp = DN_SK(sk); if (scp->addrloc == port) return -1; } return 0; } static unsigned short port_alloc(struct sock *sk) { struct dn_scp *scp = DN_SK(sk); static unsigned short port = 0x2000; unsigned short i_port = port; while(check_port(cpu_to_le16(++port)) != 0) { if (port == i_port) return 0; } scp->addrloc = cpu_to_le16(port); return 1; } /* * Since this is only ever called from user * level, we don't need a write_lock() version * of this. 
*/ static int dn_hash_sock(struct sock *sk) { struct dn_scp *scp = DN_SK(sk); struct hlist_head *list; int rv = -EUSERS; BUG_ON(sk_hashed(sk)); write_lock_bh(&dn_hash_lock); if (!scp->addrloc && !port_alloc(sk)) goto out; rv = -EADDRINUSE; if ((list = dn_find_list(sk)) == NULL) goto out; sk_add_node(sk, list); rv = 0; out: write_unlock_bh(&dn_hash_lock); return rv; } static void dn_unhash_sock(struct sock *sk) { write_lock(&dn_hash_lock); sk_del_node_init(sk); write_unlock(&dn_hash_lock); } static void dn_unhash_sock_bh(struct sock *sk) { write_lock_bh(&dn_hash_lock); sk_del_node_init(sk); write_unlock_bh(&dn_hash_lock); } static struct hlist_head *listen_hash(struct sockaddr_dn *addr) { int i; unsigned hash = addr->sdn_objnum; if (hash == 0) { hash = addr->sdn_objnamel; for(i = 0; i < le16_to_cpu(addr->sdn_objnamel); i++) { hash ^= addr->sdn_objname[i]; hash ^= (hash << 3); } } return &dn_sk_hash[hash & DN_SK_HASH_MASK]; } /* * Called to transform a socket from bound (i.e. with a local address) * into a listening socket (doesn't need a local port number) and rehashes * based upon the object name/number. */ static void dn_rehash_sock(struct sock *sk) { struct hlist_head *list; struct dn_scp *scp = DN_SK(sk); if (scp->addr.sdn_flags & SDF_WILD) return; write_lock_bh(&dn_hash_lock); sk_del_node_init(sk); DN_SK(sk)->addrloc = 0; list = listen_hash(&DN_SK(sk)->addr); sk_add_node(sk, list); write_unlock_bh(&dn_hash_lock); } int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type) { int len = 2; *buf++ = type; switch(type) { case 0: *buf++ = sdn->sdn_objnum; break; case 1: *buf++ = 0; *buf++ = le16_to_cpu(sdn->sdn_objnamel); memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel)); len = 3 + le16_to_cpu(sdn->sdn_objnamel); break; case 2: memset(buf, 0, 5); buf += 5; *buf++ = le16_to_cpu(sdn->sdn_objnamel); memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel)); len = 7 + le16_to_cpu(sdn->sdn_objnamel); break; } return len; } /* * On reception of usernames, we handle types 1 and 0 for destination * addresses only. Types 2 and 4 are used for source addresses, but the * UIC, GIC are ignored and they are both treated the same way. Type 3 * is never used as I've no idea what its purpose might be or what its * format is. 
*/ int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt) { unsigned char type; int size = len; int namel = 12; sdn->sdn_objnum = 0; sdn->sdn_objnamel = cpu_to_le16(0); memset(sdn->sdn_objname, 0, DN_MAXOBJL); if (len < 2) return -1; len -= 2; *fmt = *data++; type = *data++; switch(*fmt) { case 0: sdn->sdn_objnum = type; return 2; case 1: namel = 16; break; case 2: len -= 4; data += 4; break; case 4: len -= 8; data += 8; break; default: return -1; } len -= 1; if (len < 0) return -1; sdn->sdn_objnamel = cpu_to_le16(*data++); len -= le16_to_cpu(sdn->sdn_objnamel); if ((len < 0) || (le16_to_cpu(sdn->sdn_objnamel) > namel)) return -1; memcpy(sdn->sdn_objname, data, le16_to_cpu(sdn->sdn_objnamel)); return size - len; } struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr) { struct hlist_head *list = listen_hash(addr); struct hlist_node *node; struct sock *sk; read_lock(&dn_hash_lock); sk_for_each(sk, node, list) { struct dn_scp *scp = DN_SK(sk); if (sk->sk_state != TCP_LISTEN) continue; if (scp->addr.sdn_objnum) { if (scp->addr.sdn_objnum != addr->sdn_objnum) continue; } else { if (addr->sdn_objnum) continue; if (scp->addr.sdn_objnamel != addr->sdn_objnamel) continue; if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, le16_to_cpu(addr->sdn_objnamel)) != 0) continue; } sock_hold(sk); read_unlock(&dn_hash_lock); return sk; } sk = sk_head(&dn_wild_sk); if (sk) { if (sk->sk_state == TCP_LISTEN) sock_hold(sk); else sk = NULL; } read_unlock(&dn_hash_lock); return sk; } struct sock *dn_find_by_skb(struct sk_buff *skb) { struct dn_skb_cb *cb = DN_SKB_CB(skb); struct sock *sk; struct hlist_node *node; struct dn_scp *scp; read_lock(&dn_hash_lock); sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) { scp = DN_SK(sk); if (cb->src != dn_saddr2dn(&scp->peer)) continue; if (cb->dst_port != scp->addrloc) continue; if (scp->addrrem && (cb->src_port != scp->addrrem)) continue; sock_hold(sk); goto found; } sk = NULL; found: read_unlock(&dn_hash_lock); return sk; } static void dn_destruct(struct sock *sk) { struct dn_scp *scp = DN_SK(sk); skb_queue_purge(&scp->data_xmit_queue); skb_queue_purge(&scp->other_xmit_queue); skb_queue_purge(&scp->other_receive_queue); dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); } static int dn_memory_pressure; static void dn_enter_memory_pressure(struct sock *sk) { if (!dn_memory_pressure) { dn_memory_pressure = 1; } } static struct proto dn_proto = { .name = "NSP", .owner = THIS_MODULE, .enter_memory_pressure = dn_enter_memory_pressure, .memory_pressure = &dn_memory_pressure, .memory_allocated = &decnet_memory_allocated, .sysctl_mem = sysctl_decnet_mem, .sysctl_wmem = sysctl_decnet_wmem, .sysctl_rmem = sysctl_decnet_rmem, .max_header = DN_MAX_NSP_DATA_HEADER + 64, .obj_size = sizeof(struct dn_sock), }; static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp) { struct dn_scp *scp; struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto); if (!sk) goto out; if (sock) sock->ops = &dn_proto_ops; sock_init_data(sock, sk); sk->sk_backlog_rcv = dn_nsp_backlog_rcv; sk->sk_destruct = dn_destruct; sk->sk_no_check = 1; sk->sk_family = PF_DECnet; sk->sk_protocol = 0; sk->sk_allocation = gfp; sk->sk_sndbuf = sysctl_decnet_wmem[1]; sk->sk_rcvbuf = sysctl_decnet_rmem[1]; /* Initialization of DECnet Session Control Port */ scp = DN_SK(sk); scp->state = DN_O; /* Open */ scp->numdat = 1; /* Next data seg to tx */ scp->numoth = 1; /* Next oth data to tx */ scp->ackxmt_dat = 0; /* 
Last data seg ack'ed */ scp->ackxmt_oth = 0; /* Last oth data ack'ed */ scp->ackrcv_dat = 0; /* Highest data ack recv*/ scp->ackrcv_oth = 0; /* Last oth data ack rec*/ scp->flowrem_sw = DN_SEND; scp->flowloc_sw = DN_SEND; scp->flowrem_dat = 0; scp->flowrem_oth = 1; scp->flowloc_dat = 0; scp->flowloc_oth = 1; scp->services_rem = 0; scp->services_loc = 1 | NSP_FC_NONE; scp->info_rem = 0; scp->info_loc = 0x03; /* NSP version 4.1 */ scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */ scp->nonagle = 0; scp->multi_ireq = 1; scp->accept_mode = ACC_IMMED; scp->addr.sdn_family = AF_DECnet; scp->peer.sdn_family = AF_DECnet; scp->accessdata.acc_accl = 5; memcpy(scp->accessdata.acc_acc, "LINUX", 5); scp->max_window = NSP_MAX_WINDOW; scp->snd_window = NSP_MIN_WINDOW; scp->nsp_srtt = NSP_INITIAL_SRTT; scp->nsp_rttvar = NSP_INITIAL_RTTVAR; scp->nsp_rxtshift = 0; skb_queue_head_init(&scp->data_xmit_queue); skb_queue_head_init(&scp->other_xmit_queue); skb_queue_head_init(&scp->other_receive_queue); scp->persist = 0; scp->persist_fxn = NULL; scp->keepalive = 10 * HZ; scp->keepalive_fxn = dn_keepalive; init_timer(&scp->delack_timer); scp->delack_pending = 0; scp->delack_fxn = dn_nsp_delayed_ack; dn_start_slow_timer(sk); out: return sk; } /* * Keepalive timer. * FIXME: Should respond to SO_KEEPALIVE etc. */ static void dn_keepalive(struct sock *sk) { struct dn_scp *scp = DN_SK(sk); /* * By checking the other_data transmit queue is empty * we are double checking that we are not sending too * many of these keepalive frames. */ if (skb_queue_empty(&scp->other_xmit_queue)) dn_nsp_send_link(sk, DN_NOCHANGE, 0); } /* * Timer for shutdown/destroyed sockets. * When socket is dead & no packets have been sent for a * certain amount of time, they are removed by this * routine. Also takes care of sending out DI & DC * frames at correct times. 
*/ int dn_destroy_timer(struct sock *sk) { struct dn_scp *scp = DN_SK(sk); scp->persist = dn_nsp_persist(sk); switch(scp->state) { case DN_DI: dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC); if (scp->nsp_rxtshift >= decnet_di_count) scp->state = DN_CN; return 0; case DN_DR: dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC); if (scp->nsp_rxtshift >= decnet_dr_count) scp->state = DN_DRC; return 0; case DN_DN: if (scp->nsp_rxtshift < decnet_dn_count) { /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */ dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC); return 0; } } scp->persist = (HZ * decnet_time_wait); if (sk->sk_socket) return 0; if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) { dn_unhash_sock(sk); sock_put(sk); return 1; } return 0; } static void dn_destroy_sock(struct sock *sk) { struct dn_scp *scp = DN_SK(sk); scp->nsp_rxtshift = 0; /* reset back off */ if (sk->sk_socket) { if (sk->sk_socket->state != SS_UNCONNECTED) sk->sk_socket->state = SS_DISCONNECTING; } sk->sk_state = TCP_CLOSE; switch(scp->state) { case DN_DN: dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, sk->sk_allocation); scp->persist_fxn = dn_destroy_timer; scp->persist = dn_nsp_persist(sk); break; case DN_CR: scp->state = DN_DR; goto disc_reject; case DN_RUN: scp->state = DN_DI; case DN_DI: case DN_DR: disc_reject: dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation); case DN_NC: case DN_NR: case DN_RJ: case DN_DIC: case DN_CN: case DN_DRC: case DN_CI: case DN_CD: scp->persist_fxn = dn_destroy_timer; scp->persist = dn_nsp_persist(sk); break; default: printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n"); case DN_O: dn_stop_slow_timer(sk); dn_unhash_sock_bh(sk); sock_put(sk); break; } } char *dn_addr2asc(__u16 addr, char *buf) { unsigned short node, area; node = addr & 0x03ff; area = addr >> 10; sprintf(buf, "%hd.%hd", area, node); return buf; } static int dn_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; if (protocol < 0 || protocol > SK_PROTOCOL_MAX) return -EINVAL; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; switch(sock->type) { case SOCK_SEQPACKET: if (protocol != DNPROTO_NSP) return -EPROTONOSUPPORT; break; case SOCK_STREAM: break; default: return -ESOCKTNOSUPPORT; } if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL)) == NULL) return -ENOBUFS; sk->sk_protocol = protocol; return 0; } static int dn_release(struct socket *sock) { struct sock *sk = sock->sk; if (sk) { sock_orphan(sk); sock_hold(sk); lock_sock(sk); dn_destroy_sock(sk); release_sock(sk); sock_put(sk); } return 0; } static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr; struct net_device *dev, *ldev; int rv; if (addr_len != sizeof(struct sockaddr_dn)) return -EINVAL; if (saddr->sdn_family != AF_DECnet) return -EINVAL; if (le16_to_cpu(saddr->sdn_nodeaddrl) && (le16_to_cpu(saddr->sdn_nodeaddrl) != 2)) return -EINVAL; if (le16_to_cpu(saddr->sdn_objnamel) > DN_MAXOBJL) return -EINVAL; if (saddr->sdn_flags & ~SDF_WILD) return -EINVAL; if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum || (saddr->sdn_flags & SDF_WILD))) return -EACCES; if (!(saddr->sdn_flags & SDF_WILD)) { if (le16_to_cpu(saddr->sdn_nodeaddrl)) { rcu_read_lock(); ldev = NULL; for_each_netdev_rcu(&init_net, dev) { if (!dev->dn_ptr) continue; if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) { ldev = dev; break; } } rcu_read_unlock(); if (ldev == NULL) return 
-EADDRNOTAVAIL; } } rv = -EINVAL; lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) { memcpy(&scp->addr, saddr, addr_len); sock_reset_flag(sk, SOCK_ZAPPED); rv = dn_hash_sock(sk); if (rv) sock_set_flag(sk, SOCK_ZAPPED); } release_sock(sk); return rv; } static int dn_auto_bind(struct socket *sock) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); int rv; sock_reset_flag(sk, SOCK_ZAPPED); scp->addr.sdn_flags = 0; scp->addr.sdn_objnum = 0; /* * This stuff is to keep compatibility with Eduardo's * patch. I hope I can dispense with it shortly... */ if ((scp->accessdata.acc_accl != 0) && (scp->accessdata.acc_accl <= 12)) { scp->addr.sdn_objnamel = cpu_to_le16(scp->accessdata.acc_accl); memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, le16_to_cpu(scp->addr.sdn_objnamel)); scp->accessdata.acc_accl = 0; memset(scp->accessdata.acc_acc, 0, 40); } /* End of compatibility stuff */ scp->addr.sdn_add.a_len = cpu_to_le16(2); rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr); if (rv == 0) { rv = dn_hash_sock(sk); if (rv) sock_set_flag(sk, SOCK_ZAPPED); } return rv; } static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) { struct dn_scp *scp = DN_SK(sk); DEFINE_WAIT(wait); int err; if (scp->state != DN_CR) return -EINVAL; scp->state = DN_CC; scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk)); dn_send_conn_conf(sk, allocation); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); for(;;) { release_sock(sk); if (scp->state == DN_CC) *timeo = schedule_timeout(*timeo); lock_sock(sk); err = 0; if (scp->state == DN_RUN) break; err = sock_error(sk); if (err) break; err = sock_intr_errno(*timeo); if (signal_pending(current)) break; err = -EAGAIN; if (!*timeo) break; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); if (err == 0) { sk->sk_socket->state = SS_CONNECTED; } else if (scp->state != DN_CC) { sk->sk_socket->state = SS_UNCONNECTED; } return err; } static int dn_wait_run(struct sock *sk, long *timeo) { struct dn_scp *scp = DN_SK(sk); DEFINE_WAIT(wait); int err = 0; if (scp->state == DN_RUN) goto out; if (!*timeo) return -EALREADY; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); for(;;) { release_sock(sk); if (scp->state == DN_CI || scp->state == DN_CC) *timeo = schedule_timeout(*timeo); lock_sock(sk); err = 0; if (scp->state == DN_RUN) break; err = sock_error(sk); if (err) break; err = sock_intr_errno(*timeo); if (signal_pending(current)) break; err = -ETIMEDOUT; if (!*timeo) break; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); out: if (err == 0) { sk->sk_socket->state = SS_CONNECTED; } else if (scp->state != DN_CI && scp->state != DN_CC) { sk->sk_socket->state = SS_UNCONNECTED; } return err; } static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags) { struct socket *sock = sk->sk_socket; struct dn_scp *scp = DN_SK(sk); int err = -EISCONN; struct flowidn fld; if (sock->state == SS_CONNECTED) goto out; if (sock->state == SS_CONNECTING) { err = 0; if (scp->state == DN_RUN) { sock->state = SS_CONNECTED; goto out; } err = -ECONNREFUSED; if (scp->state != DN_CI && scp->state != DN_CC) { sock->state = SS_UNCONNECTED; goto out; } return dn_wait_run(sk, timeo); } err = -EINVAL; if (scp->state != DN_O) goto out; if (addr == NULL || addrlen != sizeof(struct sockaddr_dn)) goto out; if (addr->sdn_family != AF_DECnet) goto out; if (addr->sdn_flags & SDF_WILD) goto out; if (sock_flag(sk, SOCK_ZAPPED)) { err = 
dn_auto_bind(sk->sk_socket); if (err) goto out; } memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn)); err = -EHOSTUNREACH; memset(&fld, 0, sizeof(fld)); fld.flowidn_oif = sk->sk_bound_dev_if; fld.daddr = dn_saddr2dn(&scp->peer); fld.saddr = dn_saddr2dn(&scp->addr); dn_sk_ports_copy(&fld, scp); fld.flowidn_proto = DNPROTO_NSP; if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0) goto out; sk->sk_route_caps = sk->sk_dst_cache->dev->features; sock->state = SS_CONNECTING; scp->state = DN_CI; scp->segsize_loc = dst_metric_advmss(sk->sk_dst_cache); dn_nsp_send_conninit(sk, NSP_CI); err = -EINPROGRESS; if (*timeo) { err = dn_wait_run(sk, timeo); } out: return err; } static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags) { struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr; struct sock *sk = sock->sk; int err; long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); lock_sock(sk); err = __dn_connect(sk, addr, addrlen, &timeo, 0); release_sock(sk); return err; } static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags) { struct dn_scp *scp = DN_SK(sk); switch(scp->state) { case DN_RUN: return 0; case DN_CR: return dn_confirm_accept(sk, timeo, sk->sk_allocation); case DN_CI: case DN_CC: return dn_wait_run(sk, timeo); case DN_O: return __dn_connect(sk, addr, addrlen, timeo, flags); } return -EINVAL; } static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc) { unsigned char *ptr = skb->data; acc->acc_userl = *ptr++; memcpy(&acc->acc_user, ptr, acc->acc_userl); ptr += acc->acc_userl; acc->acc_passl = *ptr++; memcpy(&acc->acc_pass, ptr, acc->acc_passl); ptr += acc->acc_passl; acc->acc_accl = *ptr++; memcpy(&acc->acc_acc, ptr, acc->acc_accl); skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3); } static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt) { unsigned char *ptr = skb->data; u16 len = *ptr++; /* yes, it's 8bit on the wire */ BUG_ON(len > 16); /* we've checked the contents earlier */ opt->opt_optl = cpu_to_le16(len); opt->opt_status = 0; memcpy(opt->opt_data, ptr, len); skb_pull(skb, len + 1); } static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) { DEFINE_WAIT(wait); struct sk_buff *skb = NULL; int err = 0; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); for(;;) { release_sock(sk); skb = skb_dequeue(&sk->sk_receive_queue); if (skb == NULL) { *timeo = schedule_timeout(*timeo); skb = skb_dequeue(&sk->sk_receive_queue); } lock_sock(sk); if (skb != NULL) break; err = -EINVAL; if (sk->sk_state != TCP_LISTEN) break; err = sock_intr_errno(*timeo); if (signal_pending(current)) break; err = -EAGAIN; if (!*timeo) break; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); return skb == NULL ? 
ERR_PTR(err) : skb; } static int dn_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk, *newsk; struct sk_buff *skb = NULL; struct dn_skb_cb *cb; unsigned char menuver; int err = 0; unsigned char type; long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); struct dst_entry *dst; lock_sock(sk); if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) { release_sock(sk); return -EINVAL; } skb = skb_dequeue(&sk->sk_receive_queue); if (skb == NULL) { skb = dn_wait_for_connect(sk, &timeo); if (IS_ERR(skb)) { release_sock(sk); return PTR_ERR(skb); } } cb = DN_SKB_CB(skb); sk->sk_ack_backlog--; newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation); if (newsk == NULL) { release_sock(sk); kfree_skb(skb); return -ENOBUFS; } release_sock(sk); dst = skb_dst(skb); sk_dst_set(newsk, dst); skb_dst_set(skb, NULL); DN_SK(newsk)->state = DN_CR; DN_SK(newsk)->addrrem = cb->src_port; DN_SK(newsk)->services_rem = cb->services; DN_SK(newsk)->info_rem = cb->info; DN_SK(newsk)->segsize_rem = cb->segsize; DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode; if (DN_SK(newsk)->segsize_rem < 230) DN_SK(newsk)->segsize_rem = 230; if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE) DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd; newsk->sk_state = TCP_LISTEN; memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn)); /* * If we are listening on a wild socket, we don't want * the newly created socket on the wrong hash queue. */ DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD; skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type)); skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type)); *(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src; *(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst; menuver = *skb->data; skb_pull(skb, 1); if (menuver & DN_MENUVER_ACC) dn_access_copy(skb, &(DN_SK(newsk)->accessdata)); if (menuver & DN_MENUVER_USR) dn_user_copy(skb, &(DN_SK(newsk)->conndata_in)); if (menuver & DN_MENUVER_PRX) DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY; if (menuver & DN_MENUVER_UIC) DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY; kfree_skb(skb); memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out), sizeof(struct optdata_dn)); memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out), sizeof(struct optdata_dn)); lock_sock(newsk); err = dn_hash_sock(newsk); if (err == 0) { sock_reset_flag(newsk, SOCK_ZAPPED); dn_send_conn_ack(newsk); /* * Here we use sk->sk_allocation since although the conn conf is * for the newsk, the context is the old socket. 
*/ if (DN_SK(newsk)->accept_mode == ACC_IMMED) err = dn_confirm_accept(newsk, &timeo, sk->sk_allocation); } release_sock(newsk); return err; } static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len,int peer) { struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr; struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); *uaddr_len = sizeof(struct sockaddr_dn); lock_sock(sk); if (peer) { if ((sock->state != SS_CONNECTED && sock->state != SS_CONNECTING) && scp->accept_mode == ACC_IMMED) { release_sock(sk); return -ENOTCONN; } memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn)); } else { memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn)); } release_sock(sk); return 0; } static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); int mask = datagram_poll(file, sock, wait); if (!skb_queue_empty(&scp->other_receive_queue)) mask |= POLLRDBAND; return mask; } static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); int err = -EOPNOTSUPP; long amount = 0; struct sk_buff *skb; int val; switch(cmd) { case SIOCGIFADDR: case SIOCSIFADDR: return dn_dev_ioctl(cmd, (void __user *)arg); case SIOCATMARK: lock_sock(sk); val = !skb_queue_empty(&scp->other_receive_queue); if (scp->state != DN_RUN) val = -ENOTCONN; release_sock(sk); return val; case TIOCOUTQ: amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; err = put_user(amount, (int __user *)arg); break; case TIOCINQ: lock_sock(sk); skb = skb_peek(&scp->other_receive_queue); if (skb) { amount = skb->len; } else { skb_queue_walk(&sk->sk_receive_queue, skb) amount += skb->len; } release_sock(sk); err = put_user(amount, (int __user *)arg); break; default: err = -ENOIOCTLCMD; break; } return err; } static int dn_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err = -EINVAL; lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) goto out; if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN)) goto out; sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = TCP_LISTEN; err = 0; dn_rehash_sock(sk); out: release_sock(sk); return err; } static int dn_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); int err = -ENOTCONN; lock_sock(sk); if (sock->state == SS_UNCONNECTED) goto out; err = 0; if (sock->state == SS_DISCONNECTING) goto out; err = -EINVAL; if (scp->state == DN_O) goto out; if (how != SHUTDOWN_MASK) goto out; sk->sk_shutdown = how; dn_destroy_sock(sk); err = 0; out: release_sock(sk); return err; } static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; int err; lock_sock(sk); err = __dn_setsockopt(sock, level, optname, optval, optlen, 0); release_sock(sk); return err; } static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); long timeo; union { struct optdata_dn opt; struct accessdata_dn acc; int mode; unsigned long win; int val; unsigned char services; unsigned char info; } u; int err; if (optlen && !optval) return -EINVAL; if (optlen > sizeof(u)) return -EINVAL; if (copy_from_user(&u, optval, optlen)) return -EFAULT; switch(optname) { case DSO_CONDATA: if (sock->state == SS_CONNECTED) return -EISCONN; if ((scp->state != DN_O) && 
(scp->state != DN_CR)) return -EINVAL; if (optlen != sizeof(struct optdata_dn)) return -EINVAL; if (le16_to_cpu(u.opt.opt_optl) > 16) return -EINVAL; memcpy(&scp->conndata_out, &u.opt, optlen); break; case DSO_DISDATA: if (sock->state != SS_CONNECTED && scp->accept_mode == ACC_IMMED) return -ENOTCONN; if (optlen != sizeof(struct optdata_dn)) return -EINVAL; if (le16_to_cpu(u.opt.opt_optl) > 16) return -EINVAL; memcpy(&scp->discdata_out, &u.opt, optlen); break; case DSO_CONACCESS: if (sock->state == SS_CONNECTED) return -EISCONN; if (scp->state != DN_O) return -EINVAL; if (optlen != sizeof(struct accessdata_dn)) return -EINVAL; if ((u.acc.acc_accl > DN_MAXACCL) || (u.acc.acc_passl > DN_MAXACCL) || (u.acc.acc_userl > DN_MAXACCL)) return -EINVAL; memcpy(&scp->accessdata, &u.acc, optlen); break; case DSO_ACCEPTMODE: if (sock->state == SS_CONNECTED) return -EISCONN; if (scp->state != DN_O) return -EINVAL; if (optlen != sizeof(int)) return -EINVAL; if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER)) return -EINVAL; scp->accept_mode = (unsigned char)u.mode; break; case DSO_CONACCEPT: if (scp->state != DN_CR) return -EINVAL; timeo = sock_rcvtimeo(sk, 0); err = dn_confirm_accept(sk, &timeo, sk->sk_allocation); return err; case DSO_CONREJECT: if (scp->state != DN_CR) return -EINVAL; scp->state = DN_DR; sk->sk_shutdown = SHUTDOWN_MASK; dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation); break; default: #ifdef CONFIG_NETFILTER return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen); #endif case DSO_LINKINFO: case DSO_STREAM: case DSO_SEQPACKET: return -ENOPROTOOPT; case DSO_MAXWINDOW: if (optlen != sizeof(unsigned long)) return -EINVAL; if (u.win > NSP_MAX_WINDOW) u.win = NSP_MAX_WINDOW; if (u.win == 0) return -EINVAL; scp->max_window = u.win; if (scp->snd_window > u.win) scp->snd_window = u.win; break; case DSO_NODELAY: if (optlen != sizeof(int)) return -EINVAL; if (scp->nonagle == 2) return -EINVAL; scp->nonagle = (u.val == 0) ? 0 : 1; /* if (scp->nonagle == 1) { Push pending frames } */ break; case DSO_CORK: if (optlen != sizeof(int)) return -EINVAL; if (scp->nonagle == 1) return -EINVAL; scp->nonagle = (u.val == 0) ? 
0 : 2; /* if (scp->nonagle == 0) { Push pending frames } */ break; case DSO_SERVICES: if (optlen != sizeof(unsigned char)) return -EINVAL; if ((u.services & ~NSP_FC_MASK) != 0x01) return -EINVAL; if ((u.services & NSP_FC_MASK) == NSP_FC_MASK) return -EINVAL; scp->services_loc = u.services; break; case DSO_INFO: if (optlen != sizeof(unsigned char)) return -EINVAL; if (u.info & 0xfc) return -EINVAL; scp->info_loc = u.info; break; } return 0; } static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int err; lock_sock(sk); err = __dn_getsockopt(sock, level, optname, optval, optlen, 0); release_sock(sk); return err; } static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); struct linkinfo_dn link; unsigned int r_len; void *r_data = NULL; unsigned int val; if(get_user(r_len , optlen)) return -EFAULT; switch(optname) { case DSO_CONDATA: if (r_len > sizeof(struct optdata_dn)) r_len = sizeof(struct optdata_dn); r_data = &scp->conndata_in; break; case DSO_DISDATA: if (r_len > sizeof(struct optdata_dn)) r_len = sizeof(struct optdata_dn); r_data = &scp->discdata_in; break; case DSO_CONACCESS: if (r_len > sizeof(struct accessdata_dn)) r_len = sizeof(struct accessdata_dn); r_data = &scp->accessdata; break; case DSO_ACCEPTMODE: if (r_len > sizeof(unsigned char)) r_len = sizeof(unsigned char); r_data = &scp->accept_mode; break; case DSO_LINKINFO: if (r_len > sizeof(struct linkinfo_dn)) r_len = sizeof(struct linkinfo_dn); memset(&link, 0, sizeof(link)); switch(sock->state) { case SS_CONNECTING: link.idn_linkstate = LL_CONNECTING; break; case SS_DISCONNECTING: link.idn_linkstate = LL_DISCONNECTING; break; case SS_CONNECTED: link.idn_linkstate = LL_RUNNING; break; default: link.idn_linkstate = LL_INACTIVE; } link.idn_segsize = scp->segsize_rem; r_data = &link; break; default: #ifdef CONFIG_NETFILTER { int ret, len; if(get_user(len, optlen)) return -EFAULT; ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len); if (ret >= 0) ret = put_user(len, optlen); return ret; } #endif case DSO_STREAM: case DSO_SEQPACKET: case DSO_CONACCEPT: case DSO_CONREJECT: return -ENOPROTOOPT; case DSO_MAXWINDOW: if (r_len > sizeof(unsigned long)) r_len = sizeof(unsigned long); r_data = &scp->max_window; break; case DSO_NODELAY: if (r_len > sizeof(int)) r_len = sizeof(int); val = (scp->nonagle == 1); r_data = &val; break; case DSO_CORK: if (r_len > sizeof(int)) r_len = sizeof(int); val = (scp->nonagle == 2); r_data = &val; break; case DSO_SERVICES: if (r_len > sizeof(unsigned char)) r_len = sizeof(unsigned char); r_data = &scp->services_rem; break; case DSO_INFO: if (r_len > sizeof(unsigned char)) r_len = sizeof(unsigned char); r_data = &scp->info_rem; break; } if (r_data) { if (copy_to_user(optval, r_data, r_len)) return -EFAULT; if (put_user(r_len, optlen)) return -EFAULT; } return 0; } static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target) { struct sk_buff *skb; int len = 0; if (flags & MSG_OOB) return !skb_queue_empty(q) ? 
1 : 0; skb_queue_walk(q, skb) { struct dn_skb_cb *cb = DN_SKB_CB(skb); len += skb->len; if (cb->nsp_flags & 0x40) { /* SOCK_SEQPACKET reads to EOM */ if (sk->sk_type == SOCK_SEQPACKET) return 1; /* so does SOCK_STREAM unless WAITALL is specified */ if (!(flags & MSG_WAITALL)) return 1; } /* minimum data length for read exceeded */ if (len >= target) return 1; } return 0; } static int dn_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); struct sk_buff_head *queue = &sk->sk_receive_queue; size_t target = size > 1 ? 1 : 0; size_t copied = 0; int rv = 0; struct sk_buff *skb, *n; struct dn_skb_cb *cb = NULL; unsigned char eor = 0; long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) { rv = -EADDRNOTAVAIL; goto out; } if (sk->sk_shutdown & RCV_SHUTDOWN) { rv = 0; goto out; } rv = dn_check_state(sk, NULL, 0, &timeo, flags); if (rv) goto out; if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) { rv = -EOPNOTSUPP; goto out; } if (flags & MSG_OOB) queue = &scp->other_receive_queue; if (flags & MSG_WAITALL) target = size; /* * See if there is data ready to read, sleep if there isn't */ for(;;) { DEFINE_WAIT(wait); if (sk->sk_err) goto out; if (!skb_queue_empty(&scp->other_receive_queue)) { if (!(flags & MSG_OOB)) { msg->msg_flags |= MSG_OOB; if (!scp->other_report) { scp->other_report = 1; goto out; } } } if (scp->state != DN_RUN) goto out; if (signal_pending(current)) { rv = sock_intr_errno(timeo); goto out; } if (dn_data_ready(sk, queue, flags, target)) break; if (flags & MSG_DONTWAIT) { rv = -EWOULDBLOCK; goto out; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); finish_wait(sk_sleep(sk), &wait); } skb_queue_walk_safe(queue, skb, n) { unsigned int chunk = skb->len; cb = DN_SKB_CB(skb); if ((chunk + copied) > size) chunk = size - copied; if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { rv = -EFAULT; break; } copied += chunk; if (!(flags & MSG_PEEK)) skb_pull(skb, chunk); eor = cb->nsp_flags & 0x40; if (skb->len == 0) { skb_unlink(skb, queue); kfree_skb(skb); /* * N.B. Don't refer to skb or cb after this point * in loop. */ if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) { scp->flowloc_sw = DN_SEND; dn_nsp_send_link(sk, DN_SEND, 0); } } if (eor) { if (sk->sk_type == SOCK_SEQPACKET) break; if (!(flags & MSG_WAITALL)) break; } if (flags & MSG_OOB) break; if (copied >= target) break; } rv = copied; if (eor && (sk->sk_type == SOCK_SEQPACKET)) msg->msg_flags |= MSG_EOR; out: if (rv == 0) rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk); if ((rv >= 0) && msg->msg_name) { memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn)); msg->msg_namelen = sizeof(struct sockaddr_dn); } release_sock(sk); return rv; } static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags) { unsigned char fctype = scp->services_rem & NSP_FC_MASK; if (skb_queue_len(queue) >= scp->snd_window) return 1; if (fctype != NSP_FC_NONE) { if (flags & MSG_OOB) { if (scp->flowrem_oth == 0) return 1; } else { if (scp->flowrem_dat == 0) return 1; } } return 0; } /* * The DECnet spec requires that the "routing layer" accepts packets which * are at least 230 bytes in size. 
This excludes any headers which the NSP * layer might add, so we always assume that we'll be using the maximal * length header on data packets. The variation in length is due to the * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't * make much practical difference. */ unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu) { unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER; if (dev) { struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); mtu -= LL_RESERVED_SPACE(dev); if (dn_db->use_long) mtu -= 21; else mtu -= 6; mtu -= DN_MAX_NSP_DATA_HEADER; } else { /* * 21 = long header, 16 = guess at MAC header length */ mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16); } if (mtu > mss) mss = mtu; return mss; } static inline unsigned int dn_current_mss(struct sock *sk, int flags) { struct dst_entry *dst = __sk_dst_get(sk); struct dn_scp *scp = DN_SK(sk); int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem); /* Other data messages are limited to 16 bytes per packet */ if (flags & MSG_OOB) return 16; /* This works out the maximum size of segment we can send out */ if (dst) { u32 mtu = dst_mtu(dst); mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now); } return mss_now; } /* * N.B. We get the timeout wrong here, but then we always did get it * wrong before and this is another step along the road to correcting * it. It ought to get updated each time we pass through the routine, * but in practise it probably doesn't matter too much for now. */ static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk, unsigned long datalen, int noblock, int *errcode) { struct sk_buff *skb = sock_alloc_send_skb(sk, datalen, noblock, errcode); if (skb) { skb->protocol = htons(ETH_P_DNA_RT); skb->pkt_type = PACKET_OUTGOING; } return skb; } static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); size_t mss; struct sk_buff_head *queue = &scp->data_xmit_queue; int flags = msg->msg_flags; int err = 0; size_t sent = 0; int addr_len = msg->msg_namelen; struct sockaddr_dn *addr = (struct sockaddr_dn *)msg->msg_name; struct sk_buff *skb = NULL; struct dn_skb_cb *cb; size_t len; unsigned char fctype; long timeo; if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT)) return -EOPNOTSUPP; if (addr_len && (addr_len != sizeof(struct sockaddr_dn))) return -EINVAL; lock_sock(sk); timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); /* * The only difference between stream sockets and sequenced packet * sockets is that the stream sockets always behave as if MSG_EOR * has been set. */ if (sock->type == SOCK_STREAM) { if (flags & MSG_EOR) { err = -EINVAL; goto out; } flags |= MSG_EOR; } err = dn_check_state(sk, addr, addr_len, &timeo, flags); if (err) goto out_err; if (sk->sk_shutdown & SEND_SHUTDOWN) { err = -EPIPE; if (!(flags & MSG_NOSIGNAL)) send_sig(SIGPIPE, current, 0); goto out_err; } if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) dst_negative_advice(sk); mss = scp->segsize_rem; fctype = scp->services_rem & NSP_FC_MASK; mss = dn_current_mss(sk, flags); if (flags & MSG_OOB) { queue = &scp->other_xmit_queue; if (size > mss) { err = -EMSGSIZE; goto out; } } scp->persist_fxn = dn_nsp_xmit_timeout; while(sent < size) { err = sock_error(sk); if (err) goto out; if (signal_pending(current)) { err = sock_intr_errno(timeo); goto out; } /* * Calculate size that we wish to send. 
*/ len = size - sent; if (len > mss) len = mss; /* * Wait for queue size to go down below the window * size. */ if (dn_queue_too_long(scp, queue, flags)) { DEFINE_WAIT(wait); if (flags & MSG_DONTWAIT) { err = -EWOULDBLOCK; goto out; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); sk_wait_event(sk, &timeo, !dn_queue_too_long(scp, queue, flags)); clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); finish_wait(sk_sleep(sk), &wait); continue; } /* * Get a suitably sized skb. * 64 is a bit of a hack really, but its larger than any * link-layer headers and has served us well as a good * guess as to their real length. */ skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER, flags & MSG_DONTWAIT, &err); if (err) break; if (!skb) continue; cb = DN_SKB_CB(skb); skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER); if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { err = -EFAULT; goto out; } if (flags & MSG_OOB) { cb->nsp_flags = 0x30; if (fctype != NSP_FC_NONE) scp->flowrem_oth--; } else { cb->nsp_flags = 0x00; if (scp->seg_total == 0) cb->nsp_flags |= 0x20; scp->seg_total += len; if (((sent + len) == size) && (flags & MSG_EOR)) { cb->nsp_flags |= 0x40; scp->seg_total = 0; if (fctype == NSP_FC_SCMC) scp->flowrem_dat--; } if (fctype == NSP_FC_SRC) scp->flowrem_dat--; } sent += len; dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB); skb = NULL; scp->persist = dn_nsp_persist(sk); } out: kfree_skb(skb); release_sock(sk); return sent ? sent : err; out_err: err = sk_stream_error(sk, flags, err); release_sock(sk); return err; } static int dn_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = (struct net_device *)ptr; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; switch(event) { case NETDEV_UP: dn_dev_up(dev); break; case NETDEV_DOWN: dn_dev_down(dev); break; default: break; } return NOTIFY_DONE; } static struct notifier_block dn_dev_notifier = { .notifier_call = dn_device_event, }; extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); static struct packet_type dn_dix_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_DNA_RT), .func = dn_route_rcv, }; #ifdef CONFIG_PROC_FS struct dn_iter_state { int bucket; }; static struct sock *dn_socket_get_first(struct seq_file *seq) { struct dn_iter_state *state = seq->private; struct sock *n = NULL; for(state->bucket = 0; state->bucket < DN_SK_HASH_SIZE; ++state->bucket) { n = sk_head(&dn_sk_hash[state->bucket]); if (n) break; } return n; } static struct sock *dn_socket_get_next(struct seq_file *seq, struct sock *n) { struct dn_iter_state *state = seq->private; n = sk_next(n); try_again: if (n) goto out; if (++state->bucket >= DN_SK_HASH_SIZE) goto out; n = sk_head(&dn_sk_hash[state->bucket]); goto try_again; out: return n; } static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos) { struct sock *sk = dn_socket_get_first(seq); if (sk) { while(*pos && (sk = dn_socket_get_next(seq, sk))) --*pos; } return *pos ? NULL : sk; } static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos) { void *rc; read_lock_bh(&dn_hash_lock); rc = socket_get_idx(seq, &pos); if (!rc) { read_unlock_bh(&dn_hash_lock); } return rc; } static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos) { return *pos ? 
dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos) { void *rc; if (v == SEQ_START_TOKEN) { rc = dn_socket_get_idx(seq, 0); goto out; } rc = dn_socket_get_next(seq, v); if (rc) goto out; read_unlock_bh(&dn_hash_lock); out: ++*pos; return rc; } static void dn_socket_seq_stop(struct seq_file *seq, void *v) { if (v && v != SEQ_START_TOKEN) read_unlock_bh(&dn_hash_lock); } #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126) static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf) { int i; switch (le16_to_cpu(dn->sdn_objnamel)) { case 0: sprintf(buf, "%d", dn->sdn_objnum); break; default: for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) { buf[i] = dn->sdn_objname[i]; if (IS_NOT_PRINTABLE(buf[i])) buf[i] = '.'; } buf[i] = 0; } } static char *dn_state2asc(unsigned char state) { switch(state) { case DN_O: return "OPEN"; case DN_CR: return " CR"; case DN_DR: return " DR"; case DN_DRC: return " DRC"; case DN_CC: return " CC"; case DN_CI: return " CI"; case DN_NR: return " NR"; case DN_NC: return " NC"; case DN_CD: return " CD"; case DN_RJ: return " RJ"; case DN_RUN: return " RUN"; case DN_DI: return " DI"; case DN_DIC: return " DIC"; case DN_DN: return " DN"; case DN_CL: return " CL"; case DN_CN: return " CN"; } return "????"; } static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk) { struct dn_scp *scp = DN_SK(sk); char buf1[DN_ASCBUF_LEN]; char buf2[DN_ASCBUF_LEN]; char local_object[DN_MAXOBJL+3]; char remote_object[DN_MAXOBJL+3]; dn_printable_object(&scp->addr, local_object); dn_printable_object(&scp->peer, remote_object); seq_printf(seq, "%6s/%04X %04d:%04d %04d:%04d %01d %-16s " "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n", dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1), scp->addrloc, scp->numdat, scp->numoth, scp->ackxmt_dat, scp->ackxmt_oth, scp->flowloc_sw, local_object, dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2), scp->addrrem, scp->numdat_rcv, scp->numoth_rcv, scp->ackrcv_dat, scp->ackrcv_oth, scp->flowrem_sw, remote_object, dn_state2asc(scp->state), ((scp->accept_mode == ACC_IMMED) ? 
"IMMED" : "DEFER")); } static int dn_socket_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_puts(seq, "Local Remote\n"); } else { dn_socket_format_entry(seq, v); } return 0; } static const struct seq_operations dn_socket_seq_ops = { .start = dn_socket_seq_start, .next = dn_socket_seq_next, .stop = dn_socket_seq_stop, .show = dn_socket_seq_show, }; static int dn_socket_seq_open(struct inode *inode, struct file *file) { return seq_open_private(file, &dn_socket_seq_ops, sizeof(struct dn_iter_state)); } static const struct file_operations dn_socket_seq_fops = { .owner = THIS_MODULE, .open = dn_socket_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif static const struct net_proto_family dn_family_ops = { .family = AF_DECnet, .create = dn_create, .owner = THIS_MODULE, }; static const struct proto_ops dn_proto_ops = { .family = AF_DECnet, .owner = THIS_MODULE, .release = dn_release, .bind = dn_bind, .connect = dn_connect, .socketpair = sock_no_socketpair, .accept = dn_accept, .getname = dn_getname, .poll = dn_poll, .ioctl = dn_ioctl, .listen = dn_listen, .shutdown = dn_shutdown, .setsockopt = dn_setsockopt, .getsockopt = dn_getsockopt, .sendmsg = dn_sendmsg, .recvmsg = dn_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; void dn_register_sysctl(void); void dn_unregister_sysctl(void); MODULE_DESCRIPTION("The Linux DECnet Network Protocol"); MODULE_AUTHOR("Linux DECnet Project Team"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_DECnet); static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n"; static int __init decnet_init(void) { int rc; printk(banner); rc = proto_register(&dn_proto, 1); if (rc != 0) goto out; dn_neigh_init(); dn_dev_init(); dn_route_init(); dn_fib_init(); sock_register(&dn_family_ops); dev_add_pack(&dn_dix_packet_type); register_netdevice_notifier(&dn_dev_notifier); proc_net_fops_create(&init_net, "decnet", S_IRUGO, &dn_socket_seq_fops); dn_register_sysctl(); out: return rc; } module_init(decnet_init); /* * Prevent DECnet module unloading until its fixed properly. * Requires an audit of the code to check for memory leaks and * initialisation problems etc. */ #if 0 static void __exit decnet_exit(void) { sock_unregister(AF_DECnet); rtnl_unregister_all(PF_DECnet); dev_remove_pack(&dn_dix_packet_type); dn_unregister_sysctl(); unregister_netdevice_notifier(&dn_dev_notifier); dn_route_cleanup(); dn_dev_cleanup(); dn_neigh_cleanup(); dn_fib_cleanup(); proc_net_remove(&init_net, "decnet"); proto_unregister(&dn_proto); rcu_barrier_bh(); /* Wait for completion of call_rcu_bh()'s */ } module_exit(decnet_exit); #endif
gpl-2.0
limeng12/r-source
src/library/grid/src/state.c
15
10006
/* * R : A Computer Language for Statistical Data Analysis * Copyright (C) 2001-3 Paul Murrell * 2003-5 The R Core Team * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, a copy is available at * http://www.r-project.org/Licenses/ */ #include "grid.h" int gridRegisterIndex; /* The gridSystemState (per device) consists of * GSS_DEVSIZE 0 = current size of device * GSS_CURRLOC 1 = current location of grid "pen" * GSS_DL 2 = grid display list * GSS_DLINDEX 3 = display list index * GSS_DLON 4 = is the display list on? * GSS_GPAR 5 = gpar settings * GSS_GPSAVED 6 = previous gpar settings * GSS_VP 7 = viewport * GSS_GLOBALINDEX 8 = index of this system state in the global list of states * GSS_GRIDDEVICE 9 = does this device contain grid output? * GSS_PREVLOC 10 = previous location of grid "pen" * GSS_ENGINEDLON 11 = are we using the graphics engine's display list? * GSS_CURRGROB 12 = current grob being drawn (for determining * the list of grobs to search when evaluating a grobwidth/height * unit via gPath) * GSS_ENGINERECORDING 13 = are we already inside a .Call.graphics call? * Used by grid.Call.graphics to avoid unnecessary recording on * engine display list * [GSS_ASK 14 = should we prompt the user before starting a new page? * Replaced by per-device setting as from R 2.7.0.] * GSS_SCALE 15 = a scale or "zoom" factor for all output * (to support "fit to window" resizing on windows device) * * NOTE: if you add to this list you MUST change the size of the vector * allocated in createGridSystemState() below. 
*/ SEXP createGridSystemState() { return allocVector(VECSXP, 16); } void initDL(pGEDevDesc dd) { SEXP dl, dlindex; SEXP vp = gridStateElement(dd, GSS_VP); SEXP gsd = (SEXP) dd->gesd[gridRegisterIndex]->systemSpecific; /* The top-level viewport goes at the start of the display list */ PROTECT(dl = allocVector(VECSXP, 100)); SET_VECTOR_ELT(dl, 0, vp); SET_VECTOR_ELT(gsd, GSS_DL, dl); PROTECT(dlindex = allocVector(INTSXP, 1)); INTEGER(dlindex)[0] = 1; SET_VECTOR_ELT(gsd, GSS_DLINDEX, dlindex); UNPROTECT(2); } /* * This is used to init some bits of the system state * Called when a graphics engine redraw is about to occur * NOTE that it does not init all of the state, in particular, * the display list is not initialised here (see initDL), * nor is the ROOT viewport (see initVP), * nor is the current gpar (see initGP) */ void initOtherState(pGEDevDesc dd) { SEXP currloc, prevloc, recording; SEXP state = (SEXP) dd->gesd[gridRegisterIndex]->systemSpecific; currloc = VECTOR_ELT(state, GSS_CURRLOC); REAL(currloc)[0] = NA_REAL; REAL(currloc)[1] = NA_REAL; prevloc = VECTOR_ELT(state, GSS_PREVLOC); REAL(prevloc)[0] = NA_REAL; REAL(prevloc)[1] = NA_REAL; SET_VECTOR_ELT(state, GSS_CURRGROB, R_NilValue); recording = VECTOR_ELT(state, GSS_ENGINERECORDING); LOGICAL(recording)[0] = FALSE; SET_VECTOR_ELT(state, GSS_ENGINERECORDING, recording); } void fillGridSystemState(SEXP state, pGEDevDesc dd) { SEXP devsize, currloc, prevloc; PROTECT(state); devsize = allocVector(REALSXP, 2); REAL(devsize)[0] = 0; REAL(devsize)[1] = 0; SET_VECTOR_ELT(state, GSS_DEVSIZE, devsize); /* "current location" * Initial setting relies on the fact that all values sent to devices * are in INCHES; so (0, 0) is the bottom-left corner of the device. */ currloc = allocVector(REALSXP, 2); REAL(currloc)[0] = NA_REAL; REAL(currloc)[1] = NA_REAL; SET_VECTOR_ELT(state, GSS_CURRLOC, currloc); prevloc = allocVector(REALSXP, 2); REAL(prevloc)[0] = NA_REAL; REAL(prevloc)[1] = NA_REAL; SET_VECTOR_ELT(state, GSS_PREVLOC, prevloc); SET_VECTOR_ELT(state, GSS_DLON, ScalarLogical(TRUE)); SET_VECTOR_ELT(state, GSS_ENGINEDLON, ScalarLogical(TRUE)); SET_VECTOR_ELT(state, GSS_CURRGROB, R_NilValue); SET_VECTOR_ELT(state, GSS_ENGINERECORDING, ScalarLogical(FALSE)); initGPar(dd); SET_VECTOR_ELT(state, GSS_GPSAVED, R_NilValue); /* Do NOT initialise top-level viewport or grid display list for * this device until there is some grid output */ SET_VECTOR_ELT(state, GSS_GLOBALINDEX, R_NilValue); /* Note that no grid output has occurred on the device yet. */ SET_VECTOR_ELT(state, GSS_GRIDDEVICE, ScalarLogical(FALSE)); #if 0 SET_VECTOR_ELT(state, GSS_ASK, ScalarLogical(dd->ask)); #endif SET_VECTOR_ELT(state, GSS_SCALE, ScalarReal(1.0)); UNPROTECT(1); } SEXP gridStateElement(pGEDevDesc dd, int elementIndex) { return VECTOR_ELT((SEXP) dd->gesd[gridRegisterIndex]->systemSpecific, elementIndex); } void setGridStateElement(pGEDevDesc dd, int elementIndex, SEXP value) { SET_VECTOR_ELT((SEXP) dd->gesd[gridRegisterIndex]->systemSpecific, elementIndex, value); } static void deglobaliseState(SEXP state) { int index = INTEGER(VECTOR_ELT(state, GSS_GLOBALINDEX))[0]; SET_VECTOR_ELT(findVar(install(".GRID.STATE"), R_gridEvalEnv), index, R_NilValue); } static int findStateSlot() { int i; int result = -1; SEXP globalstate = findVar(install(".GRID.STATE"), R_gridEvalEnv); for (i = 0; i < length(globalstate); i++) if (VECTOR_ELT(globalstate, i) == R_NilValue) { result = i; break; } if (result < 0) error(_("unable to store 'grid' state. 
Too many devices open?")); return result; } static void globaliseState(SEXP state) { int index = findStateSlot(); SEXP globalstate, indexsxp; PROTECT(globalstate = findVar(install(".GRID.STATE"), R_gridEvalEnv)); /* Record the index for deglobalisation */ PROTECT(indexsxp = allocVector(INTSXP, 1)); INTEGER(indexsxp)[0] = index; SET_VECTOR_ELT(state, GSS_GLOBALINDEX, indexsxp); SET_VECTOR_ELT(globalstate, index, state); UNPROTECT(2); } SEXP gridCallback(GEevent task, pGEDevDesc dd, SEXP data) { SEXP result = R_NilValue; SEXP valid, scale; SEXP gridState; GESystemDesc *sd; SEXP currentgp; SEXP gsd; SEXP devsize; R_GE_gcontext gc; switch (task) { case GE_InitState: /* Create the initial grid state for a device */ PROTECT(gridState = createGridSystemState()); /* Store that state with the device for easy retrieval */ sd = dd->gesd[gridRegisterIndex]; sd->systemSpecific = (void*) gridState; /* Initialise the grid state for a device */ fillGridSystemState(gridState, dd); /* Also store the state beneath a top-level variable so * that it does not get garbage-collected */ globaliseState(gridState); /* Indicate success */ result = R_BlankString; UNPROTECT(1); break; case GE_FinaliseState: sd = dd->gesd[gridRegisterIndex]; /* Simply detach the system state from the global variable * and it will be garbage-collected */ deglobaliseState((SEXP) sd->systemSpecific); /* Also set the device pointer to NULL */ sd->systemSpecific = NULL; break; case GE_SaveState: break; case GE_RestoreState: gsd = (SEXP) dd->gesd[gridRegisterIndex]->systemSpecific; PROTECT(devsize = allocVector(REALSXP, 2)); getDeviceSize(dd, &(REAL(devsize)[0]), &(REAL(devsize)[1])); SET_VECTOR_ELT(gsd, GSS_DEVSIZE, devsize); UNPROTECT(1); /* Only bother to do any grid drawing setup * if there has been grid output * on this device. */ if (LOGICAL(gridStateElement(dd, GSS_GRIDDEVICE))[0]) { if (LOGICAL(gridStateElement(dd, GSS_ENGINEDLON))[0]) { /* The graphics engine is about to replay the display list * So we "clear" the device and reset the grid graphics state */ /* There are two main situations in which this occurs: * (i) a screen is resized * In this case, it is ok-ish to do a GENewPage * because that has the desired effect and no * undesirable effects because it only happens on * a screen device -- a new page is the same as * clearing the screen * (ii) output on one device is copied to another device * In this case, a GENewPage is NOT a good thing, however, * here we will start with a new device and it will not * have any grid output so this section will not get called * SO we will not get any unwanted blank pages. 
* * All this is a bit fragile; ultimately, what would be ideal * is a dev->clearPage primitive for all devices in addition * to the dev->newPage primitive */ currentgp = gridStateElement(dd, GSS_GPAR); gcontextFromgpar(currentgp, 0, &gc, dd); GENewPage(&gc, dd); initGPar(dd); initVP(dd); initOtherState(dd); } else { /* * If we have turned off the graphics engine's display list * then we have to redraw the scene ourselves */ SEXP fcall; PROTECT(fcall = lang1(install("draw.all"))); eval(fcall, R_gridEvalEnv); UNPROTECT(1); } } break; case GE_CopyState: break; case GE_CheckPlot: PROTECT(valid = allocVector(LGLSXP, 1)); LOGICAL(valid)[0] = TRUE; UNPROTECT(1); result = valid; case GE_SaveSnapshotState: break; case GE_RestoreSnapshotState: break; case GE_ScalePS: /* * data is a numeric scale factor */ PROTECT(scale = allocVector(REALSXP, 1)); REAL(scale)[0] = REAL(gridStateElement(dd, GSS_SCALE))[0]* REAL(data)[0]; setGridStateElement(dd, GSS_SCALE, scale); UNPROTECT(1); break; } return result; }
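/*
 * A hypothetical standalone sketch (not part of the R sources) of the
 * slot-registry pattern used by findStateSlot(), globaliseState() and
 * deglobaliseState() above: the first empty entry in a fixed global
 * table is claimed, the chosen index is handed back to the owner (grid
 * records it in GSS_GLOBALINDEX), and releasing a slot simply writes
 * the empty marker back. Names and sizes here are invented for the
 * illustration; kept inside #if 0 so it stays out of any build.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

#define MAX_STATES 64

static void *global_state[MAX_STATES];	/* NULL marks a free slot */

/* Mirrors findStateSlot(): first free slot, or -1 when the table is full. */
static int find_state_slot(void)
{
	int i;
	for (i = 0; i < MAX_STATES; i++)
		if (global_state[i] == NULL)
			return i;
	return -1;	/* "unable to store state. Too many devices open?" */
}

static int globalise(void *state)
{
	int index = find_state_slot();
	if (index >= 0)
		global_state[index] = state;
	return index;	/* caller keeps this, like GSS_GLOBALINDEX */
}

static void deglobalise(int index)
{
	global_state[index] = NULL;	/* slot becomes reusable */
}

int main(void)
{
	int dev_state = 42;
	int idx = globalise(&dev_state);
	printf("stored at slot %d\n", idx);
	deglobalise(idx);
	return 0;
}
#endif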
gpl-2.0
amarchandole/capprobe_mptcp
drivers/md/dm-snap-persistent.c
15
22551
/* * Copyright (C) 2001-2002 Sistina Software (UK) Limited. * Copyright (C) 2006-2008 Red Hat GmbH * * This file is released under the GPL. */ #include "dm-exception-store.h" #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/dm-io.h> #include "dm-bufio.h" #define DM_MSG_PREFIX "persistent snapshot" #define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */ #define DM_PREFETCH_CHUNKS 12 /*----------------------------------------------------------------- * Persistent snapshots, by persistent we mean that the snapshot * will survive a reboot. *---------------------------------------------------------------*/ /* * We need to store a record of which parts of the origin have * been copied to the snapshot device. The snapshot code * requires that we copy exception chunks to chunk aligned areas * of the COW store. It makes sense, therefore, to store the * metadata in chunk size blocks. * * There is no backward or forward compatibility implemented, * snapshots with different disk versions than the kernel will * not be usable. It is expected that "lvcreate" will blank out * the start of a fresh COW device before calling the snapshot * constructor. * * The first chunk of the COW device just contains the header. * After this there is a chunk filled with exception metadata, * followed by as many exception chunks as can fit in the * metadata areas. * * All on disk structures are in little-endian format. The end * of the exceptions info is indicated by an exception with a * new_chunk of 0, which is invalid since it would point to the * header chunk. */ /* * Magic for persistent snapshots: "SnAp" - Feeble isn't it. */ #define SNAP_MAGIC 0x70416e53 /* * The on-disk version of the metadata. */ #define SNAPSHOT_DISK_VERSION 1 #define NUM_SNAPSHOT_HDR_CHUNKS 1 struct disk_header { __le32 magic; /* * Is this snapshot valid. There is no way of recovering * an invalid snapshot. */ __le32 valid; /* * Simple, incrementing version. No backward * compatibility. */ __le32 version; /* In sectors */ __le32 chunk_size; } __packed; struct disk_exception { __le64 old_chunk; __le64 new_chunk; } __packed; struct core_exception { uint64_t old_chunk; uint64_t new_chunk; }; struct commit_callback { void (*callback)(void *, int success); void *context; }; /* * The top level structure for a persistent exception store. */ struct pstore { struct dm_exception_store *store; int version; int valid; uint32_t exceptions_per_area; /* * Now that we have an asynchronous kcopyd there is no * need for large chunk sizes, so it won't hurt to have a * whole chunk's worth of metadata in memory at once. */ void *area; /* * An area of zeros used to clear the next area. */ void *zero_area; /* * An area used for header. The header can be written * concurrently with metadata (when invalidating the snapshot), * so it needs a separate buffer. */ void *header_area; /* * Used to keep track of which metadata area the data in * 'chunk' refers to. */ chunk_t current_area; /* * The next free chunk for an exception. * * When creating exceptions, all the chunks here and above are * free. It holds the next chunk to be allocated. On rare * occasions (e.g. after a system crash) holes can be left in * the exception store because chunks can be committed out of * order. * * When merging exceptions, it does not necessarily mean all the * chunks here and above are free. It holds the value it would * have held if all chunks had been committed in order of * allocation. 
Consequently the value may occasionally be * slightly too low, but since it's only used for 'status' and * it can never reach its minimum value too early this doesn't * matter. */ chunk_t next_free; /* * The index of next free exception in the current * metadata area. */ uint32_t current_committed; atomic_t pending_count; uint32_t callback_count; struct commit_callback *callbacks; struct dm_io_client *io_client; struct workqueue_struct *metadata_wq; }; static int alloc_area(struct pstore *ps) { int r = -ENOMEM; size_t len; len = ps->store->chunk_size << SECTOR_SHIFT; /* * Allocate the chunk_size block of memory that will hold * a single metadata area. */ ps->area = vmalloc(len); if (!ps->area) goto err_area; ps->zero_area = vzalloc(len); if (!ps->zero_area) goto err_zero_area; ps->header_area = vmalloc(len); if (!ps->header_area) goto err_header_area; return 0; err_header_area: vfree(ps->zero_area); err_zero_area: vfree(ps->area); err_area: return r; } static void free_area(struct pstore *ps) { vfree(ps->area); ps->area = NULL; vfree(ps->zero_area); ps->zero_area = NULL; vfree(ps->header_area); ps->header_area = NULL; } struct mdata_req { struct dm_io_region *where; struct dm_io_request *io_req; struct work_struct work; int result; }; static void do_metadata(struct work_struct *work) { struct mdata_req *req = container_of(work, struct mdata_req, work); req->result = dm_io(req->io_req, 1, req->where, NULL); } /* * Read or write a chunk aligned and sized block of data from a device. */ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, int metadata) { struct dm_io_region where = { .bdev = dm_snap_cow(ps->store->snap)->bdev, .sector = ps->store->chunk_size * chunk, .count = ps->store->chunk_size, }; struct dm_io_request io_req = { .bi_rw = rw, .mem.type = DM_IO_VMA, .mem.ptr.vma = area, .client = ps->io_client, .notify.fn = NULL, }; struct mdata_req req; if (!metadata) return dm_io(&io_req, 1, &where, NULL); req.where = &where; req.io_req = &io_req; /* * Issue the synchronous I/O from a different thread * to avoid generic_make_request recursion. */ INIT_WORK_ONSTACK(&req.work, do_metadata); queue_work(ps->metadata_wq, &req.work); flush_workqueue(ps->metadata_wq); destroy_work_on_stack(&req.work); return req.result; } /* * Convert a metadata area index to a chunk index. */ static chunk_t area_location(struct pstore *ps, chunk_t area) { return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area); } static void skip_metadata(struct pstore *ps) { uint32_t stride = ps->exceptions_per_area + 1; chunk_t next_free = ps->next_free; if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS) ps->next_free++; } /* * Read or write a metadata area. Remembering to skip the first * chunk which holds the header. 
*/ static int area_io(struct pstore *ps, int rw) { int r; chunk_t chunk; chunk = area_location(ps, ps->current_area); r = chunk_io(ps, ps->area, chunk, rw, 0); if (r) return r; return 0; } static void zero_memory_area(struct pstore *ps) { memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT); } static int zero_disk_area(struct pstore *ps, chunk_t area) { return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0); } static int read_header(struct pstore *ps, int *new_snapshot) { int r; struct disk_header *dh; unsigned chunk_size; int chunk_size_supplied = 1; char *chunk_err; /* * Use default chunk size (or logical_block_size, if larger) * if none supplied */ if (!ps->store->chunk_size) { ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, bdev_logical_block_size(dm_snap_cow(ps->store->snap)-> bdev) >> 9); ps->store->chunk_mask = ps->store->chunk_size - 1; ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1; chunk_size_supplied = 0; } ps->io_client = dm_io_client_create(); if (IS_ERR(ps->io_client)) return PTR_ERR(ps->io_client); r = alloc_area(ps); if (r) return r; r = chunk_io(ps, ps->header_area, 0, READ, 1); if (r) goto bad; dh = ps->header_area; if (le32_to_cpu(dh->magic) == 0) { *new_snapshot = 1; return 0; } if (le32_to_cpu(dh->magic) != SNAP_MAGIC) { DMWARN("Invalid or corrupt snapshot"); r = -ENXIO; goto bad; } *new_snapshot = 0; ps->valid = le32_to_cpu(dh->valid); ps->version = le32_to_cpu(dh->version); chunk_size = le32_to_cpu(dh->chunk_size); if (ps->store->chunk_size == chunk_size) return 0; if (chunk_size_supplied) DMWARN("chunk size %u in device metadata overrides " "table chunk size of %u.", chunk_size, ps->store->chunk_size); /* We had a bogus chunk_size. Fix stuff up. */ free_area(ps); r = dm_exception_store_set_chunk_size(ps->store, chunk_size, &chunk_err); if (r) { DMERR("invalid on-disk chunk size %u: %s.", chunk_size, chunk_err); return r; } r = alloc_area(ps); return r; bad: free_area(ps); return r; } static int write_header(struct pstore *ps) { struct disk_header *dh; memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT); dh = ps->header_area; dh->magic = cpu_to_le32(SNAP_MAGIC); dh->valid = cpu_to_le32(ps->valid); dh->version = cpu_to_le32(ps->version); dh->chunk_size = cpu_to_le32(ps->store->chunk_size); return chunk_io(ps, ps->header_area, 0, WRITE, 1); } /* * Access functions for the disk exceptions, these do the endian conversions. */ static struct disk_exception *get_exception(struct pstore *ps, void *ps_area, uint32_t index) { BUG_ON(index >= ps->exceptions_per_area); return ((struct disk_exception *) ps_area) + index; } static void read_exception(struct pstore *ps, void *ps_area, uint32_t index, struct core_exception *result) { struct disk_exception *de = get_exception(ps, ps_area, index); /* copy it */ result->old_chunk = le64_to_cpu(de->old_chunk); result->new_chunk = le64_to_cpu(de->new_chunk); } static void write_exception(struct pstore *ps, uint32_t index, struct core_exception *e) { struct disk_exception *de = get_exception(ps, ps->area, index); /* copy it */ de->old_chunk = cpu_to_le64(e->old_chunk); de->new_chunk = cpu_to_le64(e->new_chunk); } static void clear_exception(struct pstore *ps, uint32_t index) { struct disk_exception *de = get_exception(ps, ps->area, index); /* clear it */ de->old_chunk = 0; de->new_chunk = 0; } /* * Registers the exceptions that are present in the current area. * 'full' is filled in to indicate if the area has been * filled. 
*/ static int insert_exceptions(struct pstore *ps, void *ps_area, int (*callback)(void *callback_context, chunk_t old, chunk_t new), void *callback_context, int *full) { int r; unsigned int i; struct core_exception e; /* presume the area is full */ *full = 1; for (i = 0; i < ps->exceptions_per_area; i++) { read_exception(ps, ps_area, i, &e); /* * If the new_chunk is pointing at the start of * the COW device, where the first metadata area * is we know that we've hit the end of the * exceptions. Therefore the area is not full. */ if (e.new_chunk == 0LL) { ps->current_committed = i; *full = 0; break; } /* * Keep track of the start of the free chunks. */ if (ps->next_free <= e.new_chunk) ps->next_free = e.new_chunk + 1; /* * Otherwise we add the exception to the snapshot. */ r = callback(callback_context, e.old_chunk, e.new_chunk); if (r) return r; } return 0; } static int read_exceptions(struct pstore *ps, int (*callback)(void *callback_context, chunk_t old, chunk_t new), void *callback_context) { int r, full = 1; struct dm_bufio_client *client; chunk_t prefetch_area = 0; client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev, ps->store->chunk_size << SECTOR_SHIFT, 1, 0, NULL, NULL); if (IS_ERR(client)) return PTR_ERR(client); /* * Setup for one current buffer + desired readahead buffers. */ dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS); /* * Keep reading chunks and inserting exceptions until * we find a partially full area. */ for (ps->current_area = 0; full; ps->current_area++) { struct dm_buffer *bp; void *area; chunk_t chunk; if (unlikely(prefetch_area < ps->current_area)) prefetch_area = ps->current_area; if (DM_PREFETCH_CHUNKS) do { chunk_t pf_chunk = area_location(ps, prefetch_area); if (unlikely(pf_chunk >= dm_bufio_get_device_size(client))) break; dm_bufio_prefetch(client, pf_chunk, 1); prefetch_area++; if (unlikely(!prefetch_area)) break; } while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS); chunk = area_location(ps, ps->current_area); area = dm_bufio_read(client, chunk, &bp); if (unlikely(IS_ERR(area))) { r = PTR_ERR(area); goto ret_destroy_bufio; } r = insert_exceptions(ps, area, callback, callback_context, &full); if (!full) memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT); dm_bufio_release(bp); dm_bufio_forget(client, chunk); if (unlikely(r)) goto ret_destroy_bufio; } ps->current_area--; skip_metadata(ps); r = 0; ret_destroy_bufio: dm_bufio_client_destroy(client); return r; } static struct pstore *get_info(struct dm_exception_store *store) { return (struct pstore *) store->context; } static void persistent_usage(struct dm_exception_store *store, sector_t *total_sectors, sector_t *sectors_allocated, sector_t *metadata_sectors) { struct pstore *ps = get_info(store); *sectors_allocated = ps->next_free * store->chunk_size; *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev); /* * First chunk is the fixed header. * Then there are (ps->current_area + 1) metadata chunks, each one * separated from the next by ps->exceptions_per_area data chunks. 
*/ *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) * store->chunk_size; } static void persistent_dtr(struct dm_exception_store *store) { struct pstore *ps = get_info(store); destroy_workqueue(ps->metadata_wq); /* Created in read_header */ if (ps->io_client) dm_io_client_destroy(ps->io_client); free_area(ps); /* Allocated in persistent_read_metadata */ vfree(ps->callbacks); kfree(ps); } static int persistent_read_metadata(struct dm_exception_store *store, int (*callback)(void *callback_context, chunk_t old, chunk_t new), void *callback_context) { int r, uninitialized_var(new_snapshot); struct pstore *ps = get_info(store); /* * Read the snapshot header. */ r = read_header(ps, &new_snapshot); if (r) return r; /* * Now we know correct chunk_size, complete the initialisation. */ ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) / sizeof(struct disk_exception); ps->callbacks = dm_vcalloc(ps->exceptions_per_area, sizeof(*ps->callbacks)); if (!ps->callbacks) return -ENOMEM; /* * Do we need to setup a new snapshot ? */ if (new_snapshot) { r = write_header(ps); if (r) { DMWARN("write_header failed"); return r; } ps->current_area = 0; zero_memory_area(ps); r = zero_disk_area(ps, 0); if (r) DMWARN("zero_disk_area(0) failed"); return r; } /* * Sanity checks. */ if (ps->version != SNAPSHOT_DISK_VERSION) { DMWARN("unable to handle snapshot disk version %d", ps->version); return -EINVAL; } /* * Metadata are valid, but snapshot is invalidated */ if (!ps->valid) return 1; /* * Read the metadata. */ r = read_exceptions(ps, callback, callback_context); return r; } static int persistent_prepare_exception(struct dm_exception_store *store, struct dm_exception *e) { struct pstore *ps = get_info(store); sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); /* Is there enough room ? */ if (size < ((ps->next_free + 1) * store->chunk_size)) return -ENOSPC; e->new_chunk = ps->next_free; /* * Move onto the next free pending, making sure to take * into account the location of the metadata chunks. */ ps->next_free++; skip_metadata(ps); atomic_inc(&ps->pending_count); return 0; } static void persistent_commit_exception(struct dm_exception_store *store, struct dm_exception *e, int valid, void (*callback) (void *, int success), void *callback_context) { unsigned int i; struct pstore *ps = get_info(store); struct core_exception ce; struct commit_callback *cb; if (!valid) ps->valid = 0; ce.old_chunk = e->old_chunk; ce.new_chunk = e->new_chunk; write_exception(ps, ps->current_committed++, &ce); /* * Add the callback to the back of the array. This code * is the only place where the callback array is * manipulated, and we know that it will never be called * multiple times concurrently. */ cb = ps->callbacks + ps->callback_count++; cb->callback = callback; cb->context = callback_context; /* * If there are exceptions in flight and we have not yet * filled this metadata area there's nothing more to do. */ if (!atomic_dec_and_test(&ps->pending_count) && (ps->current_committed != ps->exceptions_per_area)) return; /* * If we completely filled the current area, then wipe the next one. */ if ((ps->current_committed == ps->exceptions_per_area) && zero_disk_area(ps, ps->current_area + 1)) ps->valid = 0; /* * Commit exceptions to disk. */ if (ps->valid && area_io(ps, WRITE_FLUSH_FUA)) ps->valid = 0; /* * Advance to the next area if this one is full. 
*/ if (ps->current_committed == ps->exceptions_per_area) { ps->current_committed = 0; ps->current_area++; zero_memory_area(ps); } for (i = 0; i < ps->callback_count; i++) { cb = ps->callbacks + i; cb->callback(cb->context, ps->valid); } ps->callback_count = 0; } static int persistent_prepare_merge(struct dm_exception_store *store, chunk_t *last_old_chunk, chunk_t *last_new_chunk) { struct pstore *ps = get_info(store); struct core_exception ce; int nr_consecutive; int r; /* * When current area is empty, move back to preceding area. */ if (!ps->current_committed) { /* * Have we finished? */ if (!ps->current_area) return 0; ps->current_area--; r = area_io(ps, READ); if (r < 0) return r; ps->current_committed = ps->exceptions_per_area; } read_exception(ps, ps->area, ps->current_committed - 1, &ce); *last_old_chunk = ce.old_chunk; *last_new_chunk = ce.new_chunk; /* * Find number of consecutive chunks within the current area, * working backwards. */ for (nr_consecutive = 1; nr_consecutive < ps->current_committed; nr_consecutive++) { read_exception(ps, ps->area, ps->current_committed - 1 - nr_consecutive, &ce); if (ce.old_chunk != *last_old_chunk - nr_consecutive || ce.new_chunk != *last_new_chunk - nr_consecutive) break; } return nr_consecutive; } static int persistent_commit_merge(struct dm_exception_store *store, int nr_merged) { int r, i; struct pstore *ps = get_info(store); BUG_ON(nr_merged > ps->current_committed); for (i = 0; i < nr_merged; i++) clear_exception(ps, ps->current_committed - 1 - i); r = area_io(ps, WRITE_FLUSH_FUA); if (r < 0) return r; ps->current_committed -= nr_merged; /* * At this stage, only persistent_usage() uses ps->next_free, so * we make no attempt to keep ps->next_free strictly accurate * as exceptions may have been committed out-of-order originally. * Once a snapshot has become merging, we set it to the value it * would have held had all the exceptions been committed in order. * * ps->current_area does not get reduced by prepare_merge() until * after commit_merge() has removed the nr_merged previous exceptions. 
*/ ps->next_free = area_location(ps, ps->current_area) + ps->current_committed + 1; return 0; } static void persistent_drop_snapshot(struct dm_exception_store *store) { struct pstore *ps = get_info(store); ps->valid = 0; if (write_header(ps)) DMWARN("write header failed"); } static int persistent_ctr(struct dm_exception_store *store, unsigned argc, char **argv) { struct pstore *ps; /* allocate the pstore */ ps = kzalloc(sizeof(*ps), GFP_KERNEL); if (!ps) return -ENOMEM; ps->store = store; ps->valid = 1; ps->version = SNAPSHOT_DISK_VERSION; ps->area = NULL; ps->zero_area = NULL; ps->header_area = NULL; ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */ ps->current_committed = 0; ps->callback_count = 0; atomic_set(&ps->pending_count, 0); ps->callbacks = NULL; ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0); if (!ps->metadata_wq) { kfree(ps); DMERR("couldn't start header metadata update thread"); return -ENOMEM; } store->context = ps; return 0; } static unsigned persistent_status(struct dm_exception_store *store, status_type_t status, char *result, unsigned maxlen) { unsigned sz = 0; switch (status) { case STATUSTYPE_INFO: break; case STATUSTYPE_TABLE: DMEMIT(" P %llu", (unsigned long long)store->chunk_size); } return sz; } static struct dm_exception_store_type _persistent_type = { .name = "persistent", .module = THIS_MODULE, .ctr = persistent_ctr, .dtr = persistent_dtr, .read_metadata = persistent_read_metadata, .prepare_exception = persistent_prepare_exception, .commit_exception = persistent_commit_exception, .prepare_merge = persistent_prepare_merge, .commit_merge = persistent_commit_merge, .drop_snapshot = persistent_drop_snapshot, .usage = persistent_usage, .status = persistent_status, }; static struct dm_exception_store_type _persistent_compat_type = { .name = "P", .module = THIS_MODULE, .ctr = persistent_ctr, .dtr = persistent_dtr, .read_metadata = persistent_read_metadata, .prepare_exception = persistent_prepare_exception, .commit_exception = persistent_commit_exception, .prepare_merge = persistent_prepare_merge, .commit_merge = persistent_commit_merge, .drop_snapshot = persistent_drop_snapshot, .usage = persistent_usage, .status = persistent_status, }; int dm_persistent_snapshot_init(void) { int r; r = dm_exception_store_type_register(&_persistent_type); if (r) { DMERR("Unable to register persistent exception store type"); return r; } r = dm_exception_store_type_register(&_persistent_compat_type); if (r) { DMERR("Unable to register old-style persistent exception " "store type"); dm_exception_store_type_unregister(&_persistent_type); return r; } return r; } void dm_persistent_snapshot_exit(void) { dm_exception_store_type_unregister(&_persistent_type); dm_exception_store_type_unregister(&_persistent_compat_type); }
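/*
 * A hypothetical standalone sketch (not from the kernel sources) of the
 * on-disk layout arithmetic used above: chunk 0 holds the header, and
 * metadata area N sits at NUM_SNAPSHOT_HDR_CHUNKS + (exceptions_per_area + 1) * N,
 * each metadata chunk being followed by exceptions_per_area data chunks.
 * That is also why skip_metadata() bumps next_free whenever it lands on a
 * metadata chunk. The 1024 figure assumes the 16KB default chunk size
 * divided by the 16-byte struct disk_exception. Kept inside #if 0 so it
 * stays out of any build.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define HDR_CHUNKS 1	/* NUM_SNAPSHOT_HDR_CHUNKS */

static uint64_t area_location_demo(uint32_t exceptions_per_area, uint64_t area)
{
	return HDR_CHUNKS + (uint64_t)(exceptions_per_area + 1) * area;
}

/* Mirrors skip_metadata(): step over a chunk that holds metadata. */
static uint64_t skip_metadata_demo(uint32_t exceptions_per_area,
				   uint64_t next_free)
{
	uint32_t stride = exceptions_per_area + 1;
	if (next_free % stride == HDR_CHUNKS)
		next_free++;
	return next_free;
}

int main(void)
{
	/* 16KB chunks / 16-byte exception records = 1024 per area */
	uint32_t epa = 1024;
	uint64_t area;

	for (area = 0; area < 3; area++)
		printf("metadata area %llu at chunk %llu\n",
		       (unsigned long long)area,
		       (unsigned long long)area_location_demo(epa, area));
	printf("next_free 1026 -> %llu\n",
	       (unsigned long long)skip_metadata_demo(epa, 1026));
	return 0;
}
#endif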
gpl-2.0
hor63/xcsoar
src/FLARM/FlarmDetails.cpp
15
2074
/* Copyright_License { XCSoar Glide Computer - http://www.xcsoar.org/ Copyright (C) 2000-2016 The XCSoar Project A detailed list of copyright holders can be found in the file "AUTHORS". This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. } */ #include "FLARM/FlarmDetails.hpp" #include "Global.hpp" #include "TrafficDatabases.hpp" #include "FLARM/FlarmId.hpp" #include "Util/StringCompare.hxx" #include <assert.h> const FlarmNetRecord * FlarmDetails::LookupRecord(FlarmId id) { // try to find flarm from FlarmNet.org File if (traffic_databases == nullptr) return NULL; return traffic_databases->flarm_net.FindRecordById(id); } const TCHAR * FlarmDetails::LookupCallsign(FlarmId id) { if (traffic_databases == nullptr) return nullptr; return traffic_databases->FindNameById(id); } FlarmId FlarmDetails::LookupId(const TCHAR *cn) { assert(traffic_databases != nullptr); return traffic_databases->FindIdByName(cn); } bool FlarmDetails::AddSecondaryItem(FlarmId id, const TCHAR *name) { assert(id.IsDefined()); assert(traffic_databases != nullptr); return traffic_databases->flarm_names.Set(id, name); } unsigned FlarmDetails::FindIdsByCallSign(const TCHAR *cn, FlarmId array[], unsigned size) { assert(cn != NULL); assert(!StringIsEmpty(cn)); assert(traffic_databases != nullptr); return traffic_databases->FindIdsByName(cn, array, size); }
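/*
 * A hypothetical standalone sketch (not part of XCSoar) of the bounded
 * lookup contract that FindIdsByCallSign() above exposes: the callee
 * fills a caller-provided fixed-size array, never writes more than
 * `size` entries, and returns the number of matches. The table, record
 * type and sample data are invented for the illustration; kept inside
 * #if 0 so it stays out of any build.
 */
#if 0
#include <stdio.h>
#include <string.h>

struct record { unsigned id; const char *callsign; };

static const struct record table[] = {
	{ 0xDD1234, "D-1234" },
	{ 0xDD5678, "D-5678" },
	{ 0xDDABCD, "D-1234" },	/* duplicate callsigns are possible */
};

/* Returns the number of ids written, never more than `size`. */
static unsigned find_ids_by_callsign(const char *cn,
				     unsigned ids[], unsigned size)
{
	unsigned i, n = 0;
	for (i = 0; i < sizeof(table) / sizeof(table[0]) && n < size; i++)
		if (strcmp(table[i].callsign, cn) == 0)
			ids[n++] = table[i].id;
	return n;
}

int main(void)
{
	unsigned ids[8];
	unsigned n = find_ids_by_callsign("D-1234", ids, 8);
	unsigned i;
	for (i = 0; i < n; i++)
		printf("match: 0x%06X\n", ids[i]);
	return 0;
}
#endif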
gpl-2.0
ISTweak/android_kernel_pantech_is06
fs/ext4/inode.c
271
173049
/* * linux/fs/ext4/inode.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Goal-directed block allocation by Stephen Tweedie * (sct@redhat.com), 1993, 1998 * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) * * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000 */ #include <linux/module.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/jbd2.h> #include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/pagevec.h> #include <linux/mpage.h> #include <linux/namei.h> #include <linux/uio.h> #include <linux/bio.h> #include <linux/workqueue.h> #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include "ext4_extents.h" #include <trace/events/ext4.h> #define MPAGE_DA_EXTENT_TAIL 0x01 static inline int ext4_begin_ordered_truncate(struct inode *inode, loff_t new_size) { return jbd2_journal_begin_ordered_truncate( EXT4_SB(inode->i_sb)->s_journal, &EXT4_I(inode)->jinode, new_size); } static void ext4_invalidatepage(struct page *page, unsigned long offset); /* * Test whether an inode is a fast symlink. */ static int ext4_inode_is_fast_symlink(struct inode *inode) { int ea_blocks = EXT4_I(inode)->i_file_acl ? (inode->i_sb->s_blocksize >> 9) : 0; return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); } /* * The ext4 forget function must perform a revoke if we are freeing data * which has been journaled. Metadata (eg. indirect blocks) must be * revoked in all cases. * * "bh" may be NULL: a metadata block may have been freed from memory * but there may still be a record of it in the journal, and that record * still needs to be revoked. * * If the handle isn't valid we're not journaling, but we still need to * call into ext4_journal_revoke() to put the buffer head. */ int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t blocknr) { int err; might_sleep(); BUFFER_TRACE(bh, "enter"); jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, " "data mode %x\n", bh, is_metadata, inode->i_mode, test_opt(inode->i_sb, DATA_FLAGS)); /* Never use the revoke function if we are doing full data * journaling: there is no need to, and a V1 superblock won't * support it. Otherwise, only skip the revoke on un-journaled * data blocks. */ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA || (!is_metadata && !ext4_should_journal_data(inode))) { if (bh) { BUFFER_TRACE(bh, "call jbd2_journal_forget"); return ext4_journal_forget(handle, bh); } return 0; } /* * data!=journal && (is_metadata || should_journal_data(inode)) */ BUFFER_TRACE(bh, "call ext4_journal_revoke"); err = ext4_journal_revoke(handle, blocknr, bh); if (err) ext4_abort(inode->i_sb, __func__, "error %d when attempting revoke", err); BUFFER_TRACE(bh, "exit"); return err; } /* * Work out how many blocks we need to proceed with the next chunk of a * truncate transaction. 
*/ static unsigned long blocks_for_truncate(struct inode *inode) { ext4_lblk_t needed; needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9); /* Give ourselves just enough room to cope with inodes in which * i_blocks is corrupt: we've seen disk corruptions in the past * which resulted in random data in an inode which looked enough * like a regular file for ext4 to try to delete it. Things * will go a bit crazy if that happens, but at least we should * try not to panic the whole kernel. */ if (needed < 2) needed = 2; /* But we need to bound the transaction so we don't overflow the * journal. */ if (needed > EXT4_MAX_TRANS_DATA) needed = EXT4_MAX_TRANS_DATA; return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed; } /* * Truncate transactions can be complex and absolutely huge. So we need to * be able to restart the transaction at a convenient checkpoint to make * sure we don't overflow the journal. * * start_transaction gets us a new handle for a truncate transaction, * and extend_transaction tries to extend the existing one a bit. If * extend fails, we need to propagate the failure up and restart the * transaction in the top-level truncate loop. --sct */ static handle_t *start_transaction(struct inode *inode) { handle_t *result; result = ext4_journal_start(inode, blocks_for_truncate(inode)); if (!IS_ERR(result)) return result; ext4_std_error(inode->i_sb, PTR_ERR(result)); return result; } /* * Try to extend this transaction for the purposes of truncation. * * Returns 0 if we managed to create more room. If we can't create more * room, and the transaction must be restarted, we return 1. */ static int try_to_extend_transaction(handle_t *handle, struct inode *inode) { if (!ext4_handle_valid(handle)) return 0; if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1)) return 0; if (!ext4_journal_extend(handle, blocks_for_truncate(inode))) return 0; return 1; } /* * Restart the transaction associated with *handle. This does a commit, * so before we call here everything must be consistently dirtied against * this transaction. */ int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode, int nblocks) { int ret; /* * Drop i_data_sem to avoid deadlock with ext4_get_blocks. At this * moment, get_block can be called only for blocks inside i_size since * the page cache has already been dropped and writes are blocked by * i_mutex. So we can safely drop the i_data_sem here. */ BUG_ON(EXT4_JOURNAL(inode) == NULL); jbd_debug(2, "restarting handle %p\n", handle); up_write(&EXT4_I(inode)->i_data_sem); ret = ext4_journal_restart(handle, blocks_for_truncate(inode)); down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); return ret; } /* * Called at the last iput() if i_nlink is zero. */ void ext4_delete_inode(struct inode *inode) { handle_t *handle; int err; if (ext4_should_order_data(inode)) ext4_begin_ordered_truncate(inode, 0); truncate_inode_pages(&inode->i_data, 0); if (is_bad_inode(inode)) goto no_delete; handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3); if (IS_ERR(handle)) { ext4_std_error(inode->i_sb, PTR_ERR(handle)); /* * If we're going to skip the normal cleanup, we still need to * make sure that the in-core orphan linked list is properly * cleaned up.
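* In outline (a restatement of the code below, not new behaviour): * ext4_journal_start() failed, so we call ext4_orphan_del(NULL, inode) * to drop the in-core orphan entry and jump to no_delete, which just * does clear_inode().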
*/ ext4_orphan_del(NULL, inode); goto no_delete; } if (IS_SYNC(inode)) ext4_handle_sync(handle); inode->i_size = 0; err = ext4_mark_inode_dirty(handle, inode); if (err) { ext4_warning(inode->i_sb, __func__, "couldn't mark inode dirty (err %d)", err); goto stop_handle; } if (inode->i_blocks) ext4_truncate(inode); /* * ext4_ext_truncate() doesn't reserve any slop when it * restarts journal transactions; therefore there may not be * enough credits left in the handle to remove the inode from * the orphan list and set the dtime field. */ if (!ext4_handle_has_enough_credits(handle, 3)) { err = ext4_journal_extend(handle, 3); if (err > 0) err = ext4_journal_restart(handle, 3); if (err != 0) { ext4_warning(inode->i_sb, __func__, "couldn't extend journal (err %d)", err); stop_handle: ext4_journal_stop(handle); goto no_delete; } } /* * Kill off the orphan record which ext4_truncate created. * AKPM: I think this can be inside the above `if'. * Note that ext4_orphan_del() has to be able to cope with the * deletion of a non-existent orphan - this is because we don't * know if ext4_truncate() actually created an orphan record. * (Well, we could do this if we need to, but heck - it works) */ ext4_orphan_del(handle, inode); EXT4_I(inode)->i_dtime = get_seconds(); /* * One subtle ordering requirement: if anything has gone wrong * (transaction abort, IO errors, whatever), then we can still * do these next steps (the fs will already have been marked as * having errors), but we can't free the inode if the mark_dirty * fails. */ if (ext4_mark_inode_dirty(handle, inode)) /* If that failed, just do the required in-core inode clear. */ clear_inode(inode); else ext4_free_inode(handle, inode); ext4_journal_stop(handle); return; no_delete: clear_inode(inode); /* We must guarantee clearing of inode... */ } typedef struct { __le32 *p; __le32 key; struct buffer_head *bh; } Indirect; static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) { p->key = *(p->p = v); p->bh = bh; } /** * ext4_block_to_path - parse the block number into array of offsets * @inode: inode in question (we are only interested in its superblock) * @i_block: block number to be parsed * @offsets: array to store the offsets in * @boundary: set this non-zero if the referred-to block is likely to be * followed (on disk) by an indirect block. * * To store the locations of file's data ext4 uses a data structure common * for UNIX filesystems - tree of pointers anchored in the inode, with * data blocks at leaves and indirect blocks in intermediate nodes. * This function translates the block number into path in that tree - * return value is the path length and @offsets[n] is the offset of * pointer to (n+1)th node in the nth one. If @block is out of range * (negative or too large) warning is printed and zero returned. * * Note: function doesn't find node addresses, so no IO is needed. All * we need to know is the capacity of indirect blocks (taken from the * inode->i_sb). */ /* * Portability note: the last comparison (check that we fit into triple * indirect block) is spelled differently, because otherwise on an * architecture with 32-bit longs and 8Kb pages we might get into trouble * if our filesystem had 8Kb blocks. We might use long long, but that would * kill us on x86. Oh, well, at least the sign propagation does not matter - * i_block would have to be negative in the very beginning, so we would not * get there at all. 
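* * Worked example (hypothetical, assuming 4KiB blocks, so ptrs == 1024 * and EXT4_NDIR_BLOCKS == 12): logical block 5000 becomes * 5000 - 12 - 1024 = 3964, which is below double_blocks, so the result * is offsets = { EXT4_DIND_BLOCK, 3964 >> 10 = 3, 3964 & 1023 = 892 }, * i.e. slot 3 of the double-indirect block and slot 892 of the * indirect block it points to.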
*/ static int ext4_block_to_path(struct inode *inode, ext4_lblk_t i_block, ext4_lblk_t offsets[4], int *boundary) { int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb); int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb); const long direct_blocks = EXT4_NDIR_BLOCKS, indirect_blocks = ptrs, double_blocks = (1 << (ptrs_bits * 2)); int n = 0; int final = 0; if (i_block < direct_blocks) { offsets[n++] = i_block; final = direct_blocks; } else if ((i_block -= direct_blocks) < indirect_blocks) { offsets[n++] = EXT4_IND_BLOCK; offsets[n++] = i_block; final = ptrs; } else if ((i_block -= indirect_blocks) < double_blocks) { offsets[n++] = EXT4_DIND_BLOCK; offsets[n++] = i_block >> ptrs_bits; offsets[n++] = i_block & (ptrs - 1); final = ptrs; } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { offsets[n++] = EXT4_TIND_BLOCK; offsets[n++] = i_block >> (ptrs_bits * 2); offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); offsets[n++] = i_block & (ptrs - 1); final = ptrs; } else { ext4_warning(inode->i_sb, "ext4_block_to_path", "block %lu > max in inode %lu", i_block + direct_blocks + indirect_blocks + double_blocks, inode->i_ino); } if (boundary) *boundary = final - 1 - (i_block & (ptrs - 1)); return n; } static int __ext4_check_blockref(const char *function, struct inode *inode, __le32 *p, unsigned int max) { __le32 *bref = p; unsigned int blk; while (bref < p+max) { blk = le32_to_cpu(*bref++); if (blk && unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), blk, 1))) { ext4_error(inode->i_sb, function, "invalid block reference %u " "in inode #%lu", blk, inode->i_ino); return -EIO; } } return 0; } #define ext4_check_indirect_blockref(inode, bh) \ __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data, \ EXT4_ADDR_PER_BLOCK((inode)->i_sb)) #define ext4_check_inode_blockref(inode) \ __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data, \ EXT4_NDIR_BLOCKS) /** * ext4_get_branch - read the chain of indirect blocks leading to data * @inode: inode in question * @depth: depth of the chain (1 - direct pointer, etc.) * @offsets: offsets of pointers in inode/indirect blocks * @chain: place to store the result * @err: here we store the error value * * Function fills the array of triples <key, p, bh> and returns %NULL * if everything went OK or the pointer to the last filled triple * (incomplete one) otherwise. Upon the return chain[i].key contains * the number of (i+1)-th block in the chain (as it is stored in memory, * i.e. little-endian 32-bit), chain[i].p contains the address of that * number (it points into struct inode for i==0 and into the bh->b_data * for i>0) and chain[i].bh points to the buffer_head of i-th indirect * block for i>0 and NULL for i==0. In other words, it holds the block * numbers of the chain, addresses they were taken from (and where we can * verify that chain did not change) and buffer_heads hosting these * numbers. * * Function stops when it stumbles upon zero pointer (absent block) * (pointer to last triple returned, *@err == 0) * or when it gets an IO error reading an indirect block * (ditto, *@err == -EIO) * or when it reads all @depth-1 indirect blocks successfully and finds * the whole chain, all way to the data (returns %NULL, *err == 0). 
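* * As an illustration (hypothetical values), a successful lookup through * a double-indirect path could leave: * chain[0] = { p = &i_data[EXT4_DIND_BLOCK], key = D, bh = NULL } * chain[1] = { p = (__le32 *)bh1->b_data + 3, key = I, bh = bh1 } * chain[2] = { p = (__le32 *)bh2->b_data + 892, key = B, bh = bh2 } * where D, I and B are the little-endian on-disk block numbers of the * double-indirect block, the indirect block and the data block.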
* * Need to be called with * down_read(&EXT4_I(inode)->i_data_sem) */ static Indirect *ext4_get_branch(struct inode *inode, int depth, ext4_lblk_t *offsets, Indirect chain[4], int *err) { struct super_block *sb = inode->i_sb; Indirect *p = chain; struct buffer_head *bh; *err = 0; /* i_data is not going away, no lock needed */ add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets); if (!p->key) goto no_block; while (--depth) { bh = sb_getblk(sb, le32_to_cpu(p->key)); if (unlikely(!bh)) goto failure; if (!bh_uptodate_or_lock(bh)) { if (bh_submit_read(bh) < 0) { put_bh(bh); goto failure; } /* validate block references */ if (ext4_check_indirect_blockref(inode, bh)) { put_bh(bh); goto failure; } } add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets); /* Reader: end */ if (!p->key) goto no_block; } return NULL; failure: *err = -EIO; no_block: return p; } /** * ext4_find_near - find a place for allocation with sufficient locality * @inode: owner * @ind: descriptor of indirect block. * * This function returns the preferred place for block allocation. * It is used when heuristic for sequential allocation fails. * Rules are: * + if there is a block to the left of our position - allocate near it. * + if pointer will live in indirect block - allocate near that block. * + if pointer will live in inode - allocate in the same * cylinder group. * * In the latter case we colour the starting block by the callers PID to * prevent it from clashing with concurrent allocations for a different inode * in the same block group. The PID is used here so that functionally related * files will be close-by on-disk. * * Caller must make sure that @ind is valid and will stay that way. */ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) { struct ext4_inode_info *ei = EXT4_I(inode); __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data; __le32 *p; ext4_fsblk_t bg_start; ext4_fsblk_t last_block; ext4_grpblk_t colour; ext4_group_t block_group; int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); /* Try to find previous block */ for (p = ind->p - 1; p >= start; p--) { if (*p) return le32_to_cpu(*p); } /* No such thing, so let's try location of indirect block */ if (ind->bh) return ind->bh->b_blocknr; /* * It is going to be referred to from the inode itself? OK, just put it * into the same cylinder group then. */ block_group = ei->i_block_group; if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { block_group &= ~(flex_size-1); if (S_ISREG(inode->i_mode)) block_group++; } bg_start = ext4_group_first_block_no(inode->i_sb, block_group); last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; /* * If we are doing delayed allocation, we don't need take * colour into account. */ if (test_opt(inode->i_sb, DELALLOC)) return bg_start; if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) colour = (current->pid % 16) * (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); else colour = (current->pid % 16) * ((last_block - bg_start) / 16); return bg_start + colour; } /** * ext4_find_goal - find a preferred place for allocation. * @inode: owner * @block: block we want * @partial: pointer to the last triple within a chain * * Normally this function find the preferred place for block allocation, * returns it. * Because this is only used for non-extent files, we limit the block nr * to 32 bits. 
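* * For the PID colouring done in ext4_find_near() above, a sketch with * made-up numbers: with 32768 blocks per group and current->pid == 1234, * colour = (1234 % 16) * (32768 / 16) = 2 * 2048 = 4096, so this task * starts its allocations 4096 blocks into the group, keeping concurrent * writers out of each other's way.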
*/ static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block, Indirect *partial) { ext4_fsblk_t goal; /* * XXX need to get goal block from mballoc's data structures */ goal = ext4_find_near(inode, partial); goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; return goal; } /** * ext4_blks_to_allocate: Look up the block map and count the number * of direct blocks that need to be allocated for the given branch. * * @branch: chain of indirect blocks * @k: number of blocks needed for indirect blocks * @blks: number of data blocks to be mapped. * @blocks_to_boundary: the offset in the indirect block * * return the total number of blocks to be allocated, including the * direct and indirect blocks. */ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks, int blocks_to_boundary) { unsigned int count = 0; /* * Simple case: the [t,d]indirect block(s) has not been allocated yet, * so clearly the blocks on that path have not been allocated either */ if (k > 0) { /* right now we don't handle cross boundary allocation */ if (blks < blocks_to_boundary + 1) count += blks; else count += blocks_to_boundary + 1; return count; } count++; while (count < blks && count <= blocks_to_boundary && le32_to_cpu(*(branch[0].p + count)) == 0) { count++; } return count; } /** * ext4_alloc_blocks: allocate the multiple blocks needed for a branch * @indirect_blks: the number of blocks that need to be allocated for * the indirect blocks * * @new_blocks: on return it will store the new block numbers for * the indirect blocks (if needed) and the first direct block, * @blks: on return it will store the total number of allocated * direct blocks */ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, ext4_lblk_t iblock, ext4_fsblk_t goal, int indirect_blks, int blks, ext4_fsblk_t new_blocks[4], int *err) { struct ext4_allocation_request ar; int target, i; unsigned long count = 0, blk_allocated = 0; int index = 0; ext4_fsblk_t current_block = 0; int ret = 0; /* * Here we try to allocate the requested multiple blocks at once, * on a best-effort basis. * To build a branch, we should allocate blocks for * the indirect blocks (if not allocated yet), and at least * the first direct block of this branch.
That's the * minimum number of blocks we need to allocate (required) */ /* first we try to allocate the indirect blocks */ target = indirect_blks; while (target > 0) { count = target; /* allocating blocks for indirect blocks and direct blocks */ current_block = ext4_new_meta_blocks(handle, inode, goal, &count, err); if (*err) goto failed_out; BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS); target -= count; /* allocate blocks for indirect blocks */ while (index < indirect_blks && count) { new_blocks[index++] = current_block++; count--; } if (count > 0) { /* * save the new block number * for the first direct block */ new_blocks[index] = current_block; printk(KERN_INFO "%s returned more blocks than " "requested\n", __func__); WARN_ON(1); break; } } target = blks - count; blk_allocated = count; if (!target) goto allocated; /* Now allocate data blocks */ memset(&ar, 0, sizeof(ar)); ar.inode = inode; ar.goal = goal; ar.len = target; ar.logical = iblock; if (S_ISREG(inode->i_mode)) /* enable in-core preallocation only for regular files */ ar.flags = EXT4_MB_HINT_DATA; current_block = ext4_mb_new_blocks(handle, &ar, err); BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS); if (*err && (target == blks)) { /* * if the allocation failed and we didn't allocate * any blocks before */ goto failed_out; } if (!*err) { if (target == blks) { /* * save the new block number * for the first direct block */ new_blocks[index] = current_block; } blk_allocated += ar.len; } allocated: /* total number of blocks allocated for direct blocks */ ret = blk_allocated; *err = 0; return ret; failed_out: for (i = 0; i < index; i++) ext4_free_blocks(handle, inode, new_blocks[i], 1, 0); return ret; } /** * ext4_alloc_branch - allocate and set up a chain of blocks. * @inode: owner * @indirect_blks: number of allocated indirect blocks * @blks: number of allocated direct blocks * @offsets: offsets (in the blocks) to store the pointers to next. * @branch: place to store the chain in. * * This function allocates blocks, zeroes out all but the last one, * links them into a chain and (if we are synchronous) writes them to disk. * In other words, it prepares a branch that can be spliced onto the * inode. It stores the information about that chain in the branch[], in * the same format as ext4_get_branch() would do. We are calling it after * we had read the existing part of chain and partial points to the last * triple of that (one with zero ->key). Upon the exit we have the same * picture as after the successful ext4_get_block(), except that in one * place chain is disconnected - *branch->p is still zero (we did not * set the last link), but branch->key contains the number that should * be placed into *branch->p to fill that gap. * * If allocation fails we free all blocks we've allocated (and forget * their buffer_heads) and return the error value from the failed * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain * as described above and return 0. */ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, ext4_lblk_t iblock, int indirect_blks, int *blks, ext4_fsblk_t goal, ext4_lblk_t *offsets, Indirect *branch) { int blocksize = inode->i_sb->s_blocksize; int i, n = 0; int err = 0; struct buffer_head *bh; int num; ext4_fsblk_t new_blocks[4]; ext4_fsblk_t current_block; num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks, *blks, new_blocks, &err); if (err) return err; branch[0].key = cpu_to_le32(new_blocks[0]); /* * metadata blocks and data blocks are allocated.
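* * For instance (hypothetical double-indirect case): indirect_blks == 2 * and num == 4 means new_blocks[] holds { DIND, IND, D0 }, and the loop * below wires DIND -> IND -> D0, then fills D0+1 .. D0+3 into the last * indirect block, relying on ext4_alloc_blocks() having returned the * direct blocks as one contiguous run.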
*/ for (n = 1; n <= indirect_blks; n++) { /* * Get buffer_head for parent block, zero it out * and set the pointer to the new one, then send * parent to disk. */ bh = sb_getblk(inode->i_sb, new_blocks[n-1]); branch[n].bh = bh; lock_buffer(bh); BUFFER_TRACE(bh, "call get_create_access"); err = ext4_journal_get_create_access(handle, bh); if (err) { /* Don't brelse(bh) here; it's done in * ext4_journal_forget() below */ unlock_buffer(bh); goto failed; } memset(bh->b_data, 0, blocksize); branch[n].p = (__le32 *) bh->b_data + offsets[n]; branch[n].key = cpu_to_le32(new_blocks[n]); *branch[n].p = branch[n].key; if (n == indirect_blks) { current_block = new_blocks[n]; /* * End of chain, update the last new metablock of * the chain to point to the newly allocated * data block numbers */ for (i = 1; i < num; i++) *(branch[n].p + i) = cpu_to_le32(++current_block); } BUFFER_TRACE(bh, "marking uptodate"); set_buffer_uptodate(bh); unlock_buffer(bh); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, inode, bh); if (err) goto failed; } *blks = num; return err; failed: /* Allocation failed, free what we already allocated */ for (i = 1; i <= n; i++) { BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget"); ext4_journal_forget(handle, branch[i].bh); } for (i = 0; i < indirect_blks; i++) ext4_free_blocks(handle, inode, new_blocks[i], 1, 0); ext4_free_blocks(handle, inode, new_blocks[i], num, 0); return err; } /** * ext4_splice_branch - splice the allocated branch onto inode. * @inode: owner * @block: (logical) number of block we are adding * @chain: chain of indirect blocks (with a missing link - see * ext4_alloc_branch) * @where: location of missing link * @num: number of indirect blocks we are adding * @blks: number of direct blocks we are adding * * This function fills the missing link and does all housekeeping needed in * inode (->i_blocks, etc.). In case of success we end up with the full * chain to the new block and return 0. */ static int ext4_splice_branch(handle_t *handle, struct inode *inode, ext4_lblk_t block, Indirect *where, int num, int blks) { int i; int err = 0; ext4_fsblk_t current_block; /* * If we're splicing into a [td]indirect block (as opposed to the * inode) then we need to get write access to the [td]indirect block * before the splice. */ if (where->bh) { BUFFER_TRACE(where->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, where->bh); if (err) goto err_out; } /* That's it */ *where->p = where->key; /* * Update the host buffer_head or inode to point to the just allocated * direct blocks */ if (num == 0 && blks > 1) { current_block = le32_to_cpu(where->key) + 1; for (i = 1; i < blks; i++) *(where->p + i) = cpu_to_le32(current_block++); } /* We are done with atomic stuff, now do the rest of housekeeping */ /* had we spliced it onto an indirect block? */ if (where->bh) { /* * If we spliced it onto an indirect block, we haven't * altered the inode. Note however that if it is being spliced * onto an indirect block at the very end of the file (the * file is growing) then we *will* alter the inode to reflect * the new i_size. But that is not done here - it is done in * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode. */ jbd_debug(5, "splicing indirect only\n"); BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, inode, where->bh); if (err) goto err_out; } else { /* * OK, we spliced it into the inode itself on a direct block.
*/ ext4_mark_inode_dirty(handle, inode); jbd_debug(5, "splicing direct\n"); } return err; err_out: for (i = 1; i <= num; i++) { BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget"); ext4_journal_forget(handle, where[i].bh); ext4_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1, 0); } ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0); return err; } /* * The ext4_ind_get_blocks() function handles non-extent inodes * (i.e., using the traditional indirect/double-indirect i_blocks * scheme) for ext4_get_blocks(). * * Allocation strategy is simple: if we have to allocate something, we will * have to go the whole way to leaf. So let's do it before attaching anything * to tree, set linkage between the newborn blocks, write them if sync is * required, recheck the path, free and repeat if check fails, otherwise * set the last missing link (that will protect us from any truncate-generated * removals - all blocks on the path are immune now) and possibly force the * write on the parent block. * That has a nice additional property: no special recovery from the failed * allocations is needed - we simply release blocks and do not touch anything * reachable from inode. * * `handle' can be NULL if create == 0. * * return > 0, # of blocks mapped or allocated. * return = 0, if plain lookup failed. * return < 0, error case. * * The ext4_ind_get_blocks() function should be called with * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system * blocks. */ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, ext4_lblk_t iblock, unsigned int maxblocks, struct buffer_head *bh_result, int flags) { int err = -EIO; ext4_lblk_t offsets[4]; Indirect chain[4]; Indirect *partial; ext4_fsblk_t goal; int indirect_blks; int blocks_to_boundary = 0; int depth; int count = 0; ext4_fsblk_t first_block = 0; J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)); J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); depth = ext4_block_to_path(inode, iblock, offsets, &blocks_to_boundary); if (depth == 0) goto out; partial = ext4_get_branch(inode, depth, offsets, chain, &err); /* Simplest case - block found, no allocation needed */ if (!partial) { first_block = le32_to_cpu(chain[depth - 1].key); clear_buffer_new(bh_result); count++; /* map more blocks */ while (count < maxblocks && count <= blocks_to_boundary) { ext4_fsblk_t blk; blk = le32_to_cpu(*(chain[depth-1].p + count)); if (blk == first_block + count) count++; else break; } goto got_it; } /* Next simple case - plain lookup or failed read of indirect block */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO) goto cleanup; /* * Okay, we need to do block allocation. */ goal = ext4_find_goal(inode, iblock, partial); /* the number of blocks we need to allocate for [d,t]indirect blocks */ indirect_blks = (chain + depth) - partial - 1; /* * Next look up the indirect map to count the total number of * direct blocks to allocate for this branch.
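* * (Numerically, as a hypothetical example: depth == 3 with partial at * chain[1] gives indirect_blks = (chain + 3) - partial - 1 = 1, i.e. * one missing indirect block on the path plus the data blocks * themselves.)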
*/ count = ext4_blks_to_allocate(partial, indirect_blks, maxblocks, blocks_to_boundary); /* * Block out ext4_truncate while we alter the tree */ err = ext4_alloc_branch(handle, inode, iblock, indirect_blks, &count, goal, offsets + (partial - chain), partial); /* * The ext4_splice_branch call will free and forget any buffers * on the new chain if there is a failure, but that risks using * up transaction credits, especially for bitmaps where the * credits cannot be returned. Can we handle this somehow? We * may need to return -EAGAIN upwards in the worst case. --sct */ if (!err) err = ext4_splice_branch(handle, inode, iblock, partial, indirect_blks, count); if (err) goto cleanup; set_buffer_new(bh_result); ext4_update_inode_fsync_trans(handle, inode, 1); got_it: map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); if (count > blocks_to_boundary) set_buffer_boundary(bh_result); err = count; /* Clean up and exit */ partial = chain + depth - 1; /* the whole chain */ cleanup: while (partial > chain) { BUFFER_TRACE(partial->bh, "call brelse"); brelse(partial->bh); partial--; } BUFFER_TRACE(bh_result, "returned"); out: return err; } #ifdef CONFIG_QUOTA qsize_t *ext4_get_reserved_space(struct inode *inode) { return &EXT4_I(inode)->i_reserved_quota; } #endif /* * Calculate the number of metadata blocks need to reserve * to allocate @blocks for non extent file based file */ static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks) { int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb); int ind_blks, dind_blks, tind_blks; /* number of new indirect blocks needed */ ind_blks = (blocks + icap - 1) / icap; dind_blks = (ind_blks + icap - 1) / icap; tind_blks = 1; return ind_blks + dind_blks + tind_blks; } /* * Calculate the number of metadata blocks need to reserve * to allocate given number of blocks */ static int ext4_calc_metadata_amount(struct inode *inode, int blocks) { if (!blocks) return 0; if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) return ext4_ext_calc_metadata_amount(inode, blocks); return ext4_indirect_calc_metadata_amount(inode, blocks); } static void ext4_da_update_reserve_space(struct inode *inode, int used) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); int total, mdb, mdb_free; spin_lock(&EXT4_I(inode)->i_block_reservation_lock); /* recalculate the number of metablocks still need to be reserved */ total = EXT4_I(inode)->i_reserved_data_blocks - used; mdb = ext4_calc_metadata_amount(inode, total); /* figure out how many metablocks to release */ BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb; if (mdb_free) { /* Account for allocated meta_blocks */ mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks; /* update fs dirty blocks counter */ percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free); EXT4_I(inode)->i_allocated_meta_blocks = 0; EXT4_I(inode)->i_reserved_meta_blocks = mdb; } /* update per-inode reservations */ BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks); EXT4_I(inode)->i_reserved_data_blocks -= used; spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); /* * free those over-booking quota for metadata blocks */ if (mdb_free) vfs_dq_release_reservation_block(inode, mdb_free); /* * If we have done all the pending block allocations and if * there aren't any writers on the inode, we can discard the * inode's preallocations. 
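* * The metadata accounting above leans on ext4_calc_metadata_amount(); * as a made-up example for a non-extent file with EXT4_ADDR_PER_BLOCK * == 1024, covering 2048 data blocks costs ind = 2, dind = 1, tind = 1, * i.e. 4 reserved metadata blocks (see * ext4_indirect_calc_metadata_amount() above).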
*/ if (!total && (atomic_read(&inode->i_writecount) == 0)) ext4_discard_preallocations(inode); } static int check_block_validity(struct inode *inode, const char *msg, sector_t logical, sector_t phys, int len) { if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) { ext4_error(inode->i_sb, msg, "inode #%lu logical block %llu mapped to %llu " "(size %d)", inode->i_ino, (unsigned long long) logical, (unsigned long long) phys, len); return -EIO; } return 0; } /* * Return the number of contiguous dirty pages in a given inode * starting at page frame idx. */ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, unsigned int max_pages) { struct address_space *mapping = inode->i_mapping; pgoff_t index; struct pagevec pvec; pgoff_t num = 0; int i, nr_pages, done = 0; if (max_pages == 0) return 0; pagevec_init(&pvec, 0); while (!done) { index = idx; nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, (pgoff_t)PAGEVEC_SIZE); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; struct buffer_head *bh, *head; lock_page(page); if (unlikely(page->mapping != mapping) || !PageDirty(page) || PageWriteback(page) || page->index != idx) { done = 1; unlock_page(page); break; } if (page_has_buffers(page)) { bh = head = page_buffers(page); do { if (!buffer_delay(bh) && !buffer_unwritten(bh)) done = 1; bh = bh->b_this_page; } while (!done && (bh != head)); } unlock_page(page); if (done) break; idx++; num++; if (num >= max_pages) break; } pagevec_release(&pvec); } return num; } /* * The ext4_get_blocks() function tries to look up the requested blocks, * and returns if the blocks are already mapped. * * Otherwise it takes the write lock of the i_data_sem, allocates blocks, * stores the allocated blocks in the result buffer head and marks it * mapped. * * If the file type is extent based, it will call ext4_ext_get_blocks(); * otherwise, it calls ext4_ind_get_blocks() to handle indirect-mapping * based files. * * On success, it returns the number of blocks being mapped or allocated. * If create == 0 and the blocks are pre-allocated and uninitialized, * the result buffer head is unmapped. If create == 1, it will make sure * the buffer head is mapped. * * It returns 0 if plain look up failed (blocks have not been allocated); in * that case, the buffer head is unmapped. * * It returns the error in case of allocation failure. */ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, unsigned int max_blocks, struct buffer_head *bh, int flags) { int retval; clear_buffer_mapped(bh); clear_buffer_unwritten(bh); ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u," "logical block %lu\n", inode->i_ino, flags, max_blocks, (unsigned long)block); /* * Try to see if we can get the block without requesting a new * file system block.
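* * In outline, the locking below is (a summary of the code, not extra * logic): * down_read(i_data_sem); lookup with create == 0; up_read(); * and only if allocation is really needed: * down_write(i_data_sem); allocate with EXT4_GET_BLOCKS_CREATE; * up_write();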
*/ down_read((&EXT4_I(inode)->i_data_sem)); if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { retval = ext4_ext_get_blocks(handle, inode, block, max_blocks, bh, 0); } else { retval = ext4_ind_get_blocks(handle, inode, block, max_blocks, bh, 0); } up_read((&EXT4_I(inode)->i_data_sem)); if (retval > 0 && buffer_mapped(bh)) { int ret = check_block_validity(inode, "file system corruption", block, bh->b_blocknr, retval); if (ret != 0) return ret; } /* If it is only a block(s) look up */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) return retval; /* * Return if the blocks have already been allocated. * * Note that if blocks have been preallocated, * ext4_ext_get_block() returns with create = 0 * and the buffer head unmapped. */ if (retval > 0 && buffer_mapped(bh)) return retval; /* * When we call get_blocks without the create flag, the * BH_Unwritten flag could have gotten set if the blocks * requested were part of an uninitialized extent. We need to * clear this flag now that we are committed to convert all or * part of the uninitialized extent to be an initialized * extent. This is because we need to avoid the combination * of BH_Unwritten and BH_Mapped flags being simultaneously * set on the buffer_head. */ clear_buffer_unwritten(bh); /* * New block allocation and/or writing to an uninitialized extent * will possibly result in updating i_data, so we take * the write lock of i_data_sem, and call get_blocks() * with create == 1 flag. */ down_write((&EXT4_I(inode)->i_data_sem)); /* * if the caller is from the delayed allocation writeout path * we have already reserved fs blocks for allocation; * let the underlying get_block() function know to * avoid double accounting */ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) EXT4_I(inode)->i_delalloc_reserved_flag = 1; /* * We need to check for EXT4 here because migrate * could have changed the inode type in between */ if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { retval = ext4_ext_get_blocks(handle, inode, block, max_blocks, bh, flags); } else { retval = ext4_ind_get_blocks(handle, inode, block, max_blocks, bh, flags); if (retval > 0 && buffer_new(bh)) { /* * We allocated new blocks which will result in * i_data's format changing. Force the migrate * to fail by clearing migrate flags */ EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE; } } if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) EXT4_I(inode)->i_delalloc_reserved_flag = 0; /* * Update reserved blocks/metadata blocks after successful * block allocation which had been deferred till now. */ if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE)) ext4_da_update_reserve_space(inode, retval); up_write((&EXT4_I(inode)->i_data_sem)); if (retval > 0 && buffer_mapped(bh)) { int ret = check_block_validity(inode, "file system " "corruption after allocation", block, bh->b_blocknr, retval); if (ret != 0) return ret; } return retval; } /* Maximum number of blocks we map for direct IO at once. */ #define DIO_MAX_BLOCKS 4096 int ext4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { handle_t *handle = ext4_journal_current_handle(); int ret = 0, started = 0; unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; int dio_credits; if (create && !handle) { /* Direct IO write...
*/ if (max_blocks > DIO_MAX_BLOCKS) max_blocks = DIO_MAX_BLOCKS; dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); handle = ext4_journal_start(inode, dio_credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } started = 1; } ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, create ? EXT4_GET_BLOCKS_CREATE : 0); if (ret > 0) { bh_result->b_size = (ret << inode->i_blkbits); ret = 0; } if (started) ext4_journal_stop(handle); out: return ret; } /* * `handle' can be NULL if create is zero */ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, ext4_lblk_t block, int create, int *errp) { struct buffer_head dummy; int fatal = 0, err; int flags = 0; J_ASSERT(handle != NULL || create == 0); dummy.b_state = 0; dummy.b_blocknr = -1000; buffer_trace_init(&dummy.b_history); if (create) flags |= EXT4_GET_BLOCKS_CREATE; err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags); /* * ext4_get_blocks() returns number of blocks mapped. 0 in * case of a HOLE. */ if (err > 0) { if (err > 1) WARN_ON(1); err = 0; } *errp = err; if (!err && buffer_mapped(&dummy)) { struct buffer_head *bh; bh = sb_getblk(inode->i_sb, dummy.b_blocknr); if (!bh) { *errp = -EIO; goto err; } if (buffer_new(&dummy)) { J_ASSERT(create != 0); J_ASSERT(handle != NULL); /* * Now that we do not always journal data, we should * keep in mind whether this should always journal the * new buffer as metadata. For now, regular file * writes use ext4_get_block instead, so it's not a * problem. */ lock_buffer(bh); BUFFER_TRACE(bh, "call get_create_access"); fatal = ext4_journal_get_create_access(handle, bh); if (!fatal && !buffer_uptodate(bh)) { memset(bh->b_data, 0, inode->i_sb->s_blocksize); set_buffer_uptodate(bh); } unlock_buffer(bh); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, inode, bh); if (!fatal) fatal = err; } else { BUFFER_TRACE(bh, "not a new buffer"); } if (fatal) { *errp = fatal; brelse(bh); bh = NULL; } return bh; } err: return NULL; } struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, ext4_lblk_t block, int create, int *err) { struct buffer_head *bh; bh = ext4_getblk(handle, inode, block, create, err); if (!bh) return bh; if (buffer_uptodate(bh)) return bh; ll_rw_block(READ_META, 1, &bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; put_bh(bh); *err = -EIO; return NULL; } static int walk_page_buffers(handle_t *handle, struct buffer_head *head, unsigned from, unsigned to, int *partial, int (*fn)(handle_t *handle, struct buffer_head *bh)) { struct buffer_head *bh; unsigned block_start, block_end; unsigned blocksize = head->b_size; int err, ret = 0; struct buffer_head *next; for (bh = head, block_start = 0; ret == 0 && (bh != head || !block_start); block_start = block_end, bh = next) { next = bh->b_this_page; block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (partial && !buffer_uptodate(bh)) *partial = 1; continue; } err = (*fn)(handle, bh); if (!ret) ret = err; } return ret; } /* * To preserve ordering, it is essential that the hole instantiation and * the data write be encapsulated in a single transaction. We cannot * close off a transaction and start a new one between the ext4_get_block() * and the commit_write(). So doing the jbd2_journal_start at the start of * prepare_write() is the right place. * * Also, this function can nest inside ext4_writepage() -> * block_write_full_page(). 
In that case, we *know* that ext4_writepage() * has generated enough buffer credits to do the whole page. So we won't * block on the journal in that case, which is good, because the caller may * be PF_MEMALLOC. * * By accident, ext4 can be reentered when a transaction is open via * quota file writes. If we were to commit the transaction while thus * reentered, there can be a deadlock - we would be holding a quota * lock, and the commit would never complete if another thread had a * transaction open and was blocking on the quota lock - a ranking * violation. * * So what we do is to rely on the fact that jbd2_journal_stop/journal_start * will _not_ run commit under these circumstances because handle->h_ref * is elevated. We'll still have enough credits for the tiny quotafile * write. */ static int do_journal_get_write_access(handle_t *handle, struct buffer_head *bh) { if (!buffer_mapped(bh) || buffer_freed(bh)) return 0; return ext4_journal_get_write_access(handle, bh); } /* * Truncate blocks that were not used by write. We have to truncate the * pagecache as well so that corresponding buffers get properly unmapped. */ static void ext4_truncate_failed_write(struct inode *inode) { truncate_inode_pages(inode->i_mapping, inode->i_size); ext4_truncate(inode); } static int ext4_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; int ret, needed_blocks; handle_t *handle; int retries = 0; struct page *page; pgoff_t index; unsigned from, to; trace_ext4_write_begin(inode, pos, len, flags); /* * Reserve one block more for addition to orphan list in case * we allocate blocks but write fails for some reason */ needed_blocks = ext4_writepage_trans_blocks(inode) + 1; index = pos >> PAGE_CACHE_SHIFT; from = pos & (PAGE_CACHE_SIZE - 1); to = from + len; retry: handle = ext4_journal_start(inode, needed_blocks); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } /* We cannot recurse into the filesystem as the transaction is already * started */ flags |= AOP_FLAG_NOFS; page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { ext4_journal_stop(handle); ret = -ENOMEM; goto out; } *pagep = page; ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, ext4_get_block); if (!ret && ext4_should_journal_data(inode)) { ret = walk_page_buffers(handle, page_buffers(page), from, to, NULL, do_journal_get_write_access); } if (ret) { unlock_page(page); page_cache_release(page); /* * block_write_begin may have instantiated a few blocks * outside i_size. Trim these off again. Don't need * i_size_read because we hold i_mutex. * * Add inode to orphan list in case we crash before * truncate finishes */ if (pos + len > inode->i_size && ext4_can_truncate(inode)) ext4_orphan_add(handle, inode); ext4_journal_stop(handle); if (pos + len > inode->i_size) { ext4_truncate_failed_write(inode); /* * If truncate failed early the inode might * still be on the orphan list; we need to * make sure the inode is removed from the * orphan list in that case. 
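* (The retry logic that follows is then: on -ENOSPC, * ext4_should_retry_alloc() decides whether a journal commit could * free enough blocks, and if so we goto retry and run the whole * write_begin sequence again.)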
*/ if (inode->i_nlink) ext4_orphan_del(NULL, inode); } } if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; out: return ret; } /* For write_end() in data=journal mode */ static int write_end_fn(handle_t *handle, struct buffer_head *bh) { if (!buffer_mapped(bh) || buffer_freed(bh)) return 0; set_buffer_uptodate(bh); return ext4_handle_dirty_metadata(handle, NULL, bh); } static int ext4_generic_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { int i_size_changed = 0; struct inode *inode = mapping->host; handle_t *handle = ext4_journal_current_handle(); copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); /* * No need to use i_size_read() here, the i_size * cannot change under us because we hold i_mutex. * * But it's important to update i_size while still holding page lock: * page writeout could otherwise come in and zero beyond i_size. */ if (pos + copied > inode->i_size) { i_size_write(inode, pos + copied); i_size_changed = 1; } if (pos + copied > EXT4_I(inode)->i_disksize) { /* We need to mark inode dirty even if * new_i_size is less than inode->i_size * but greater than i_disksize (hint: delalloc) */ ext4_update_i_disksize(inode, (pos + copied)); i_size_changed = 1; } unlock_page(page); page_cache_release(page); /* * Don't mark the inode dirty under page lock. First, it unnecessarily * makes the holding time of page lock longer. Second, it forces lock * ordering of page lock and transaction start for journaling * filesystems. */ if (i_size_changed) ext4_mark_inode_dirty(handle, inode); return copied; } /* * We need to pick up the new inode size which generic_commit_write gave us. * `file' can be NULL - e.g., when called from page_symlink(). * * ext4 never places buffers on inode->i_mapping->private_list. Metadata * buffers are managed internally. */ static int ext4_ordered_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { handle_t *handle = ext4_journal_current_handle(); struct inode *inode = mapping->host; int ret = 0, ret2; trace_ext4_ordered_write_end(inode, pos, len, copied); ret = ext4_jbd2_file_inode(handle, inode); if (ret == 0) { ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, page, fsdata); copied = ret2; if (pos + len > inode->i_size && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less. We will have blocks allocated outside * inode->i_size. So truncate them */ ext4_orphan_add(handle, inode); if (ret2 < 0) ret = ret2; } ret2 = ext4_journal_stop(handle); if (!ret) ret = ret2; if (pos + len > inode->i_size) { ext4_truncate_failed_write(inode); /* * If truncate failed early the inode might still be * on the orphan list; we need to make sure the inode * is removed from the orphan list in that case. */ if (inode->i_nlink) ext4_orphan_del(NULL, inode); } return ret ? ret : copied; } static int ext4_writeback_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { handle_t *handle = ext4_journal_current_handle(); struct inode *inode = mapping->host; int ret = 0, ret2; trace_ext4_writeback_write_end(inode, pos, len, copied); ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, page, fsdata); copied = ret2; if (pos + len > inode->i_size && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less.
We will have blocks allocated outside * inode->i_size. So truncate them */ ext4_orphan_add(handle, inode); if (ret2 < 0) ret = ret2; ret2 = ext4_journal_stop(handle); if (!ret) ret = ret2; if (pos + len > inode->i_size) { ext4_truncate_failed_write(inode); /* * If truncate failed early the inode might still be * on the orphan list; we need to make sure the inode * is removed from the orphan list in that case. */ if (inode->i_nlink) ext4_orphan_del(NULL, inode); } return ret ? ret : copied; } static int ext4_journalled_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { handle_t *handle = ext4_journal_current_handle(); struct inode *inode = mapping->host; int ret = 0, ret2; int partial = 0; unsigned from, to; loff_t new_i_size; trace_ext4_journalled_write_end(inode, pos, len, copied); from = pos & (PAGE_CACHE_SIZE - 1); to = from + len; if (copied < len) { if (!PageUptodate(page)) copied = 0; page_zero_new_buffers(page, from+copied, to); } ret = walk_page_buffers(handle, page_buffers(page), from, to, &partial, write_end_fn); if (!partial) SetPageUptodate(page); new_i_size = pos + copied; if (new_i_size > inode->i_size) i_size_write(inode, pos+copied); EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; if (new_i_size > EXT4_I(inode)->i_disksize) { ext4_update_i_disksize(inode, new_i_size); ret2 = ext4_mark_inode_dirty(handle, inode); if (!ret) ret = ret2; } unlock_page(page); page_cache_release(page); if (pos + len > inode->i_size && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less. We will have blocks allocated outside * inode->i_size. So truncate them */ ext4_orphan_add(handle, inode); ret2 = ext4_journal_stop(handle); if (!ret) ret = ret2; if (pos + len > inode->i_size) { ext4_truncate_failed_write(inode); /* * If truncate failed early the inode might still be * on the orphan list; we need to make sure the inode * is removed from the orphan list in that case. */ if (inode->i_nlink) ext4_orphan_del(NULL, inode); } return ret ? ret : copied; } static int ext4_da_reserve_space(struct inode *inode, int nrblocks) { int retries = 0; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); unsigned long md_needed, mdblocks, total = 0; /* * recalculate the number of metadata blocks to reserve * in order to allocate nrblocks; * worst case is one extent per block */ repeat: spin_lock(&EXT4_I(inode)->i_block_reservation_lock); total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks; mdblocks = ext4_calc_metadata_amount(inode, total); BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks); md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; total = md_needed + nrblocks; spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); /* * Make quota reservation here to prevent quota overflow * later. Real quota accounting is done at pages writeout * time.
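* * Continuing the made-up numbers from above: a fresh non-extent inode * asking for nrblocks == 2048 gets mdblocks == 4 and md_needed == 4, * so we try to reserve 2048 + 4 == 2052 blocks of quota and free space * in one go below.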
*/ if (vfs_dq_reserve_block(inode, total)) return -EDQUOT; if (ext4_claim_free_blocks(sbi, total)) { vfs_dq_release_reservation_block(inode, total); if (ext4_should_retry_alloc(inode->i_sb, &retries)) { yield(); goto repeat; } return -ENOSPC; } spin_lock(&EXT4_I(inode)->i_block_reservation_lock); EXT4_I(inode)->i_reserved_data_blocks += nrblocks; EXT4_I(inode)->i_reserved_meta_blocks += md_needed; spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); return 0; /* success */ } static void ext4_da_release_space(struct inode *inode, int to_free) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); int total, mdb, mdb_free, release; if (!to_free) return; /* Nothing to release, exit */ spin_lock(&EXT4_I(inode)->i_block_reservation_lock); if (!EXT4_I(inode)->i_reserved_data_blocks) { /* * if there is no reserved blocks, but we try to free some * then the counter is messed up somewhere. * but since this function is called from invalidate * page, it's harmless to return without any action */ printk(KERN_INFO "ext4 delalloc try to release %d reserved " "blocks for inode %lu, but there is no reserved " "data blocks\n", to_free, inode->i_ino); spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); return; } /* recalculate the number of metablocks still need to be reserved */ total = EXT4_I(inode)->i_reserved_data_blocks - to_free; mdb = ext4_calc_metadata_amount(inode, total); /* figure out how many metablocks to release */ BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb; release = to_free + mdb_free; /* update fs dirty blocks counter for truncate case */ percpu_counter_sub(&sbi->s_dirtyblocks_counter, release); /* update per-inode reservations */ BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks); EXT4_I(inode)->i_reserved_data_blocks -= to_free; BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); EXT4_I(inode)->i_reserved_meta_blocks = mdb; spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); vfs_dq_release_reservation_block(inode, release); } static void ext4_da_page_release_reservation(struct page *page, unsigned long offset) { int to_release = 0; struct buffer_head *head, *bh; unsigned int curr_off = 0; head = page_buffers(page); bh = head; do { unsigned int next_off = curr_off + bh->b_size; if ((offset <= curr_off) && (buffer_delay(bh))) { to_release++; clear_buffer_delay(bh); } curr_off = next_off; } while ((bh = bh->b_this_page) != head); ext4_da_release_space(page->mapping->host, to_release); } /* * Delayed allocation stuff */ /* * mpage_da_submit_io - walks through extent of pages and try to write * them with writepage() call back * * @mpd->inode: inode * @mpd->first_page: first page of the extent * @mpd->next_page: page after the last page of the extent * * By the time mpage_da_submit_io() is called we expect all blocks * to be allocated. this may be wrong if allocation failed. * * As pages are already locked by write_cache_pages(), we can't use it */ static int mpage_da_submit_io(struct mpage_da_data *mpd) { long pages_skipped; struct pagevec pvec; unsigned long index, end; int ret = 0, err, nr_pages, i; struct inode *inode = mpd->inode; struct address_space *mapping = inode->i_mapping; BUG_ON(mpd->next_page <= mpd->first_page); /* * We need to start from the first_page to the next_page - 1 * to make sure we also write the mapped dirty buffer_heads. * If we look at mpd->b_blocknr we would only be looking * at the currently mapped buffer_heads. 
*/ index = mpd->first_page; end = mpd->next_page - 1; pagevec_init(&pvec, 0); while (index <= end) { nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; index = page->index; if (index > end) break; index++; BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); pages_skipped = mpd->wbc->pages_skipped; err = mapping->a_ops->writepage(page, mpd->wbc); if (!err && (pages_skipped == mpd->wbc->pages_skipped)) /* * have successfully written the page * without skipping the same */ mpd->pages_written++; /* * In error case, we have to continue because * remaining pages are still locked * XXX: unlock and re-dirty them? */ if (ret == 0) ret = err; } pagevec_release(&pvec); } return ret; } /* * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers * * @mpd->inode - inode to walk through * @exbh->b_blocknr - first block on a disk * @exbh->b_size - amount of space in bytes * @logical - first logical block to start assignment with * * the function goes through all passed space and put actual disk * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten */ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, struct buffer_head *exbh) { struct inode *inode = mpd->inode; struct address_space *mapping = inode->i_mapping; int blocks = exbh->b_size >> inode->i_blkbits; sector_t pblock = exbh->b_blocknr, cur_logical; struct buffer_head *head, *bh; pgoff_t index, end; struct pagevec pvec; int nr_pages, i; index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); pagevec_init(&pvec, 0); while (index <= end) { /* XXX: optimize tail */ nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; index = page->index; if (index > end) break; index++; BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); BUG_ON(!page_has_buffers(page)); bh = page_buffers(page); head = bh; /* skip blocks out of the range */ do { if (cur_logical >= logical) break; cur_logical++; } while ((bh = bh->b_this_page) != head); do { if (cur_logical >= logical + blocks) break; if (buffer_delay(bh) || buffer_unwritten(bh)) { BUG_ON(bh->b_bdev != inode->i_sb->s_bdev); if (buffer_delay(bh)) { clear_buffer_delay(bh); bh->b_blocknr = pblock; } else { /* * unwritten already should have * blocknr assigned. 
Verify that */ clear_buffer_unwritten(bh); BUG_ON(bh->b_blocknr != pblock); } } else if (buffer_mapped(bh)) BUG_ON(bh->b_blocknr != pblock); cur_logical++; pblock++; } while ((bh = bh->b_this_page) != head); } pagevec_release(&pvec); } } /* * __unmap_underlying_blocks - just a helper function to unmap * set of blocks described by @bh */ static inline void __unmap_underlying_blocks(struct inode *inode, struct buffer_head *bh) { struct block_device *bdev = inode->i_sb->s_bdev; int blocks, i; blocks = bh->b_size >> inode->i_blkbits; for (i = 0; i < blocks; i++) unmap_underlying_metadata(bdev, bh->b_blocknr + i); } static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, sector_t logical, long blk_cnt) { int nr_pages, i; pgoff_t index, end; struct pagevec pvec; struct inode *inode = mpd->inode; struct address_space *mapping = inode->i_mapping; index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); end = (logical + blk_cnt - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); while (index <= end) { nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; index = page->index; if (index > end) break; index++; BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); block_invalidatepage(page, 0); ClearPageUptodate(page); unlock_page(page); } } return; } static void ext4_print_free_blocks(struct inode *inode) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); printk(KERN_CRIT "Total free blocks count %lld\n", ext4_count_free_blocks(inode->i_sb)); printk(KERN_CRIT "Free/Dirty block details\n"); printk(KERN_CRIT "free_blocks=%lld\n", (long long) percpu_counter_sum(&sbi->s_freeblocks_counter)); printk(KERN_CRIT "dirty_blocks=%lld\n", (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter)); printk(KERN_CRIT "Block reservation details\n"); printk(KERN_CRIT "i_reserved_data_blocks=%u\n", EXT4_I(inode)->i_reserved_data_blocks); printk(KERN_CRIT "i_reserved_meta_blocks=%u\n", EXT4_I(inode)->i_reserved_meta_blocks); return; } /* * mpage_da_map_blocks - go through given space * * @mpd - bh describing space * * The function skips space we know is already mapped to disk blocks. * */ static int mpage_da_map_blocks(struct mpage_da_data *mpd) { int err, blks, get_blocks_flags; struct buffer_head new; sector_t next = mpd->b_blocknr; unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; loff_t disksize = EXT4_I(mpd->inode)->i_disksize; handle_t *handle = NULL; /* * We consider only non-mapped and non-allocated blocks */ if ((mpd->b_state & (1 << BH_Mapped)) && !(mpd->b_state & (1 << BH_Delay)) && !(mpd->b_state & (1 << BH_Unwritten))) return 0; /* * If we didn't accumulate anything to write simply return */ if (!mpd->b_size) return 0; handle = ext4_journal_current_handle(); BUG_ON(!handle); /* * Call ext4_get_blocks() to allocate any delayed allocation * blocks, or to convert an uninitialized extent to be * initialized (in the case where we have written into * one or more preallocated blocks). * * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to * indicate that we are on the delayed allocation path. This * affects functions in many different parts of the allocation * call path. This flag exists primarily because we don't * want to change *many* call functions, so ext4_get_blocks() * will set the magic i_delalloc_reserved_flag once the * inode's allocation semaphore is taken. 
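* * Concretely (restating the code below, no new behaviour): * get_blocks_flags = EXT4_GET_BLOCKS_CREATE | * EXT4_GET_BLOCKS_DELALLOC_RESERVE, with * EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE OR'd in only when BH_Delay is * set, so the reserved-space counters are adjusted just for blocks * that were actually reserved at write time.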
* * If the blocks in questions were delalloc blocks, set * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting * variables are updated after the blocks have been allocated. */ new.b_state = 0; get_blocks_flags = (EXT4_GET_BLOCKS_CREATE | EXT4_GET_BLOCKS_DELALLOC_RESERVE); if (mpd->b_state & (1 << BH_Delay)) get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE; blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, &new, get_blocks_flags); if (blks < 0) { err = blks; /* * If get block returns with error we simply * return. Later writepage will redirty the page and * writepages will find the dirty page again */ if (err == -EAGAIN) return 0; if (err == -ENOSPC && ext4_count_free_blocks(mpd->inode->i_sb)) { mpd->retval = err; return 0; } /* * get block failure will cause us to loop in * writepages, because a_ops->writepage won't be able * to make progress. The page will be redirtied by * writepage and writepages will again try to write * the same. */ ext4_msg(mpd->inode->i_sb, KERN_CRIT, "delayed block allocation failed for inode %lu at " "logical offset %llu with max blocks %zd with " "error %d\n", mpd->inode->i_ino, (unsigned long long) next, mpd->b_size >> mpd->inode->i_blkbits, err); printk(KERN_CRIT "This should not happen!! " "Data will be lost\n"); if (err == -ENOSPC) { ext4_print_free_blocks(mpd->inode); } /* invalidate all the pages */ ext4_da_block_invalidatepages(mpd, next, mpd->b_size >> mpd->inode->i_blkbits); return err; } BUG_ON(blks == 0); new.b_size = (blks << mpd->inode->i_blkbits); if (buffer_new(&new)) __unmap_underlying_blocks(mpd->inode, &new); /* * If blocks are delayed marked, we need to * put actual blocknr and drop delayed bit */ if ((mpd->b_state & (1 << BH_Delay)) || (mpd->b_state & (1 << BH_Unwritten))) mpage_put_bnr_to_bhs(mpd, next, &new); if (ext4_should_order_data(mpd->inode)) { err = ext4_jbd2_file_inode(handle, mpd->inode); if (err) return err; } /* * Update on-disk size along with block allocation. */ disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; if (disksize > i_size_read(mpd->inode)) disksize = i_size_read(mpd->inode); if (disksize > EXT4_I(mpd->inode)->i_disksize) { ext4_update_i_disksize(mpd->inode, disksize); return ext4_mark_inode_dirty(handle, mpd->inode); } return 0; } #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ (1 << BH_Delay) | (1 << BH_Unwritten)) /* * mpage_add_bh_to_extent - try to add one more block to extent of blocks * * @mpd->lbh - extent of blocks * @logical - logical number of the block in the file * @bh - bh of the block (used to access block's state) * * the function is used to collect contig. blocks in same state */ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical, size_t b_size, unsigned long b_state) { sector_t next; int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; /* check if thereserved journal credits might overflow */ if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) { if (nrblocks >= EXT4_MAX_TRANS_DATA) { /* * With non-extent format we are limited by the journal * credit available. Total credit needed to insert * nrblocks contiguous blocks is dependent on the * nrblocks. So limit nrblocks. */ goto flush_it; } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) > EXT4_MAX_TRANS_DATA) { /* * Adding the new buffer_head would make it cross the * allowed limit for which we have journal credit * reserved. 
So limit the new bh->b_size */ b_size = (EXT4_MAX_TRANS_DATA - nrblocks) << mpd->inode->i_blkbits; /* we will do mpage_da_submit_io in the next loop */ } } /* * First block in the extent */ if (mpd->b_size == 0) { mpd->b_blocknr = logical; mpd->b_size = b_size; mpd->b_state = b_state & BH_FLAGS; return; } next = mpd->b_blocknr + nrblocks; /* * Can we merge the block to our big extent? */ if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) { mpd->b_size += b_size; return; } flush_it: /* * We couldn't merge the block to our extent, so we * need to flush current extent and start new one */ if (mpage_da_map_blocks(mpd) == 0) mpage_da_submit_io(mpd); mpd->io_done = 1; return; } static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) { return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); } /* * __mpage_da_writepage - finds extent of pages and blocks * * @page: page to consider * @wbc: not used, we just follow rules * @data: context * * The function finds extents of pages and scan them for all blocks. */ static int __mpage_da_writepage(struct page *page, struct writeback_control *wbc, void *data) { struct mpage_da_data *mpd = data; struct inode *inode = mpd->inode; struct buffer_head *bh, *head; sector_t logical; if (mpd->io_done) { /* * Rest of the page in the page_vec * redirty then and skip then. We will * try to write them again after * starting a new transaction */ redirty_page_for_writepage(wbc, page); unlock_page(page); return MPAGE_DA_EXTENT_TAIL; } /* * Can we merge this page to current extent? */ if (mpd->next_page != page->index) { /* * Nope, we can't. So, we map non-allocated blocks * and start IO on them using writepage() */ if (mpd->next_page != mpd->first_page) { if (mpage_da_map_blocks(mpd) == 0) mpage_da_submit_io(mpd); /* * skip rest of the page in the page_vec */ mpd->io_done = 1; redirty_page_for_writepage(wbc, page); unlock_page(page); return MPAGE_DA_EXTENT_TAIL; } /* * Start next extent of pages ... */ mpd->first_page = page->index; /* * ... and blocks */ mpd->b_size = 0; mpd->b_state = 0; mpd->b_blocknr = 0; } mpd->next_page = page->index + 1; logical = (sector_t) page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); if (!page_has_buffers(page)) { mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE, (1 << BH_Dirty) | (1 << BH_Uptodate)); if (mpd->io_done) return MPAGE_DA_EXTENT_TAIL; } else { /* * Page with regular buffer heads, just add all dirty ones */ head = page_buffers(page); bh = head; do { BUG_ON(buffer_locked(bh)); /* * We need to try to allocate * unmapped blocks in the same page. * Otherwise we won't make progress * with the page in ext4_writepage */ if (ext4_bh_delay_or_unwritten(NULL, bh)) { mpage_add_bh_to_extent(mpd, logical, bh->b_size, bh->b_state); if (mpd->io_done) return MPAGE_DA_EXTENT_TAIL; } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { /* * mapped dirty buffer. We need to update * the b_state because we look at * b_state in mpage_da_map_blocks. We don't * update b_size because if we find an * unmapped buffer_head later we need to * use the b_state flag of that buffer_head. */ if (mpd->b_size == 0) mpd->b_state = bh->b_state & BH_FLAGS; } logical++; } while ((bh = bh->b_this_page) != head); } return 0; } /* * This is a special get_blocks_t callback which is used by * ext4_da_write_begin(). It will either return mapped block or * reserve space for a single block. * * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 
 * We also have b_blocknr = -1 and b_bdev initialized properly
 *
 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = the physical block backing the unwritten extent
 * and b_bdev initialized properly.
 */
static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh_result, int create)
{
	int ret = 0;
	sector_t invalid_block = ~((sector_t) 0xffff);

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	BUG_ON(create == 0);
	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);

	/*
	 * first, we need to know whether the block is allocated already;
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0);
	if ((ret == 0) && !buffer_delay(bh_result)) {
		/* the block isn't (pre)allocated yet, let's reserve space */
		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		ret = ext4_da_reserve_space(inode, 1);
		if (ret)
			/* not enough space to reserve */
			return ret;

		map_bh(bh_result, inode->i_sb, invalid_block);
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
	} else if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		if (buffer_unwritten(bh_result)) {
			/* A delayed write to an unwritten bh should
			 * be marked new and mapped.  Mapped ensures
			 * that we don't do get_block multiple times
			 * when we write to the same offset, and new
			 * ensures that we do proper zero out for
			 * partial write.
			 */
			set_buffer_new(bh_result);
			set_buffer_mapped(bh_result);
		}
		ret = 0;
	}

	return ret;
}

/*
 * This function is used as a standard get_block_t callback function
 * when there is no desire to allocate any blocks.  It is used as a
 * callback function for block_prepare_write(), nobh_writepage(), and
 * block_write_full_page().  These functions should only try to map a
 * single block at a time.
 *
 * Since this function doesn't do block allocations even if the caller
 * requests it by passing in create=1, it is critically important that
 * any caller checks to make sure that any buffer heads returned by
 * this function are either all already mapped or marked for delayed
 * allocation before calling nobh_writepage() or
 * block_write_full_page().  Otherwise, b_blocknr could be left
 * uninitialized, and the page write functions will be taken by
 * surprise.
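 *
 * For example, ext4_writepage() below uses it as
 *
 *	block_write_full_page(page, noalloc_get_block_write, wbc);
 *
 * once it has verified that every buffer in the page is already mapped
 * and not delayed.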
*/ static int noalloc_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int ret = 0; unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); /* * we don't want to do block allocation in writepage * so call get_block_wrap with create = 0 */ ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0); if (ret > 0) { bh_result->b_size = (ret << inode->i_blkbits); ret = 0; } return ret; } static int bget_one(handle_t *handle, struct buffer_head *bh) { get_bh(bh); return 0; } static int bput_one(handle_t *handle, struct buffer_head *bh) { put_bh(bh); return 0; } static int __ext4_journalled_writepage(struct page *page, struct writeback_control *wbc, unsigned int len) { struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; struct buffer_head *page_bufs; handle_t *handle = NULL; int ret = 0; int err; page_bufs = page_buffers(page); BUG_ON(!page_bufs); walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one); /* As soon as we unlock the page, it can go away, but we have * references to buffers so we are safe */ unlock_page(page); handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, do_journal_get_write_access); err = walk_page_buffers(handle, page_bufs, 0, len, NULL, write_end_fn); if (ret == 0) ret = err; err = ext4_journal_stop(handle); if (!ret) ret = err; walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; out: return ret; } /* * Note that we don't need to start a transaction unless we're journaling data * because we should have holes filled from ext4_page_mkwrite(). We even don't * need to file the inode to the transaction's list in ordered mode because if * we are writing back data added by write(), the inode is already there and if * we are writing back data modified via mmap(), noone guarantees in which * transaction the data will hit the disk. In case we are journaling data, we * cannot start transaction directly because transaction start ranks above page * lock so we have to do some magic. * * This function can get called via... * - ext4_da_writepages after taking page lock (have journal handle) * - journal_submit_inode_data_buffers (no journal handle) * - shrink_page_list via pdflush (no journal handle) * - grab_page_cache when doing write_begin (have journal handle) * * We don't do any block allocation in this function. If we have page with * multiple blocks we need to write those buffer_heads that are mapped. This * is important for mmaped based write. So if we do with blocksize 1K * truncate(f, 1024); * a = mmap(f, 0, 4096); * a[0] = 'a'; * truncate(f, 4096); * we have in the page first buffer_head mapped via page_mkwrite call back * but other bufer_heads would be unmapped but dirty(dirty done via the * do_wp_page). So writepage should write the first block. If we modify * the mmap area beyond 1024 we will again get a page_fault and the * page_mkwrite callback will do the block allocation and mark the * buffer_heads mapped. * * We redirty the page if we have any buffer_heads that is either delay or * unwritten in the page. * * We can get recursively called as show below. * * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> * ext4_writepage() * * But since we don't do any block allocation we should not deadlock. 
 * The page also has the dirty flag cleared, so we don't get recursive
 * page_lock.
 */
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
{
	int ret = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs;
	struct inode *inode = page->mapping->host;

	trace_ext4_writepage(inode, page);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	if (page_has_buffers(page)) {
		page_bufs = page_buffers(page);
		if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
					ext4_bh_delay_or_unwritten)) {
			/*
			 * We don't want to do block allocation, so
			 * redirty the page and return.  We may reach
			 * here when we do a journal commit via
			 * journal_submit_inode_data_buffers.  If we
			 * don't have a mapping block we just ignore
			 * them.  We can also reach here via
			 * shrink_page_list.
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	} else {
		/*
		 * The test for page_has_buffers() is subtle:
		 * We know the page is dirty but it lost buffers.  That means
		 * that at some moment in time after write_begin()/write_end()
		 * has been called all buffers have been clean and thus they
		 * must have been written at least once.  So they are all
		 * mapped and we can happily proceed with mapping them
		 * and writing the page.
		 *
		 * Try to initialize the buffer_heads and check whether
		 * all are mapped and non delay.  We don't want to
		 * do block allocation here.
		 */
		ret = block_prepare_write(page, 0, len,
					  noalloc_get_block_write);
		if (!ret) {
			page_bufs = page_buffers(page);
			/* check whether all are mapped and non delay */
			if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
						ext4_bh_delay_or_unwritten)) {
				redirty_page_for_writepage(wbc, page);
				unlock_page(page);
				return 0;
			}
		} else {
			/*
			 * We can't do block allocation here, so just
			 * redirty the page, unlock it and return.
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
		/* now mark the buffer_heads as dirty and uptodate */
		block_commit_write(page, 0, len);
	}

	if (PageChecked(page) && ext4_should_journal_data(inode)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		return __ext4_journalled_writepage(page, wbc, len);
	}

	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
		ret = nobh_writepage(page, noalloc_get_block_write, wbc);
	else
		ret = block_write_full_page(page, noalloc_get_block_write,
					    wbc);

	return ret;
}

/*
 * This is called via ext4_da_writepages() to
 * calculate the total number of credits to reserve to fit
 * a single extent allocation into a single transaction;
 * ext4_da_writepages() will loop calling this before
 * the block allocation.
 */
static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;

	/*
	 * With non-extent format the journal credit needed to
	 * insert nrblocks contiguous blocks is dependent on the
	 * number of contiguous blocks.
So we will limit * number of contiguous block to a sane value */ if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) && (max_blocks > EXT4_MAX_TRANS_DATA)) max_blocks = EXT4_MAX_TRANS_DATA; return ext4_chunk_trans_blocks(inode, max_blocks); } static int ext4_da_writepages(struct address_space *mapping, struct writeback_control *wbc) { pgoff_t index; int range_whole = 0; handle_t *handle = NULL; struct mpage_da_data mpd; struct inode *inode = mapping->host; int no_nrwrite_index_update; int pages_written = 0; long pages_skipped; unsigned int max_pages; int range_cyclic, cycled = 1, io_done = 0; int needed_blocks, ret = 0; long desired_nr_to_write, nr_to_writebump = 0; loff_t range_start = wbc->range_start; struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); trace_ext4_da_writepages(inode, wbc); /* * No pages to write? This is mainly a kludge to avoid starting * a transaction for special inodes like journal inode on last iput() * because that could violate lock ordering on umount */ if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) return 0; /* * If the filesystem has aborted, it is read-only, so return * right away instead of dumping stack traces later on that * will obscure the real source of the problem. We test * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because * the latter could be true if the filesystem is mounted * read-only, and in that case, ext4_da_writepages should * *never* be called, so if that ever happens, we would want * the stack trace. */ if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) return -EROFS; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; range_cyclic = wbc->range_cyclic; if (wbc->range_cyclic) { index = mapping->writeback_index; if (index) cycled = 0; wbc->range_start = index << PAGE_CACHE_SHIFT; wbc->range_end = LLONG_MAX; wbc->range_cyclic = 0; } else index = wbc->range_start >> PAGE_CACHE_SHIFT; /* * This works around two forms of stupidity. The first is in * the writeback code, which caps the maximum number of pages * written to be 1024 pages. This is wrong on multiple * levels; different architectues have a different page size, * which changes the maximum amount of data which gets * written. Secondly, 4 megabytes is way too small. XFS * forces this value to be 16 megabytes by multiplying * nr_to_write parameter by four, and then relies on its * allocator to allocate larger extents to make them * contiguous. Unfortunately this brings us to the second * stupidity, which is that ext4's mballoc code only allocates * at most 2048 blocks. So we force contiguous writes up to * the number of dirty blocks in the inode, or * sbi->max_writeback_mb_bump whichever is smaller. */ max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT); if (!range_cyclic && range_whole) desired_nr_to_write = wbc->nr_to_write * 8; else desired_nr_to_write = ext4_num_dirty_pages(inode, index, max_pages); if (desired_nr_to_write > max_pages) desired_nr_to_write = max_pages; if (wbc->nr_to_write < desired_nr_to_write) { nr_to_writebump = desired_nr_to_write - wbc->nr_to_write; wbc->nr_to_write = desired_nr_to_write; } mpd.wbc = wbc; mpd.inode = mapping->host; /* * we don't want write_cache_pages to update * nr_to_write and writeback_index */ no_nrwrite_index_update = wbc->no_nrwrite_index_update; wbc->no_nrwrite_index_update = 1; pages_skipped = wbc->pages_skipped; retry: while (!ret && wbc->nr_to_write > 0) { /* * we insert one extent at a time. So we need * credit needed for single extent allocation. 
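 * (For example: on a non-extent file, ext4_da_writepages_trans_blocks()
 * above caps the estimate at EXT4_MAX_TRANS_DATA blocks before asking
 * ext4_chunk_trans_blocks() for the credit count, however many blocks
 * are actually reserved in i_reserved_data_blocks.)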
* journalled mode is currently not supported * by delalloc */ BUG_ON(ext4_should_journal_data(inode)); needed_blocks = ext4_da_writepages_trans_blocks(inode); /* start a new transaction*/ handle = ext4_journal_start(inode, needed_blocks); if (IS_ERR(handle)) { ret = PTR_ERR(handle); ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " "%ld pages, ino %lu; err %d\n", __func__, wbc->nr_to_write, inode->i_ino, ret); goto out_writepages; } /* * Now call __mpage_da_writepage to find the next * contiguous region of logical blocks that need * blocks to be allocated by ext4. We don't actually * submit the blocks for I/O here, even though * write_cache_pages thinks it will, and will set the * pages as clean for write before calling * __mpage_da_writepage(). */ mpd.b_size = 0; mpd.b_state = 0; mpd.b_blocknr = 0; mpd.first_page = 0; mpd.next_page = 0; mpd.io_done = 0; mpd.pages_written = 0; mpd.retval = 0; ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, &mpd); /* * If we have a contigous extent of pages and we * haven't done the I/O yet, map the blocks and submit * them for I/O. */ if (!mpd.io_done && mpd.next_page != mpd.first_page) { if (mpage_da_map_blocks(&mpd) == 0) mpage_da_submit_io(&mpd); mpd.io_done = 1; ret = MPAGE_DA_EXTENT_TAIL; } trace_ext4_da_write_pages(inode, &mpd); wbc->nr_to_write -= mpd.pages_written; ext4_journal_stop(handle); if ((mpd.retval == -ENOSPC) && sbi->s_journal) { /* commit the transaction which would * free blocks released in the transaction * and try again */ jbd2_journal_force_commit_nested(sbi->s_journal); wbc->pages_skipped = pages_skipped; ret = 0; } else if (ret == MPAGE_DA_EXTENT_TAIL) { /* * got one extent now try with * rest of the pages */ pages_written += mpd.pages_written; wbc->pages_skipped = pages_skipped; ret = 0; io_done = 1; } else if (wbc->nr_to_write) /* * There is no more writeout needed * or we requested for a noblocking writeout * and we found the device congested */ break; } if (!io_done && !cycled) { cycled = 1; index = 0; wbc->range_start = index << PAGE_CACHE_SHIFT; wbc->range_end = mapping->writeback_index - 1; goto retry; } if (pages_skipped != wbc->pages_skipped) ext4_msg(inode->i_sb, KERN_CRIT, "This should not happen leaving %s " "with nr_to_write = %ld ret = %d\n", __func__, wbc->nr_to_write, ret); /* Update index */ index += pages_written; wbc->range_cyclic = range_cyclic; if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) /* * set the writeback_index so that range_cyclic * mode will write it back later */ mapping->writeback_index = index; out_writepages: if (!no_nrwrite_index_update) wbc->no_nrwrite_index_update = 0; if (wbc->nr_to_write > nr_to_writebump) wbc->nr_to_write -= nr_to_writebump; wbc->range_start = range_start; trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); return ret; } #define FALL_BACK_TO_NONDELALLOC 1 static int ext4_nonda_switch(struct super_block *sb) { s64 free_blocks, dirty_blocks; struct ext4_sb_info *sbi = EXT4_SB(sb); /* * switch to non delalloc mode if we are running low * on free block. The free block accounting via percpu * counters can get slightly wrong with percpu_counter_batch getting * accumulated on each CPU without updating global counters * Delalloc need an accurate free block accounting. So switch * to non delalloc when we are near to error range. 
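 *
 * For example (numbers purely illustrative): with free_blocks = 140
 * and dirty_blocks = 100, the 2 * free_blocks < 3 * dirty_blocks test
 * below fires and we fall back to non-delalloc even though some
 * blocks are still free.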
*/ free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter); if (2 * free_blocks < 3 * dirty_blocks || free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) { /* * free block count is less that 150% of dirty blocks * or free blocks is less that watermark */ return 1; } return 0; } static int ext4_da_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret, retries = 0; struct page *page; pgoff_t index; unsigned from, to; struct inode *inode = mapping->host; handle_t *handle; index = pos >> PAGE_CACHE_SHIFT; from = pos & (PAGE_CACHE_SIZE - 1); to = from + len; if (ext4_nonda_switch(inode->i_sb)) { *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; return ext4_write_begin(file, mapping, pos, len, flags, pagep, fsdata); } *fsdata = (void *)0; trace_ext4_da_write_begin(inode, pos, len, flags); retry: /* * With delayed allocation, we don't log the i_disksize update * if there is delayed block allocation. But we still need * to journalling the i_disksize update if writes to the end * of file which has an already mapped buffer. */ handle = ext4_journal_start(inode, 1); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } /* We cannot recurse into the filesystem as the transaction is already * started */ flags |= AOP_FLAG_NOFS; page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { ext4_journal_stop(handle); ret = -ENOMEM; goto out; } *pagep = page; ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, ext4_da_get_block_prep); if (ret < 0) { unlock_page(page); ext4_journal_stop(handle); page_cache_release(page); /* * block_write_begin may have instantiated a few blocks * outside i_size. Trim these off again. Don't need * i_size_read because we hold i_mutex. */ if (pos + len > inode->i_size) ext4_truncate_failed_write(inode); } if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; out: return ret; } /* * Check if we should update i_disksize * when write to the end of file but not require block allocation */ static int ext4_da_should_update_i_disksize(struct page *page, unsigned long offset) { struct buffer_head *bh; struct inode *inode = page->mapping->host; unsigned int idx; int i; bh = page_buffers(page); idx = offset >> inode->i_blkbits; for (i = 0; i < idx; i++) bh = bh->b_this_page; if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) return 0; return 1; } static int ext4_da_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; int ret = 0, ret2; handle_t *handle = ext4_journal_current_handle(); loff_t new_i_size; unsigned long start, end; int write_mode = (int)(unsigned long)fsdata; if (write_mode == FALL_BACK_TO_NONDELALLOC) { if (ext4_should_order_data(inode)) { return ext4_ordered_write_end(file, mapping, pos, len, copied, page, fsdata); } else if (ext4_should_writeback_data(inode)) { return ext4_writeback_write_end(file, mapping, pos, len, copied, page, fsdata); } else { BUG(); } } trace_ext4_da_write_end(inode, pos, len, copied); start = pos & (PAGE_CACHE_SIZE - 1); end = start + copied - 1; /* * generic_write_end() will run mark_inode_dirty() if i_size * changes. So let's piggyback the i_disksize mark_inode_dirty * into that. 
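 *
 * For example (illustrative numbers): a 100-byte write at pos 4000
 * yields new_i_size = 4100; if i_disksize was 4096 and the tail block
 * is already mapped, we bump i_disksize here without any allocation.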
 */
	new_i_size = pos + copied;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		if (ext4_da_should_update_i_disksize(page, end)) {
			down_write(&EXT4_I(inode)->i_data_sem);
			if (new_i_size > EXT4_I(inode)->i_disksize) {
				/*
				 * Updating i_disksize when extending file
				 * without needing block allocation
				 */
				if (ext4_should_order_data(inode))
					ret = ext4_jbd2_file_inode(handle,
								   inode);

				EXT4_I(inode)->i_disksize = new_i_size;
			}
			up_write(&EXT4_I(inode)->i_data_sem);
			/* We need to mark the inode dirty even if
			 * new_i_size is less than inode->i_size
			 * but greater than i_disksize. (hint: delalloc)
			 */
			ext4_mark_inode_dirty(handle, inode);
		}
	}
	ret2 = generic_write_end(file, mapping, pos, len, copied,
				 page, fsdata);
	copied = ret2;
	if (ret2 < 0)
		ret = ret2;
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	return ret ? ret : copied;
}

static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
{
	/*
	 * Drop reserved blocks
	 */
	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	ext4_da_page_release_reservation(page, offset);

out:
	ext4_invalidatepage(page, offset);

	return;
}

/*
 * Force all delayed allocation blocks to be allocated for a given inode.
 */
int ext4_alloc_da_blocks(struct inode *inode)
{
	trace_ext4_alloc_da_blocks(inode);

	if (!EXT4_I(inode)->i_reserved_data_blocks &&
	    !EXT4_I(inode)->i_reserved_meta_blocks)
		return 0;

	/*
	 * We do something simple for now.  The filemap_flush() will
	 * also start triggering a write of the data blocks, which is
	 * not strictly speaking necessary (and for users of
	 * laptop_mode, not even desirable).  However, to do otherwise
	 * would require replicating code paths in:
	 *
	 * ext4_da_writepages() ->
	 *    write_cache_pages() ---> (via passed in callback function)
	 *       __mpage_da_writepage() -->
	 *          mpage_add_bh_to_extent()
	 *          mpage_da_map_blocks()
	 *
	 * The problem is that write_cache_pages(), located in
	 * mm/page-writeback.c, marks pages clean in preparation for
	 * doing I/O, which is not desirable if we're not planning on
	 * doing I/O at all.
	 *
	 * We could call write_cache_pages(), and then redirty all of
	 * the pages by calling redirty_page_for_writepage() but that
	 * would be ugly in the extreme.  So instead we would need to
	 * replicate parts of the code in the above functions,
	 * simplifying them because we wouldn't actually intend to
	 * write out the pages, but rather only collect contiguous
	 * logical block extents, call the multi-block allocator, and
	 * then update the buffer heads with the block allocations.
	 *
	 * For now, though, we'll cheat by calling filemap_flush(),
	 * which will map the blocks, and start the I/O, but not
	 * actually wait for the I/O to complete.
	 */
	return filemap_flush(inode->i_mapping);
}

/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext4 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
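 *
 * For reference, a userspace sketch of how this path is typically
 * reached (FIBMAP requires CAP_SYS_RAWIO, as noted below):
 *
 *	int blk = 0;			/* logical block to look up */
 *	ioctl(fd, FIBMAP, &blk);	/* blk now holds the disk block */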
*/ static sector_t ext4_bmap(struct address_space *mapping, sector_t block) { struct inode *inode = mapping->host; journal_t *journal; int err; if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && test_opt(inode->i_sb, DELALLOC)) { /* * With delalloc we want to sync the file * so that we can make sure we allocate * blocks for file */ filemap_write_and_wait(mapping); } if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { /* * This is a REALLY heavyweight approach, but the use of * bmap on dirty files is expected to be extremely rare: * only if we run lilo or swapon on a freshly made file * do we expect this to happen. * * (bmap requires CAP_SYS_RAWIO so this does not * represent an unprivileged user DOS attack --- we'd be * in trouble if mortal users could trigger this path at * will.) * * NB. EXT4_STATE_JDATA is not set on files other than * regular files. If somebody wants to bmap a directory * or symlink and gets confused because the buffer * hasn't yet been flushed to disk, they deserve * everything they get. */ EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA; journal = EXT4_JOURNAL(inode); jbd2_journal_lock_updates(journal); err = jbd2_journal_flush(journal); jbd2_journal_unlock_updates(journal); if (err) return 0; } return generic_block_bmap(mapping, block, ext4_get_block); } static int ext4_readpage(struct file *file, struct page *page) { return mpage_readpage(page, ext4_get_block); } static int ext4_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); } static void ext4_invalidatepage(struct page *page, unsigned long offset) { journal_t *journal = EXT4_JOURNAL(page->mapping->host); /* * If it's a full truncate we just forget about the pending dirtying */ if (offset == 0) ClearPageChecked(page); if (journal) jbd2_journal_invalidatepage(journal, page, offset); else block_invalidatepage(page, offset); } static int ext4_releasepage(struct page *page, gfp_t wait) { journal_t *journal = EXT4_JOURNAL(page->mapping->host); WARN_ON(PageChecked(page)); if (!page_has_buffers(page)) return 0; if (journal) return jbd2_journal_try_to_free_buffers(journal, page, wait); else return try_to_free_buffers(page); } /* * O_DIRECT for ext3 (or indirect map) based files * * If the O_DIRECT write will extend the file then add this inode to the * orphan list. So recovery will truncate it back to the original size * if the machine crashes during the write. * * If the O_DIRECT write is intantiating holes inside i_size and the machine * crashes then stale disk data _may_ be exposed inside the file. But current * VFS code falls back into buffered path in that case so we are safe. 
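 *
 * The crash-safety dance for an extending write is thus roughly:
 *
 *	ext4_orphan_add(handle, inode);		/* before the write */
 *	blockdev_direct_IO(...);		/* may crash in here */
 *	ext4_orphan_del(handle, inode);		/* once i_size is safe */
 *
 * so that recovery can truncate away any blocks past the old size.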
*/ static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct ext4_inode_info *ei = EXT4_I(inode); handle_t *handle; ssize_t ret; int orphan = 0; size_t count = iov_length(iov, nr_segs); int retries = 0; if (rw == WRITE) { loff_t final_size = offset + count; if (final_size > inode->i_size) { /* Credits for sb + inode write */ handle = ext4_journal_start(inode, 2); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } ret = ext4_orphan_add(handle, inode); if (ret) { ext4_journal_stop(handle); goto out; } orphan = 1; ei->i_disksize = inode->i_size; ext4_journal_stop(handle); } } retry: ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, nr_segs, ext4_get_block, NULL); if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; if (orphan) { int err; /* Credits for sb + inode write */ handle = ext4_journal_start(inode, 2); if (IS_ERR(handle)) { /* This is really bad luck. We've written the data * but cannot extend i_size. Bail out and pretend * the write failed... */ ret = PTR_ERR(handle); goto out; } if (inode->i_nlink) ext4_orphan_del(handle, inode); if (ret > 0) { loff_t end = offset + ret; if (end > inode->i_size) { ei->i_disksize = end; i_size_write(inode, end); /* * We're going to return a positive `ret' * here due to non-zero-length I/O, so there's * no way of reporting error returns from * ext4_mark_inode_dirty() to userspace. So * ignore it. */ ext4_mark_inode_dirty(handle, inode); } } err = ext4_journal_stop(handle); if (ret == 0) ret = err; } out: return ret; } static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { handle_t *handle = NULL; int ret = 0; unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; int dio_credits; ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n", inode->i_ino, create); /* * DIO VFS code passes create = 0 flag for write to * the middle of file. It does this to avoid block * allocation for holes, to prevent expose stale data * out when there is parallel buffered read (which does * not hold the i_mutex lock) while direct IO write has * not completed. DIO request on holes finally falls back * to buffered IO for this reason. * * For ext4 extent based file, since we support fallocate, * new allocated extent as uninitialized, for holes, we * could fallocate blocks for holes, thus parallel * buffered IO read will zero out the page when read on * a hole while parallel DIO write to the hole has not completed. * * when we come here, we know it's a direct IO write to * to the middle of file (<i_size) * so it's safe to override the create flag from VFS. 
*/ create = EXT4_GET_BLOCKS_DIO_CREATE_EXT; if (max_blocks > DIO_MAX_BLOCKS) max_blocks = DIO_MAX_BLOCKS; dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); handle = ext4_journal_start(inode, dio_credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, create); if (ret > 0) { bh_result->b_size = (ret << inode->i_blkbits); ret = 0; } ext4_journal_stop(handle); out: return ret; } static void ext4_free_io_end(ext4_io_end_t *io) { BUG_ON(!io); iput(io->inode); kfree(io); } static void dump_aio_dio_list(struct inode * inode) { #ifdef EXT4_DEBUG struct list_head *cur, *before, *after; ext4_io_end_t *io, *io0, *io1; if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){ ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino); return; } ext4_debug("Dump inode %lu aio_dio_completed_IO list \n", inode->i_ino); list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list){ cur = &io->list; before = cur->prev; io0 = container_of(before, ext4_io_end_t, list); after = cur->next; io1 = container_of(after, ext4_io_end_t, list); ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n", io, inode->i_ino, io0, io1); } #endif } /* * check a range of space and convert unwritten extents to written. */ static int ext4_end_aio_dio_nolock(ext4_io_end_t *io) { struct inode *inode = io->inode; loff_t offset = io->offset; size_t size = io->size; int ret = 0; ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p," "list->prev 0x%p\n", io, inode->i_ino, io->list.next, io->list.prev); if (list_empty(&io->list)) return ret; if (io->flag != DIO_AIO_UNWRITTEN) return ret; if (offset + size <= i_size_read(inode)) ret = ext4_convert_unwritten_extents(inode, offset, size); if (ret < 0) { printk(KERN_EMERG "%s: failed to convert unwritten" "extents to written extents, error is %d" " io is still on inode %lu aio dio list\n", __func__, ret, inode->i_ino); return ret; } /* clear the DIO AIO unwritten flag */ io->flag = 0; return ret; } /* * work on completed aio dio IO, to convert unwritten extents to extents */ static void ext4_end_aio_dio_work(struct work_struct *work) { ext4_io_end_t *io = container_of(work, ext4_io_end_t, work); struct inode *inode = io->inode; int ret = 0; mutex_lock(&inode->i_mutex); ret = ext4_end_aio_dio_nolock(io); if (ret >= 0) { if (!list_empty(&io->list)) list_del_init(&io->list); ext4_free_io_end(io); } mutex_unlock(&inode->i_mutex); } /* * This function is called from ext4_sync_file(). * * When AIO DIO IO is completed, the work to convert unwritten * extents to written is queued on workqueue but may not get immediately * scheduled. When fsync is called, we need to ensure the * conversion is complete before fsync returns. * The inode keeps track of a list of completed AIO from DIO path * that might needs to do the conversion. This function walks through * the list and convert the related unwritten extents to written. */ int flush_aio_dio_completed_IO(struct inode *inode) { ext4_io_end_t *io; int ret = 0; int ret2 = 0; if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) return ret; dump_aio_dio_list(inode); while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){ io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next, ext4_io_end_t, list); /* * Calling ext4_end_aio_dio_nolock() to convert completed * IO to written. * * When ext4_sync_file() is called, run_queue() may already * about to flush the work corresponding to this io structure. 
 * It will be upset if it finds that the io structure related
 * to the work to be scheduled has been freed.
 *
 * Thus we need to keep the io structure still valid here after
 * the conversion has finished.  The io structure has a flag to
 * avoid converting twice, from both fsync and the background
 * workqueue.
 */
		ret = ext4_end_aio_dio_nolock(io);
		if (ret < 0)
			ret2 = ret;
		else
			list_del_init(&io->list);
	}
	return (ret2 < 0) ? ret2 : 0;
}

static ext4_io_end_t *ext4_init_io_end(struct inode *inode)
{
	ext4_io_end_t *io = NULL;

	io = kmalloc(sizeof(*io), GFP_NOFS);

	if (io) {
		igrab(inode);
		io->inode = inode;
		io->flag = 0;
		io->offset = 0;
		io->size = 0;
		io->error = 0;
		INIT_WORK(&io->work, ext4_end_aio_dio_work);
		INIT_LIST_HEAD(&io->list);
	}

	return io;
}

static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
			    ssize_t size, void *private)
{
	ext4_io_end_t *io_end = iocb->private;
	struct workqueue_struct *wq;

	/* if not async direct IO or dio with 0 bytes write, just return */
	if (!io_end || !size)
		return;

	ext_debug("ext4_end_io_dio(): io_end 0x%p "
		  "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
		  iocb->private, io_end->inode->i_ino, iocb, offset,
		  size);

	/* if not aio dio with unwritten extents, just free io and return */
	if (io_end->flag != DIO_AIO_UNWRITTEN) {
		ext4_free_io_end(io_end);
		iocb->private = NULL;
		return;
	}

	io_end->offset = offset;
	io_end->size = size;
	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;

	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);

	/* Add the io_end to the per-inode completed aio dio list */
	list_add_tail(&io_end->list,
		      &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
	iocb->private = NULL;
}

/*
 * For ext4 extent files, ext4 will do direct-io write to holes,
 * preallocated extents, and writes that extend the file, with no need
 * to fall back to buffered IO.
 *
 * For holes, we fallocate those blocks and mark them as uninitialized.
 * If those blocks were preallocated, we make sure they are split, but
 * still keep the range to write as uninitialized.
 *
 * The unwritten extents will be converted to written when DIO is
 * completed.  For async direct IO, since the IO may still be pending
 * when we return, we set up an end_io callback function, which will do
 * the conversion when the async direct IO is completed.
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 */
static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
				  const struct iovec *iov, loff_t offset,
				  unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;
	size_t count = iov_length(iov, nr_segs);

	loff_t final_size = offset + count;
	if (rw == WRITE && final_size <= inode->i_size) {
		/*
		 * We could direct write to holes and fallocate.
		 *
		 * Allocated blocks to fill the hole are marked as
		 * uninitialized to prevent a parallel buffered read
		 * from exposing the stale data before DIO completes
		 * the data IO.
		 *
		 * As to previously fallocated extents, ext4 get_block
		 * will simply mark the buffer mapped but still keep
		 * the extents uninitialized.
		 *
		 * For the non AIO case, we will convert those unwritten
		 * extents to written after returning from
		 * blockdev_direct_IO.
		 *
		 * For async DIO, the conversion needs to be deferred
		 * until the IO is completed.  The ext4 end_io callback
		 * function will be called to take care of the
		 * conversion work.  Here, for the async case, we
		 * allocate an io_end structure to hook to the iocb.
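		 *
		 * In short, the async path set up below is roughly:
		 *
		 *	iocb->private = ext4_init_io_end(inode);
		 *	blockdev_direct_IO(..., ext4_end_io_dio);
		 *	  -> ext4_end_io_dio() queues ext4_end_aio_dio_work()
		 *	  -> the work converts the unwritten extents once
		 *	     the IO has completed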
		 */
		iocb->private = NULL;
		EXT4_I(inode)->cur_aio_dio = NULL;
		if (!is_sync_kiocb(iocb)) {
			iocb->private = ext4_init_io_end(inode);
			if (!iocb->private)
				return -ENOMEM;
			/*
			 * we save the io structure for the current async
			 * direct IO, so that later ext4_get_blocks()
			 * can flag in the io structure whether there
			 * are unwritten extents that need to be
			 * converted when the IO is completed.
			 */
			EXT4_I(inode)->cur_aio_dio = iocb->private;
		}

		ret = blockdev_direct_IO(rw, iocb, inode,
					 inode->i_sb->s_bdev, iov,
					 offset, nr_segs,
					 ext4_get_block_dio_write,
					 ext4_end_io_dio);
		if (iocb->private)
			EXT4_I(inode)->cur_aio_dio = NULL;
		/*
		 * The io_end structure takes a reference to the inode,
		 * that structure needs to be destroyed and the
		 * reference to the inode needs to be dropped, when IO is
		 * complete, even with a 0 byte write, or a failed write.
		 *
		 * In the successful AIO DIO case, the io_end structure will
		 * be destroyed and the reference to the inode will be
		 * dropped after the end_io callback function is called.
		 *
		 * In the case of a 0 byte write, or an error, since
		 * VFS direct IO won't invoke the end_io callback function,
		 * we need to free the end_io structure here.
		 */
		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
			ext4_free_io_end(iocb->private);
			iocb->private = NULL;
		} else if (ret > 0 && (EXT4_I(inode)->i_state &
				       EXT4_STATE_DIO_UNWRITTEN)) {
			int err;
			/*
			 * for the non AIO case, since the IO is already
			 * completed, we can do the conversion right here
			 */
			err = ext4_convert_unwritten_extents(inode,
							     offset, ret);
			if (err < 0)
				ret = err;
			EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
		}
		return ret;
	}

	/* for the case of a write to the end of the file, fall back to
	 * the old way */
	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
}

static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);

	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
}

/*
 * Pages can be marked dirty completely asynchronously from ext4's
 * journalling activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.
 * We cannot do much here because ->set_page_dirty is called under VFS
 * locks.  The page is not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because
 * the buffers' dirty state is "definitive".  We cannot just set the
 * buffers dirty or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
*/ static int ext4_journalled_set_page_dirty(struct page *page) { SetPageChecked(page); return __set_page_dirty_nobuffers(page); } static const struct address_space_operations ext4_ordered_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, .sync_page = block_sync_page, .write_begin = ext4_write_begin, .write_end = ext4_ordered_write_end, .bmap = ext4_bmap, .invalidatepage = ext4_invalidatepage, .releasepage = ext4_releasepage, .direct_IO = ext4_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; static const struct address_space_operations ext4_writeback_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, .sync_page = block_sync_page, .write_begin = ext4_write_begin, .write_end = ext4_writeback_write_end, .bmap = ext4_bmap, .invalidatepage = ext4_invalidatepage, .releasepage = ext4_releasepage, .direct_IO = ext4_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; static const struct address_space_operations ext4_journalled_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, .sync_page = block_sync_page, .write_begin = ext4_write_begin, .write_end = ext4_journalled_write_end, .set_page_dirty = ext4_journalled_set_page_dirty, .bmap = ext4_bmap, .invalidatepage = ext4_invalidatepage, .releasepage = ext4_releasepage, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; static const struct address_space_operations ext4_da_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, .writepages = ext4_da_writepages, .sync_page = block_sync_page, .write_begin = ext4_da_write_begin, .write_end = ext4_da_write_end, .bmap = ext4_bmap, .invalidatepage = ext4_da_invalidatepage, .releasepage = ext4_releasepage, .direct_IO = ext4_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; void ext4_set_aops(struct inode *inode) { if (ext4_should_order_data(inode) && test_opt(inode->i_sb, DELALLOC)) inode->i_mapping->a_ops = &ext4_da_aops; else if (ext4_should_order_data(inode)) inode->i_mapping->a_ops = &ext4_ordered_aops; else if (ext4_should_writeback_data(inode) && test_opt(inode->i_sb, DELALLOC)) inode->i_mapping->a_ops = &ext4_da_aops; else if (ext4_should_writeback_data(inode)) inode->i_mapping->a_ops = &ext4_writeback_aops; else inode->i_mapping->a_ops = &ext4_journalled_aops; } /* * ext4_block_truncate_page() zeroes out a mapping from file offset `from' * up to the end of the block which corresponds to `from'. * This required during truncate. We need to physically zero the tail end * of that block so it doesn't yield old data if the file is later grown. 
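 *
 * For example, with a 4K block size, truncate(fd, 1000) ends up here
 * and zeroes bytes 1000..4095 of the block containing offset 1000.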
*/ int ext4_block_truncate_page(handle_t *handle, struct address_space *mapping, loff_t from) { ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); unsigned blocksize, length, pos; ext4_lblk_t iblock; struct inode *inode = mapping->host; struct buffer_head *bh; struct page *page; int err = 0; page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, mapping_gfp_mask(mapping) & ~__GFP_FS); if (!page) return -EINVAL; blocksize = inode->i_sb->s_blocksize; length = blocksize - (offset & (blocksize - 1)); iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); /* * For "nobh" option, we can only work if we don't need to * read-in the page - otherwise we create buffers to do the IO. */ if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode) && PageUptodate(page)) { zero_user(page, offset, length); set_page_dirty(page); goto unlock; } if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); /* Find the buffer that contains "offset" */ bh = page_buffers(page); pos = blocksize; while (offset >= pos) { bh = bh->b_this_page; iblock++; pos += blocksize; } err = 0; if (buffer_freed(bh)) { BUFFER_TRACE(bh, "freed: skip"); goto unlock; } if (!buffer_mapped(bh)) { BUFFER_TRACE(bh, "unmapped"); ext4_get_block(inode, iblock, bh, 0); /* unmapped? It's a hole - nothing to do */ if (!buffer_mapped(bh)) { BUFFER_TRACE(bh, "still unmapped"); goto unlock; } } /* Ok, it's mapped. Make sure it's up-to-date */ if (PageUptodate(page)) set_buffer_uptodate(bh); if (!buffer_uptodate(bh)) { err = -EIO; ll_rw_block(READ, 1, &bh); wait_on_buffer(bh); /* Uhhuh. Read error. Complain and punt. */ if (!buffer_uptodate(bh)) goto unlock; } if (ext4_should_journal_data(inode)) { BUFFER_TRACE(bh, "get write access"); err = ext4_journal_get_write_access(handle, bh); if (err) goto unlock; } zero_user(page, offset, length); BUFFER_TRACE(bh, "zeroed end of block"); err = 0; if (ext4_should_journal_data(inode)) { err = ext4_handle_dirty_metadata(handle, inode, bh); } else { if (ext4_should_order_data(inode)) err = ext4_jbd2_file_inode(handle, inode); mark_buffer_dirty(bh); } unlock: unlock_page(page); page_cache_release(page); return err; } /* * Probably it should be a library function... search for first non-zero word * or memcmp with zero_page, whatever is better for particular architecture. * Linus? */ static inline int all_zeroes(__le32 *p, __le32 *q) { while (p < q) if (*p++) return 0; return 1; } /** * ext4_find_shared - find the indirect blocks for partial truncation. * @inode: inode in question * @depth: depth of the affected branch * @offsets: offsets of pointers in that branch (see ext4_block_to_path) * @chain: place to store the pointers to partial indirect blocks * @top: place to the (detached) top of branch * * This is a helper function used by ext4_truncate(). * * When we do truncate() we may have to clean the ends of several * indirect blocks but leave the blocks themselves alive. Block is * partially truncated if some data below the new i_size is refered * from it (and it is on the path to the first completely truncated * data block, indeed). We have to free the top of that path along * with everything to the right of the path. Since no allocation * past the truncation point is possible until ext4_truncate() * finishes, we may safely do the latter, but top of branch may * require special attention - pageout below the truncation point * might try to populate it. 
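 *
 * (For example, if the new last block sits two levels of indirection
 * deep, @chain holds the inode slot and the two indirect blocks on the
 * path to it, and everything to the right of that path gets freed.)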
* * We atomically detach the top of branch from the tree, store the * block number of its root in *@top, pointers to buffer_heads of * partially truncated blocks - in @chain[].bh and pointers to * their last elements that should not be removed - in * @chain[].p. Return value is the pointer to last filled element * of @chain. * * The work left to caller to do the actual freeing of subtrees: * a) free the subtree starting from *@top * b) free the subtrees whose roots are stored in * (@chain[i].p+1 .. end of @chain[i].bh->b_data) * c) free the subtrees growing from the inode past the @chain[0]. * (no partially truncated stuff there). */ static Indirect *ext4_find_shared(struct inode *inode, int depth, ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top) { Indirect *partial, *p; int k, err; *top = 0; /* Make k index the deepest non-null offest + 1 */ for (k = depth; k > 1 && !offsets[k-1]; k--) ; partial = ext4_get_branch(inode, k, offsets, chain, &err); /* Writer: pointers */ if (!partial) partial = chain + k-1; /* * If the branch acquired continuation since we've looked at it - * fine, it should all survive and (new) top doesn't belong to us. */ if (!partial->key && *partial->p) /* Writer: end */ goto no_top; for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--) ; /* * OK, we've found the last block that must survive. The rest of our * branch should be detached before unlocking. However, if that rest * of branch is all ours and does not grow immediately from the inode * it's easier to cheat and just decrement partial->p. */ if (p == chain + k - 1 && p > chain) { p->p--; } else { *top = *p->p; /* Nope, don't do this in ext4. Must leave the tree intact */ #if 0 *p->p = 0; #endif } /* Writer: end */ while (partial > p) { brelse(partial->bh); partial--; } no_top: return partial; } /* * Zero a number of block pointers in either an inode or an indirect block. * If we restart the transaction we must again get write access to the * indirect block for further modification. * * We release `count' blocks on disk, but (last - first) may be greater * than `count' because there can be holes in there. */ static void ext4_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t block_to_free, unsigned long count, __le32 *first, __le32 *last) { __le32 *p; int is_metadata = S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode); if (try_to_extend_transaction(handle, inode)) { if (bh) { BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); ext4_handle_dirty_metadata(handle, inode, bh); } ext4_mark_inode_dirty(handle, inode); ext4_truncate_restart_trans(handle, inode, blocks_for_truncate(inode)); if (bh) { BUFFER_TRACE(bh, "retaking write access"); ext4_journal_get_write_access(handle, bh); } } /* * Any buffers which are on the journal will be in memory. We * find them on the hash table so jbd2_journal_revoke() will * run jbd2_journal_forget() on them. We've already detached * each block from the file, so bforget() in * jbd2_journal_forget() should be safe. * * AKPM: turn on bforget in jbd2_journal_forget()!!! 
*/ for (p = first; p < last; p++) { u32 nr = le32_to_cpu(*p); if (nr) { struct buffer_head *tbh; *p = 0; tbh = sb_find_get_block(inode->i_sb, nr); ext4_forget(handle, is_metadata, inode, tbh, nr); } } ext4_free_blocks(handle, inode, block_to_free, count, is_metadata); } /** * ext4_free_data - free a list of data blocks * @handle: handle for this transaction * @inode: inode we are dealing with * @this_bh: indirect buffer_head which contains *@first and *@last * @first: array of block numbers * @last: points immediately past the end of array * * We are freeing all blocks refered from that array (numbers are stored as * little-endian 32-bit) and updating @inode->i_blocks appropriately. * * We accumulate contiguous runs of blocks to free. Conveniently, if these * blocks are contiguous then releasing them at one time will only affect one * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't * actually use a lot of journal space. * * @this_bh will be %NULL if @first and @last point into the inode's direct * block pointers. */ static void ext4_free_data(handle_t *handle, struct inode *inode, struct buffer_head *this_bh, __le32 *first, __le32 *last) { ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */ unsigned long count = 0; /* Number of blocks in the run */ __le32 *block_to_free_p = NULL; /* Pointer into inode/ind corresponding to block_to_free */ ext4_fsblk_t nr; /* Current block # */ __le32 *p; /* Pointer into inode/ind for current block */ int err; if (this_bh) { /* For indirect block */ BUFFER_TRACE(this_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, this_bh); /* Important: if we can't update the indirect pointers * to the blocks, we can't free them. */ if (err) return; } for (p = first; p < last; p++) { nr = le32_to_cpu(*p); if (nr) { /* accumulate blocks to free if they're contiguous */ if (count == 0) { block_to_free = nr; block_to_free_p = p; count = 1; } else if (nr == block_to_free + count) { count++; } else { ext4_clear_blocks(handle, inode, this_bh, block_to_free, count, block_to_free_p, p); block_to_free = nr; block_to_free_p = p; count = 1; } } } if (count > 0) ext4_clear_blocks(handle, inode, this_bh, block_to_free, count, block_to_free_p, p); if (this_bh) { BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata"); /* * The buffer head should have an attached journal head at this * point. However, if the data is corrupted and an indirect * block pointed to itself, it would have been detached when * the block was cleared. Check for this instead of OOPSing. */ if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) ext4_handle_dirty_metadata(handle, inode, this_bh); else ext4_error(inode->i_sb, __func__, "circular indirect block detected, " "inode=%lu, block=%llu", inode->i_ino, (unsigned long long) this_bh->b_blocknr); } } /** * ext4_free_branches - free an array of branches * @handle: JBD handle for this transaction * @inode: inode we are dealing with * @parent_bh: the buffer_head which contains *@first and *@last * @first: array of block numbers * @last: pointer immediately past the end of array * @depth: depth of the branches to free * * We are freeing all blocks refered from these branches (numbers are * stored as little-endian 32-bit) and updating @inode->i_blocks * appropriately. 
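 *
 * For example, with 4K blocks, addr_per_block is 1024, so each entry
 * at @depth == 1 covers up to 1024 data blocks and each extra level
 * of depth multiplies that by another factor of 1024.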
*/ static void ext4_free_branches(handle_t *handle, struct inode *inode, struct buffer_head *parent_bh, __le32 *first, __le32 *last, int depth) { ext4_fsblk_t nr; __le32 *p; if (ext4_handle_is_aborted(handle)) return; if (depth--) { struct buffer_head *bh; int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); p = last; while (--p >= first) { nr = le32_to_cpu(*p); if (!nr) continue; /* A hole */ /* Go read the buffer for the next level down */ bh = sb_bread(inode->i_sb, nr); /* * A read failure? Report error and clear slot * (should be rare). */ if (!bh) { ext4_error(inode->i_sb, "ext4_free_branches", "Read failure, inode=%lu, block=%llu", inode->i_ino, nr); continue; } /* This zaps the entire block. Bottom up. */ BUFFER_TRACE(bh, "free child branches"); ext4_free_branches(handle, inode, bh, (__le32 *) bh->b_data, (__le32 *) bh->b_data + addr_per_block, depth); /* * We've probably journalled the indirect block several * times during the truncate. But it's no longer * needed and we now drop it from the transaction via * jbd2_journal_revoke(). * * That's easy if it's exclusively part of this * transaction. But if it's part of the committing * transaction then jbd2_journal_forget() will simply * brelse() it. That means that if the underlying * block is reallocated in ext4_get_block(), * unmap_underlying_metadata() will find this block * and will try to get rid of it. damn, damn. * * If this block has already been committed to the * journal, a revoke record will be written. And * revoke records must be emitted *before* clearing * this block's bit in the bitmaps. */ ext4_forget(handle, 1, inode, bh, bh->b_blocknr); /* * Everything below this pointer has been * released. Now let this top-of-subtree go. * * We want the freeing of this indirect block to be * atomic in the journal with the updating of the * bitmap block which owns it. So make some room in * the journal. * * We zero the parent pointer *after* freeing its * pointee in the bitmaps, so if extend_transaction() * for some reason fails to put the bitmap changes and * the release into the same transaction, recovery * will merely complain about releasing a free block, * rather than leaking blocks. */ if (ext4_handle_is_aborted(handle)) return; if (try_to_extend_transaction(handle, inode)) { ext4_mark_inode_dirty(handle, inode); ext4_truncate_restart_trans(handle, inode, blocks_for_truncate(inode)); } ext4_free_blocks(handle, inode, nr, 1, 1); if (parent_bh) { /* * The block which we have just freed is * pointed to by an indirect block: journal it */ BUFFER_TRACE(parent_bh, "get_write_access"); if (!ext4_journal_get_write_access(handle, parent_bh)){ *p = 0; BUFFER_TRACE(parent_bh, "call ext4_handle_dirty_metadata"); ext4_handle_dirty_metadata(handle, inode, parent_bh); } } } } else { /* We have reached the bottom of the tree. */ BUFFER_TRACE(parent_bh, "free data blocks"); ext4_free_data(handle, inode, parent_bh, first, last); } } int ext4_can_truncate(struct inode *inode) { if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return 0; if (S_ISREG(inode->i_mode)) return 1; if (S_ISDIR(inode->i_mode)) return 1; if (S_ISLNK(inode->i_mode)) return !ext4_inode_is_fast_symlink(inode); return 0; } /* * ext4_truncate() * * We block out ext4_get_block() block instantiations across the entire * transaction, and VFS/VM ensures that ext4_truncate() cannot run * simultaneously on behalf of the same inode.
* * As we work through the truncate and commit bits of it to the journal there * is one core, guiding principle: the file's tree must always be consistent on * disk. We must be able to restart the truncate after a crash. * * The file's tree may be transiently inconsistent in memory (although it * probably isn't), but whenever we close off and commit a journal transaction, * the contents of (the filesystem + the journal) must be consistent and * restartable. It's pretty simple, really: bottom up, right to left (although * left-to-right works OK too). * * Note that at recovery time, journal replay occurs *before* the restart of * truncate against the orphan inode list. * * The committed inode has the new, desired i_size (which is the same as * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see * that this inode's truncate did not complete and it will again call * ext4_truncate() to have another go. So there will be instantiated blocks * to the right of the truncation point in a crashed ext4 filesystem. But * that's fine - as long as they are linked from the inode, the post-crash * ext4_truncate() run will find them and release them. */ void ext4_truncate(struct inode *inode) { handle_t *handle; struct ext4_inode_info *ei = EXT4_I(inode); __le32 *i_data = ei->i_data; int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); struct address_space *mapping = inode->i_mapping; ext4_lblk_t offsets[4]; Indirect chain[4]; Indirect *partial; __le32 nr = 0; int n; ext4_lblk_t last_block; unsigned blocksize = inode->i_sb->s_blocksize; if (!ext4_can_truncate(inode)) return; if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE; if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { ext4_ext_truncate(inode); return; } handle = start_transaction(inode); if (IS_ERR(handle)) return; /* AKPM: return what? */ last_block = (inode->i_size + blocksize-1) >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); if (inode->i_size & (blocksize - 1)) if (ext4_block_truncate_page(handle, mapping, inode->i_size)) goto out_stop; n = ext4_block_to_path(inode, last_block, offsets, NULL); if (n == 0) goto out_stop; /* error */ /* * OK. This truncate is going to happen. We add the inode to the * orphan list, so that if this truncate spans multiple transactions, * and we crash, we will resume the truncate when the filesystem * recovers. It also marks the inode dirty, to catch the new size. * * Implication: the file must always be in a sane, consistent * truncatable state while each transaction commits. */ if (ext4_orphan_add(handle, inode)) goto out_stop; /* * From here we block out all ext4_get_block() callers who want to * modify the block allocation tree. */ down_write(&ei->i_data_sem); ext4_discard_preallocations(inode); /* * The orphan list entry will now protect us from any crash which * occurs before the truncate completes, so it is now safe to propagate * the new, shorter inode size (held for now in i_size) into the * on-disk inode. We do this via i_disksize, which is the value which * ext4 *really* writes onto the disk inode.
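* (Worked example, sizes assumed for illustration: after a truncate to 4096 bytes on a 4 KiB-block filesystem, i_disksize becomes 4096 right here, so even if we crash halfway through freeing blocks, the replayed inode already carries the final size and the orphan-cleanup truncate described above merely finishes releasing the blocks past it.)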
*/ ei->i_disksize = inode->i_size; if (n == 1) { /* direct blocks */ ext4_free_data(handle, inode, NULL, i_data+offsets[0], i_data + EXT4_NDIR_BLOCKS); goto do_indirects; } partial = ext4_find_shared(inode, n, offsets, chain, &nr); /* Kill the top of shared branch (not detached) */ if (nr) { if (partial == chain) { /* Shared branch grows from the inode */ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, (chain+n-1) - partial); *partial->p = 0; /* * We mark the inode dirty prior to restart, * and prior to stop. No need for it here. */ } else { /* Shared branch grows from an indirect block */ BUFFER_TRACE(partial->bh, "get_write_access"); ext4_free_branches(handle, inode, partial->bh, partial->p, partial->p+1, (chain+n-1) - partial); } } /* Clear the ends of indirect blocks on the shared branch */ while (partial > chain) { ext4_free_branches(handle, inode, partial->bh, partial->p + 1, (__le32*)partial->bh->b_data+addr_per_block, (chain+n-1) - partial); BUFFER_TRACE(partial->bh, "call brelse"); brelse(partial->bh); partial--; } do_indirects: /* Kill the remaining (whole) subtrees */ switch (offsets[0]) { default: nr = i_data[EXT4_IND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); i_data[EXT4_IND_BLOCK] = 0; } case EXT4_IND_BLOCK: nr = i_data[EXT4_DIND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); i_data[EXT4_DIND_BLOCK] = 0; } case EXT4_DIND_BLOCK: nr = i_data[EXT4_TIND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); i_data[EXT4_TIND_BLOCK] = 0; } case EXT4_TIND_BLOCK: ; } up_write(&ei->i_data_sem); inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); /* * In a multi-transaction truncate, we only make the final transaction * synchronous */ if (IS_SYNC(inode)) ext4_handle_sync(handle); out_stop: /* * If this was a simple ftruncate(), and the file will remain alive * then we need to clear up the orphan record which we created above. * However, if this was a real unlink then we were called by * ext4_delete_inode(), and we allow that function to clean up the * orphan info for us. */ if (inode->i_nlink) ext4_orphan_del(handle, inode); ext4_journal_stop(handle); } /* * ext4_get_inode_loc returns with an extra refcount against the inode's * underlying buffer_head on success. If 'in_mem' is true, we have all * data in memory that is needed to recreate the on-disk version of this * inode. 
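* * A worked example of the lookup arithmetic done below, with block and inode sizes assumed purely for illustration: given 4096-byte blocks and 256-byte on-disk inodes, inodes_per_block is 16, so inode number N lives in inode-table block ((N - 1) % inodes-per-group) / 16, at byte offset (((N - 1) % inodes-per-group) % 16) * 256 within that block.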
*/ static int __ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc, int in_mem) { struct ext4_group_desc *gdp; struct buffer_head *bh; struct super_block *sb = inode->i_sb; ext4_fsblk_t block; int inodes_per_block, inode_offset; iloc->bh = NULL; if (!ext4_valid_inum(sb, inode->i_ino)) return -EIO; iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); if (!gdp) return -EIO; /* * Figure out the offset within the block group inode table */ inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb)); inode_offset = ((inode->i_ino - 1) % EXT4_INODES_PER_GROUP(sb)); block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); bh = sb_getblk(sb, block); if (!bh) { ext4_error(sb, "ext4_get_inode_loc", "unable to read " "inode block - inode=%lu, block=%llu", inode->i_ino, block); return -EIO; } if (!buffer_uptodate(bh)) { lock_buffer(bh); /* * If the buffer has the write error flag, we have failed * to write out another inode in the same block. In this * case, we don't have to read the block because we may * read the old inode data successfully. */ if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) set_buffer_uptodate(bh); if (buffer_uptodate(bh)) { /* someone brought it uptodate while we waited */ unlock_buffer(bh); goto has_buffer; } /* * If we have all information of the inode in memory and this * is the only valid inode in the block, we need not read the * block. */ if (in_mem) { struct buffer_head *bitmap_bh; int i, start; start = inode_offset & ~(inodes_per_block - 1); /* Is the inode bitmap in cache? */ bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); if (!bitmap_bh) goto make_io; /* * If the inode bitmap isn't in cache then the * optimisation may end up performing two reads instead * of one, so skip it. */ if (!buffer_uptodate(bitmap_bh)) { brelse(bitmap_bh); goto make_io; } for (i = start; i < start + inodes_per_block; i++) { if (i == inode_offset) continue; if (ext4_test_bit(i, bitmap_bh->b_data)) break; } brelse(bitmap_bh); if (i == start + inodes_per_block) { /* all other inodes are free, so skip I/O */ memset(bh->b_data, 0, bh->b_size); set_buffer_uptodate(bh); unlock_buffer(bh); goto has_buffer; } } make_io: /* * If we need to do any I/O, try to pre-readahead extra * blocks from the inode table. */ if (EXT4_SB(sb)->s_inode_readahead_blks) { ext4_fsblk_t b, end, table; unsigned num; table = ext4_inode_table(sb, gdp); /* s_inode_readahead_blks is always a power of 2 */ b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); if (table > b) b = table; end = b + EXT4_SB(sb)->s_inode_readahead_blks; num = EXT4_INODES_PER_GROUP(sb); if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) num -= ext4_itable_unused_count(sb, gdp); table += num / inodes_per_block; if (end > table) end = table; while (b <= end) sb_breadahead(sb, b++); } /* * There are other valid inodes in the buffer, this inode * has in-inode xattrs, or we don't have this inode in memory. * Read the block from disk. */ get_bh(bh); bh->b_end_io = end_buffer_read_sync; submit_bh(READ_META, bh); wait_on_buffer(bh); if (!buffer_uptodate(bh)) { ext4_error(sb, __func__, "unable to read inode block - inode=%lu, " "block=%llu", inode->i_ino, block); brelse(bh); return -EIO; } } has_buffer: iloc->bh = bh; return 0; } int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) { /* We have all inode data except xattrs in memory here. 
*/ return __ext4_get_inode_loc(inode, iloc, !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)); } void ext4_set_inode_flags(struct inode *inode) { unsigned int flags = EXT4_I(inode)->i_flags; inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); if (flags & EXT4_SYNC_FL) inode->i_flags |= S_SYNC; if (flags & EXT4_APPEND_FL) inode->i_flags |= S_APPEND; if (flags & EXT4_IMMUTABLE_FL) inode->i_flags |= S_IMMUTABLE; if (flags & EXT4_NOATIME_FL) inode->i_flags |= S_NOATIME; if (flags & EXT4_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; } /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ void ext4_get_inode_flags(struct ext4_inode_info *ei) { unsigned int flags = ei->vfs_inode.i_flags; ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL| EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL); if (flags & S_SYNC) ei->i_flags |= EXT4_SYNC_FL; if (flags & S_APPEND) ei->i_flags |= EXT4_APPEND_FL; if (flags & S_IMMUTABLE) ei->i_flags |= EXT4_IMMUTABLE_FL; if (flags & S_NOATIME) ei->i_flags |= EXT4_NOATIME_FL; if (flags & S_DIRSYNC) ei->i_flags |= EXT4_DIRSYNC_FL; } static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, struct ext4_inode_info *ei) { blkcnt_t i_blocks; struct inode *inode = &(ei->vfs_inode); struct super_block *sb = inode->i_sb; if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { /* we are using combined 48 bit field */ i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | le32_to_cpu(raw_inode->i_blocks_lo); if (ei->i_flags & EXT4_HUGE_FILE_FL) { /* i_blocks is in units of the file system block size */ return i_blocks << (inode->i_blkbits - 9); } else { return i_blocks; } } else { return le32_to_cpu(raw_inode->i_blocks_lo); } } struct inode *ext4_iget(struct super_block *sb, unsigned long ino) { struct ext4_iloc iloc; struct ext4_inode *raw_inode; struct ext4_inode_info *ei; struct inode *inode; journal_t *journal = EXT4_SB(sb)->s_journal; long ret; int block; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; ei = EXT4_I(inode); iloc.bh = 0; ret = __ext4_get_inode_loc(inode, &iloc, 0); if (ret < 0) goto bad_inode; raw_inode = ext4_raw_inode(&iloc); inode->i_mode = le16_to_cpu(raw_inode->i_mode); inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); if (!(test_opt(inode->i_sb, NO_UID32))) { inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; } inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); ei->i_state = 0; ei->i_dir_start_lookup = 0; ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); /* We now have enough fields to check if the inode was active or not. * This is needed because nfsd might try to access dead inodes; * the test is the same one that e2fsck uses. * NeilBrown 1999oct15 */ if (inode->i_nlink == 0) { if (inode->i_mode == 0 || !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { /* this inode is deleted */ ret = -ESTALE; goto bad_inode; } /* The only unlinked inodes we let through here have * valid i_mode and are being read by the orphan * recovery code: that's fine, we're about to complete * the process of deleting those.
*/ } ei->i_flags = le32_to_cpu(raw_inode->i_flags); inode->i_blocks = ext4_inode_blocks(raw_inode, ei); ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) ei->i_file_acl |= ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; inode->i_size = ext4_isize(raw_inode); ei->i_disksize = inode->i_size; #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; #endif inode->i_generation = le32_to_cpu(raw_inode->i_generation); ei->i_block_group = iloc.block_group; ei->i_last_alloc_group = ~0; /* * NOTE! The in-memory inode i_data array is in little-endian order * even on big-endian machines: we do NOT byteswap the block numbers! */ for (block = 0; block < EXT4_N_BLOCKS; block++) ei->i_data[block] = raw_inode->i_block[block]; INIT_LIST_HEAD(&ei->i_orphan); /* * Set the transaction ids of transactions that have to be committed * to finish f[data]sync. We set them to the currently running transaction * as we cannot be sure that the inode or some of its metadata isn't * part of the transaction - the inode could have been reclaimed and * now it is reread from disk. */ if (journal) { transaction_t *transaction; tid_t tid; spin_lock(&journal->j_state_lock); if (journal->j_running_transaction) transaction = journal->j_running_transaction; else transaction = journal->j_committing_transaction; if (transaction) tid = transaction->t_tid; else tid = journal->j_commit_sequence; spin_unlock(&journal->j_state_lock); ei->i_sync_tid = tid; ei->i_datasync_tid = tid; } if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > EXT4_INODE_SIZE(inode->i_sb)) { ret = -EIO; goto bad_inode; } if (ei->i_extra_isize == 0) { /* The extra space is currently unused. Use it.
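* (For example - sizes assumed for illustration - with a 256-byte on-disk inode and a 128-byte EXT4_GOOD_OLD_INODE_SIZE, the assignment below makes the newer fields of struct ext4_inode past the first 128 bytes usable.)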
*/ ei->i_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; } else { __le32 *magic = (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) ei->i_state |= EXT4_STATE_XATTR; } } else ei->i_extra_isize = 0; EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); inode->i_version = le32_to_cpu(raw_inode->i_disk_version); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) inode->i_version |= (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; } ret = 0; if (ei->i_file_acl && !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { ext4_error(sb, __func__, "bad extended attribute block %llu in inode #%lu", ei->i_file_acl, inode->i_ino); ret = -EIO; goto bad_inode; } else if (ei->i_flags & EXT4_EXTENTS_FL) { if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || (S_ISLNK(inode->i_mode) && !ext4_inode_is_fast_symlink(inode))) /* Validate extents which are part of the inode */ ret = ext4_ext_check_inode(inode); } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || (S_ISLNK(inode->i_mode) && !ext4_inode_is_fast_symlink(inode))) { /* Validate block references which are part of the inode */ ret = ext4_check_inode_blockref(inode); } if (ret) goto bad_inode; if (S_ISREG(inode->i_mode)) { inode->i_op = &ext4_file_inode_operations; inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &ext4_dir_inode_operations; inode->i_fop = &ext4_dir_operations; } else if (S_ISLNK(inode->i_mode)) { if (ext4_inode_is_fast_symlink(inode)) { inode->i_op = &ext4_fast_symlink_inode_operations; nd_terminate_link(ei->i_data, inode->i_size, sizeof(ei->i_data) - 1); } else { inode->i_op = &ext4_symlink_inode_operations; ext4_set_aops(inode); } } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { inode->i_op = &ext4_special_inode_operations; if (raw_inode->i_block[0]) init_special_inode(inode, inode->i_mode, old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); else init_special_inode(inode, inode->i_mode, new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); } else { ret = -EIO; ext4_error(inode->i_sb, __func__, "bogus i_mode (%o) for inode=%lu", inode->i_mode, inode->i_ino); goto bad_inode; } brelse(iloc.bh); ext4_set_inode_flags(inode); unlock_new_inode(inode); return inode; bad_inode: brelse(iloc.bh); iget_failed(inode); return ERR_PTR(ret); } static int ext4_inode_blocks_set(handle_t *handle, struct ext4_inode *raw_inode, struct ext4_inode_info *ei) { struct inode *inode = &(ei->vfs_inode); u64 i_blocks = inode->i_blocks; struct super_block *sb = inode->i_sb; if (i_blocks <= ~0U) { /* * i_blocks can be represented in a 32 bit variable * as a multiple of 512 bytes */ raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = 0; ei->i_flags &= ~EXT4_HUGE_FILE_FL; return 0; } if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) return -EFBIG; if (i_blocks <= 0xffffffffffffULL) { /* * i_blocks can be represented in a 48 bit variable * as a multiple of 512 bytes */ raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); ei->i_flags &= ~EXT4_HUGE_FILE_FL; } else { ei->i_flags |= EXT4_HUGE_FILE_FL; /* i_blocks is stored in units of the file system block size */ i_blocks =
i_blocks >> (inode->i_blkbits - 9); raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); } return 0; } /* * Post the struct inode info into an on-disk inode location in the * buffer-cache. This gobbles the caller's reference to the * buffer_head in the inode location struct. * * The caller must have write access to iloc->bh. */ static int ext4_do_update_inode(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc) { struct ext4_inode *raw_inode = ext4_raw_inode(iloc); struct ext4_inode_info *ei = EXT4_I(inode); struct buffer_head *bh = iloc->bh; int err = 0, rc, block; /* For fields not tracked in the in-memory inode, * initialise them to zero for new inodes. */ if (ei->i_state & EXT4_STATE_NEW) memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); ext4_get_inode_flags(ei); raw_inode->i_mode = cpu_to_le16(inode->i_mode); if (!(test_opt(inode->i_sb, NO_UID32))) { raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); /* * Fix up interoperability with old kernels. Otherwise, old inodes get * re-used with the upper 16 bits of the uid/gid intact */ if (!ei->i_dtime) { raw_inode->i_uid_high = cpu_to_le16(high_16_bits(inode->i_uid)); raw_inode->i_gid_high = cpu_to_le16(high_16_bits(inode->i_gid)); } else { raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } } else { raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(inode->i_uid)); raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(inode->i_gid)); raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); if (ext4_inode_blocks_set(handle, raw_inode, ei)) goto out_brelse; raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); raw_inode->i_flags = cpu_to_le32(ei->i_flags); if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != cpu_to_le32(EXT4_OS_HURD)) raw_inode->i_file_acl_high = cpu_to_le16(ei->i_file_acl >> 32); raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); ext4_isize_set(raw_inode, ei->i_disksize); if (ei->i_disksize > 0x7fffffffULL) { struct super_block *sb = inode->i_sb; if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV)) { /* If this is the first large file * created, add a flag to the superblock.
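* ('Large' here means i_disksize above 0x7fffffff, per the check above; advertising EXT4_FEATURE_RO_COMPAT_LARGE_FILE means kernels that cannot represent such sizes may mount the filesystem read-only at most.)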
*/ err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); if (err) goto out_brelse; ext4_update_dynamic_rev(sb); EXT4_SET_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_LARGE_FILE); sb->s_dirt = 1; ext4_handle_sync(handle); err = ext4_handle_dirty_metadata(handle, inode, EXT4_SB(sb)->s_sbh); } } raw_inode->i_generation = cpu_to_le32(inode->i_generation); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { if (old_valid_dev(inode->i_rdev)) { raw_inode->i_block[0] = cpu_to_le32(old_encode_dev(inode->i_rdev)); raw_inode->i_block[1] = 0; } else { raw_inode->i_block[0] = 0; raw_inode->i_block[1] = cpu_to_le32(new_encode_dev(inode->i_rdev)); raw_inode->i_block[2] = 0; } } else for (block = 0; block < EXT4_N_BLOCKS; block++) raw_inode->i_block[block] = ei->i_data[block]; raw_inode->i_disk_version = cpu_to_le32(inode->i_version); if (ei->i_extra_isize) { if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) raw_inode->i_version_hi = cpu_to_le32(inode->i_version >> 32); raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); } BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); rc = ext4_handle_dirty_metadata(handle, inode, bh); if (!err) err = rc; ei->i_state &= ~EXT4_STATE_NEW; ext4_update_inode_fsync_trans(handle, inode, 0); out_brelse: brelse(bh); ext4_std_error(inode->i_sb, err); return err; } /* * ext4_write_inode() * * We are called from a few places: * * - Within generic_file_write() for O_SYNC files. * Here, there will be no transaction running. We wait for any running * transaction to commit. * * - Within sys_sync(), kupdate and such. * We wait on commit, if told to. * * - Within prune_icache() (PF_MEMALLOC == true) * Here we simply return. We can't afford to block kswapd on the * journal commit. * * In all cases it is actually safe for us to return without doing anything, * because the inode has been copied into a raw inode buffer in * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for * knfsd. * * Note that we are absolutely dependent upon all inode dirtiers doing the * right thing: they *must* call mark_inode_dirty() after dirtying info in * which we are interested. * * It would be a bug for them to not do this. The code: * * mark_inode_dirty(inode) * stuff(); * inode->i_size = expr; * * is in error because a kswapd-driven write_inode() could occur while * `stuff()' is running, and the new i_size will be lost. Plus the inode * will no longer be on the superblock's dirty inode list. */ int ext4_write_inode(struct inode *inode, int wait) { int err; if (current->flags & PF_MEMALLOC) return 0; if (EXT4_SB(inode->i_sb)->s_journal) { if (ext4_journal_current_handle()) { jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); dump_stack(); return -EIO; } if (!wait) return 0; err = ext4_force_commit(inode->i_sb); } else { struct ext4_iloc iloc; err = ext4_get_inode_loc(inode, &iloc); if (err) return err; if (wait) sync_dirty_buffer(iloc.bh); if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { ext4_error(inode->i_sb, __func__, "IO error syncing inode, " "inode=%lu, block=%llu", inode->i_ino, (unsigned long long)iloc.bh->b_blocknr); err = -EIO; } } return err; } /* * ext4_setattr() * * Called from notify_change. * * We want to trap VFS attempts to truncate the file as soon as * possible.
In particular, we want to make sure that when the VFS * shrinks i_size, we put the inode on the orphan list and modify * i_disksize immediately, so that during the subsequent flushing of * dirty pages and freeing of disk blocks, we can guarantee that any * commit will leave the blocks being flushed in an unused state on * disk. (On recovery, the inode will get truncated and the blocks will * be freed, so we have a strong guarantee that no future commit will * leave these blocks visible to the user.) * * Another thing we have to ensure is that if we are in ordered mode * and inode is still attached to the committing transaction, we must * start writeout of all the dirty pages which are being truncated. * This way we are sure that all the data written in the previous * transaction are already on disk (truncate waits for pages under * writeback). * * Called with inode->i_mutex down. */ int ext4_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; int error, rc = 0; const unsigned int ia_valid = attr->ia_valid; error = inode_change_ok(inode, attr); if (error) return error; if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { handle_t *handle; /* (user+group)*(old+new) structure, inode write (sb, * inode block, ? - but truncate inode update has it) */ handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3); if (IS_ERR(handle)) { error = PTR_ERR(handle); goto err_out; } error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; if (error) { ext4_journal_stop(handle); return error; } /* Update corresponding info in inode so that everything is in * one transaction */ if (attr->ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; error = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); } if (attr->ia_valid & ATTR_SIZE) { if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); if (attr->ia_size > sbi->s_bitmap_maxbytes) { error = -EFBIG; goto err_out; } } } if (S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { handle_t *handle; handle = ext4_journal_start(inode, 3); if (IS_ERR(handle)) { error = PTR_ERR(handle); goto err_out; } error = ext4_orphan_add(handle, inode); EXT4_I(inode)->i_disksize = attr->ia_size; rc = ext4_mark_inode_dirty(handle, inode); if (!error) error = rc; ext4_journal_stop(handle); if (ext4_should_order_data(inode)) { error = ext4_begin_ordered_truncate(inode, attr->ia_size); if (error) { /* Do as much error cleanup as possible */ handle = ext4_journal_start(inode, 3); if (IS_ERR(handle)) { ext4_orphan_del(NULL, inode); goto err_out; } ext4_orphan_del(handle, inode); ext4_journal_stop(handle); goto err_out; } } } rc = inode_setattr(inode, attr); /* If inode_setattr's call to ext4_truncate failed to get a * transaction handle at all, we need to clean up the in-core * orphan list manually.
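* (Passing a NULL handle to ext4_orphan_del() below is what makes the cleanup in-core only: with no handle there is nothing to journal.)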
*/ if (inode->i_nlink) ext4_orphan_del(NULL, inode); if (!rc && (ia_valid & ATTR_MODE)) rc = ext4_acl_chmod(inode); err_out: ext4_std_error(inode->i_sb, error); if (!error) error = rc; return error; } int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode; unsigned long delalloc_blocks; inode = dentry->d_inode; generic_fillattr(inode, stat); /* * We can't update i_blocks if the block allocation is delayed; * otherwise, in the case of a system crash before the real block * allocation is done, we would have i_blocks inconsistent with the * on-disk file blocks. * We always keep i_blocks updated together with the real * allocation. But so as not to confuse users, stat * will return the block count that includes the delayed allocation * blocks for this file. */ spin_lock(&EXT4_I(inode)->i_block_reservation_lock); delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; return 0; } static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, int chunk) { int indirects; /* if nrblocks are contiguous */ if (chunk) { /* * With N contiguous data blocks, we need at most * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks, * 2 dindirect blocks and * 1 tindirect block */ indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb); return indirects + 3; } /* * if nrblocks are not contiguous, in the worst case each block * touches an indirect block, and each indirect block touches a * double indirect block, plus a triple indirect block */ indirects = nrblocks * 2 + 1; return indirects; } static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) { if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) return ext4_indirect_trans_blocks(inode, nrblocks, chunk); return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); } /* * Account for index blocks, block group bitmaps and block group * descriptor blocks if we modify data blocks and index blocks; * in the worst case, the index blocks spread over different block groups * * If data blocks are discontiguous, they may spread over * different block groups too. Even if they are contiguous, with flexbg * they could still cross a block group boundary. * * Also account for superblock, inode, quota and xattr blocks */ int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) { ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); int gdpblocks; int idxblocks; int ret = 0; /* * How many index blocks do we need to touch to modify nrblocks?
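* (Worked example for the indirect-mapped case, with a 4 KiB block size assumed so that EXT4_ADDR_PER_BLOCK is 1024: a contiguous chunk of 1024 data blocks needs at most 1024/1024 + 3 = 4 index blocks in ext4_indirect_trans_blocks() above, while 16 discontiguous blocks need 16 * 2 + 1 = 33.)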
* The "Chunk" flag indicates whether the nrblocks are * physically contiguous on disk * * For Direct IO and fallocate, they call get_block to allocate * one single extent at a time, so they can set the "Chunk" flag */ idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk); ret = idxblocks; /* * Now let's see how many group bitmaps and group descriptors need * to be accounted for */ groups = idxblocks; if (chunk) groups += 1; else groups += nrblocks; gdpblocks = groups; if (groups > ngroups) groups = ngroups; if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; /* bitmaps and block group descriptor blocks */ ret += groups + gdpblocks; /* Blocks for super block, inode, quota and xattr blocks */ ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); return ret; } /* * Calculate the total number of credits to reserve to fit * the modification of a single page into a single transaction, * which may include multiple chunks of block allocations. * * This could be called via ext4_write_begin() * * We need to consider the worst case, when * one new block is needed per extent. */ int ext4_writepage_trans_blocks(struct inode *inode) { int bpp = ext4_journal_blocks_per_page(inode); int ret; ret = ext4_meta_trans_blocks(inode, bpp, 0); /* Account for data blocks for journalled mode */ if (ext4_should_journal_data(inode)) ret += bpp; return ret; } /* * Calculate the journal credits for a chunk of data modification. * * This is called from DIO, fallocate or whoever calls * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks. * * journal buffers for data blocks are not included here, as DIO * and fallocate do not need to journal data buffers. */ int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) { return ext4_meta_trans_blocks(inode, nrblocks, 1); } /* * The caller must have previously called ext4_reserve_inode_write(). * Given this, we know that the caller already has write access to iloc->bh. */ int ext4_mark_iloc_dirty(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc) { int err = 0; if (test_opt(inode->i_sb, I_VERSION)) inode_inc_iversion(inode); /* the do_update_inode consumes one bh->b_count */ get_bh(iloc->bh); /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ err = ext4_do_update_inode(handle, inode, iloc); put_bh(iloc->bh); return err; } /* * On success, we end up with an outstanding reference count against * iloc->bh. This _must_ be cleaned up later. */ int ext4_reserve_inode_write(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc) { int err; err = ext4_get_inode_loc(inode, iloc); if (!err) { BUFFER_TRACE(iloc->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, iloc->bh); if (err) { brelse(iloc->bh); iloc->bh = NULL; } } ext4_std_error(inode->i_sb, err); return err; } /* * Expand an inode by new_extra_isize bytes. * Returns 0 on success or negative error number on failure.
*/ static int ext4_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize, struct ext4_iloc iloc, handle_t *handle) { struct ext4_inode *raw_inode; struct ext4_xattr_ibody_header *header; struct ext4_xattr_entry *entry; if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) return 0; raw_inode = ext4_raw_inode(&iloc); header = IHDR(inode, raw_inode); entry = IFIRST(header); /* No extended attributes present */ if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) || header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, new_extra_isize); EXT4_I(inode)->i_extra_isize = new_extra_isize; return 0; } /* try to expand with EAs present */ return ext4_expand_extra_isize_ea(inode, new_extra_isize, raw_inode, handle); } /* * What we do here is to mark the in-core inode as clean with respect to inode * dirtiness (it may still be data-dirty). * This means that the in-core inode may be reaped by prune_icache * without having to perform any I/O. This is a very good thing, * because *any* task may call prune_icache - even ones which * have a transaction open against a different journal. * * Is this cheating? Not really. Sure, we haven't written the * inode out, but prune_icache isn't a user-visible syncing function. * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) * we start and wait on commits. * * Is this efficient/effective? Well, we're being nice to the system * by cleaning up our inodes proactively so they can be reaped * without I/O. But we are potentially leaving up to five seconds' * worth of inodes floating about which prune_icache wants us to * write out. One way to fix that would be to get prune_icache() * to do a write_super() to free up some memory. It has the desired * effect. */ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) { struct ext4_iloc iloc; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); static unsigned int mnt_count; int err, ret; might_sleep(); err = ext4_reserve_inode_write(handle, inode, &iloc); if (ext4_handle_valid(handle) && EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) { /* * We need extra buffer credits since we may write into EA block * with this same handle. If journal_extend fails, then it will * only result in a minor loss of functionality for that inode. * If this is felt to be critical, then e2fsck should be run to * force a large enough s_min_extra_isize. */ if ((jbd2_journal_extend(handle, EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { ret = ext4_expand_extra_isize(inode, sbi->s_want_extra_isize, iloc, handle); if (ret) { EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; if (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count)) { ext4_warning(inode->i_sb, __func__, "Unable to expand inode %lu. Delete" " some EAs or run e2fsck.", inode->i_ino); mnt_count = le16_to_cpu(sbi->s_es->s_mnt_count); } } } } if (!err) err = ext4_mark_iloc_dirty(handle, inode, &iloc); return err; } /* * ext4_dirty_inode() is called from __mark_inode_dirty() * * We're really interested in the case where a file is being extended. * i_size has been changed by generic_commit_write() and we thus need * to include the updated inode in the current transaction. * * Also, vfs_dq_alloc_block() will always dirty the inode when blocks * are allocated to the file. * * If the inode is marked synchronous, we don't honour that here - doing * so would cause a commit on atime updates, which we don't bother doing. 
* We handle synchronous inodes at the highest possible level. */ void ext4_dirty_inode(struct inode *inode) { handle_t *handle; handle = ext4_journal_start(inode, 2); if (IS_ERR(handle)) goto out; ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); out: return; } #if 0 /* * Bind an inode's backing buffer_head into this transaction, to prevent * it from being flushed to disk early. Unlike * ext4_reserve_inode_write, this leaves behind no bh reference and * returns no iloc structure, so the caller needs to repeat the iloc * lookup to mark the inode dirty later. */ static int ext4_pin_inode(handle_t *handle, struct inode *inode) { struct ext4_iloc iloc; int err = 0; if (handle) { err = ext4_get_inode_loc(inode, &iloc); if (!err) { BUFFER_TRACE(iloc.bh, "get_write_access"); err = jbd2_journal_get_write_access(handle, iloc.bh); if (!err) err = ext4_handle_dirty_metadata(handle, inode, iloc.bh); brelse(iloc.bh); } } ext4_std_error(inode->i_sb, err); return err; } #endif int ext4_change_inode_journal_flag(struct inode *inode, int val) { journal_t *journal; handle_t *handle; int err; /* * We have to be very careful here: changing a data block's * journaling status dynamically is dangerous. If we write a * data block to the journal, change the status and then delete * that block, we risk forgetting to revoke the old log record * from the journal and so a subsequent replay can corrupt data. * So, first we make sure that the journal is empty and that * nobody is changing anything. */ journal = EXT4_JOURNAL(inode); if (!journal) return 0; if (is_journal_aborted(journal)) return -EROFS; jbd2_journal_lock_updates(journal); jbd2_journal_flush(journal); /* * OK, there are no updates running now, and all cached data is * synced to disk. We are now in a completely consistent state * which doesn't have anything in the journal, and we know that * no filesystem updates are running, so it is safe to modify * the inode's in-core data-journaling state flag now. */ if (val) EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL; else EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL; ext4_set_aops(inode); jbd2_journal_unlock_updates(journal); /* Finally we can mark the inode as dirty. */ handle = ext4_journal_start(inode, 1); if (IS_ERR(handle)) return PTR_ERR(handle); err = ext4_mark_inode_dirty(handle, inode); ext4_handle_sync(handle); ext4_journal_stop(handle); ext4_std_error(inode->i_sb, err); return err; } static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) { return !buffer_mapped(bh); } int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page = vmf->page; loff_t size; unsigned long len; int ret = -EINVAL; void *fsdata; struct file *file = vma->vm_file; struct inode *inode = file->f_path.dentry->d_inode; struct address_space *mapping = inode->i_mapping; /* * Get i_alloc_sem to stop truncates messing with the inode. We cannot * get i_mutex because we are already holding mmap_sem. */ down_read(&inode->i_alloc_sem); size = i_size_read(inode); if (page->mapping != mapping || size <= page_offset(page) || !PageUptodate(page)) { /* page got truncated from under us? */ goto out_unlock; } ret = 0; if (PageMappedToDisk(page)) goto out_unlock; if (page->index == size >> PAGE_CACHE_SHIFT) len = size & ~PAGE_CACHE_MASK; else len = PAGE_CACHE_SIZE; lock_page(page); /* * return if we have all the buffers mapped. 
This avoids * the need to call write_begin/write_end, which do a * journal_start/journal_stop that can block and take a * long time */ if (page_has_buffers(page)) { if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, ext4_bh_unmapped)) { unlock_page(page); goto out_unlock; } } unlock_page(page); /* * OK, we need to fill the hole... Do write_begin/write_end * to do the block allocation/reservation. We are not holding * inode->i_mutex here. That allows * parallel write_begin and * write_end calls. lock_page prevents this from happening * on the same page, though */ ret = mapping->a_ops->write_begin(file, mapping, page_offset(page), len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata); if (ret < 0) goto out_unlock; ret = mapping->a_ops->write_end(file, mapping, page_offset(page), len, len, page, fsdata); if (ret < 0) goto out_unlock; ret = 0; out_unlock: if (ret) ret = VM_FAULT_SIGBUS; up_read(&inode->i_alloc_sem); return ret; }
gpl-2.0